blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7fdd0c4d775f4ed1c0a20245a65dba10c0f496c | 04ff5f92e73d97d8698e23b501f60ddf76566543 | /onnxmltools/convert/lightgbm/_parse.py | 13ab874d66db7e75580b8316e83952983d8f4fad | [
"MIT"
] | permissive | wenbingl/onnxmltools | 06bba3b93b83d2aadaa629ee7e1673a4f01834f5 | 1eb3e4f8d025e2039fca58b848d625ed3974f7a3 | refs/heads/master | 2021-06-02T06:22:54.283120 | 2018-11-14T05:34:42 | 2018-11-14T19:01:56 | 135,391,325 | 0 | 0 | MIT | 2018-05-30T06:13:32 | 2018-05-30T05:15:43 | Python | UTF-8 | Python | false | false | 3,903 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ..common._container import LightGbmModelContainer
from ..common._topology import *
from lightgbm import LGBMClassifier, LGBMRegressor
# Model classes that produce two outputs (label + per-class probabilities).
lightgbm_classifier_list = [LGBMClassifier]
# Associate LightGBM model types with our operator names. If two model types share a single name, it means they
# are equivalent in terms of conversion.
lightgbm_operator_name_map = {LGBMClassifier: 'LgbmClassifier',
                              LGBMRegressor: 'LgbmRegressor'}
def _get_lightgbm_operator_name(model_type):
    '''
    Look up the framework operator name registered for a LightGBM model class.
    :param model_type: the type object of a LightGBM model (e.g. LGBMClassifier)
    :return: the operator-name string used by the conversion framework
    :raises ValueError: if the model type has no registered operator name
    '''
    try:
        return lightgbm_operator_name_map[model_type]
    except KeyError:
        raise ValueError("No proper operator name found for '%s'" % model_type)
def _parse_lightgbm_simple_model(scope, model, inputs):
    '''
    Declare one operator (plus its output variables) for a single,
    non-pipeline LightGBM model.
    :param scope: Scope object
    :param model: A lightgbm object
    :param inputs: A list of variables
    :return: A list of output variables which will be passed to next stage
    '''
    operator = scope.declare_local_operator(_get_lightgbm_operator_name(type(model)), model)
    operator.inputs = inputs
    if type(model) in lightgbm_classifier_list:
        # Classifiers expose two outputs: the predicted label and the class
        # probabilities. The types declared here are placeholders; shape
        # inference fixes them later.
        outputs = [scope.declare_local_variable('label', FloatTensorType()),
                   scope.declare_local_variable('probabilities', FloatTensorType())]
    else:
        # Regressors produce a single float tensor.
        outputs = [scope.declare_local_variable('variable', FloatTensorType())]
    operator.outputs.extend(outputs)
    return operator.outputs
def _parse_lightgbm(scope, model, inputs):
    '''
    Delegate function: it does nothing but invoke the correct parsing
    routine for the given model's type.
    :param scope: Scope object
    :param model: A LightGBM model object
    :param inputs: A list of variables
    :return: The output variables produced by the input model
    '''
    return _parse_lightgbm_simple_model(scope, model, inputs)
def parse_lightgbm(model, initial_types=None, targeted_onnx=onnx.__version__,
                   custom_conversion_functions=None, custom_shape_calculators=None):
    '''
    Build the conversion Topology for a LightGBM model.

    :param model: a LGBMClassifier or LGBMRegressor instance
    :param initial_types: list of (variable name, type) pairs describing the
        model inputs; required — the None default is rejected explicitly
    :param targeted_onnx: the ONNX version the conversion targets
    :param custom_conversion_functions: user-supplied converter overrides
    :param custom_shape_calculators: user-supplied shape-calculator overrides
    :return: a Topology ready for conversion
    '''
    # Bug fix: with initial_types=None the original crashed later with an
    # opaque "TypeError: 'NoneType' object is not iterable"; fail fast with
    # a clear message instead.
    if initial_types is None:
        raise ValueError("Initial types are required to convert a LightGBM model "
                         "('initial_types' must not be None)")
    raw_model_container = LightGbmModelContainer(model)
    topology = Topology(raw_model_container, initial_types=initial_types, targeted_onnx=targeted_onnx,
                        custom_conversion_functions=custom_conversion_functions,
                        custom_shape_calculators=custom_shape_calculators)
    scope = topology.declare_scope('__root__')
    # Declare one root-scope variable per model input and register it.
    inputs = [scope.declare_local_variable(var_name, initial_type)
              for var_name, initial_type in initial_types]
    for variable in inputs:
        raw_model_container.add_input(variable)
    for variable in _parse_lightgbm(scope, model, inputs):
        raw_model_container.add_output(variable)
    return topology
| [
"wenli@microsoft.com"
] | wenli@microsoft.com |
20cf6309f2e6f3ea3b36a710b1a42366894ae688 | da98a74f6ab511dce5b0d65c99538c18aa57f3cc | /criteo_test/affm_tuning/affm_lgbEncoder_tuning.py | 648834ac69a97d4080eddde30312affdb259cdb4 | [] | no_license | sunjiaxin111/Master-s-graduation-design | 90bd4f2648adc3d23a73069d30c7c2d4129bccce | d514e7a6aa1048dc92509fb08a752193ecdbfb81 | refs/heads/master | 2020-05-09T12:26:00.172805 | 2019-04-13T02:45:09 | 2019-04-13T02:45:09 | 181,111,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,697 | py | # -*- coding: UTF-8 -*-
import argparse
import ast
import logging
import time
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score, log_loss
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
class FeatureDictionary(object):
    """Builds a global feature -> index mapping over the train + test data.

    Numeric columns map to a single index; every distinct value of a
    categorical column gets its own index. ``feat_dim`` is the total number
    of indices allocated.
    """

    def __init__(self, trainfile=None, testfile=None,
                 dfTrain=None, dfTest=None, numeric_cols=None, ignore_cols=None):
        """
        Exactly one of (trainfile, dfTrain) and one of (testfile, dfTest)
        must be supplied; the asserts enforce this.

        Bug fix: ``numeric_cols``/``ignore_cols`` previously used mutable
        list default arguments (shared across instances); replaced with
        None sentinels that keep the same effective defaults.
        """
        assert not ((trainfile is None) and (dfTrain is None)), "trainfile or dfTrain at least one is set"
        assert not ((trainfile is not None) and (dfTrain is not None)), "only one can be set"
        assert not ((testfile is None) and (dfTest is None)), "testfile or dfTest at least one is set"
        assert not ((testfile is not None) and (dfTest is not None)), "only one can be set"
        self.trainfile = trainfile
        self.testfile = testfile
        self.dfTrain = dfTrain
        self.dfTest = dfTest
        self.numeric_cols = [] if numeric_cols is None else numeric_cols
        self.ignore_cols = [] if ignore_cols is None else ignore_cols
        self.gen_feat_dict()

    def gen_feat_dict(self):
        """Populate ``self.feat_dict`` and ``self.feat_dim`` from the data."""
        if self.dfTrain is None:
            dfTrain = pd.read_csv(self.trainfile)
        else:
            dfTrain = self.dfTrain
        if self.dfTest is None:
            dfTest = pd.read_csv(self.testfile)
        else:
            dfTest = self.dfTest
        # Fit the dictionary on train + test so unseen test values get indices.
        df = pd.concat([dfTrain, dfTest])
        self.feat_dict = {}
        tc = 0  # running count of allocated feature indices
        for col in df.columns:
            if col in self.ignore_cols:
                continue
            if col in self.numeric_cols:
                # numeric feature: one index for the whole column
                self.feat_dict[col] = tc
                tc += 1
            else:
                # categorical feature: one index per distinct value
                us = df[col].unique()
                self.feat_dict[col] = dict(zip(us, range(tc, len(us) + tc)))
                tc += len(us)
        self.feat_dim = tc
class DataParser(object):
    """Turns a raw DataFrame (or CSV file) into (indices, values, labels)
    triples using a fitted FeatureDictionary."""

    def __init__(self, feat_dict):
        # feat_dict: FeatureDictionary exposing feat_dict / numeric_cols / ignore_cols
        self.feat_dict = feat_dict

    def parse(self, infile=None, df=None):
        """Parse exactly one of a CSV path or an in-memory DataFrame.

        :return: (Xi, Xv, y) — per-sample feature indices, the matching
                 feature values (1/0 for categoricals, raw numbers for
                 numeric columns) and the labels.
        """
        assert not ((infile is None) and (df is None)), "infile or df at least one is set"
        assert not ((infile is not None) and (df is not None)), "only one can be set"
        dfi = pd.read_csv(infile) if df is None else df.copy()
        y = dfi["Label"].values.tolist()
        dfi.drop(["Label"], axis=1, inplace=True)
        # dfi will carry feature indices, dfv the corresponding feature values.
        dfv = dfi.copy()
        for col in dfi.columns:
            if col in self.feat_dict.ignore_cols:
                dfi.drop(col, axis=1, inplace=True)
                dfv.drop(col, axis=1, inplace=True)
            elif col in self.feat_dict.numeric_cols:
                # Numeric column: a single shared index, value stays as-is.
                dfi[col] = self.feat_dict.feat_dict[col]
            else:
                # Categorical column: per-value index, value becomes 1.0.
                dfi[col] = dfi[col].map(self.feat_dict.feat_dict[col])
                dfv[col] = 1.
        return dfi.values.tolist(), dfv.values.tolist(), y
class DeepAFFM(BaseEstimator, TransformerMixin):
    """Attentional field-aware FM, optionally combined with a deep MLP
    component, implemented on the TF1 graph/session API."""

    def __init__(self, feature_size, field_size,
                 embedding_size=8, attention_size=10, dropout_fm=None,
                 deep_layers=None, dropout_deep=None,
                 deep_layers_activation=tf.nn.relu,
                 epoch=10, batch_size=256,
                 learning_rate=0.001, optimizer_type="adam",
                 batch_norm=0, batch_norm_decay=0.995,
                 verbose=False, random_seed=2016,
                 use_ffm=True, use_deep=True, use_attention=True,
                 loss_type="logloss", eval_metric=roc_auc_score,
                 l2_reg=0.0, greater_is_better=True):
        """Store hyper-parameters and build the TF graph.

        Bug fix: the list hyper-parameters (dropout_fm=[1.0, 1.0],
        deep_layers=[32, 32], dropout_deep=[0.5, 0.5, 0.5]) were mutable
        default arguments shared across instances; they are now None
        sentinels resolved to the same effective defaults below.
        """
        assert (use_ffm or use_deep)
        assert loss_type in ["logloss", "mse"], \
            "loss_type can be either 'logloss' for classification task or 'mse' for regression task"
        self.feature_size = feature_size  # denote as M, size of the feature dictionary
        self.field_size = field_size  # denote as F, size of the feature fields
        self.embedding_size = embedding_size  # denote as K, size of the feature embedding
        self.attention_size = attention_size
        self.dropout_fm = [1.0, 1.0] if dropout_fm is None else dropout_fm
        self.deep_layers = [32, 32] if deep_layers is None else deep_layers
        self.dropout_deep = [0.5, 0.5, 0.5] if dropout_deep is None else dropout_deep
        self.deep_layers_activation = deep_layers_activation
        self.use_ffm = use_ffm
        self.use_deep = use_deep
        self.use_attention = use_attention
        self.l2_reg = l2_reg
        self.epoch = epoch
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.optimizer_type = optimizer_type
        self.batch_norm = batch_norm
        self.batch_norm_decay = batch_norm_decay
        self.verbose = verbose
        self.random_seed = random_seed
        self.loss_type = loss_type
        self.eval_metric = eval_metric
        self.greater_is_better = greater_is_better
        # Per-epoch metric histories; the test-set history is recorded too so
        # the result at the best validation epoch can be read off directly.
        self.train_auc, self.valid_auc, self.test_auc = [], [], []
        self.train_logloss, self.valid_logloss, self.test_logloss = [], [], []
        self._init_graph()
    def _init_graph(self):
        """Build the whole TF1 computation graph: placeholders, weights,
        first-order / attentional pairwise / deep components, loss,
        optimizer, and a fresh session with initialized variables."""
        self.graph = tf.Graph()  # create a dedicated graph for this model
        with self.graph.as_default():  # make it the default graph while building
            tf.set_random_seed(self.random_seed)  # fix TF randomness
            np.random.seed(self.random_seed)
            # Placeholders; batch and field dims are left dynamic.
            self.feat_index = tf.placeholder(tf.int32, shape=[None, None], name="feat_index")  # N * F
            self.feat_value = tf.placeholder(tf.float32, shape=[None, None], name="feat_value")  # N * F
            self.label = tf.placeholder(tf.float32, shape=[None, 1], name="label")  # N * 1
            self.dropout_keep_fm = tf.placeholder(tf.float32, shape=[None], name="dropout_keep_fm")  # FM keep probs
            self.dropout_keep_deep = tf.placeholder(tf.float32, shape=[None],
                                                    name="dropout_keep_deep")  # deep keep probs
            self.train_phase = tf.placeholder(tf.bool, name="train_phase")  # training-vs-inference flag
            self.weights = self._initialize_weights()  # create all trainable variables
            # model
            # Field-aware embedding lookup: each feature carries one embedding
            # per field -> N * F * F * K.
            self.embeddings = tf.nn.embedding_lookup(self.weights["feature_embeddings"],
                                                     self.feat_index)
            feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])
            # Scale each feature's embeddings by its value.
            self.embeddings = tf.multiply(self.embeddings,
                                          tf.reshape(self.feat_value, shape=[-1, self.field_size, 1, 1]))
            # ---------- first order term ----------
            self.y_first_order = tf.nn.embedding_lookup(self.weights["feature_bias"],
                                                        self.feat_index)  # None * field_size * 1
            # reduce_sum collapses the trailing singleton dimension.
            self.y_first_order = tf.reduce_sum(tf.multiply(self.y_first_order, feat_value), 2)  # None * F
            self.y_first_order = tf.nn.dropout(self.y_first_order, self.dropout_keep_fm[0])  # None * F
            # ---------- element_wise ---------------
            # FFM-style pairwise products: field i's embedding for field j
            # times field j's embedding for field i.
            element_wise_product_list = []
            for i in range(self.field_size):
                for j in range(i + 1, self.field_size):
                    element_wise_product_list.append(
                        tf.multiply(self.embeddings[:, i, j, :], self.embeddings[:, j, i, :]))  # None * K
            self.element_wise_product = tf.stack(element_wise_product_list)  # (F * F - 1 / 2) * None * K
            self.element_wise_product = tf.transpose(self.element_wise_product, perm=[1, 0, 2],
                                                     name='element_wise_product')  # None * (F * F - 1 / 2) * K
            self.element_wise_product = tf.nn.dropout(self.element_wise_product, self.dropout_keep_fm[1])  # None * K
            if self.use_attention:
                # attention part: softmax over the pairwise interactions,
                # computed as wx+b -> relu(wx+b) -> h*relu(wx+b) -> exp/sum.
                num_interactions = int(self.field_size * (self.field_size - 1) / 2)
                self.attention_wx_plus_b = tf.reshape(
                    tf.add(tf.matmul(tf.reshape(self.element_wise_product, shape=(-1, self.embedding_size)),
                                     self.weights['attention_w']),
                           self.weights['attention_b']),
                    shape=[-1, num_interactions, self.attention_size])  # N * ( F * F - 1 / 2) * A
                self.attention_exp = tf.exp(tf.reduce_sum(tf.multiply(tf.nn.relu(self.attention_wx_plus_b),
                                                                      self.weights['attention_h']),
                                                          axis=2, keep_dims=True))  # N * ( F * F - 1 / 2) * 1
                self.attention_exp_sum = tf.reduce_sum(self.attention_exp, axis=1, keep_dims=True)  # N * 1 * 1
                self.attention_out = tf.div(self.attention_exp, self.attention_exp_sum,
                                            name='attention_out')  # N * ( F * F - 1 / 2) * 1
                self.attention_x_product = tf.reduce_sum(tf.multiply(self.attention_out, self.element_wise_product),
                                                         axis=1,
                                                         name='afm')  # N * K
                self.second_order_part_sum = tf.matmul(self.attention_x_product, self.weights['attention_p'])  # N * 1
            else:
                # Without attention: plain sum over all interactions.
                self.second_order_part_sum = tf.reshape(tf.reduce_sum(self.element_wise_product, axis=[1, 2]),
                                                        shape=[-1, 1])
            # ---------- Deep component ----------
            self.y_deep = tf.reshape(self.embeddings, shape=[-1,
                                                             self.field_size * self.field_size * self.embedding_size])  # None * (F*F*K)
            # NOTE(review): no dropout is applied on the deep input layer;
            # dropout_deep[0] is effectively unused — confirm this is intended.
            for i in range(0, len(self.deep_layers)):
                self.y_deep = tf.add(tf.matmul(self.y_deep, self.weights["layer_%d" % i]),
                                     self.weights["bias_%d" % i])  # None * layer[i] * 1
                if self.batch_norm:
                    self.y_deep = self.batch_norm_layer(self.y_deep, train_phase=self.train_phase,
                                                        scope_bn="bn_%d" % i)  # None * layer[i] * 1
                self.y_deep = self.deep_layers_activation(self.y_deep)
                self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[1 + i])  # dropout at each Deep layer
            # ---------- DeepFFM ----------
            # Concatenate whichever components are enabled, then project to a scalar.
            if self.use_ffm and self.use_deep:
                concat_input = tf.concat([self.y_first_order, self.second_order_part_sum, self.y_deep], axis=1)
            elif self.use_ffm:
                concat_input = tf.concat([self.y_first_order, self.second_order_part_sum], axis=1)
            elif self.use_deep:
                concat_input = self.y_deep
            self.out = tf.add(tf.matmul(concat_input, self.weights["concat_projection"]), self.weights["concat_bias"])
            # loss
            if self.loss_type == "logloss":
                self.out = tf.nn.sigmoid(self.out)
                self.loss = tf.losses.log_loss(self.label, self.out)
            elif self.loss_type == "mse":
                self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
            # l2 regularization on weights
            if self.l2_reg > 0:
                self.loss += tf.contrib.layers.l2_regularizer(
                    self.l2_reg)(self.weights["concat_projection"])
                if self.use_deep:
                    for i in range(len(self.deep_layers)):
                        self.loss += tf.contrib.layers.l2_regularizer(
                            self.l2_reg)(self.weights["layer_%d" % i])
            # optimizer
            if self.optimizer_type == "adam":
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999,
                                                        epsilon=1e-8).minimize(self.loss)
            elif self.optimizer_type == "adagrad":
                self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,
                                                           initial_accumulator_value=1e-8).minimize(self.loss)
            elif self.optimizer_type == "gd":
                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
            elif self.optimizer_type == "momentum":
                self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).minimize(
                    self.loss)
            # init
            self.saver = tf.train.Saver()
            init = tf.global_variables_initializer()
            self.sess = self._init_session()
            self.sess.run(init)
            # number of params: product of each variable's shape, summed.
            total_parameters = 0
            for variable in self.weights.values():
                shape = variable.get_shape()
                variable_parameters = 1
                for dim in shape:
                    variable_parameters *= dim.value
                total_parameters += variable_parameters
            if self.verbose > 0:
                logging.warning("#params: %d" % total_parameters)
def _init_session(self):
config = tf.ConfigProto(device_count={"gpu": 0})
config.gpu_options.allow_growth = True
return tf.Session(config=config)
    def _initialize_weights(self):
        """Create and return every trainable variable keyed by name."""
        weights = dict()
        # Field-aware embedding table: one K-dim vector per (feature, field) pair.
        weights["feature_embeddings"] = tf.Variable(
            tf.random_normal([self.feature_size, self.field_size, self.embedding_size], 0.0, 0.01),
            name="feature_embeddings")  # feature_size * field_size * embedding_size
        # Per-feature first-order weight (the w in wx+b).
        weights["feature_bias"] = tf.Variable(
            tf.random_uniform([self.feature_size, 1], 0.0, 1.0), name="feature_bias")  # feature_size * 1
        # attention part
        if self.use_attention:
            # Glorot-style scale for the attention projection.
            glorot = np.sqrt(2.0 / (self.attention_size + self.embedding_size))
            weights['attention_w'] = tf.Variable(
                np.random.normal(loc=0, scale=glorot, size=(self.embedding_size, self.attention_size)),
                dtype=tf.float32, name='attention_w')
            weights['attention_b'] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(self.attention_size,)),
                                                 dtype=tf.float32, name='attention_b')
            weights['attention_h'] = tf.Variable(np.random.normal(loc=0, scale=1, size=(self.attention_size,)),
                                                 dtype=tf.float32, name='attention_h')
            weights['attention_p'] = tf.Variable(np.ones((self.embedding_size, 1)), dtype=np.float32,
                                                 name='attention_p')
        # deep layers
        num_layer = len(self.deep_layers)  # number of hidden layers
        input_size = self.field_size * self.field_size * self.embedding_size  # DNN input width
        glorot = np.sqrt(2.0 / (input_size + self.deep_layers[0]))  # Glorot std for layer 0
        weights["layer_0"] = tf.Variable(
            np.random.normal(loc=0, scale=glorot, size=(input_size, self.deep_layers[0])), dtype=np.float32)
        weights["bias_0"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
                                        dtype=np.float32)  # 1 * layers[0]
        for i in range(1, num_layer):
            glorot = np.sqrt(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))
            weights["layer_%d" % i] = tf.Variable(
                np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i - 1], self.deep_layers[i])),
                dtype=np.float32)  # layers[i-1] * layers[i]
            weights["bias_%d" % i] = tf.Variable(
                np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
                dtype=np.float32)  # 1 * layer[i]
        # final concat projection layer: width depends on enabled components.
        if self.use_ffm and self.use_deep:
            input_size = self.field_size + 1 + self.deep_layers[-1]
        elif self.use_ffm:
            input_size = self.field_size + 1
        elif self.use_deep:
            input_size = self.deep_layers[-1]
        glorot = np.sqrt(2.0 / (input_size + 1))
        weights["concat_projection"] = tf.Variable(
            np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
            dtype=np.float32)  # layers[i-1]*layers[i]
        weights["concat_bias"] = tf.Variable(tf.constant(0.01), dtype=np.float32)  # the b in wx+b
        return weights
    def batch_norm_layer(self, x, train_phase, scope_bn):
        """Batch-normalize x, switching between train and inference modes.

        Two BN ops share the same variables (reuse=True on the second);
        tf.cond selects the training branch (updates moving statistics) or
        the inference branch at run time via the train_phase placeholder.
        """
        bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                              is_training=True, reuse=None, trainable=True, scope=scope_bn)
        bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                                  is_training=False, reuse=True, trainable=True, scope=scope_bn)
        z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
        return z
def get_batch(self, Xi, Xv, y, batch_size, index):
start = index * batch_size
end = (index + 1) * batch_size
end = end if end < len(y) else len(y)
return Xi[start:end], Xv[start:end], [[y_] for y_ in y[start:end]]
    # shuffle three lists simultaneously, in place
    def shuffle_in_unison_scary(self, a, b, c):
        # NOTE(review): the RNG is re-seeded with 2019 on every call, so each
        # epoch applies the *same* permutation; confirm that is intended, as
        # it largely defeats per-epoch reshuffling for SGD (it does, however,
        # make runs reproducible).
        np.random.seed(2019)
        rng_state = np.random.get_state()
        np.random.shuffle(a)
        # Restoring the saved RNG state makes b and c receive the identical
        # permutation, keeping the three lists aligned row-for-row.
        np.random.set_state(rng_state)
        np.random.shuffle(b)
        np.random.set_state(rng_state)
        np.random.shuffle(c)
    def fit_on_batch(self, Xi, Xv, y):
        """Run one optimizer step on a single mini-batch.

        :param Xi: feature indices for the batch
        :param Xv: feature values for the batch
        :param y: labels for the batch (each wrapped as a 1-element list)
        :return: the batch loss before the parameter update is visible
        """
        feed_dict = {self.feat_index: Xi,
                     self.feat_value: Xv,
                     self.label: y,
                     self.dropout_keep_fm: self.dropout_fm,
                     self.dropout_keep_deep: self.dropout_deep,
                     self.train_phase: True}
        loss, opt = self.sess.run((self.loss, self.optimizer), feed_dict=feed_dict)
        return loss
    def fit(self, Xi_train, Xv_train, y_train,
            Xi_valid=None, Xv_valid=None, y_valid=None, Xi_test=None, Xv_test=None, y_test=None,
            early_stopping=False):
        """
        Train the model, recording AUC/logloss per epoch on train, valid and
        test sets (test metrics are only computed when a valid set is given).

        :param Xi_train: [[ind1_1, ind1_2, ...], [ind2_1, ind2_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]
                        indi_j is the feature index of feature field j of sample i in the training set
        :param Xv_train: [[val1_1, val1_2, ...], [val2_1, val2_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]
                        vali_j is the feature value of feature field j of sample i in the training set
                        vali_j can be either binary (1/0, for binary/categorical features) or float (e.g., 10.24, for numerical features)
        :param y_train: label of each sample in the training set
        :param Xi_valid: list of list of feature indices of each sample in the validation set
        :param Xv_valid: list of list of feature values of each sample in the validation set
        :param y_valid: label of each sample in the validation set
        :param Xi_test, Xv_test, y_test: same structure for the test set;
                        required whenever a validation set is supplied
        :param early_stopping: perform early stopping (on validation AUC) or not
        :return: None
        """
        has_valid = Xv_valid is not None
        for epoch in range(self.epoch):
            t1 = time.time()
            # Shuffle the training lists in place (same permutation each epoch).
            self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
            # NOTE(review): int(len/batch) drops the trailing partial batch.
            total_batch = int(len(y_train) / self.batch_size)
            for i in range(total_batch):
                Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i)
                self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
            # evaluate training and validation datasets
            train_auc = self.evaluate(Xi_train, Xv_train, y_train, roc_auc_score)
            self.train_auc.append(train_auc)
            train_logloss = self.evaluate(Xi_train, Xv_train, y_train, log_loss)
            self.train_logloss.append(train_logloss)
            if has_valid:
                valid_auc = self.evaluate(Xi_valid, Xv_valid, y_valid, roc_auc_score)
                self.valid_auc.append(valid_auc)
                valid_logloss = self.evaluate(Xi_valid, Xv_valid, y_valid, log_loss)
                self.valid_logloss.append(valid_logloss)
                test_auc = self.evaluate(Xi_test, Xv_test, y_test, roc_auc_score)
                self.test_auc.append(test_auc)
                test_logloss = self.evaluate(Xi_test, Xv_test, y_test, log_loss)
                self.test_logloss.append(test_logloss)
            if self.verbose > 0 and epoch % self.verbose == 0:
                if has_valid:
                    logging.warning("[%d] train-auc=%.4f, valid-auc=%.4f [%.1f s]"
                                    % (epoch + 1, train_auc, valid_auc, time.time() - t1))
                else:
                    logging.warning("[%d] train-auc=%.4f [%.1f s]"
                                    % (epoch + 1, train_auc, time.time() - t1))
            # Stop once validation AUC has worsened several epochs in a row.
            if has_valid and early_stopping and self.training_termination(self.valid_auc):
                break
def training_termination(self, valid_result):
# 这里应该是>=,因为有可能从第一个epoch就过拟合了
if len(valid_result) >= 5:
if self.greater_is_better:
if valid_result[-1] < valid_result[-2] and \
valid_result[-2] < valid_result[-3] and \
valid_result[-3] < valid_result[-4] and \
valid_result[-4] < valid_result[-5]:
return True
else:
if valid_result[-1] > valid_result[-2] and \
valid_result[-2] > valid_result[-3] and \
valid_result[-3] > valid_result[-4] and \
valid_result[-4] > valid_result[-5]:
return True
return False
    def predict(self, Xi, Xv):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :return: predicted probability of each sample (1-D numpy array)
        """
        # dummy y: labels are required by the feed_dict but unused for inference
        dummy_y = [1] * len(Xi)
        batch_index = 0
        Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
        y_pred = None
        # Iterate batch by batch until get_batch returns an empty slice.
        while len(Xi_batch) > 0:
            num_batch = len(y_batch)
            # All dropout keep-probabilities are 1.0 at inference time.
            feed_dict = {self.feat_index: Xi_batch,
                         self.feat_value: Xv_batch,
                         self.label: y_batch,
                         self.dropout_keep_fm: [1.0] * len(self.dropout_fm),
                         self.dropout_keep_deep: [1.0] * len(self.dropout_deep),
                         self.train_phase: False}
            batch_out = self.sess.run(self.out, feed_dict=feed_dict)
            if batch_index == 0:
                y_pred = np.reshape(batch_out, (num_batch,))
            else:
                y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))
            batch_index += 1
            Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
        return y_pred
def evaluate(self, Xi, Xv, y, eval_metric):
"""
:param Xi: list of list of feature indices of each sample in the dataset
:param Xv: list of list of feature values of each sample in the dataset
:param y: label of each sample in the dataset
:return: metric of the evaluation
"""
y_pred = self.predict(Xi, Xv)
return eval_metric(y, y_pred)
def _load_data(config):
dfTrain = pd.read_csv(config['TRAIN_FILE'], sep='\t')
dfTest = pd.read_csv(config['TEST_FILE'], sep='\t')
cols = [c for c in dfTrain.columns if c not in config['IGNORE_COLS']]
X_train = dfTrain[cols].values
y_train = dfTrain["Label"].values
X_test = dfTest[cols].values
y_test = dfTest["Label"].values
# 生成类别特征列表
cate_list = []
for feat in cols:
if feat.startswith('leaf'):
cate_list.append(feat)
cat_features_indices = [i for i, c in enumerate(cols) if c in cate_list]
return dfTrain, dfTest, X_train, y_train, X_test, y_test, cat_features_indices
def _run_base_model_dfm(dfTrain, dfTest, dfm_params, config):
    """Prepare the feature dictionary, train a DeepAFFM and report metrics.

    Splits the last 25% of the training rows off as a validation set, trains
    with early stopping, logs metrics at the best-validation epoch (index -5,
    since early stopping triggers after 4 consecutively worse epochs) and
    returns (valid_auc, valid_logloss) at that epoch. Note: ``dfm_params`` is
    mutated in place (feature_size/field_size are written into it).
    """
    # Build the numeric-feature list and the ignore list from column prefixes.
    numeric_list = []
    ignore_list = ['Label']
    features = list(dfTrain.columns)
    features.remove("Label")
    for feat in features:
        if feat.startswith('cross_entropy'):
            if config['use_cross_entropy']:
                numeric_list.append(feat)
            else:
                ignore_list.append(feat)
        elif feat.startswith('pos_ratio'):
            if config['use_pos_ratio']:
                numeric_list.append(feat)
            else:
                ignore_list.append(feat)
    # Everything that is neither numeric nor ignored is treated as
    # categorical by FeatureDictionary.
    fd = FeatureDictionary(dfTrain=dfTrain, dfTest=dfTest,
                           numeric_cols=numeric_list,
                           ignore_cols=ignore_list)
    data_parser = DataParser(feat_dict=fd)
    Xi_train, Xv_train, y_train = data_parser.parse(df=dfTrain)
    Xi_test, Xv_test, y_test = data_parser.parse(df=dfTest)
    dfm_params["feature_size"] = fd.feat_dim
    dfm_params["field_size"] = len(Xi_train[0])
    # Hold out the last 25% of the training rows as the validation set.
    Xi_train_ = Xi_train[:int(len(Xi_train) * 0.75)]
    Xv_train_ = Xv_train[:int(len(Xv_train) * 0.75)]
    y_train_ = y_train[:int(len(y_train) * 0.75)]
    Xi_valid_ = Xi_train[int(len(Xi_train) * 0.75):]
    Xv_valid_ = Xv_train[int(len(Xv_train) * 0.75):]
    y_valid_ = y_train[int(len(y_train) * 0.75):]
    dfm = DeepAFFM(**dfm_params)
    dfm.fit(Xi_train_, Xv_train_, y_train_, Xi_valid_, Xv_valid_, y_valid_, Xi_test, Xv_test, y_test,
            early_stopping=True)
    # Metrics must be read from the recorded histories here: fit() shuffled
    # the y_train_ lists in place, so recomputing outside would be wrong.
    # Training-set metrics at the best epoch:
    logging.warning("训练集logloss: %.8f" % dfm.train_logloss[-5])
    logging.warning("训练集auc: %.8f" % dfm.train_auc[-5])
    # Validation-set metrics:
    logging.warning("验证集logloss: %.8f" % dfm.valid_logloss[-5])
    logging.warning("验证集auc: %.8f" % dfm.valid_auc[-5])
    # Test-set metrics:
    logging.warning("测试集logloss: %.8f" % dfm.test_logloss[-5])
    logging.warning("测试集auc: %.8f" % dfm.test_auc[-5])
    return dfm.valid_auc[-5], dfm.valid_logloss[-5]
def parse_args():
    """Build and parse the command-line options for the tuning run."""
    arg_parser = argparse.ArgumentParser(description="Run deepAFFM.")
    # Boolean flags are parsed with ast.literal_eval so "--flag False" works.
    arg_parser.add_argument('--use_cross_entropy', type=ast.literal_eval, default=True,
                            help='use_cross_entropy type: True, False.')
    arg_parser.add_argument('--use_pos_ratio', type=ast.literal_eval, default=True,
                            help='use_cross_entropy type: True, False.')
    # The *_list options are string representations of Python lists,
    # evaluated later by the tuning loops.
    arg_parser.add_argument('--attention_size_list', nargs='?', default='[2,4]',
                            help='attention_size_list')
    arg_parser.add_argument('--embedding_size_list', nargs='?', default='[2,4]',
                            help='embedding_size_list')
    arg_parser.add_argument('--dropout_fm_0_list', nargs='?', default='[0.5,0.7,0.9]',
                            help='dropout_fm_0_list')
    arg_parser.add_argument('--dropout_fm_1_list', nargs='?', default='[0.5,0.7,0.9]',
                            help='dropout_fm_1_list')
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    if args.use_cross_entropy:
        logging.warning("使用交叉熵特征")
    if args.use_pos_ratio:
        logging.warning("使用正例率特征")
    start = time.time()
    # Load the lightgbm-encoded Criteo sample and split 80/20 into train/test files.
    df = pd.read_csv('../criteo_lgbEncoder.csv', sep='\t', nrows=100000)
    new_path = "data/"
    df[:int(df.shape[0] * 0.8)].to_csv(new_path + "train.csv", index=None, sep='\t')
    df[int(df.shape[0] * 0.8):].to_csv(new_path + "test.csv", index=None, sep='\t')
    # original config info
    # set the path-to-files
    config = {}
    config['TRAIN_FILE'] = "./data/train.csv"
    config['TEST_FILE'] = "./data/test.csv"
    config['RANDOM_SEED'] = 2019
    config['IGNORE_COLS'] = ["Label"]
    config['use_cross_entropy'] = args.use_cross_entropy
    config['use_pos_ratio'] = args.use_pos_ratio
    # load data
    dfTrain, dfTest, X_train, y_train, X_test, y_test, cat_features_indices = _load_data(config)
    # ------------------ FFM Model ------------------
    # Base hyper-parameters; the tuning loops below overwrite
    # attention_size / embedding_size / dropout_fm per trial.
    params = {
        "use_ffm": True,
        "use_deep": False,
        "use_attention": True,
        "attention_size": 10,
        "embedding_size": 8,
        "dropout_fm": [1.0, 1.0],
        "deep_layers": [16, 16],
        "dropout_deep": [0.5, 0.5, 0.5],
        "deep_layers_activation": tf.nn.relu,
        "epoch": 9999999,
        "batch_size": 256,
        "learning_rate": 0.001,
        "optimizer_type": "adam",
        "batch_norm": 1,
        "batch_norm_decay": 0.995,
        "l2_reg": 0.01,
        "verbose": True,
        "random_seed": config['RANDOM_SEED']
    }
    best_params = {}
    # ------------------ FFM Model tuning------------------
    logging.warning("FFM")
    # hyper-parameter tuning (grid search)
    logging.warning('调参')
    # Dual criterion: a trial is only accepted as "best" when it improves
    # BOTH validation AUC and validation logloss.
    max_valid_auc = float('-Inf')
    min_valid_logloss = float('Inf')
    # NOTE(review): eval() on CLI-provided strings executes arbitrary code;
    # ast.literal_eval would be the safe equivalent here.
    # NOTE(review): the loop variables are off by one relative to the option
    # names (dropout_fm_1 iterates dropout_fm_0_list) — cosmetic only.
    for attention_size in eval(args.attention_size_list):
        for embedding_size in eval(args.embedding_size_list):
            for dropout_fm_1 in eval(args.dropout_fm_0_list):
                for dropout_fm_2 in eval(args.dropout_fm_1_list):
                    logging.warning("attention_size: %d" % attention_size)
                    logging.warning("embedding_size: %d" % embedding_size)
                    logging.warning("dropout_fm:[%.1f, %.1f]" % (dropout_fm_1, dropout_fm_2))
                    params['attention_size'] = attention_size
                    params['embedding_size'] = embedding_size
                    params['dropout_fm'] = [dropout_fm_1, dropout_fm_2]
                    valid_auc, valid_logloss = _run_base_model_dfm(dfTrain, dfTest, params, config)
                    if max_valid_auc < valid_auc and min_valid_logloss > valid_logloss:
                        max_valid_auc = valid_auc
                        min_valid_logloss = valid_logloss
                        best_params['attention_size'] = attention_size
                        best_params['embedding_size'] = embedding_size
                        best_params['dropout_fm'] = [dropout_fm_1, dropout_fm_2]
    # Adopt whichever best values were found (keys may be absent if no trial
    # improved both metrics simultaneously).
    if 'attention_size' in best_params:
        params['attention_size'] = best_params['attention_size']
        logging.warning("best attention_size: %d" % best_params['attention_size'])
    if 'embedding_size' in best_params:
        params['embedding_size'] = best_params['embedding_size']
        logging.warning("best embedding_size: %d" % best_params['embedding_size'])
    if 'dropout_fm' in best_params:
        params['dropout_fm'] = best_params['dropout_fm']
        logging.warning("best dropout_fm:[%.1f, %.1f]" % (best_params['dropout_fm'][0], best_params['dropout_fm'][1]))
    # Re-run the model once more with the tuned parameters for final numbers.
    logging.warning("调参结束!")
    logging.warning(params)
    _run_base_model_dfm(dfTrain, dfTest, params, config)
    elapsed = (time.time() - start)
    print("Time used:", elapsed)
| [
"153454521@qq.com"
] | 153454521@qq.com |
d131a9c9e52c0f2d733a93cdeddf66a364c86799 | 9125a70b2f1fc3850999a8c9f21a5103849ff33f | /app/intent_news/__init__.py | b251dafbf783b6e473032dd3df3afa45d45660c3 | [
"MIT"
] | permissive | drabekj/OttoBot-Alexa-Skill | 334899260bb96e44eac2ae3ba1d7be6aa9badcbd | 76a4e5d9d1ac22eb171545c8b9ce558bb95e269f | refs/heads/master | 2022-12-16T17:53:54.384116 | 2018-07-02T15:57:39 | 2018-07-02T15:57:39 | 127,158,636 | 2 | 0 | MIT | 2022-09-16T17:47:09 | 2018-03-28T15:11:15 | Python | UTF-8 | Python | false | false | 4,402 | py | from app import AlexaRequest, ResponseBuilder
from app.utils import Ticker2Name
from app.utils.FeedReader import FeedReader
from static import strings
# Number of article titles read out per news briefing.
news_count = 3
def handle_news(request):
    """
    Generate the response for a NewsAboutCompanyIntent, dispatching on the
    current stage of the multi-turn dialog.
    :type request AlexaRequest
    :return: JSON response appropriate for the dialog stage.
    """
    if request.dialog_state() == "STARTED":
        # First turn: read out the latest article titles.
        return _handle_dialog_read_titles(request)
    if request.dialog_state() == "IN_PROGRESS":
        if request.get_slot_value('articleNo') is not None:
            # Article number slot is filled: deliver the article as a card.
            return _handle_dialog_send_article(request)
        if request.get_intent_confirmation_status() == "CONFIRMED":
            # User confirmed wanting details; ask which article.
            return _handle_dialog_which_one(request)
        # User declined more information.
        return ResponseBuilder.create_response(
            request, message=strings.INTENT_GENERAL_OK)
def _handle_dialog_read_titles(request):
    """
    Create a response with titles of articles about given company, if ticker supported.
    :type request AlexaRequest
    """
    ticker = request.get_slot_value('stockTicker')
    # A missing ticker, or one containing spaces, is not a valid symbol.
    # Bug fix: previously the error flag was set here but execution still
    # fell through to FeedReader(ticker), querying the feed with an invalid
    # (possibly None) ticker and potentially clobbering the error message.
    if ticker is None or ' ' in ticker:
        return ResponseBuilder \
            .create_response(request, message=strings.ERROR_NEWS_BAD_TICKER, is_ssml=False)
    headings = FeedReader(ticker).get_articles_titles(limit=news_count)
    if not headings:
        # Valid ticker but the feed returned no articles.
        return ResponseBuilder \
            .create_response(request, message=strings.ERROR_NEWS_NO_NEWS.format(ticker),
                             is_ssml=False)
    message = _build_read_titles_msg(headings, ticker)
    return ResponseBuilder \
        .create_response(request, message=message, is_ssml=True) \
        .with_dialog_confirm_intent()
def _build_read_titles_msg(headings, ticker):
    """Build SSML response text for read titles."""
    pause = "<break time='700ms'/>"
    company = Ticker2Name.ticker_to_name(ticker)
    intro = strings.INTENT_NEWS_ABOUT_COMPANY_INTRO.format(company)
    # Each title is preceded by a long pause and terminated with a period.
    body = "".join(pause + heading + "." for heading in headings)
    outro = pause + strings.INTENT_NEWS_ABOUT_COMPANY_ASK_MORE_INFO
    return "<speak>" + intro + body + outro + "</speak>"
def _handle_dialog_which_one(request):
    """Create a response asking which article user wants to know about to get articleNo."""
    return ResponseBuilder \
        .create_response(request, message=strings.INTENT_NEWS_ABOUT_COMPANY_ASK_ARTICLE_NO) \
        .with_dialog_elicit_slot()
def _handle_dialog_send_article(request):
    """Create a response with card containing requested article.

    Reads the stockTicker and articleNo slots, fetches the article body from
    the ticker's feed and delivers it as an Alexa card (title = article
    title, content = article body).
    """
    reprompt = strings.INTENT_GENERAL_REPROMPT
    ticker = request.get_slot_value('stockTicker')
    company = Ticker2Name.ticker_to_name(ticker)
    # Slots are 1-based for the user; convert to a 0-based list index.
    article_no = int(request.get_slot_value('articleNo')) - 1
    # last news: a spoken "0" maps below the range, so treat it as the
    # final (news_count-th) article.
    if article_no == -1:
        article_no = news_count - 1
    feed_reader = FeedReader(ticker)
    article_title = feed_reader.get_articles_titles()[article_no]
    article_body = feed_reader.get_article_body(article_no)
    if article_body is None:
        # Article not found
        message = strings.INTENT_NEWS_ABOUT_COMPANY_FAIL_ARTICLE_NOT_FOUND
        return ResponseBuilder.create_response(request, message=message) \
            .with_reprompt(reprompt)
    # Build response
    message = strings.INTENT_NEWS_ABOUT_COMPANY_ARTICLE_SENT
    card_title = strings.INTENT_NEWS_ABOUT_COMPANY_ARTICLE_CARD_TITLE \
        .format(company)
    card_content = strings.INTENT_NEWS_ABOUT_COMPANY_ARTICLE_CARD_CONTENT \
        .format(article_body)
    # NOTE(review): hard-coded toggle — the speech-only branch below is
    # currently dead code kept as an alternative delivery mode.
    send_as_card = True
    if send_as_card:
        return ResponseBuilder.create_response(request, message=message) \
            .with_reprompt(reprompt) \
            .with_card(card_title, content=card_content, subtitle=article_title)
    else:
        # Present article as speech
        return ResponseBuilder.create_response(request, message=card_content) \
            .with_reprompt(reprompt)
| [
"drabek.honza@gmail.com"
] | drabek.honza@gmail.com |
ca6491995930cd9ff3fba245a2c4eeaa755f813e | 02f4d6352e177e4291bd0bece59d3309092a8ff5 | /testimonials/admin.py | 7a4efeb5ef0668731505066e3a9144199c259f5f | [] | no_license | odedahay/django-connect-care | 8966502090434373637f06d1fe32b56f2ae0f96f | 52e733418b822aac3fc3af3225d56e50d72b422b | refs/heads/main | 2023-02-11T23:24:50.990813 | 2020-12-13T03:04:04 | 2020-12-13T03:04:04 | 314,047,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | from django.contrib import admin
from .models import Testimonial
class TestimonialAdmin(admin.ModelAdmin):
    """Admin configuration for Testimonial objects."""

    # Columns shown on the testimonial change list page.
    list_display = ['order_by', 'name', 'is_published']
    # Only the name column links to the edit form (order_by/is_published
    # must stay plain so they can be edited inline, see list_editable).
    list_display_links = ('name',)
    # Editable directly from the change list without opening each object.
    list_editable = ('order_by','is_published',)
admin.site.register(Testimonial,TestimonialAdmin)
| [
"odedahay@yahoo.com"
] | odedahay@yahoo.com |
923acf4f5f73b2ab128ff8f4eaa090e1bfc5dc97 | bb711368738bdd6bf97e4894e5ba4635d9312187 | /wavelet.py | f9b36e9db5bcd15a880021726eddb0f603b7005e | [] | no_license | woo3937/IRcamera | 3138e3554b68b1f62788ace1117c73a817025bce | 4cb06349423230262f96803a6bd40fe323336edd | refs/heads/master | 2021-01-01T15:40:58.663097 | 2013-12-30T02:56:25 | 2013-12-30T02:56:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | from __future__ import division
from pylab import *

# First lifting step of the Daubechies-4 wavelet transform, translated from
# MATLAB; the commented lines are the original MATLAB source.  The remaining
# steps (s2, s, d) are still untranslated.
s = arange(16)
#N = length(S);
N = len(s)
# Bug fix: with `from __future__ import division` active, N/2 is a float
# (8.0), so s1[N/2-1] and s1[0:N/2-1] used float indices, which raise on
# Python 3 / modern NumPy.  Use integer division instead.
half = N // 2
#s1 = S(1:2:N-1) + sqrt(3)*S(2:2:N); # 1, 3, 5, 7... and 2, 4, 6,
i = arange(half, dtype=int) * 2  # even positions 0, 2, 4, ...
s1 = s[i] + sqrt(3) * s[i+1]
#d1 = S(2:2:N) - sqrt(3)/4*s1 - (sqrt(3)-2)/4*[s1(N/2); s1(1:N/2-1)];
# w is s1 rotated right by one element (MATLAB's [s1(N/2); s1(1:N/2-1)]).
w = concatenate(([s1[half - 1]], s1[0:half - 1]))
d1 = s[i+1] - sqrt(3)/4*s1 - (sqrt(3)-2)/4*w
#s2 = s1 - [d1(2:N/2); d1(1)];
#s = (sqrt(3)-1)/sqrt(2) * s2;
#d = (sqrt(3)+1)/sqrt(2) * d1;
| [
"sieve121@umn.edu"
] | sieve121@umn.edu |
02d0975ccfed593c0dbe3f38396ff2c0909959e7 | 0309b4b9397cd7c3dbf4e6adbe8e60d70b0e1e65 | /queueserver/versions/version_1_17.py | 43aca40fd0bd297cd10a35c40f0c596f752e0f63 | [] | no_license | JLyne/QueueServer | 4c11f6c15846cff932f6e375038b342336b596a1 | 5e216cbe85d970e9a40b4508a07cae994fb3992c | refs/heads/master | 2023-03-08T02:52:01.338167 | 2023-02-24T21:40:06 | 2023-02-24T21:40:06 | 234,971,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | from quarry.types.nbt import TagInt, TagRoot, TagCompound
from queueserver.versions import Version_1_16_2
from queueserver.protocol import Protocol
class Version_1_17(Version_1_16_2):
    """Queue-server protocol handler for Minecraft Java Edition 1.17.

    Extends the 1.16.2 handler, overriding only what changed in 1.17:
    explicit world-height fields in the dimension NBT, the viewpoint
    entity id, and the spawn / chunk-reset packet layouts.
    """

    # Protocol number negotiated during the handshake for 1.17.
    protocol_version = 755
    # Chunk data format revision key.
    chunk_format = '1.17'

    def __init__(self, protocol: Protocol, bedrock: bool):
        # (annotation fixed: was the literal `False`, not a type)
        super(Version_1_17, self).__init__(protocol, bedrock)

    def get_dimension_settings(self, name: str):
        """Return the dimension NBT settings, adding the vertical world
        range fields that 1.17 newly requires."""
        settings = super().get_dimension_settings(name)
        settings['min_y'] = TagInt(0)
        settings['height'] = TagInt(256)
        return settings

    def get_viewpoint_entity_type(self):
        # Network entity-type id used for the camera/viewpoint entity in
        # the 1.17 registry.
        return 74

    def send_spawn(self):
        # Teleport the player to the fixed queue viewpoint: x=8, y=70,
        # z=-8, yaw=0, pitch=0, no relative-position flags, teleport id 0,
        # and dismount-vehicle=False (field added in 1.17).
        self.protocol.send_packet("player_position_and_look",
                                  self.protocol.buff_type.pack("dddff?", 8, 70, -8, 0, 0, 0b00000),
                                  self.protocol.buff_type.pack_varint(0),
                                  self.protocol.buff_type.pack("?", False))

    def send_reset_world(self):
        """Blank the client-side world by sending empty chunk data for the
        16x16 chunk area around the origin."""
        data = [
            self.protocol.buff_type.pack_nbt(TagRoot({'': TagCompound({})})), # Heightmap
            self.protocol.buff_type.pack_varint(0), # Data size
            self.protocol.buff_type.pack_varint(0), # Block entity count
            self.protocol.buff_type.pack("?", True), # Trust edges
            self.protocol.buff_type.pack_varint(0), # Sky light mask
            self.protocol.buff_type.pack_varint(0), # Block light mask
            self.protocol.buff_type.pack_varint(0), # Empty sky light mask
            self.protocol.buff_type.pack_varint(0), # Empty block light mask
            self.protocol.buff_type.pack_varint(0), # Sky light array count
            self.protocol.buff_type.pack_varint(0), # Block light array count
        ]
        # (x, y) here are the two chunk-column coordinates of the
        # chunk_data packet — presumably chunk x and z; verify against
        # quarry's packet definition.
        for x in range(-8, 8):
            for y in range(-8, 8):
                self.protocol.send_packet("chunk_data", self.protocol.buff_type.pack("ii", x, y), *data)
| [
"jim+github@not-null.co.uk"
] | jim+github@not-null.co.uk |
7003e0a54996ab11f191b69ba52a16d4379ab39f | 8a83a9af4259f676bc777a267c7c75b48db9096a | /venv/bin/isort | 5608788d5ac09c91ceea9b77784f782187cffda6 | [] | no_license | ahmo10/problem | 56c7f2878e14448b0727db8c87d4355e4d111b1a | 59efc5c8b3fc6d10ad67f91d4ef873ce589a62fb | refs/heads/master | 2020-03-23T22:09:06.990334 | 2018-07-24T14:12:14 | 2018-07-24T14:12:14 | 142,156,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | #!/home/ahmed/Desktop/ahmed/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
    # setuptools-generated console script: strip the "-script.pyw"/".exe"
    # suffix that may be appended on Windows so isort sees a clean program
    # name, then exit with isort's own return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ahmey.ibra@gmail.com"
] | ahmey.ibra@gmail.com | |
8c0c52b748d955145638dc894ec4ae3cb1d45f2c | 412e327f41ec7c7a8e9389740bc849ebe173059e | /python/plotting_functions/empirical_models.py | 9ed2d32f68fb5b2853449dd63df59983721f34d8 | [] | no_license | erolsson/railway_ballast | 2b617b91ae720ef86cd1e5c89b08a34b92996fd5 | cfc86c22cc5e2f857c24ba1a01c2541edf839e3b | refs/heads/master | 2023-07-25T16:54:10.529328 | 2023-07-23T12:49:32 | 2023-07-23T12:49:32 | 186,101,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,174 | py | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.style
from abaqus_python_interface import ABQInterface
from comparison_of_models import get_path_points_for_fem_simulation
abq = ABQInterface("abq2018")
matplotlib.style.use('classic')
plt.rc('text', usetex=True)
plt.rc('font', serif='Computer Modern Roman')
plt.rcParams.update({'font.size': 20})
plt.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}", r"\usepackage{xcolor}"]
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman'],
'monospace': ['Computer Modern Typewriter']})
odb_directory = os.path.expanduser('~/railway_ballast/odbs/')
figure_directory = os.path.expanduser('~/railway_ballast/Figures/')
def selig_2(n, _):
return 4.318*n**0.17
def shenton_2(n, axle_load):
ks = 1.1
return ks*axle_load/20*(0.69*n**0.2 + 2.7e-6*n)
def thom_1(n, _):
s = (np.log10(n) - 2.4)**2
s[np.log10(n) < 2.4] = 0
return s
def hettler(n, axle_load):
r = 0.00095
c = 0.43
f = axle_load*9.82/2
return r*f**1.6*(1+c*np.log(n))
def main():
fig = plt.figure(0)
cycles = np.array([float(10**i) for i in range(7)] + [2e6])
plt.semilogx(cycles, thom_1(cycles, 0), 'g', lw=2)
plt.semilogx(cycles, selig_2(cycles, 0), 'k', lw=2)
path_points = get_path_points_for_fem_simulation('sleepers_high')
for load, line in zip([17.5, 22.5, 30.], [':', '-', '--']):
plt.semilogx(cycles, hettler(cycles, load), line + 'm', lw=2)
plt.semilogx(cycles, shenton_2(cycles, load), line + 'c', lw=2)
frequencies = [5., 10.]
colors = ['r', 'b', 'g', 'k']
symbols = ['x', 'o', 's', 'd']
for f, c, sym in zip(frequencies, colors, symbols):
settlement = [0, 0]
for k, n in enumerate([1e3, 1e6]):
step_name = 'cycles_' + str(int(n))
odb_filename = (odb_directory + '/results_sleepers_high_'
+ str(load).replace('.', '_') + 't_' + str(int(f)) + 'Hz.odb')
up = abq.get_data_from_path(odb_filename, path_points, 'UP', 'UP2', output_position='NODAL',
step_name=step_name)
settlement[k] = -up[0]*1000
plt.semilogx(1e6, settlement[1] - 0*settlement[0], c + sym, lw=3, ms=12, mew=2)
plt.xlabel('Load Cycles [-]')
plt.ylabel('Settlement [mm]')
fig.set_size_inches(13., 6., forward=True)
plt.xlim(1, 2e6)
plt.ylim(0, 35.)
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([0.07, 0.12, 0.53, box.height])
load_labels = [
plt.plot([1., 1.], [-1, -2], 'w', label=r'\textbf{Loads}')[0],
plt.plot([1., 1.], [-1, -2], ':k', lw=2, label='17.5 t')[0],
plt.plot([1., 1.], [-1, -2], '-k', lw=2, label='22.5 t')[0],
plt.plot([1., 1.], [-1, -2], '--k', lw=2, label='30.0 t')[0]
]
legend = ax.legend(handles=load_labels, loc='upper left', bbox_to_anchor=(0.03, 1.), numpoints=1)
plt.gca().add_artist(legend)
labels = [
plt.plot([1., 1.], [-1, -2], 'w', label=r'\textbf{Frequencies}')[0],
plt.plot([1., 1.], [-1, -2], 'rx', lw=3, label='5 Hz', ms=12, mew=2)[0],
plt.plot([1., 1.], [-1, -2], 'bo', lw=3, label='10 Hz', ms=12, mew=2)[0],
plt.plot([1., 1.], [-1, -2], 'w', lw=3, label='white', ms=12, mew=2)[0],
plt.plot([1., 1.], [-1, -2], 'w', lw=3, label=r'\textbf{Empirical models}', ms=12, mew=2)[0],
plt.plot([1., 1.], [-1, -2], 'm', lw=2, label=r'Hettler [27]', alpha=0.5)[0],
plt.plot([1., 1.], [-1, -2], 'k', lw=2, label=r'Selig and Waters [28]', alpha=0.5)[0],
plt.plot([1., 1.], [-1, -2], 'c', lw=2, label=r'Shenton [3]', alpha=0.5)[0],
plt.plot([1., 1.], [-1, -2], 'g', lw=2, label=r'Thom and Oakley [29]', alpha=0.5)[0],
]
legend = ax.legend(handles=labels, loc='upper left', bbox_to_anchor=(1., 1.035), numpoints=1)
legend.get_texts()[3].set_color("white")
plt.gca().add_artist(legend)
plt.savefig(figure_directory + '/empirical_models.png', dpi=600)
plt.show()
if __name__ == '__main__':
main()
| [
"erolsson@kth.se"
] | erolsson@kth.se |
2601e3df70b850fb1c8d807bad793d77edb21a11 | 90a40a62bdc4ec5e2a2e95aef53dfacc4366babc | /learner/run.py | 99c25147cb7322afc8f2bc5fe0faf2d558226fd9 | [
"MIT"
] | permissive | xiaohui-zhangxh/spider | acf9de2d318988346b7d33222926450a3cbe59ba | 76fa461e0058f80ac57bcf9c247f16abacdb3a11 | refs/heads/master | 2020-12-28T21:28:26.337951 | 2016-07-29T04:56:47 | 2016-07-29T04:56:47 | 64,440,695 | 0 | 0 | null | 2016-07-29T01:36:41 | 2016-07-29T01:36:41 | null | UTF-8 | Python | false | false | 1,446 | py | #!/usr/bin/env python
import os
import sys
lib_path = os.path.realpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'lib'))
if lib_path not in sys.path:
sys.path[0:0] = [lib_path]
import utils
import clusterers
import processors
import simplejson as json
import os
import argparse
def main(args):
    """Cluster the saved pages of one site, using 2, 3, ... N of its URLs.

    For each prefix size, extracts features from the stored page data, runs
    DBSCAN, scores the resulting labels and writes the clusters to
    clusters.NNN.json in the site's data directory.

    NOTE(review): this file is Python 2 (print statement below); the loop
    also reloads *all* pages on every iteration and `id` shadows the
    builtin.
    """
    path = utils.get_data_path(args.site[0])
    urls = utils.load_urls(path)
    for count in range(2, len(urls) + 1):
        print '[learner] clustering with %d urls' % count
        # load data for every known URL, then keep only the first `count`
        data = [utils.load_data(path, id) for id, url in enumerate(urls)]
        data = data[:count]
        # process data into feature vectors
        processor = processors.Processor(data)
        features = processor.extract()
        # clustering
        clusterer = clusterers.DBSCAN()
        labels = clusterer.cluster(features).labels_
        # score the labels and persist the clusters as pretty-printed JSON
        clusters = processor.score(labels)
        with open(os.path.join(path, 'clusters.%03d.json' % count), 'w') as f:
            f.write(json.dumps(clusters, indent=2, ensure_ascii=False).encode('utf8'))
def parse_args():
    """Parse the command-line arguments: one required positional site id."""
    arg_parser = argparse.ArgumentParser(
        description='Run the whole pipeline on site pages.')
    arg_parser.add_argument(
        'site', metavar='site', type=str, nargs=1,
        help='site id, for example: theverge, npr, nytimes')
    return arg_parser.parse_args()
if __name__ == '__main__':
    main(parse_args())
| [
"zhou@ziyan.info"
] | zhou@ziyan.info |
6d9ca94951ed0aedd43662adb6bc92c2892e327b | c22c176e4db84f48c2dee757c4cb57cdb506eb8b | /xunit_tools/xunit_parse.py | 332ceb7064d7d1171de9db4f78fcad04182500a2 | [
"MIT"
] | permissive | charlesthomas/xunit_tools | bc6aa9891398bb7fb0f246a4331bf03f237aa647 | ce71d95a595829377b271777e0b7faba4dd75803 | refs/heads/master | 2021-07-09T14:44:29.634649 | 2019-04-10T23:24:29 | 2019-04-10T23:24:29 | 73,010,693 | 0 | 1 | MIT | 2021-03-20T00:00:52 | 2016-11-06T18:35:33 | Python | UTF-8 | Python | false | false | 1,709 | py | import os.path
from cgi import escape
import xml.etree.ElementTree as ElementTree
from render_objects import HTMLObject
from test_objects import TestSuite
class XUnitParse(HTMLObject):
    """Parse an xUnit XML results file into a TestSuite for HTML rendering.

    NOTE(review): `cgi.escape` (imported at the top of this file) was
    deprecated in Python 3.2 and removed in 3.8; `html.escape(text,
    quote=False)` is the drop-in replacement.
    """

    # Template name used by the HTMLObject renderer.
    template = 'xunit'

    def __init__(self, filepath):
        self.filepath = filepath
        # Populated by parse().
        self.suite = None
        self.root = ElementTree.parse(filepath).getroot()
        # Fall back to the file name when the root element has no name attr.
        if self.root.attrib.get('name', None) is None:
            self.root.attrib.update(name=self.filename)

    @property
    def filename(self):
        """Base name of the parsed file, without directory or extension."""
        return os.path.splitext(os.path.basename(self.filepath))[0]

    def parse(self):
        """Walk every testcase element and record pass/fail/skip results.

        Error results whose text mentions AssertionError are reclassified
        as failures.  Returns the populated TestSuite and stashes it in
        self.render_kwargs for the template.

        NOTE(review): plain <failure> elements never call increment_fail
        here — confirm whether TestSuite counts those elsewhere.
        """
        kwargs = self.root.attrib
        kwargs['filename'] = self.filename
        self.suite = TestSuite(**kwargs)
        for case in self.root:
            testcase = self.suite.add_case(case)
            # A case with no child result elements counts as passed.
            passed = True
            for res in case:
                if res.tag == 'system-out':
                    # Captured stdout is not a result; ignore it.
                    continue
                passed = False
                if res.tag == 'skipped':
                    self.suite.increment_skip()
                if res.tag == 'error' and 'AssertionError' in res.text:
                    # Assertion errors are really failures, not errors.
                    res.tag = 'failure'
                    self.suite.increment_fail()
                testcase.add_result(rtype=res.tag, stacktrace=escape(res.text),
                                    message=res.attrib.get('message', None),
                                    etype=res.attrib.get('type', None))
            if passed:
                testcase.add_result(rtype='passed')
                self.suite.increment_pass()
        self.render_kwargs = {'suite': self.suite}
        return self.suite

    @property
    def title(self):
        """Name attribute of the root test-suite element."""
        return self.root.attrib['name']
| [
"ch@rlesthom.as"
] | ch@rlesthom.as |
aa5b5cf536cd17830810b4449e944260520c859f | 6f1ef8212efe16b89154739a2387de5f0552c3b3 | /py4e/pay4.py | df72a8321e27dcb1100a6196d000f9e75904aaf2 | [] | no_license | t0etag/Python | a612af508b2adda3e493aa57cfc03ebbfa6df06e | e763eaf892502f44f97e978ef87669fe87f016f9 | refs/heads/master | 2021-07-15T18:40:38.253241 | 2021-01-27T21:16:02 | 2021-01-27T21:16:02 | 232,670,829 | 0 | 0 | null | 2021-01-27T21:16:03 | 2020-01-08T22:16:12 | Python | UTF-8 | Python | false | false | 605 | py | """
Calculate pay from user input using a function
Give employee 1.5 times hourly pay above 40 hours
"""
def _read_float(prompt):
    """Prompt until the user enters a valid number; return it as a float.

    Bug fix: the original used a bare `except:` that printed a warning but
    then kept the unconverted string, crashing later in the arithmetic.
    """
    while True:
        try:
            return float(input(prompt))
        except ValueError:
            print("Please enter a number.")


# Overtime bookkeeping: hours above 40 are paid at 1.5x the hourly rate.
otHours = 0.0
otRate = 1.5


def computePay(hours, rate):
    """Return straight-time pay: hours worked times hourly rate."""
    pay = hours * rate
    return(pay)


if __name__ == "__main__":
    hours = _read_float("Enter hours: ")
    rate = _read_float("Enter pay rate: ")
    if hours > 40:
        # Bug fix: removed stray debug print of otHours.
        otHours = hours - 40
        pay = 40 * rate + otHours * (otRate * rate)
    else:
        pay = computePay(hours, rate)
    print(pay)
| [
"salazar.tony@heb.com"
] | salazar.tony@heb.com |
53e99b620ef41d3431b8a2e2f1b1752ba7ed7f5c | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_11510-2806/sdB_ec_11510-2806_coadd.py | b8a00e983beb135ab5eedc62448fb22de2b74e60 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from gPhoton.gMap import gMap
def main():
    # One-shot gPhoton gMap call for sdB star EC 11510-2806: builds an NUV
    # count movie at 30 s cadence plus a coadded count image over a
    # 0.033 x 0.033 degree sky range centred on the given RA/Dec.
    # Output paths are hard-coded to a specific data volume.
    gMap(band="NUV", skypos=[178.398542,-28.380747], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_ec_11510-2806/sdB_ec_11510-2806_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_ec_11510-2806/sdB_ec_11510-2806_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
1f4602250ee385eaad603ab4f460fe716074575b | 30e53103c9f976cc456d2e0a39b04ac54810473c | /venv/bin/pip3.6 | b379085f5eded6fbccb3c9c745a7de21f5b26a89 | [] | no_license | AnjaliAstro/BlogApp | 6d6d2851f9be3855b3fbef87ebd5ae2d45206605 | 26de239d219b350cffa9a4b89d08de32ab75b902 | refs/heads/master | 2020-06-14T06:39:35.368846 | 2019-09-25T17:58:55 | 2019-09-25T17:58:55 | 194,935,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | 6 | #!/home/anjali/Documents/BlogProject/BlogProject/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # setuptools-generated console script: strip the "-script.pyw"/".exe"
    # suffix that may be appended on Windows so pip sees a clean program
    # name, then exit with pip's own return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"anjali12astro@gmail.com"
] | anjali12astro@gmail.com |
5f1ef23e880c57aa0cfe6ab9a0b517813e7f795d | 126fa31d137c763d46c6bcb65e9bdd727930ddb1 | /src/groups/migrations/0003_auto_20180830_0032.py | 0e750a3dcba88d4b023d03844a066be2784051ce | [] | no_license | amaanabbasi/groupio | 42741d3a1be8f145e978c7238180f11560d8b62f | bd3747661e2131856e073a647d69427b7aed34d0 | refs/heads/master | 2022-12-21T09:14:34.846836 | 2018-10-05T20:01:13 | 2018-10-05T20:01:13 | 145,425,215 | 0 | 0 | null | 2022-11-22T02:52:54 | 2018-08-20T14:00:46 | Python | UTF-8 | Python | false | false | 1,007 | py | # Generated by Django 2.1 on 2018-08-29 19:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (makemigrations, 2018-08-29).

    Adds the required `admin` FK on GroupAdmin (backfilled with pk=1 for
    existing rows), relaxes Group.groupAdmin to allow blank, and shortens
    GroupAdmin.adminName to 50 chars.  Generated code: avoid hand-editing
    once applied to a shared database.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('groups', '0002_auto_20180830_0013'),
    ]

    operations = [
        migrations.AddField(
            model_name='groupadmin',
            name='admin',
            # default=1 only seeds existing rows; preserve_default=False
            # drops it from the final field definition.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='group',
            name='groupAdmin',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='groups.GroupAdmin'),
        ),
        migrations.AlterField(
            model_name='groupadmin',
            name='adminName',
            field=models.CharField(max_length=50),
        ),
    ]
| [
"amaanabbasi99@gmail.com"
] | amaanabbasi99@gmail.com |
abc4a456983553001ed9d2b36f7c0723495cbd03 | 5d36864f5f9f1b737c4718703ee53c3aa715e398 | /CourseGrading/5.2.2税后工资.py | d6b574d185bd95c73c29cf67a43a14974ae1ed02 | [] | no_license | xzl995/Python | d909274c9aba8ae9f18029a5f2069b1bb3418b9a | 48d4add7a1d46b2e3773bdf096e834852115014d | refs/heads/master | 2020-04-14T11:51:35.407548 | 2019-01-02T10:54:55 | 2019-01-02T10:54:55 | 163,824,702 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | """
【问题描述】
假设税前工资和税率如下(s代表税前工资,t代表税率):
s<1000 t=0%
1000<=s<2000 t=10%
2000<=s<3000 t=15%
3000<=s<4000 t=20%
4000<=s t=25%
编写一程序,要求用户输入税前工资额,然后计算税后工资额。
【输入形式】
从键盘输入税前工资s,可以是浮点数。
【输出形式】
输出税后工资额,保留小数后两位。
【输入样例】
3000
【输出样例】
2400.00
【样例说明】
税前工资为3000,所以税率为20%,扣除税后工资为2400.00
"""
def net_pay(s):
    """Return the after-tax pay for a pre-tax pay ``s``.

    A single flat rate applies to the whole amount:
      s < 1000           0%
      1000 <= s < 2000  10%
      2000 <= s < 3000  15%
      3000 <= s < 4000  20%
      s >= 4000         25%
    """
    if s < 1000:
        rate = 0.0
    elif s < 2000:
        rate = 0.1
    elif s < 3000:
        rate = 0.15
    elif s < 4000:
        rate = 0.2
    else:
        rate = 0.25
    return s - s * rate


if __name__ == "__main__":
    # Read the pre-tax pay from stdin and print the after-tax pay with
    # two decimal places, as the assignment requires.
    s = float(input())
    print("%.2f" % net_pay(s))
| [
"595696893@qq.com"
] | 595696893@qq.com |
bd8578f27702f7410b5d161fe70220d50c474f6f | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /AtCoder_Virtual_Contest/macle_20220823/a/main.py | 6a8b437c02a6e70ad83a3b115857a7dd91d8088e | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 281 | py | # -*- coding: utf-8 -*-
def main():
    import sys

    read_line = sys.stdin.readline

    # Three base strings, then a digit string choosing among them.
    pieces = [read_line().rstrip() for _ in range(3)]
    order = read_line().rstrip()

    # Each digit is 1-based: map it to its string and concatenate.
    print("".join(pieces[int(digit) - 1] for digit in order))


if __name__ == "__main__":
    main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
c6dffb1a18ad74e5775deea011d9d3dcee62c38d | 77d3578e266da563d7d53c3de20e5a6708ffc317 | /basic_loop.py | 955427e9f7f33dd079416f230892ac95d28ce7a0 | [] | no_license | danbongas/Python_Basics | 3844cb2549aa0c588254242587dc11e447556f32 | 50fc7b67b361cbdb01cb994ebe47f79705d72a05 | refs/heads/master | 2020-12-15T12:51:52.192875 | 2020-01-21T13:59:05 | 2020-01-21T13:59:05 | 235,108,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py |
def is_leap_year(year):
    """Return True for Gregorian leap years: divisible by 4, except
    century years, which must be divisible by 400."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


print("Counting Leap Years from 1990 to 2020:\n")
# Bug fix: range end is 2021 so that "to 2020" is inclusive (2020 is a
# leap year), and the full leap-year rule is applied instead of just %4.
for num in range(1990, 2021):
    if is_leap_year(num):
        print("Is a Leap Year: ", num)
| [
"noreply@github.com"
] | danbongas.noreply@github.com |
25434ceef7fdb5b1281814f9ea5c6c17aeead479 | 6608256b317319111d22ad91497880fb6e996a00 | /thrift/server/TServer.py | d344fdd736efa06b00205d0ff574dd5feb992f8c | [
"MIT"
] | permissive | PythonRebirth/sk | 9165ade9893000ad90b9a6b20a8cfe55e0a7139f | d20f757752461c7ca68d0e5c086a490bcc47df94 | refs/heads/main | 2023-04-14T09:17:48.138580 | 2021-05-03T10:56:00 | 2021-05-03T10:56:00 | 359,256,965 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,247 | py | from six.moves import queue
import logging
import os
import threading
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
logger = logging.getLogger(__name__)
class TServer(object):
    """Base class for Thrift servers.

    Holds the processor plus the transport/protocol factories used to wrap
    each accepted client connection; concrete subclasses implement serve().
    """

    def __init__(self, *args):
        """Dispatch on argument count; three construction forms exist:

        * (processor, serverTransport) — base transports, binary protocol
        * (processor, serverTransport, transportFactory, protocolFactory)
          — the same factory pair is used for input and output
        * (processor, serverTransport, inputTransportFactory,
           outputTransportFactory, inputProtocolFactory,
           outputProtocolFactory)

        Any other arity is silently ignored, leaving the server unconfigured.
        """
        if (len(args) == 2):
            self.__initArgs__(args[0], args[1],
                              TTransport.TTransportFactoryBase(),
                              TTransport.TTransportFactoryBase(),
                              TBinaryProtocol.TBinaryProtocolFactory(),
                              TBinaryProtocol.TBinaryProtocolFactory())
        elif (len(args) == 4):
            # One transport factory and one protocol factory for both sides.
            self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
        elif (len(args) == 6):
            self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])

    def __initArgs__(self, processor, serverTransport,
                     inputTransportFactory, outputTransportFactory,
                     inputProtocolFactory, outputProtocolFactory):
        """Store the processor, listening transport and the four factories."""
        self.processor = processor
        self.serverTransport = serverTransport
        self.inputTransportFactory = inputTransportFactory
        self.outputTransportFactory = outputTransportFactory
        self.inputProtocolFactory = inputProtocolFactory
        self.outputProtocolFactory = outputProtocolFactory

    def serve(self):
        """Run the server; overridden by every concrete subclass."""
        pass
class TSimpleServer(TServer):
    """Single-threaded server: handles one client connection at a time.

    While a client is connected every other connection attempt has to
    wait, so this is mainly useful for testing.
    """

    def __init__(self, *args):
        TServer.__init__(self, *args)

    def serve(self):
        """Accept clients forever, pumping requests until each disconnects."""
        self.serverTransport.listen()
        while True:
            client = self.serverTransport.accept()
            if not client:
                continue
            itrans = self.inputTransportFactory.getTransport(client)
            otrans = self.outputTransportFactory.getTransport(client)
            iprot = self.inputProtocolFactory.getProtocol(itrans)
            oprot = self.outputProtocolFactory.getProtocol(otrans)
            try:
                # Process requests until the client closes the connection.
                while True:
                    self.processor.process(iprot, oprot)
            except TTransport.TTransportException:
                # Normal end of connection.
                pass
            except Exception as x:
                logger.exception(x)
            itrans.close()
            otrans.close()
class TThreadedServer(TServer):
    """Server that spawns one new thread per client connection."""

    def __init__(self, *args, **kwargs):
        TServer.__init__(self, *args)
        # daemon=True means worker threads won't keep the process alive.
        self.daemon = kwargs.get("daemon", False)

    def serve(self):
        """Accept connections forever, handing each one to a fresh thread."""
        self.serverTransport.listen()
        while True:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                t = threading.Thread(target=self.handle, args=(client,))
                t.setDaemon(self.daemon)
                t.start()
            except KeyboardInterrupt:
                # Let Ctrl-C stop the accept loop.
                raise
            except Exception as x:
                logger.exception(x)

    def handle(self, client):
        """Serve one client until it disconnects (runs in its own thread)."""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransport.TTransportException:
            # Client hung up: normal termination.
            pass
        except Exception as x:
            logger.exception(x)
        itrans.close()
        otrans.close()
class TThreadPoolServer(TServer):
    """Server that services requests with a fixed-size pool of worker threads."""

    def __init__(self, *args, **kwargs):
        TServer.__init__(self, *args)
        # Accepted connections waiting for a free worker thread.
        self.clients = queue.Queue()
        # Pool size; override with setNumThreads() before serve().
        self.threads = 10
        self.daemon = kwargs.get("daemon", False)

    def setNumThreads(self, num):
        """Set the number of worker threads (call before serve())."""
        self.threads = num

    def serveThread(self):
        """Worker loop: pull a connection off the queue and serve it."""
        while True:
            try:
                client = self.clients.get()
                self.serveClient(client)
            except Exception as x:
                logger.exception(x)

    def serveClient(self, client):
        """Process all requests from one client connection, then close it."""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransport.TTransportException:
            # Normal end of connection.
            pass
        except Exception as x:
            logger.exception(x)
        itrans.close()
        otrans.close()

    def serve(self):
        """Start the worker pool, then accept and enqueue clients forever."""
        for i in range(self.threads):
            try:
                t = threading.Thread(target=self.serveThread)
                t.setDaemon(self.daemon)
                t.start()
            except Exception as x:
                logger.exception(x)
        # The accept loop only enqueues; workers do the actual processing.
        self.serverTransport.listen()
        while True:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                self.clients.put(client)
            except Exception as x:
                logger.exception(x)
class TForkingServer(TServer):
    """Server that forks a new child process for each client connection.

    fork() duplicates all transport state into the child, so the parent
    closes its copies of the client transports immediately and only
    tracks/reaps its children.  POSIX only (os.fork).
    """

    def __init__(self, *args):
        TServer.__init__(self, *args)
        # PIDs of live children, reaped opportunistically in collect_children().
        self.children = []

    def serve(self):
        def try_close(file):
            # Best-effort close; a failure here must not kill the server.
            try:
                file.close()
            except IOError as e:
                logger.warning(e, exc_info=True)

        self.serverTransport.listen()
        while True:
            client = self.serverTransport.accept()
            if not client:
                continue
            try:
                pid = os.fork()
                if pid:
                    # Parent: remember the child, reap any finished ones,
                    # and close the parent-side copies of the connection.
                    self.children.append(pid)
                    self.collect_children()
                    itrans = self.inputTransportFactory.getTransport(client)
                    otrans = self.outputTransportFactory.getTransport(client)
                    try_close(itrans)
                    try_close(otrans)
                else:
                    # Child: serve this one connection, then _exit so that
                    # the parent's cleanup/atexit handlers never run here.
                    itrans = self.inputTransportFactory.getTransport(client)
                    otrans = self.outputTransportFactory.getTransport(client)
                    iprot = self.inputProtocolFactory.getProtocol(itrans)
                    oprot = self.outputProtocolFactory.getProtocol(otrans)
                    ecode = 0
                    try:
                        try:
                            while True:
                                self.processor.process(iprot, oprot)
                        except TTransport.TTransportException:
                            # Normal end of connection.
                            pass
                        except Exception as e:
                            logger.exception(e)
                            ecode = 1
                    finally:
                        try_close(itrans)
                        try_close(otrans)
                    os._exit(ecode)
            except TTransport.TTransportException:
                pass
            except Exception as x:
                logger.exception(x)

    def collect_children(self):
        """Non-blocking reap: remove finished child PIDs, stop at the
        first still-running one."""
        while self.children:
            try:
                pid, status = os.waitpid(0, os.WNOHANG)
            except os.error:
                pid = None
            if pid:
                self.children.remove(pid)
            else:
                break
| [
"noreply@github.com"
] | PythonRebirth.noreply@github.com |
93f87c58022dcd4d1c31d610604097e70ed4ec82 | a2f2c2484bea09b8f9a4cca01c9cf797a80b8de3 | /Module 6/P5HW2_MathQuiz_JamesLeach.py | b7d108324b4ea31844dd34084054f3f2bb2c364c | [] | no_license | JamesFTCC/cti110 | 711dfd4281a3df832e718a50cd7a708713caaf6f | 6b13a9e1cd59a74cfc58648e4c4dfd35f0c3c4eb | refs/heads/master | 2020-07-25T00:46:27.041842 | 2019-12-03T19:20:00 | 2019-12-03T19:20:00 | 208,102,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | # This program has the user add or subtract random numbers.
# 11/5/19
# CTI-110 P5HW2 - Math Quiz
# James Leach
# Inputs whether the user would like to add, subtract, or exit. Then inputs their answer to the problem.
# Calculates whether the users answer was correct.
# Outputs if the user got the problem right or wrong.
import random
def main():
    """Show the menu, read the user's choice and start the matching quiz.

    The quiz helper calls main() again after grading, so the menu keeps
    reappearing until the user picks 3 (or any other non-1/2 choice),
    which simply falls through and lets the program end.
    """
    print('MAIN MENU')
    print('---------')
    print('1) Add Random Numbers')
    print('2) Subtract Random Numbers')
    print('3) Exit')
    quiz = int(input())
    if quiz == 1:
        add_numbers()
    elif quiz == 2:
        subtract_numbers()


def _play_round(symbol, combine):
    """Present one problem with two random 3-digit numbers and grade it.

    symbol is the operator shown on screen ('+' or '-'); combine computes
    the correct result from the two operands.  Returns to the menu after
    grading, matching the behaviour of the previously duplicated code.
    """
    x = random.randint(100, 999)
    y = random.randint(100, 999)
    correct = combine(x, y)
    print(' ', x)
    print(symbol, y)
    answer = int(input())
    if correct == answer:
        print('Good Job! You got it right!')
    else:
        print('Sorry, the correct answer was', correct)
    print('')
    main()


def add_numbers():
    """Quiz one addition problem (deduplicated with subtract_numbers)."""
    _play_round('+', lambda a, b: a + b)


def subtract_numbers():
    """Quiz one subtraction problem."""
    _play_round('-', lambda a, b: a - b)


main()
| [
"noreply@github.com"
] | JamesFTCC.noreply@github.com |
1a32e93c35c00f322aac2c66393a0c69af62d393 | 59150a7613e2ba56dc94b0f0e236d2950bc3f854 | /Practice/06/Python/06.py | 31093027e926bb25c0183dd2a71e4c897e843a81 | [] | no_license | DONR69/Programming | 9817aecf2708836c3e3d909bb922058eddbff5a1 | 6c91e33c5da5d931deb4f54d3bca51bfe5cd8082 | refs/heads/main | 2023-04-03T12:28:24.336901 | 2021-04-19T05:37:35 | 2021-04-19T05:37:35 | 334,436,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | from math import sqrt
# Solve a*x^2 + b*x + c = 0, handling every degenerate combination of
# zero coefficients separately.
a = float(input("a = "))
b = float(input("b = "))
c = float(input("c = "))

if a == 0 and b == 0:
    # 0 = c: either an identity or a contradiction.
    if c == 0:
        print("Все корни верны")
    else:
        print("Нет корней")
elif a == 0:
    # Linear equation b*x + c = 0.
    if c == 0:
        print("Корнем уравнения является 0")
    else:
        f = -1 * (c / b)
        print("Корень равен ", f)
elif b == 0:
    # a*x^2 + c = 0  =>  x^2 = -c/a.
    if c == 0:
        print("Корень уравнения 0")
    elif (-1 * (c / a)) > 0:
        x = sqrt(-1 * (c / a))
        # Bug fix: x was embedded in the string literal and never printed.
        print("Первый корень равен ", x)
        x = -1 * sqrt(-1 * (c / a))
        print("Второй корень равен ", x)
    else:
        print("Корни невещественные")
elif c == 0:
    # a*x^2 + b*x = 0  =>  x * (a*x + b) = 0: roots 0 and -b/a.
    print("Первый корень равен 0")
    # Bug fix: the second root is -b/a (the sign was missing) and its
    # value was never printed.
    x = -1 * (b / a)
    print("Второй корень равен ", x)
else:
    # General case: use the discriminant.
    d = b * b - 4 * a * c
    if d > 0:
        x = (-1 * b + sqrt(d)) / (2 * a)
        print("Первый корень равен ", x)
        x = (-1 * b - sqrt(d)) / (2 * a)
        # Bug fix: the second root was computed but never printed.
        print("Второй корень равен ", x)
    elif d < 0:
        print("Корни невещественные")
    else:
        # d == 0: one double root.
        x = (-1 * b) / (2 * a)
        print(x)
| [
"02dandy02@mail.ru"
] | 02dandy02@mail.ru |
a165393e1acfe0730470c64181e82318c7400cc7 | 6f9a9a7c5c4138f763d13469f8739a02982fef5b | /products/migrations/0001_initial.py | 52206e033c621000eab11345b9ee52cec63ffe2a | [] | no_license | RogueAngelfire/vixens_Vegan_delights_v1 | 4fc1cda905f8c1bf9ff3c808eb83e1b8d7f46e2d | 5ce73871e486a585e5f967ecbb9ed3a11be088ad | refs/heads/master | 2022-12-23T00:03:27.522172 | 2020-09-30T16:35:02 | 2020-09-30T16:35:02 | 294,390,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | # Generated by Django 3.1.1 on 2020-09-29 14:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the products app: Category and Product tables.

    Auto-generated by Django 3.1.1 (makemigrations); avoid hand-editing
    once it has been applied to a shared database.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=245)),
                # NOTE(review): field name is misspelled ("freindly_name")
                # in the model; the migration must match it.
                ('freindly_name', models.CharField(blank=True, max_length=254, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sku', models.CharField(blank=True, max_length=254, null=True)),
                ('name', models.CharField(max_length=254)),
                ('description', models.TextField()),
                ('price', models.DecimalField(decimal_places=2, max_digits=6)),
                ('rating', models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True)),
                ('image_url', models.URLField(blank=True, max_length=1024, null=True)),
                ('image', models.ImageField(blank=True, null=True, upload_to='')),
                # Products keep existing if their category is deleted.
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.category')),
            ],
        ),
    ]
| [
"info@robincollins.co.uk"
] | info@robincollins.co.uk |
6a9a936b2b1e8268f6ac049692741576ac3653e7 | cc8b3a58189b7292c6e097783f42a93d925154df | /semana3/mvc/controllers/visitas.py | 01e2a26a6b3a53d74eaec075d56290ba2f175765 | [] | no_license | Jesus-Manuel-Huerta-Najera/repl | 3b9cd2ef8d4b16615284ef3a5ba4f0c81d55d62d | 26b83ad8f69081958d1789366faf8dc9b49d8254 | refs/heads/master | 2022-08-21T15:56:52.314090 | 2020-05-27T00:04:34 | 2020-05-27T00:04:34 | 267,140,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | import web
from datetime import date
from datetime import datetime
class Visitas:
    """web.py controller that tracks per-browser visit statistics in cookies.

    Stores the visit counter, the visitor name, the date and the hour of day
    in cookies and echoes them back as a plain-text summary.
    """

    def GET(self, name):
        """Handle GET: bump the visit counter and return a summary string.

        ``name`` comes from the URL route; a blank name falls back to
        "Anónimo".  Any failure yields an "Error..." string (best-effort
        behaviour preserved from the original).
        """
        try:
            cookie = web.cookies()
            today = date.today()
            now = datetime.now().hour  # hour of day only, as before
            web.setcookie("Hora", str(now), expires="", domain=None)
            web.setcookie("Fecha", str(today), expires="", domain=None)
            if not name:
                name = "Anónimo"
            web.setcookie("name", name, expires="", domain=None)
            # Bug fix: the original stored "0" on the first visit while
            # displaying 1, so the second visit displayed 1 again.  Store
            # exactly the value we display so the counter advances by one
            # per request: 1, 2, 3, ...
            if cookie.get("visitas"):
                visitas = int(cookie.get("visitas")) + 1
            else:
                visitas = 1
            web.setcookie("visitas", str(visitas), expires="", domain=None)
            return "Visitas: " + str(visitas) + ", Nombre: " + name + ", Fecha: " + str(today) + ",Hora: " + str(now)
        except Exception as e:
            return "Error" + str(e.args)
"replituser@example.com"
] | replituser@example.com |
35182ec13a455ebdc11c9376fb9efeff112588ec | 18c0123deb1543dc8d4b329ee4fe8ced6ac74fdb | /icdscraper/icdscraper/settings.py | f6f442373d15f656e351bcb6f0fc5d357e195550 | [] | no_license | cabbysaurus/MimicProject | 45c4030c67af0f98be29b565e2bf7efa66f7c583 | 129b89d26be9dedf8755ce94d0d10e64c0dabf9e | refs/heads/master | 2021-06-27T19:59:35.625868 | 2017-09-17T18:56:11 | 2017-09-17T18:56:11 | 103,061,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,168 | py | # -*- coding: utf-8 -*-
# Scrapy settings for icdscraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Bot identity: reported in logs and used in the default User-Agent token.
BOT_NAME = 'icdscraper'
# Where Scrapy looks for spider classes / where `genspider` creates new ones.
SPIDER_MODULES = ['icdscraper.spiders']
NEWSPIDER_MODULE = 'icdscraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'icdscraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'icdscraper.middlewares.IcdscraperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'icdscraper.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'icdscraper.pipelines.IcdscraperPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"noreply@github.com"
] | cabbysaurus.noreply@github.com |
9bc080ab237167122f2eec6e5f9195eff14595a0 | 4a2e472d9c97a3e36f3fbf5a5f3011512f0dd41f | /project.py | 05676b30b14c8986d7be455339520002d2fd8778 | [] | no_license | Priyansh99/Sentiment-analysis | 0d93fc4a7fec0ca8b4f2234ba8897231e905ca3e | b7e433e8edca4efc21d148662125b92872e102f8 | refs/heads/master | 2020-08-03T09:09:29.329841 | 2019-09-29T17:08:44 | 2019-09-29T17:08:44 | 211,696,447 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from tkinter import *
def spam():
    # Close the menu window, then switch to the spam-detection screen by
    # importing spam.py (presumably it builds its own UI at import time --
    # TODO confirm; repeated clicks would hit Python's import cache).
    root.destroy()
    import spam
def review():
    # Close the menu window, then switch to the review-analysis screen by
    # importing review.py (presumably it builds its own UI at import time --
    # TODO confirm; repeated clicks would hit Python's import cache).
    root.destroy()
    import review
# Main menu window for the sentiment-analysis demo.
root = Tk()
root.state('zoomed')  # start maximised ('zoomed' is platform-dependent)
root.resizable(width=False, height=False)
root.configure(background='yellow')
l = Label(root, text='Sentiment Analysis', bg='yellow', font=('', 40, 'bold'))
l.pack()
# Buttons hand control to the two sub-screens (see spam()/review() above).
b1 = Button(root, command=spam, text='Spam Detection', font=('', 20, 'bold'))
b1.place(x=300, y=100)
b2 = Button(root, command=review, text='Review Analysis', font=('', 20, 'bold'))
b2.place(x=300, y=200)
root.mainloop()  # blocks until the window is destroyed
| [
"priyanshumishra04@gmail.com"
] | priyanshumishra04@gmail.com |
dc3d0fdd27576ef10a85831c24b6499af822fcaa | 6d874f49ce97919c5345ff64f235ab84f28b6261 | /DjangoStaticProject/manage.py | 1f231b6cbfe7f088d00751649803ec2943163562 | [] | no_license | SarikaKakulate/djangoprojectclass | 55cb40fda1735129257ffd8184c96e4b97a1bb92 | 911109884f164800e5d91b0cf0e5f5dd723555db | refs/heads/main | 2023-04-17T13:10:00.601831 | 2021-05-01T06:59:32 | 2021-05-01T06:59:32 | 363,334,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: select the settings module and dispatch to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoStaticProject.settings')
    try:
        # Imported lazily so a missing Django yields the friendly hint below.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Run the management CLI only when executed directly, not when imported.
    main()
| [
"kakulatesarika144@gmail.com"
] | kakulatesarika144@gmail.com |
96040baf52fe08f173c22cd5fc1e1d079f214044 | 1113e8eec4ccbbcd00c6a9b5466c5239b6f0eb03 | /cpos/esb/basic/config.py | 78b827c6fc657e2dc476c3ac36a560c47302c568 | [] | no_license | yizhong120110/CPOS | a05858c84e04ce4aa48b3bfb43ee49264ffc5270 | 68ddf3df6d2cd731e6634b09d27aff4c22debd8e | refs/heads/master | 2021-09-01T17:59:53.802095 | 2017-12-28T05:43:06 | 2017-12-28T05:43:06 | 106,247,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | # -*- coding: utf-8 -*-
#1、读取config目录下有哪些文件
#2、将文件列表中的config_local单独拿出来
#3、取出每个文件中的固定名称的列表
#4、将列表中的变量绑定到settings上
#5、不能有重复的变量出现
import os
import glob
import copy
from cpos.foundation.conf.core import Settings ,settings
def get_settings(fileslst, modulepath, globalsettings):
    """Register every config_* module from *fileslst* onto *globalsettings*.

    Modules are processed in list order, except that ``config_local`` (if
    present) is deferred to the very end so it can deliberately override
    values from the other modules.  A duplicate variable name raises
    RuntimeError, unless the duplicate comes from ``config_local``, in which
    case it overwrites with a warning print.

    Returns the (mutated) *globalsettings* object.
    """
    cfg_local = []
    mod_reg_lst = []
    for filepath in fileslst:
        mod_reg = os.path.split(filepath)[-1].split('.')[0]
        if mod_reg == "config_local":
            # Defer config_local so it is always processed last.
            cfg_local = [(mod_reg, filepath)]
        else:
            mod_reg_lst.append((mod_reg, filepath))
    mod_reg_lst += cfg_local
    for mod_reg, filepath in mod_reg_lst:
        tt = Settings()
        modname = '%s.%s' % (modulepath, mod_reg)
        print("注册配置模块", modname)
        # Snapshot the names present before registration; the set difference
        # afterwards is exactly what this module contributed.  (A plain set()
        # copy suffices -- the previous deepcopy of a set of strings was
        # wasted work.)
        bef_reg = set(tt._dict.keys())
        tt.register(modname)
        regi_objs = set(tt._dict.keys()) - bef_reg
        for var_tt in regi_objs:
            # Bug fix: membership must be tested with `in`, not .get()
            # truthiness -- otherwise duplicate names holding falsy values
            # (0, '', None, False) slipped through undetected.
            if var_tt in globalsettings._dict:
                if mod_reg != "config_local":
                    raise RuntimeError("【%s】中的【%s】变量名重复,请检查修改" % (modname, var_tt))
                else:
                    print("【%s】中的【%s】变量名重复,将直接覆盖" % (modname, var_tt))
            globalsettings._dict[var_tt] = tt._dict.get(var_tt)
    return globalsettings
# Collect every configs/config_*.py that sits next to this module and merge
# them into the shared settings object (config_local, if present, last).
fileslst = glob.glob(os.path.abspath(os.path.join(os.path.split(__file__)[0],'configs', 'config_*.py')))
settings = get_settings(fileslst, 'cpos.esb.basic.configs', settings)
| [
"yizhong120110@gmail.com"
] | yizhong120110@gmail.com |
e3e23f21af215bc81eac4c178539e42f010040b5 | 9d6f975f28e6fd3492fd7f005960e34880c413bf | /Modules/CameraModule.py | d2911b312122fafdaa3dcecf455641529bcfec0a | [] | no_license | OmarMaysour/RoadEye | 67cbfcdc63f6d1935168c8001f422885a9e06468 | c9c06eb251f1740a396584af55473819b133b547 | refs/heads/master | 2020-04-24T05:42:16.382943 | 2019-06-19T16:35:13 | 2019-06-19T16:35:13 | 171,739,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | import cv2
def get_camera_module_instance():
    """Return the shared _CameraModule, lazily creating it on first call."""
    instance = _CameraModule._CameraModuleInstance
    if instance is None:
        instance = _CameraModule()
        _CameraModule._CameraModuleInstance = instance
    return instance
class _CameraModule:
    """Video-source wrapper around cv2.VideoCapture.

    Not instantiated directly: use get_camera_module_instance(), which keeps
    the single shared instance in _CameraModuleInstance.
    """

    _CameraModuleInstance = None  # singleton slot managed by the factory above

    def __init__(self):
        # NOTE(review): the source is a hard-coded file, not a live camera feed.
        self._cap = cv2.VideoCapture('video.mp4')
        # self._check_video_source()

    def __del__(self):
        # Release the capture handle only if it is still open.
        if not self._cap.isOpened():
            return
        self._cap.release()

    def _check_video_source(self):
        # Currently unused -- the call in __init__ is commented out.
        if not self._cap.isOpened():
            raise Exception("Unable to access camera")

    def get_frame(self):
        # read() returns (success_flag, frame); on a failed read the frame is
        # None and is passed through to the caller unchanged.
        success, frame = self._cap.read()
        return frame
| [
"omarmaysour@hotmail.com"
] | omarmaysour@hotmail.com |
083e2022c505be71b9ea2449a1859cd906422d79 | 23af0bfdbcb369e38aef3b8e0a158a6471a1dbb1 | /Aula21/ex12.py | 6a6e86843faaf43e1589d9d6da257c3055e1ffd3 | [
"MIT"
] | permissive | danicon/MD3-Curso_Python | 334bbf80a2e17c33f1f86bb20fe26f763cdc4a33 | 3d419d440d3b28adb5c019268f4b217e7d0ce45a | refs/heads/main | 2023-02-26T16:26:02.898775 | 2021-02-01T14:51:44 | 2021-02-01T14:51:44 | 322,028,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | def ficha(jog='<desconhecido>', gol=0):
print(f'O jogador {jog} fez {gol} gol(s) no campeonato.')
# Read the player's name and goal count as raw strings so blank or
# non-numeric input can be handled gracefully instead of raising.
n = str(input('Nome do Jogador: '))
g = str(input('Número de Gols: '))
if g.isnumeric():
    g = int(g)
else:
    # Non-numeric (or empty) goal input falls back to 0.
    g = 0
if n.strip() == '':
    # Blank name -> let ficha() use its '<desconhecido>' default.
    ficha(gol=g)
else:
    ficha(n, g)
"dancon.alferes@gmail.com"
] | dancon.alferes@gmail.com |
5f9814efef2e2136dbba11dd27dae9788c25efb8 | bc441bb06b8948288f110af63feda4e798f30225 | /database_delivery_sdk/api/dbservice/batch_update_owner_pb2.py | 4e540301020980bc3b732b5946f5d4b94e5de433 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 7,992 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: batch_update_owner.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='batch_update_owner.proto',
package='dbservice',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x18\x62\x61tch_update_owner.proto\x12\tdbservice\x1a\x1bgoogle/protobuf/empty.proto\"\xc1\x01\n BatchUpdateDBServiceOwnerRequest\x12U\n\x10update_dbservice\x18\x01 \x01(\x0b\x32;.dbservice.BatchUpdateDBServiceOwnerRequest.UpdateDbservice\x1a\x46\n\x0fUpdateDbservice\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x16\n\x0einstanceIdList\x18\x02 \x03(\t\x12\r\n\x05owner\x18\x03 \x03(\t\"\x82\x01\n(BatchUpdateDBServiceOwnerResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12$\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
# Generated message descriptors (do not edit; regenerate from the .proto).
# UpdateDbservice: nested message carrying type + instanceIdList + owner.
_BATCHUPDATEDBSERVICEOWNERREQUEST_UPDATEDBSERVICE = _descriptor.Descriptor(
  name='UpdateDbservice',
  full_name='dbservice.BatchUpdateDBServiceOwnerRequest.UpdateDbservice',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='dbservice.BatchUpdateDBServiceOwnerRequest.UpdateDbservice.type', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='instanceIdList', full_name='dbservice.BatchUpdateDBServiceOwnerRequest.UpdateDbservice.instanceIdList', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='owner', full_name='dbservice.BatchUpdateDBServiceOwnerRequest.UpdateDbservice.owner', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=192,
  serialized_end=262,
)
# BatchUpdateDBServiceOwnerRequest: wraps a single UpdateDbservice payload.
_BATCHUPDATEDBSERVICEOWNERREQUEST = _descriptor.Descriptor(
  name='BatchUpdateDBServiceOwnerRequest',
  full_name='dbservice.BatchUpdateDBServiceOwnerRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='update_dbservice', full_name='dbservice.BatchUpdateDBServiceOwnerRequest.update_dbservice', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_BATCHUPDATEDBSERVICEOWNERREQUEST_UPDATEDBSERVICE, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=69,
  serialized_end=262,
)
# BatchUpdateDBServiceOwnerResponseWrapper: standard code/explain/error
# envelope with an empty data payload.
_BATCHUPDATEDBSERVICEOWNERRESPONSEWRAPPER = _descriptor.Descriptor(
  name='BatchUpdateDBServiceOwnerResponseWrapper',
  full_name='dbservice.BatchUpdateDBServiceOwnerResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='dbservice.BatchUpdateDBServiceOwnerResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='dbservice.BatchUpdateDBServiceOwnerResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='dbservice.BatchUpdateDBServiceOwnerResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='dbservice.BatchUpdateDBServiceOwnerResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=265,
  serialized_end=395,
)
# Wire up cross-references between descriptors, then register them and build
# the concrete message classes (generated boilerplate; do not edit by hand).
_BATCHUPDATEDBSERVICEOWNERREQUEST_UPDATEDBSERVICE.containing_type = _BATCHUPDATEDBSERVICEOWNERREQUEST
_BATCHUPDATEDBSERVICEOWNERREQUEST.fields_by_name['update_dbservice'].message_type = _BATCHUPDATEDBSERVICEOWNERREQUEST_UPDATEDBSERVICE
_BATCHUPDATEDBSERVICEOWNERRESPONSEWRAPPER.fields_by_name['data'].message_type = google_dot_protobuf_dot_empty__pb2._EMPTY
DESCRIPTOR.message_types_by_name['BatchUpdateDBServiceOwnerRequest'] = _BATCHUPDATEDBSERVICEOWNERREQUEST
DESCRIPTOR.message_types_by_name['BatchUpdateDBServiceOwnerResponseWrapper'] = _BATCHUPDATEDBSERVICEOWNERRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BatchUpdateDBServiceOwnerRequest = _reflection.GeneratedProtocolMessageType('BatchUpdateDBServiceOwnerRequest', (_message.Message,), {
  'UpdateDbservice' : _reflection.GeneratedProtocolMessageType('UpdateDbservice', (_message.Message,), {
    'DESCRIPTOR' : _BATCHUPDATEDBSERVICEOWNERREQUEST_UPDATEDBSERVICE,
    '__module__' : 'batch_update_owner_pb2'
    # @@protoc_insertion_point(class_scope:dbservice.BatchUpdateDBServiceOwnerRequest.UpdateDbservice)
    })
  ,
  'DESCRIPTOR' : _BATCHUPDATEDBSERVICEOWNERREQUEST,
  '__module__' : 'batch_update_owner_pb2'
  # @@protoc_insertion_point(class_scope:dbservice.BatchUpdateDBServiceOwnerRequest)
  })
_sym_db.RegisterMessage(BatchUpdateDBServiceOwnerRequest)
_sym_db.RegisterMessage(BatchUpdateDBServiceOwnerRequest.UpdateDbservice)
BatchUpdateDBServiceOwnerResponseWrapper = _reflection.GeneratedProtocolMessageType('BatchUpdateDBServiceOwnerResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _BATCHUPDATEDBSERVICEOWNERRESPONSEWRAPPER,
  '__module__' : 'batch_update_owner_pb2'
  # @@protoc_insertion_point(class_scope:dbservice.BatchUpdateDBServiceOwnerResponseWrapper)
  })
_sym_db.RegisterMessage(BatchUpdateDBServiceOwnerResponseWrapper)
| [
"service@easyops.cn"
] | service@easyops.cn |
7985a7769558cbe54367b70678bdb6c4863410cf | 3e33d1cf28d537dbe97f4e93ed9a5c866da8c89e | /backend/mysite/settings.py | abf968b65e2a4bd8ad3a32ad5eee2f9540210101 | [
"MIT"
] | permissive | alantanlc/ntucourses | 69a03b04c158a296c44f98cc7491422cec7f5bfc | eb3de4912aafc3c4326c04bcb3da64c39a3079e8 | refs/heads/master | 2023-08-11T09:08:49.982124 | 2020-09-20T06:18:32 | 2020-09-20T06:18:32 | 244,103,674 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,595 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
from dotenv import load_dotenv
load_dotenv()  # pull environment overrides from a local .env file

import os
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the 'mysecretkey' fallback must never reach production --
# ensure SECRET_KEY is always set in deployed environments.
SECRET_KEY = os.getenv('SECRET_KEY', 'mysecretkey')

# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG is enabled only when ENVIRONMENT is explicitly set to something other
# than 'PROD'; an unset variable defaults to production (DEBUG = False).
DEBUG = os.getenv('ENVIRONMENT', 'PROD') != 'PROD'

# NOTE(review): '*' accepts any Host header; restrict to real hostnames in prod.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'scraping',
    'api',
    'corsheaders',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware sits above CommonMiddleware (required ordering per the
    # django-cors-headers docs -- confirm when upgrading that package).
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],  # no project-level template dirs; app templates only
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# All connection parameters come from the environment (see .env); the
# literal fallbacks are development placeholders only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.getenv('HOST', 'localhost'),
        'PORT': os.getenv('PORT', '5432'),
        'NAME': os.getenv('NAME', 'mydatabasename'),
        'USER': os.getenv('DATABASE_USER', 'mydatabaseuser'),
        'PASSWORD': os.getenv('PASSWORD', 'mydatabasepassword'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = './static/'
# NOTE(review): allows cross-origin requests from ANY origin -- acceptable
# for an open API, otherwise replace with an explicit origin whitelist.
CORS_ORIGIN_ALLOW_ALL = True
"alanwuha91@gmail.com"
] | alanwuha91@gmail.com |
7bb235a4ddc86182a4ee0c65f798d97c9a6d1a5d | 9c782acda72eb104b8830c7223fff107329bf64e | /S12/Assignment B/model/MyResNet.py | 5eb1e87e52634f917b5d171c2203f25d86961d9c | [] | no_license | kavyashruthi/EVA4-Assignment | b1ca650b960116756926878ff52352a80271273e | 96ad6a8f1f93353608e7e739d71d4cc34caea1c0 | refs/heads/master | 2022-07-05T20:02:17.184362 | 2020-05-11T15:16:07 | 2020-05-11T15:16:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class MyResNet(nn.Module):
    """ResNet-9 style image classifier: prep -> layer1 -> layer2 -> layer3 -> pool -> 1x1 conv head.

    Designed for 32x32 RGB inputs (e.g. CIFAR-10): the final MaxPool2d(4, 4)
    assumes the feature map has been downsampled to 4x4 by that point.
    Returns per-class log-probabilities of shape (N, num_classes).

    Args:
        num_classes: size of the classification head.  Defaults to 10,
            matching the previously hard-coded output width, so existing
            callers are unaffected.
    """

    def __init__(self, num_classes=10):
        super(MyResNet, self).__init__()
        self.num_classes = num_classes
        self.prepLayer = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=(3, 3), stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        self.layer1 = nn.Sequential(self.cmbr(64, 128), self.resblk(128, 128))
        self.layer2 = self.cmbr(128, 256)
        self.layer3 = nn.Sequential(self.cmbr(256, 512), self.resblk(512, 512))
        self.maxpool = nn.MaxPool2d(4, 4)
        # 1x1 convolution acts as the fully connected classification head.
        self.fc = nn.Conv2d(in_channels=512, out_channels=num_classes, kernel_size=(1, 1), bias=False, padding=0, stride=1)

    def cmbr(self, in_channels, out_channels):
        """Conv -> MaxPool -> BatchNorm -> ReLU downsampling block (halves H and W)."""
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3), bias=False, padding=1, stride=1),
            nn.MaxPool2d(2, 2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def resblk(self, in_channels, out_channels):
        """Two sequential 3x3 conv+BN+ReLU layers (note: no skip connection is added).

        Bug fix: the second convolution now consumes ``out_channels`` -- the
        original declared ``in_channels`` for it, which only happened to work
        because every call site passes in_channels == out_channels and would
        crash otherwise.
        """
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3), padding=1, stride=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 3), padding=1, stride=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, x):
        """x: (N, 3, 32, 32) float tensor -> (N, num_classes) log-softmax scores."""
        x = self.prepLayer(x)   # 32x32, 64ch
        x = self.layer1(x)      # 16x16, 128ch
        x = self.layer2(x)      # 8x8, 256ch
        x = self.layer3(x)      # 4x4, 512ch
        x = self.maxpool(x)     # 1x1, 512ch
        x = self.fc(x)          # 1x1, num_classes ch
        x = x.view(-1, self.num_classes)
        return F.log_softmax(x, dim=-1)
"noreply@github.com"
] | kavyashruthi.noreply@github.com |
d6dd47b7030374c7eff40b219ce7bd921666948d | df69498e7a5ab7ddea1ff32b59c99ac38ebdeb43 | /bfs.py | 2c289796889bf92741e2c7cd06e777bbf2d2223f | [] | no_license | ajeetjaiswal02/Leetcode-Codes | 453a34b2e14e4e769faa132b7616c9873b576eb1 | ca24dc91bf5adccb46be082a2c9dce4b949da72c | refs/heads/master | 2023-08-25T05:43:22.857879 | 2021-10-01T06:12:11 | 2021-10-01T06:12:11 | 288,520,504 | 2 | 3 | null | 2021-10-01T06:12:11 | 2020-08-18T17:28:28 | Python | UTF-8 | Python | false | false | 483 | py | graph = {
'A' : ['B','C'],
'B' : ['D', 'E'],
'C' : ['F'],
'D' : [],
'E' : ['F'],
'F' : []
}
# Traversal bookkeeping shared with the bfs() call below.
seen = [] # List to keep track of seen nodes.
queue = [] #Initialize a queue
def bfs(seen, graph, node):
    """Breadth-first traversal of *graph* starting at *node*.

    Prints each node followed by a space in visit order, and appends the
    same order to the caller-supplied *seen* list.

    Fixes: the original depended on a module-global ``queue`` list and used
    ``list.pop(0)`` (O(n) per dequeue); a local collections.deque gives an
    O(1) FIFO and removes the hidden global state.
    """
    from collections import deque
    frontier = deque([node])
    seen.append(node)
    while frontier:
        current = frontier.popleft()
        print(current, end=" ")
        for neighbour in graph[current]:
            if neighbour not in seen:
                seen.append(neighbour)
                frontier.append(neighbour)
# Driver Code -- for the graph above this prints: A B C D E F
bfs(seen, graph, 'A')
"ajeetjaiswal103@gmail.com"
] | ajeetjaiswal103@gmail.com |
5d1d22ba7b3b602bf22d05ab2c1be226b4dfff18 | c53b3e120c59557daaa2fa5b7626413105eb5965 | /tendenci/apps/base/templatetags/smart_if.py | a39097fef611e17962bb40c2b05603b307facd68 | [] | no_license | chendong0444/ams | 8483334d9b687708d533190b62c1fa4fd4690f2c | f2ac4ecc076b223c262f2cde4fa3b35b4a5cd54e | refs/heads/master | 2021-05-01T03:59:18.682836 | 2018-07-23T06:33:41 | 2018-07-23T06:33:41 | 121,194,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,844 | py | """
A smarter {% if %} tag for django templates.
While retaining current Django functionality, it also handles equality,
greater than and less than operators. Some common case examples::
{% if articles|length >= 5 %}...{% endif %}
{% if "ifnotequal tag" != "beautiful" %}...{% endif %}
"""
import unittest
from django import template
register = template.Library()
#==============================================================================
# Calculation objects
#==============================================================================
class BaseCalc(object):
    """Base for binary (or unary) template-expression calculations.

    ``var1``/``var2`` are template-variable-like objects exposing
    ``resolve(context)``; ``var2`` may be None for unary operations.
    ``negate`` inverts the outcome, implementing the ``not`` keyword.
    """

    def __init__(self, var1, var2=None, negate=False):
        self.var1 = var1
        self.var2 = var2
        self.negate = negate

    def resolve(self, context):
        try:
            var1, var2 = self.resolve_vars(context)
            outcome = self.calculate(var1, var2)
        except Exception:
            # Any resolution/calculation failure counts as False, mirroring
            # lenient template semantics.  Narrowed from a bare ``except:``
            # so KeyboardInterrupt/SystemExit are no longer swallowed.
            outcome = False
        if self.negate:
            return not outcome
        return outcome

    def resolve_vars(self, context):
        # ``self.var2 and ...`` leaves var2 as None for unary calculations.
        var2 = self.var2 and self.var2.resolve(context)
        return self.var1.resolve(context), var2

    def calculate(self, var1, var2):
        raise NotImplementedError()
class Or(BaseCalc):
    # Short-circuit OR: returns var1 if truthy, else var2 (the operand
    # itself, not a bool).  Also used as the unary identity/NOT wrapper
    # with var2 left as None.
    def calculate(self, var1, var2):
        return var1 or var2
class And(BaseCalc):
    # Short-circuit AND: returns var1 if falsy, else var2 (the operand
    # itself, not a bool).
    def calculate(self, var1, var2):
        return var1 and var2
class Equals(BaseCalc):
    # Backs the '=' and '==' tokens; '!=' is this class with negate=True.
    def calculate(self, var1, var2):
        return var1 == var2
class Greater(BaseCalc):
    # Backs '>'; '<=' is this class with negate=True.
    def calculate(self, var1, var2):
        return var1 > var2
class GreaterOrEqual(BaseCalc):
    # Backs '>='; '<' is this class with negate=True.
    def calculate(self, var1, var2):
        return var1 >= var2
class In(BaseCalc):
    # Membership test; a non-container var2 (e.g. None) raises inside
    # calculate and is turned into False by BaseCalc.resolve.
    def calculate(self, var1, var2):
        return var1 in var2
#==============================================================================
# Tests
#==============================================================================
class TestVar(object):
    """
    A basic self-resolvable object similar to a Django template variable. Used
    to assist with tests.
    """
    def __init__(self, value):
        self.value = value

    def resolve(self, context):
        # The context is ignored; the wrapped literal is returned as-is.
        return self.value
class SmartIfTests(unittest.TestCase):
    # Exercises the calculation objects and IfParser directly; no Django
    # template rendering is involved.  (Uses the Python 2 assert_ alias.)

    def setUp(self):
        self.true = TestVar(True)
        self.false = TestVar(False)
        self.high = TestVar(9000)
        self.low = TestVar(1)

    def assertCalc(self, calc, context=None):
        """
        Test a calculation is True, also checking the inverse "negate" case.
        """
        context = context or {}
        self.assert_(calc.resolve(context))
        calc.negate = not calc.negate
        self.assertFalse(calc.resolve(context))

    def assertCalcFalse(self, calc, context=None):
        """
        Test a calculation is False, also checking the inverse "negate" case.
        """
        context = context or {}
        self.assertFalse(calc.resolve(context))
        calc.negate = not calc.negate
        self.assert_(calc.resolve(context))

    def test_or(self):
        self.assertCalc(Or(self.true))
        self.assertCalcFalse(Or(self.false))
        self.assertCalc(Or(self.true, self.true))
        self.assertCalc(Or(self.true, self.false))
        self.assertCalc(Or(self.false, self.true))
        self.assertCalcFalse(Or(self.false, self.false))

    def test_and(self):
        self.assertCalc(And(self.true, self.true))
        self.assertCalcFalse(And(self.true, self.false))
        self.assertCalcFalse(And(self.false, self.true))
        self.assertCalcFalse(And(self.false, self.false))

    def test_equals(self):
        self.assertCalc(Equals(self.low, self.low))
        self.assertCalcFalse(Equals(self.low, self.high))

    def test_greater(self):
        self.assertCalc(Greater(self.high, self.low))
        self.assertCalcFalse(Greater(self.low, self.low))
        self.assertCalcFalse(Greater(self.low, self.high))

    def test_greater_or_equal(self):
        self.assertCalc(GreaterOrEqual(self.high, self.low))
        self.assertCalc(GreaterOrEqual(self.low, self.low))
        self.assertCalcFalse(GreaterOrEqual(self.low, self.high))

    def test_in(self):
        list_ = TestVar([1,2,3])
        invalid_list = TestVar(None)
        self.assertCalc(In(self.low, list_))
        self.assertCalcFalse(In(self.low, invalid_list))

    def test_parse_bits(self):
        # Token-level parsing: literals, not-chains, comparisons, precedence.
        var = IfParser([True]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'or', True]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([False, 'and', True]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser(['not', False, 'and', 'not', False]).parse()
        self.assert_(var.resolve({}))
        var = IfParser(['not', 'not', True]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([1, '=', 1]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([1, 'not', '=', 1]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([1, 'not', 'not', '=', 1]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([1, '!=', 1]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([3, '>', 2]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([1, '<', 2]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([2, 'not', 'in', [2, 3]]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([1, 'or', 1, '=', 2]).parse()
        self.assert_(var.resolve({}))

    def test_boolean(self):
        # Left-associative chains of and/or.
        var = IfParser([True, 'and', True, 'and', True]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([False, 'or', False, 'or', True]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([True, 'and', False, 'or', True]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([False, 'or', True, 'and', True]).parse()
        self.assert_(var.resolve({}))
        var = IfParser([True, 'and', True, 'and', False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'or', False, 'or', False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'or', True, 'and', False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'and', True, 'or', False]).parse()
        self.assertFalse(var.resolve({}))

    def test_invalid(self):
        # Malformed token streams must raise the parser's error class.
        self.assertRaises(ValueError, IfParser(['not']).parse)
        self.assertRaises(ValueError, IfParser(['==']).parse)
        self.assertRaises(ValueError, IfParser([1, 'in']).parse)
        self.assertRaises(ValueError, IfParser([1, '>', 'in']).parse)
        self.assertRaises(ValueError, IfParser([1, '==', 'not', 'not']).parse)
        self.assertRaises(ValueError, IfParser([1, 2]).parse)
# Token -> (calculation class, truth sense).  A False truth sense means the
# operator is realised as the *negation* of the mapped class: e.g. '<' is
# implemented as "not GreaterOrEqual" and '!=' as "not Equals".
OPERATORS = {
    '=': (Equals, True),
    '==': (Equals, True),
    '!=': (Equals, False),
    '>': (Greater, True),
    '>=': (GreaterOrEqual, True),
    '<=': (Greater, False),
    '<': (GreaterOrEqual, False),
    'or': (Or, True),
    'and': (And, True),
    'in': (In, True),
}
# Lower-precedence connectives; comparisons bind tighter (see get_bool_var).
BOOL_OPERATORS = ('or', 'and')
class IfParser(object):
    # Recursive-descent parser over an already-tokenised expression.
    # Grammar (informal):
    #   expr     := bool_var (('and'|'or') bool_var)*     -- left associative
    #   bool_var := var (comparison_op var)?
    #   var      := 'not'* token
    error_class = ValueError

    def __init__(self, tokens):
        self.tokens = tokens

    def _get_tokens(self):
        return self._tokens

    def _set_tokens(self, tokens):
        # Assigning to .tokens also caches len() and resets the cursor.
        self._tokens = tokens
        self.len = len(tokens)
        self.pos = 0

    tokens = property(_get_tokens, _set_tokens)

    def parse(self):
        if self.at_end():
            raise self.error_class('No variables provided.')
        var1 = self.get_bool_var()
        while not self.at_end():
            # Fold each "op rhs" pair into the running calculation,
            # giving left-associative and/or chains.
            op, negate = self.get_operator()
            var2 = self.get_bool_var()
            var1 = op(var1, var2, negate=negate)
        return var1

    def get_token(self, eof_message=None, lookahead=False):
        # Consumes any run of leading 'not' tokens plus the following real
        # token.  ``negate`` starts True and is flipped once per consumed
        # token (including the final non-'not' one), so zero 'not's yield
        # negate=False, one yields True, two yields False, etc.
        negate = True
        token = None
        pos = self.pos
        while token is None or token == 'not':
            if pos >= self.len:
                if eof_message is None:
                    raise self.error_class()
                raise self.error_class(eof_message)
            token = self.tokens[pos]
            negate = not negate
            pos += 1
        if not lookahead:
            # Only commit the cursor when actually consuming the tokens.
            self.pos = pos
        return token, negate

    def at_end(self):
        return self.pos >= self.len

    def create_var(self, value):
        # Hook point: subclasses can wrap tokens in their own variable type.
        return TestVar(value)

    def get_bool_var(self):
        """
        Returns either a variable by itself or a non-boolean operation (such as
        ``x == 0`` or ``x < 0``).

        This is needed to keep correct precedence for boolean operations (i.e.
        ``x or x == 0`` should be ``x or (x == 0)``, not ``(x or x) == 0``).
        """
        var = self.get_var()
        if not self.at_end():
            # Peek: a comparison operator binds to this variable now; an
            # and/or is left for parse() to handle at lower precedence.
            op_token = self.get_token(lookahead=True)[0]
            if isinstance(op_token, basestring) and (op_token not in
                                                     BOOL_OPERATORS):
                op, negate = self.get_operator()
                return op(var, self.get_var(), negate=negate)
        return var

    def get_var(self):
        token, negate = self.get_token('Reached end of statement, still '
                                       'expecting a variable.')
        if isinstance(token, basestring) and token in OPERATORS:
            raise self.error_class('Expected variable, got operator (%s).' %
                                   token)
        var = self.create_var(token)
        if negate:
            # An odd number of 'not's: wrap in a negated single-operand Or,
            # which resolves to ``not var``.
            return Or(var, negate=True)
        return var

    def get_operator(self):
        token, negate = self.get_token('Reached end of statement, still '
                                       'expecting an operator.')
        if not isinstance(token, basestring) or token not in OPERATORS:
            raise self.error_class('%s is not a valid operator.' % token)
        if self.at_end():
            raise self.error_class('No variable provided after "%s".' % token)
        op, true = OPERATORS[token]
        if not true:
            # Operators with a False truth sense (e.g. '!=', '<') are the
            # negation of the mapped class; fold that into ``negate``.
            negate = not negate
        return op, negate
#==============================================================================
# Actual templatetag code.
#==============================================================================
class TemplateIfParser(IfParser):
    """IfParser wired into Django's template system.

    Raises TemplateSyntaxError instead of ValueError and compiles raw
    tokens into FilterExpressions via the template parser.
    """

    error_class = template.TemplateSyntaxError

    def __init__(self, parser, *args, **kwargs):
        # Keep the template parser so create_var() can compile filters.
        self.template_parser = parser
        # Fix: the original ``return super(...).__init__(...)`` returned a
        # value from __init__; constructors must not return anything.
        super(TemplateIfParser, self).__init__(*args, **kwargs)

    def create_var(self, value):
        """Compile *value* into a resolvable Django FilterExpression."""
        return self.template_parser.compile_filter(value)
class SmartIfNode(template.Node):
    """Template node that renders one of two branches of an {% if %} tag."""

    def __init__(self, var, nodelist_true, nodelist_false=None):
        self.var = var
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false

    def render(self, context):
        """Render the true branch when the condition holds, else the
        false branch (or nothing when there is no {% else %})."""
        if self.var.resolve(context):
            return self.nodelist_true.render(context)
        return self.nodelist_false.render(context) if self.nodelist_false else ''

    def __repr__(self):
        return "<Smart If node>"

    def __iter__(self):
        # Yield the nodes of both branches, true branch first.
        for child in self.nodelist_true:
            yield child
        if self.nodelist_false:
            for child in self.nodelist_false:
                yield child

    def get_nodes_by_type(self, nodetype):
        """Collect this node and all descendants matching *nodetype*."""
        matches = []
        if isinstance(self, nodetype):
            matches.append(self)
        matches.extend(self.nodelist_true.get_nodes_by_type(nodetype))
        if self.nodelist_false:
            matches.extend(self.nodelist_false.get_nodes_by_type(nodetype))
        return matches
@register.tag('if')
def smart_if(parser, token):
    """
    A smarter {% if %} tag for django templates.

    While retaining current Django functionality, it also handles equality,
    greater than and less than operators. Some common case examples::

        {% if articles|length >= 5 %}...{% endif %}
        {% if "ifnotequal tag" != "beautiful" %}...{% endif %}

    Arguments and operators _must_ have a space between them, so
    ``{% if 1>2 %}`` is not a valid smart if tag.

    All supported operators are: ``or``, ``and``, ``in``, ``=`` (or ``==``),
    ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
    """
    # Everything after the tag name is the condition token stream.
    condition = TemplateIfParser(parser, token.split_contents()[1:]).parse()
    true_branch = parser.parse(('else', 'endif'))
    false_branch = None
    if parser.next_token().contents == 'else':
        false_branch = parser.parse(('endif',))
        # Drop the {% endif %} token that terminated the else branch.
        parser.delete_first_token()
    return SmartIfNode(condition, true_branch, false_branch)
# Run this module's unit tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"chendong@shinezone.com"
] | chendong@shinezone.com |
123883e959e4b75ab8ee2008effa0577f7927177 | 846ec53edeaa6ec13ccab9ad3fa9b811ea52f1a5 | /src/aminer/precision/LoadArtifacts.py | e68e9e39057afe5a108679ceead1f99bbf35b760 | [] | no_license | wyxzou/Citations | 05cc42f03836549b64926fa8c47f5831b0ab5ad1 | 7fc4a5dc0551464b4e4cf492363a4ed5bf254222 | refs/heads/master | 2023-08-27T21:48:51.126505 | 2021-10-29T15:47:02 | 2021-10-29T15:47:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | #!/usr/bin/env python
# coding: utf-8
# Script exported from a Jupyter notebook (the "In[n]:" markers are cell
# boundaries). It loads the pre-trained artifacts used downstream: a
# project-local English text processor, a pickled vectorizer and a
# gensim FastText model.

from utility import EnglishTextProcessor

# Text cleaning / normalization helper (project-local).
etp = EnglishTextProcessor()

from gensim.models import FastText

import pickle

# NOTE(review): pickle.load executes arbitrary code if 'vec.model' is
# tampered with — only load artifacts from trusted sources. The open()
# handle is never closed; harmless in a short script but a context
# manager would be cleaner.
vec = pickle.load(open('vec.model', 'rb'))
fasttext = FastText.load('fasttext.model')
"spandan.garg1706@gmail.com"
] | spandan.garg1706@gmail.com |
82cf63aa62ab404f9840bfcd4673805dafb46344 | fa0fa16b894504e1844af3ae3b0991e5a6525d3c | /MorphPlot.py | 9e7cbc9fc21741de9ff36795e891a030b4530bda | [] | no_license | Projet-Guerledan/image-processing | 471d0ac2e9d19ce139a33032a0c23ed6442d6efe | 678e26a4f67ddc65edf5dd16f938f33475a953d1 | refs/heads/master | 2021-01-19T22:07:42.220513 | 2017-02-20T15:31:51 | 2017-02-20T15:31:51 | 82,573,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,916 | py | #
# MorphPlot class
#
# This class simplifies testing of OpenCV morphological operations
# using the cv2.morphologyEx function
#
# Arguments:
# - input: [STRING] input file
# - output: [STRING] output file
# - *optionsKernel: [DICT] kernel types
# - *optionsTransform: [DICT] morphological transform operations
# - kernelMaxSize: [INTEGER] size of transform kernel
# - **nextProcess: reference to next process
#
# * See default_transforms and defaul_kernels for an example of how to declare
# these variables.
#
# ** nextProcess is a reference to the next step. Should be defined by
# setOrder function in TestPlots.py
#
import cv2
import numpy as np
import argparse
from Misc import *
# Default options for transforms and kernel types.
# Each dict maps a trackbar position to [display label, OpenCV constant];
# the constants feed cv2.morphologyEx (transforms) and
# cv2.getStructuringElement (kernel shapes).
default_transforms = {
    0: ['Erode', cv2.MORPH_ERODE],
    1: ['Dilate', cv2.MORPH_DILATE],
    2: ['Open', cv2.MORPH_OPEN],
    3: ['Close', cv2.MORPH_CLOSE]}
    # Extra operation, disabled by default:
    # 4: ['Top-hat', cv2.MORPH_TOPHAT]

default_kernels = {
    0: ['Round', cv2.MORPH_ELLIPSE],
    1: ['Rect', cv2.MORPH_RECT]}
class MorphPlot:
    """Interactive OpenCV window for experimenting with morphological
    transforms (cv2.morphologyEx) driven by trackbars.

    Trackbars select the structuring-element radius and, when more than
    one option is configured, the kernel shape and transform operation.
    Each recomputation is shown in the window, optionally written to
    ``output``, and optionally chained to another stage via
    ``nextProcess`` (set externally, e.g. by setOrder in TestPlots.py
    per the file header).
    """

    #
    # Class constructor
    #
    # NOTE(review): the dict defaults are mutable objects shared across
    # instances; they appear to be read-only here, but confirm before
    # mutating them.
    def __init__(self, input='', output='', optionsKernel=default_kernels, optionsTransform=default_transforms, kernelMaxSize=30, nextProcess=0):
        self.inputImagePath = input
        self.outputImagePath = output
        self.optionsKernel = optionsKernel
        self.optionsTransform = optionsTransform
        # createSwitch comes from the star-import of Misc; presumably it
        # builds the trackbar label from the options dict — verify there.
        self.switchKernel = createSwitch(self.optionsKernel)
        self.switchTransform = createSwitch(self.optionsTransform)
        self.nextProcess = nextProcess
        self.kernelMaxSize = kernelMaxSize
        # initial kernel and transform
        self.kernelType = self.optionsKernel[0][1]
        self.kernel = cv2.getStructuringElement(self.kernelType, (1, 1))
        self.transformType = self.optionsTransform[0][1]

    #
    # Initialise plot and first image
    #
    def init_image(self, winName):
        """Create the named window, its trackbars, and show the first
        transform of the input image (loaded as grayscale)."""
        self.winName = winName

        # create window and trackbars
        cv2.namedWindow(self.winName)
        self.inputImage = cv2.imread(self.inputImagePath, 0)
        cv2.createTrackbar('kernel radius', self.winName, 0, self.kernelMaxSize, self.update)
        # Only add selector trackbars when there is actually a choice.
        if len(self.optionsKernel) > 1:
            cv2.createTrackbar(self.switchKernel, self.winName, 0, len(self.optionsKernel) - 1, self.update)
        if len(self.optionsTransform) > 1:
            cv2.createTrackbar(self.switchTransform, self.winName, 0, len(self.optionsTransform) - 1, self.update)

        # calculate first transform
        self.outputImage = cv2.morphologyEx(self.inputImage, self.transformType, self.kernel)
        if len(self.outputImagePath) != 0:
            cv2.imwrite(self.outputImagePath, self.outputImage)
        cv2.imshow(self.winName, self.outputImage)

    # Update methods called by OpenCV whenever the trackbars are set
    # to a new position
    def update(self, x):
        """Trackbar callback: re-read settings, recompute and redisplay.

        ``x`` is the trackbar value OpenCV passes in; it is ignored —
        all positions are re-queried via getTrackbarPos instead.
        """

        # create and show new image
        def updateImage(kernelType, transformType):
            # +1 so a trackbar position of 0 still yields a 1x1 kernel.
            radius = 1 + cv2.getTrackbarPos('kernel radius', self.winName)
            self.kernel = cv2.getStructuringElement(kernelType, (radius, radius))
            self.outputImage = cv2.morphologyEx(self.inputImage, transformType, self.kernel)
            cv2.imshow(self.winName, self.outputImage)

        # select new kernel
        def updateKernelType(switchKernel, optionsKernel):
            s = cv2.getTrackbarPos(switchKernel, self.winName)
            self.kernelType = optionsKernel[s][1]

        # select new transform type
        def updateTransformType(switchTransform, optionsTransform):
            s = cv2.getTrackbarPos(switchTransform, self.winName)
            self.transformType = optionsTransform[s][1]

        # refresh input image and update
        self.inputImage = cv2.imread(self.inputImagePath, 0)
        if len(self.optionsKernel) > 1:
            updateKernelType(self.switchKernel, self.optionsKernel)
        if len(self.optionsTransform) > 1:
            updateTransformType(self.switchTransform, self.optionsTransform)
        updateImage(self.kernelType, self.transformType)

        # save image to output file
        if len(self.outputImagePath) != 0:
            cv2.imwrite(self.outputImagePath, self.outputImage)

        # call update of next process (if it exists)
        if self.nextProcess != 0:
            self.nextProcess.update(0)
| [
"s.evandro@hotmail.com"
] | s.evandro@hotmail.com |
ab2b03f5d2ec311a01e06d18a2a9f1cfe019ab16 | b140b104b6de0c8a924db008a48d9798e046919e | /byte/backup_ver1.py | a5230376aee7205cea0680146eadff09aaaadfe9 | [] | no_license | saibi/python | ad206fbfe752198492c939578607f1c31223d3c3 | fd94a623241c28dffe60350496a5c858c6f912e8 | refs/heads/main | 2023-09-01T08:20:33.379923 | 2023-08-31T01:32:17 | 2023-08-31T01:32:17 | 74,268,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python
# -*- coding: utf8 -*-
import os
import time
source = ['/home/saibi/code_test/python/tutorial', '/home/saibi/code_test/python/byte']
target_dir = '/tmp'
target = target_dir + os.sep + time.strftime('%Y%m%d%H%M%S') + '.zip'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
zip_command = "zip -r {0} {1}".format(target, ' '.join(source))
print "Zip command is:", zip_command
print "Running:"
if os.system(zip_command) == 0:
print 'Successful backup to', target_dir
else:
print 'Backup FAILED'
| [
"kimyoungmin@gmail.com"
] | kimyoungmin@gmail.com |
1865f3d5c1af297b5b994f6bd014824460e28c3b | 32ab2b00d96f9eeb68a410d3ca1ae870b1a0d3b8 | /panda.py | deb413511896a9c49cfb52bf728a83ae3cad3bb0 | [] | no_license | sedighi-mahdi/pycode | 96b44dd33c2a84088d755785d19757dd74080334 | 2528e7b36e87bb39bbd55df8d6dd633f749f7cf9 | refs/heads/master | 2020-07-26T18:26:01.214855 | 2019-10-03T08:49:48 | 2019-10-03T08:49:48 | 208,732,316 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | import pandas as pd
data = pd.read_csv("state.csv")
# ده رکورد اول لیست را در خروجی چاپ می کند
print("Head -- \n", data.head(10))
# ده رکورد آخر داده را در خروجی چاپ می کند
print("\n\n Tail -- \n", data.tail(10))
#نسبت جمعتی به میلیون نفر
data['PopulationInMillions'] = data['Population']/1000000
print (data.head(5))
#تحلیلی بر روی همه ستون های داده انجام می دهد
data.describe()
#میانگین ستون نرخ قتل را محاسبه و چاپ می کند
MurderRate_mean = data.MurderRate.mean()
print("\nMurderRate Mean : ", MurderRate_mean)
#میانه ستون جمعیت را بدست آورده و چاپ می کند
Population_median = data.Population.median()
print("Population median : ", Population_median)
MurderRate_median = data.MurderRate.median()
print("\nMurderRate median : ", MurderRate_median)
| [
"sedighi.mahdi87@gmail.com"
] | sedighi.mahdi87@gmail.com |
d4ca46e2c944de9ca853678d9c3ca4f707c7a382 | 7e2351db6ee3d9bf1edaf728855c7a5934e8b7dc | /tests/test_physics.py | 6edb6bbf392ea9f8532414a0acf9d1d0adae647a | [
"MIT"
] | permissive | alstar8/habitat-sim | b8fd764fb57b8cb678c7ac03ec1f95f4b3012d01 | fc3261f89732c4712ca1db00b24487a983641d62 | refs/heads/main | 2023-08-14T11:15:50.555848 | 2021-10-03T11:39:03 | 2021-10-03T11:39:03 | 413,045,671 | 0 | 0 | MIT | 2021-10-03T10:31:02 | 2021-10-03T10:31:02 | null | UTF-8 | Python | false | false | 84,410 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
from os import path as osp
import magnum as mn
import numpy as np
import pytest
import quaternion
import examples.settings
import habitat_sim
import habitat_sim.physics
from habitat_sim.utils.common import (
quat_from_angle_axis,
quat_from_magnum,
quat_to_magnum,
random_quaternion,
)
from utils import simulate
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    or not osp.exists("data/objects/example_objects/"),
    reason="Requires the habitat-test-scenes and habitat test objects",
)
def test_kinematics():
    """Exercise the kinematic rigid-object API with physics enabled.

    Covers object add/remove, MotionType switching, translation /
    rotation / transformation get-set round-trips, stepping the world
    clock, attaching an object to the agent's scene node, and
    RigidState get/set.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings[
        "scene"
    ] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True

    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)

    with habitat_sim.Simulator(hab_cfg) as sim:
        # get the rigid object attributes manager, which manages
        # templates used to create objects
        obj_template_mgr = sim.get_object_template_manager()
        obj_template_mgr.load_configs("data/objects/example_objects/", True)
        assert obj_template_mgr.get_num_templates() > 0
        # get the rigid object manager, which provides direct
        # access to objects
        rigid_obj_mgr = sim.get_rigid_object_manager()

        # test adding an object to the world
        # get handle for object 0, used to test
        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(obj_handle_list[0])
        assert rigid_obj_mgr.get_num_objects() > 0
        assert (
            len(rigid_obj_mgr.get_object_handles()) == rigid_obj_mgr.get_num_objects()
        )

        # test setting the motion type
        cheezit_box.motion_type = habitat_sim.physics.MotionType.STATIC
        assert cheezit_box.motion_type == habitat_sim.physics.MotionType.STATIC
        cheezit_box.motion_type = habitat_sim.physics.MotionType.KINEMATIC
        assert cheezit_box.motion_type == habitat_sim.physics.MotionType.KINEMATIC

        # test kinematics
        I = np.identity(4)

        # test get and set translation
        cheezit_box.translation = [0.0, 1.0, 0.0]
        assert np.allclose(cheezit_box.translation, np.array([0.0, 1.0, 0.0]))

        # test object SceneNode: the wrapper and the underlying node must agree
        assert np.allclose(
            cheezit_box.translation, cheezit_box.root_scene_node.translation
        )

        # test get and set transform
        cheezit_box.transformation = I
        assert np.allclose(cheezit_box.transformation, I)

        # test get and set rotation (numpy-quaternion <-> magnum round-trip)
        Q = quat_from_angle_axis(np.pi, np.array([0.0, 1.0, 0.0]))
        expected = np.eye(4)
        expected[0:3, 0:3] = quaternion.as_rotation_matrix(Q)
        cheezit_box.rotation = quat_to_magnum(Q)
        assert np.allclose(cheezit_box.transformation, expected)
        assert np.allclose(quat_from_magnum(cheezit_box.rotation), Q)

        # test object removal
        rigid_obj_mgr.remove_object_by_id(cheezit_box.object_id)
        assert rigid_obj_mgr.get_num_objects() == 0

        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(obj_handle_list[0])

        prev_time = 0.0
        for _ in range(2):
            # do some kinematics here (todo: translating or rotating instead of absolute)
            cheezit_box.translation = np.random.rand(3)
            T = cheezit_box.transformation  # noqa : F841

            # test getting observation
            sim.step(random.choice(list(hab_cfg.agents[0].action_space.keys())))

            # check that time is increasing in the world
            assert sim.get_world_time() > prev_time
            prev_time = sim.get_world_time()

        rigid_obj_mgr.remove_object_by_id(cheezit_box.object_id)

        # test attaching/dettaching an Agent to/from physics simulation
        agent_node = sim.agents[0].scene_node
        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_agent = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0], agent_node
        )

        cheezit_agent.translation = np.random.rand(3)
        # moving the attached object must move the agent's node too
        assert np.allclose(agent_node.translation, cheezit_agent.translation)
        rigid_obj_mgr.remove_object_by_id(
            cheezit_agent.object_id, delete_object_node=False
        )  # don't delete the agent's node
        assert agent_node.translation

        # test get/set RigidState
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(obj_handle_list[0])
        targetRigidState = habitat_sim.bindings.RigidState(
            mn.Quaternion(), np.array([1.0, 2.0, 3.0])
        )
        cheezit_box.rigid_state = targetRigidState
        objectRigidState = cheezit_box.rigid_state
        assert np.allclose(objectRigidState.translation, targetRigidState.translation)
        assert objectRigidState.rotation == targetRigidState.rotation
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    or not osp.exists("data/objects/example_objects/"),
    reason="Requires the habitat-test-scenes and habitat test objects",
)
def test_kinematics_no_physics():
    """Same kinematic API exercise as test_kinematics, but with
    ``enable_physics`` False — the kinematic-only (no-physics) backend
    must behave identically for all of these operations.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings[
        "scene"
    ] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # physics disabled: exercises the non-physics simulator backend
    cfg_settings["enable_physics"] = False
    cfg_settings["depth_sensor"] = True

    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)

    with habitat_sim.Simulator(hab_cfg) as sim:
        # get the rigid object attributes manager, which manages
        # templates used to create objects
        obj_template_mgr = sim.get_object_template_manager()
        obj_template_mgr.load_configs("data/objects/example_objects/", True)
        assert obj_template_mgr.get_num_templates() > 0
        # get the rigid object manager, which provides direct
        # access to objects
        rigid_obj_mgr = sim.get_rigid_object_manager()

        # test adding an object to the world
        # get handle for object 0, used to test
        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(obj_handle_list[0])
        assert rigid_obj_mgr.get_num_objects() > 0
        assert (
            len(rigid_obj_mgr.get_object_handles()) == rigid_obj_mgr.get_num_objects()
        )

        # test setting the motion type
        cheezit_box.motion_type = habitat_sim.physics.MotionType.STATIC
        assert cheezit_box.motion_type == habitat_sim.physics.MotionType.STATIC
        cheezit_box.motion_type = habitat_sim.physics.MotionType.KINEMATIC
        assert cheezit_box.motion_type == habitat_sim.physics.MotionType.KINEMATIC

        # test kinematics
        I = np.identity(4)

        # test get and set translation
        cheezit_box.translation = [0.0, 1.0, 0.0]
        assert np.allclose(cheezit_box.translation, np.array([0.0, 1.0, 0.0]))

        # test object SceneNode: the wrapper and the underlying node must agree
        assert np.allclose(
            cheezit_box.translation, cheezit_box.root_scene_node.translation
        )

        # test get and set transform
        cheezit_box.transformation = I
        assert np.allclose(cheezit_box.transformation, I)

        # test get and set rotation (numpy-quaternion <-> magnum round-trip)
        Q = quat_from_angle_axis(np.pi, np.array([0.0, 1.0, 0.0]))
        expected = np.eye(4)
        expected[0:3, 0:3] = quaternion.as_rotation_matrix(Q)
        cheezit_box.rotation = quat_to_magnum(Q)
        assert np.allclose(cheezit_box.transformation, expected)
        assert np.allclose(quat_from_magnum(cheezit_box.rotation), Q)

        # test object removal
        rigid_obj_mgr.remove_object_by_id(cheezit_box.object_id)
        assert rigid_obj_mgr.get_num_objects() == 0

        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(obj_handle_list[0])

        prev_time = 0.0
        for _ in range(2):
            # do some kinematics here (todo: translating or rotating instead of absolute)
            cheezit_box.translation = np.random.rand(3)
            T = cheezit_box.transformation  # noqa : F841

            # test getting observation
            sim.step(random.choice(list(hab_cfg.agents[0].action_space.keys())))

            # check that time is increasing in the world
            assert sim.get_world_time() > prev_time
            prev_time = sim.get_world_time()

        rigid_obj_mgr.remove_object_by_id(cheezit_box.object_id)

        # test attaching/dettaching an Agent to/from physics simulation
        agent_node = sim.agents[0].scene_node
        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_agent = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0], agent_node
        )

        cheezit_agent.translation = np.random.rand(3)
        # moving the attached object must move the agent's node too
        assert np.allclose(agent_node.translation, cheezit_agent.translation)
        rigid_obj_mgr.remove_object_by_id(
            cheezit_agent.object_id, delete_object_node=False
        )  # don't delete the agent's node
        assert agent_node.translation

        # test get/set RigidState
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(obj_handle_list[0])
        targetRigidState = habitat_sim.bindings.RigidState(
            mn.Quaternion(), np.array([1.0, 2.0, 3.0])
        )
        cheezit_box.rigid_state = targetRigidState
        objectRigidState = cheezit_box.rigid_state
        assert np.allclose(objectRigidState.translation, targetRigidState.translation)
        assert objectRigidState.rotation == targetRigidState.rotation
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    or not osp.exists("data/objects/example_objects/"),
    reason="Requires the habitat-test-scenes and habitat test objects",
)
def test_dynamics():
    """Exercise the dynamic rigid-body simulation path.

    Applies forces/torques over many steps and asserts the expected
    motion, then tests the DYNAMIC->KINEMATIC switch, velocity get/set,
    and gravity modification. The dynamic section only runs when a real
    physics backend (Bullet) is active.
    """
    # This test assumes that default.phys_scene_config.json contains "physics simulator": "bullet".
    # TODO: enable dynamic override of this setting in simulation config structure
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings[
        "scene"
    ] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True

    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)

    with habitat_sim.Simulator(hab_cfg) as sim:
        # get the rigid object attributes manager, which manages
        # templates used to create objects
        obj_template_mgr = sim.get_object_template_manager()
        obj_template_mgr.load_configs("data/objects/example_objects/", True)
        # make the simulation deterministic (C++ seed is set in reconfigure)
        np.random.seed(cfg_settings["seed"])
        assert obj_template_mgr.get_num_templates() > 0
        # get the rigid object manager, which provides direct
        # access to objects
        rigid_obj_mgr = sim.get_rigid_object_manager()

        # test adding an object to the world
        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_box1 = rigid_obj_mgr.add_object_by_template_handle(obj_handle_list[0])
        cheezit_box2 = rigid_obj_mgr.add_object_by_template_handle(obj_handle_list[0])
        assert rigid_obj_mgr.get_num_objects() > 0
        assert (
            len(rigid_obj_mgr.get_object_handles()) == rigid_obj_mgr.get_num_objects()
        )

        # the handle-substring query must find exactly the objects added above
        obj_dict = rigid_obj_mgr.get_objects_by_handle_substring("cheezit")
        assert len(obj_dict) == rigid_obj_mgr.get_num_objects()
        for k, v in obj_dict.items():
            assert k == v.handle
            assert v.is_alive

        # place the objects over the table in room
        cheezit_box1.translation = [-0.569043, 2.04804, 13.6156]
        cheezit_box2.translation = [-0.569043, 2.04804, 12.6156]

        # get object MotionType and continue testing if MotionType::DYNAMIC (implies a physics implementation is active)
        if cheezit_box1.motion_type == habitat_sim.physics.MotionType.DYNAMIC:
            object1_init_template = cheezit_box1.creation_attributes
            object1_mass = object1_init_template.mass
            grav = sim.get_gravity()
            previous_object_states = [
                [cheezit_box1.translation, cheezit_box1.rotation],
                [cheezit_box2.translation, cheezit_box2.rotation],
            ]
            prev_time = sim.get_world_time()
            for _ in range(50):
                # force application at a location other than the origin should always cause angular and linear motion
                cheezit_box2.apply_force(np.random.rand(3), np.random.rand(3))

                # TODO: expose object properties (such as mass) to python
                # Counter the force of gravity on the object (it should not translate)
                cheezit_box1.apply_force(-grav * object1_mass, np.zeros(3))

                # apply torque to the "floating" object. It should rotate, but not translate
                cheezit_box1.apply_torque(np.random.rand(3))

                # TODO: test other physics functions

                # test getting observation
                sim.step(random.choice(list(hab_cfg.agents[0].action_space.keys())))

                # check that time is increasing in the world
                assert sim.get_world_time() > prev_time
                prev_time = sim.get_world_time()

                # check the object states
                # 1st object should rotate, but not translate
                assert np.allclose(
                    previous_object_states[0][0], cheezit_box1.translation
                )
                assert previous_object_states[0][1] != cheezit_box1.rotation

                # 2nd object should rotate and translate
                assert not np.allclose(
                    previous_object_states[1][0], cheezit_box2.translation
                )
                assert previous_object_states[1][1] != cheezit_box2.rotation

                previous_object_states = [
                    [cheezit_box1.translation, cheezit_box1.rotation],
                    [cheezit_box2.translation, cheezit_box2.rotation],
                ]

            # test setting DYNAMIC object to KINEMATIC
            cheezit_box2.motion_type = habitat_sim.physics.MotionType.KINEMATIC
            assert cheezit_box2.motion_type == habitat_sim.physics.MotionType.KINEMATIC

            sim.step(random.choice(list(hab_cfg.agents[0].action_space.keys())))

            # 2nd object should no longer rotate or translate
            assert np.allclose(previous_object_states[1][0], cheezit_box2.translation)
            assert previous_object_states[1][1] == cheezit_box2.rotation

            sim.step_physics(0.1)

            # test velocity get/set
            test_lin_vel = np.array([1.0, 0.0, 0.0])
            test_ang_vel = np.array([0.0, 1.0, 0.0])

            # velocity setting for KINEMATIC objects won't be simulated, but will be recorded for bullet internal usage.
            cheezit_box2.linear_velocity = test_lin_vel
            assert cheezit_box2.linear_velocity == test_lin_vel

            cheezit_box2.motion_type = habitat_sim.physics.MotionType.DYNAMIC

            cheezit_box2.linear_velocity = test_lin_vel
            cheezit_box2.angular_velocity = test_ang_vel
            assert cheezit_box2.linear_velocity == test_lin_vel
            assert cheezit_box2.angular_velocity == test_ang_vel

            # test modifying gravity: object dropped far from the scene must
            # accelerate along the new gravity direction (+x here)
            new_object_start = np.array([100.0, 0, 0])
            cheezit_box1.translation = new_object_start
            new_grav = np.array([10.0, 0, 0])
            sim.set_gravity(new_grav)
            assert np.allclose(sim.get_gravity(), new_grav)
            assert np.allclose(cheezit_box1.translation, new_object_start)
            sim.step_physics(0.1)
            assert cheezit_box1.translation[0] > new_object_start[0]
def test_velocity_control():
    """Exercise the VelocityControl interface in an empty, zero-gravity scene.

    Iteration 0 tests a DYNAMIC object (skipped when no physics backend
    is active), iteration 1 a KINEMATIC one. Each iteration checks
    global-frame constant velocities against the analytic ground truth,
    then local-frame velocities (drive a half circle).

    Fix: removed leftover debug ``print()`` statements from the test body.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "NONE"
    cfg_settings["enable_physics"] = True

    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)

    with habitat_sim.Simulator(hab_cfg) as sim:
        # zero gravity so velocity integration is the only motion source
        sim.set_gravity(np.array([0.0, 0.0, 0.0]))
        # get the rigid object attributes manager, which manages
        # templates used to create objects
        obj_template_mgr = sim.get_object_template_manager()
        # get the rigid object manager, which provides direct
        # access to objects
        rigid_obj_mgr = sim.get_rigid_object_manager()

        template_path = osp.abspath("data/test_assets/objects/nested_box")
        template_ids = obj_template_mgr.load_configs(template_path)
        object_template = obj_template_mgr.get_template_by_id(template_ids[0])
        # no damping: measured motion must match the commanded velocities
        object_template.linear_damping = 0.0
        object_template.angular_damping = 0.0
        obj_template_mgr.register_template(object_template)
        obj_attr_margin = object_template.margin

        obj_handle = obj_template_mgr.get_template_handle_by_id(template_ids[0])

        for iteration in range(2):
            sim.reset()

            box_object = rigid_obj_mgr.add_object_by_template_handle(obj_handle)
            vel_control = box_object.velocity_control

            if iteration == 0:
                if box_object.motion_type != habitat_sim.physics.MotionType.DYNAMIC:
                    # Non-dynamic simulator in use. Skip 1st pass.
                    rigid_obj_mgr.remove_object_by_id(box_object.object_id)
                    continue
                # verify bullet wrapper is being accessed
                assert np.allclose(box_object.margin, obj_attr_margin)
            elif iteration == 1:
                # test KINEMATIC
                box_object.motion_type = habitat_sim.physics.MotionType.KINEMATIC

            # test global velocities
            vel_control.linear_velocity = np.array([1.0, 0.0, 0.0])
            vel_control.angular_velocity = np.array([0.0, 1.0, 0.0])
            vel_control.controlling_lin_vel = True
            vel_control.controlling_ang_vel = True

            while sim.get_world_time() < 1.0:
                # NOTE: stepping close to default timestep to get near-constant velocity control of DYNAMIC bodies.
                sim.step_physics(0.00416)

            ground_truth_pos = sim.get_world_time() * vel_control.linear_velocity
            assert np.allclose(box_object.translation, ground_truth_pos, atol=0.01)
            ground_truth_q = mn.Quaternion([[0, 0.480551, 0], 0.876967])
            angle_error = mn.math.angle(ground_truth_q, box_object.rotation)
            assert angle_error < mn.Rad(0.005)

            sim.reset()

            # test local velocities (turn in a half circle)
            vel_control.lin_vel_is_local = True
            vel_control.ang_vel_is_local = True
            vel_control.linear_velocity = np.array([0, 0, -math.pi])
            vel_control.angular_velocity = np.array([math.pi * 2.0, 0, 0])

            box_object.translation = [0.0, 0.0, 0.0]
            box_object.rotation = mn.Quaternion()

            while sim.get_world_time() < 0.5:
                # NOTE: stepping close to default timestep to get near-constant velocity control of DYNAMIC bodies.
                sim.step_physics(0.008)

            # NOTE: explicit integration, so expect some error
            ground_truth_q = mn.Quaternion([[1.0, 0.0, 0.0], 0.0])
            assert np.allclose(
                box_object.translation, np.array([0, 1.0, 0.0]), atol=0.07
            )
            angle_error = mn.math.angle(ground_truth_q, box_object.rotation)
            assert angle_error < mn.Rad(0.05)

            rigid_obj_mgr.remove_object_by_id(box_object.object_id)
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/apartment_1.glb"),
    reason="Requires the habitat-test-scenes",
)
def test_raycast():
    """Exercise Simulator.cast_ray against the stage and a rigid object.

    Checks hit point/normal/distance/object-id for a known scene, then
    verifies that non-collidable objects and a non-collidable stage stop
    registering hits. Only runs the checks when a physics backend (and
    therefore a collision world) is active.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()

    # configure some settings in case defaults change
    cfg_settings["scene"] = "data/scene_datasets/habitat-test-scenes/apartment_1.glb"

    # enable the physics simulator
    cfg_settings["enable_physics"] = True

    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)

    with habitat_sim.Simulator(hab_cfg) as sim:
        # get the rigid object attributes manager, which manages
        # templates used to create objects
        obj_template_mgr = sim.get_object_template_manager()
        # get the rigid object manager, which provides direct
        # access to objects
        rigid_obj_mgr = sim.get_rigid_object_manager()

        if (
            sim.get_physics_simulation_library()
            != habitat_sim.physics.PhysicsSimulationLibrary.NoPhysics
        ):
            # only test this if we have a physics simulator and therefore a collision world
            test_ray_1 = habitat_sim.geo.Ray()
            test_ray_1.direction = mn.Vector3(1.0, 0, 0)
            raycast_results = sim.cast_ray(test_ray_1)
            assert raycast_results.ray.direction == test_ray_1.direction
            assert raycast_results.has_hits()
            assert len(raycast_results.hits) == 1
            # expected stage hit for this scene (loose tolerances for asset drift)
            assert np.allclose(
                raycast_results.hits[0].point, np.array([6.83063, 0, 0]), atol=0.07
            )
            assert np.allclose(
                raycast_results.hits[0].normal,
                np.array([-0.999587, 0.0222882, -0.0181424]),
                atol=0.07,
            )
            assert abs(raycast_results.hits[0].ray_distance - 6.831) < 0.001
            # object_id -1 identifies the stage itself
            assert raycast_results.hits[0].object_id == -1

            # add a primitive object to the world and test a ray away from the origin
            cube_prim_handle = obj_template_mgr.get_template_handles("cube")[0]
            cube_obj = rigid_obj_mgr.add_object_by_template_handle(cube_prim_handle)
            cube_obj.translation = [2.0, 0.0, 2.0]

            test_ray_1.origin = np.array([0.0, 0, 2.0])

            raycast_results = sim.cast_ray(test_ray_1)

            assert raycast_results.has_hits()
            assert len(raycast_results.hits) == 4
            assert np.allclose(
                raycast_results.hits[0].point, np.array([1.89048, 0, 2]), atol=0.07
            )
            assert np.allclose(
                raycast_results.hits[0].normal,
                np.array([-0.99774, -0.0475114, -0.0475114]),
                atol=0.07,
            )
            assert abs(raycast_results.hits[0].ray_distance - 1.89) < 0.001
            assert raycast_results.hits[0].object_id == cube_obj.object_id

            # test raycast against a non-collidable object.
            # should not register a hit with the object.
            cube_obj.collidable = False
            raycast_results = sim.cast_ray(test_ray_1)
            assert raycast_results.has_hits()
            assert len(raycast_results.hits) == 3

            # test raycast against a non-collidable stage.
            # should not register any hits.
            sim.set_stage_is_collidable(False)
            raycast_results = sim.cast_ray(test_ray_1)
            assert not raycast_results.has_hits()
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/apartment_1.glb"),
    reason="Requires the habitat-test-scenes",
)
def test_collision_groups():
    """Test the collision group API via contact_test: group naming/renaming,
    custom interaction masks, per-object group overrides, the interaction
    between MotionType changes and group assignment, and Noncollidable.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    # configure some settings in case defaults change
    cfg_settings["scene"] = "data/scene_datasets/habitat-test-scenes/apartment_1.glb"
    # enable the physics simulator
    cfg_settings["enable_physics"] = True
    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        # get the rigid object attributes manager, which manages
        # templates used to create objects
        obj_template_mgr = sim.get_object_template_manager()
        # get the rigid object manager, which provides direct
        # access to objects
        rigid_obj_mgr = sim.get_rigid_object_manager()
        if (
            sim.get_physics_simulation_library()
            != habitat_sim.physics.PhysicsSimulationLibrary.NoPhysics
        ):
            # shorthand aliases for the static helper class and the group enum
            cgh = habitat_sim.physics.CollisionGroupHelper
            cg = habitat_sim.physics.CollisionGroups
            # test group naming
            assert cgh.get_group_name(cg.UserGroup1) == "UserGroup1"
            assert cgh.get_group("UserGroup1") == cg.UserGroup1
            cgh.set_group_name(cg.UserGroup1, "my_custom_group_1")
            assert cgh.get_group_name(cg.UserGroup1) == "my_custom_group_1"
            assert cgh.get_group("my_custom_group_1") == cg.UserGroup1
            assert cgh.get_mask_for_group(cg.UserGroup1) == cgh.get_mask_for_group(
                "my_custom_group_1"
            )
            # create a custom group behavior (STATIC and KINEMATIC only)
            new_user_group_1_mask = cg.Static | cg.Kinematic
            cgh.set_mask_for_group(cg.UserGroup1, new_user_group_1_mask)
            cube_prim_handle = obj_template_mgr.get_template_handles("cube")[0]
            cube_obj1 = rigid_obj_mgr.add_object_by_template_handle(cube_prim_handle)
            cube_obj2 = rigid_obj_mgr.add_object_by_template_handle(cube_prim_handle)
            # add a DYNAMIC cube in a contact free state
            cube_obj1.translation = [1.0, 0.0, 4.5]
            assert not cube_obj1.contact_test()
            # add another in contact with the first
            cube_obj2.translation = [1.1, 0.0, 4.6]
            assert cube_obj1.contact_test()
            assert cube_obj2.contact_test()
            # override cube1 collision group to STATIC|KINEMATIC only
            cube_obj1.override_collision_group(cg.UserGroup1)
            assert not cube_obj1.contact_test()
            assert not cube_obj2.contact_test()
            # override cube2 to a new group and configure custom mask to interact with it
            cgh.set_mask_for_group(cg.UserGroup1, new_user_group_1_mask | cg.UserGroup2)
            # NOTE: changing group settings requires overriding object group again
            cube_obj1.override_collision_group(cg.UserGroup1)
            cube_obj2.override_collision_group(cg.UserGroup2)
            assert cube_obj1.contact_test()
            assert cube_obj2.contact_test()
            # NOTE: trying to set the object's MotionType to its current type won't change the collision group
            cube_obj2.motion_type = habitat_sim.physics.MotionType.DYNAMIC
            assert cube_obj1.contact_test()
            assert cube_obj2.contact_test()
            # NOTE: changing the object's MotionType will override the group
            cube_obj2.motion_type = habitat_sim.physics.MotionType.KINEMATIC
            cube_obj2.motion_type = habitat_sim.physics.MotionType.DYNAMIC
            assert not cube_obj1.contact_test()
            assert not cube_obj2.contact_test()
            # cube 1 is still using the custom group and will interact with KINEMATIC
            cube_obj2.motion_type = habitat_sim.physics.MotionType.KINEMATIC
            assert cube_obj1.contact_test()
            assert cube_obj2.contact_test()
            # test convenience bitwise mask setter
            cgh.set_group_interacts_with(cg.UserGroup1, cg.Kinematic, False)
            assert not cgh.get_mask_for_group(cg.UserGroup1) & cg.Kinematic
            cube_obj1.override_collision_group(cg.UserGroup1)
            assert not cube_obj1.contact_test()
            assert not cube_obj2.contact_test()
            cgh.set_group_interacts_with(cg.UserGroup1, cg.Kinematic, True)
            assert cgh.get_mask_for_group(cg.UserGroup1) & cg.Kinematic
            cube_obj1.override_collision_group(cg.UserGroup1)
            assert cube_obj1.contact_test()
            assert cube_obj2.contact_test()
            # test Noncollidable
            cube_obj2.override_collision_group(cg.Noncollidable)
            assert not cube_obj1.contact_test()
            assert not cube_obj2.contact_test()
            # test Noncollidable vs Noncollidable
            cube_obj1.override_collision_group(cg.Noncollidable)
            assert not cube_obj1.contact_test()
            assert not cube_obj2.contact_test()
def check_articulated_object_root_state(
    articulated_object, target_rigid_state, epsilon=1.0e-4
):
    r"""Checks the root state of the ArticulatedObject with all query methods against a target RigidState.

    :param articulated_object: The ArticulatedObject to check
    :param target_rigid_state: A RigidState object separating translation (vector3) rotation (quaternion)
    :param epsilon: An error threshold for numeric comparisons
    """
    # NOTE: basic transform properties refer to the root state
    # build the expected matrix once; both the scene node transform and the
    # transformation property must match it
    expected_matrix = mn.Matrix4.from_(
        target_rigid_state.rotation.to_matrix(), target_rigid_state.translation
    )
    assert np.allclose(
        articulated_object.root_scene_node.transformation,
        expected_matrix,
        atol=epsilon,
    )
    assert np.allclose(articulated_object.transformation, expected_matrix, atol=epsilon)
    # translation / rotation properties must agree with the target
    assert np.allclose(
        articulated_object.translation, target_rigid_state.translation, atol=epsilon
    )
    assert mn.math.angle(
        articulated_object.rotation, target_rigid_state.rotation
    ) < mn.Rad(epsilon)
    # the rigid_state property bundles both pieces; check it as well
    rigid_state = articulated_object.rigid_state
    assert np.allclose(
        rigid_state.translation, target_rigid_state.translation, atol=epsilon
    )
    assert mn.math.angle(rigid_state.rotation, target_rigid_state.rotation) < mn.Rad(
        epsilon
    )
def getRestPositions(articulated_object):
    r"""Constructs a valid rest pose vector for an ArticulatedObject with all zeros for non-spherical joints which get an identity quaternion instead."""
    # start from all zeros, then write the identity quaternion's scalar ('w')
    # component (stored at pos offset + 3) for every spherical joint
    pose = np.zeros(len(articulated_object.joint_positions))
    spherical = habitat_sim.physics.JointType.Spherical
    for link in range(articulated_object.num_links):
        if articulated_object.get_link_joint_type(link) == spherical:
            pose[articulated_object.get_link_joint_pos_offset(link) + 3] = 1
    return pose
def getRandomPositions(articulated_object):
    r"""Constructs a random pose vector for an ArticulatedObject with unit quaternions for spherical joints."""
    limits = articulated_object.joint_position_limits
    # clamp unbounded limits into [-1, 1] so uniform sampling is well defined
    lo = np.maximum(limits[0], -1)
    hi = np.minimum(limits[1], 1)
    rand_pose = np.random.uniform(lo, hi, len(articulated_object.joint_positions))
    for link in range(articulated_object.num_links):
        if (
            articulated_object.get_link_joint_type(link)
            != habitat_sim.physics.JointType.Spherical
        ):
            continue
        # spherical joints require a unit quaternion: vector part at the
        # joint's position offset, scalar part at offset + 3
        offset = articulated_object.get_link_joint_pos_offset(link)
        quat = random_quaternion()
        rand_pose[offset + 3] = quat.scalar
        rand_pose[offset : offset + 3] = quat.vector
    return rand_pose
@pytest.mark.skipif(
    not habitat_sim.built_with_bullet,
    reason="ArticulatedObject API requires Bullet physics.",
)
def test_articulated_object_add_remove():
    """Exercise add/remove bookkeeping of the ArticulatedObjectManager:
    handle- and id-based removal, wrapper liveness, and bulk removal.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "NONE"
    cfg_settings["enable_physics"] = True
    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        art_obj_mgr = sim.get_articulated_object_manager()
        robot_file = "data/test_assets/urdf/kuka_iiwa/model_free_base.urdf"
        # parse URDF and add a robot to the world
        first_robot = art_obj_mgr.add_articulated_object_from_urdf(filepath=robot_file)
        assert first_robot
        assert first_robot.is_alive
        assert first_robot.object_id == 0  # first robot added
        # add a second robot at double scale
        second_robot = art_obj_mgr.add_articulated_object_from_urdf(
            filepath=robot_file, global_scale=2.0
        )
        assert second_robot
        assert art_obj_mgr.get_num_objects() == 2
        assert second_robot.global_scale == 2.0
        # removal by handle invalidates the wrapper and shrinks the registry
        art_obj_mgr.remove_object_by_handle(first_robot.handle)
        assert not first_robot.is_alive
        assert art_obj_mgr.get_num_objects() == 1
        assert second_robot.is_alive
        # add several more
        for _ in range(5):
            art_obj_mgr.add_articulated_object_from_urdf(filepath=robot_file)
        assert art_obj_mgr.get_num_objects() == 6
        # removal by object id also works
        art_obj_mgr.remove_object_by_id(second_robot.object_id)
        assert not second_robot.is_alive
        assert art_obj_mgr.get_num_objects() == 5
        # bulk removal clears everything
        art_obj_mgr.remove_all_objects()
        assert art_obj_mgr.get_num_objects() == 0
@pytest.mark.skipif(
    not habitat_sim.built_with_bullet,
    reason="ArticulatedObject API requires Bullet physics.",
)
def test_articulated_object_maintain_link_order():
    """Check that the maintain_link_order URDF import option preserves the
    link ordering defined in the file.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "NONE"
    cfg_settings["enable_physics"] = True
    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        art_obj_mgr = sim.get_articulated_object_manager()
        amass_file = "data/test_assets/urdf/amass_male.urdf"
        # parse URDF and add a humanoid to the world
        humanoid = art_obj_mgr.add_articulated_object_from_urdf(
            filepath=amass_file, maintain_link_order=True
        )
        assert humanoid
        assert humanoid.is_alive
        # ground-truth ordering as authored in the URDF
        expected_link_order = [
            "lhip",
            "lknee",
            "lankle",
            "rhip",
            "rknee",
            "rankle",
            "lowerback",
            "upperback",
            "chest",
            "lowerneck",
            "upperneck",
            "lclavicle",
            "lshoulder",
            "lelbow",
            "lwrist",
            "rclavicle",
            "rshoulder",
            "relbow",
            "rwrist",
        ]
        actual_link_order = [
            humanoid.get_link_name(link_ix) for link_ix in range(humanoid.num_links)
        ]
        # check the link ordering against ground truth
        assert expected_link_order == actual_link_order
@pytest.mark.skipif(
    not habitat_sim.built_with_bullet,
    reason="ArticulatedObject API requires Bullet physics.",
)
@pytest.mark.parametrize(
    "test_asset",
    [
        "data/test_assets/urdf/kuka_iiwa/model_free_base.urdf",
        "data/test_assets/urdf/fridge/fridge.urdf",
        "data/test_assets/urdf/prim_chain.urdf",
        "data/test_assets/urdf/amass_male.urdf",
    ],
)
def test_articulated_object_kinematics(test_asset):
    """Test kinematic state access for ArticulatedObjects: root transform
    setters (properties, matrix, RigidState, translate/rotate), joint
    position/velocity/force get/set, joint limits, and (auto-)clamping.

    :param test_asset: path of the URDF file to load (parametrized).
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "NONE"
    cfg_settings["enable_physics"] = True
    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        art_obj_mgr = sim.get_articulated_object_manager()
        robot_file = test_asset
        # parse URDF and add an ArticulatedObject to the world
        robot = art_obj_mgr.add_articulated_object_from_urdf(filepath=robot_file)
        assert robot.is_alive
        # NOTE: basic transform properties refer to the root state
        # root state should be identity by default
        expected_root_state = habitat_sim.RigidState()
        check_articulated_object_root_state(robot, expected_root_state)
        # set the transformation with various methods
        # translation and rotation properties:
        robot.translation = expected_root_state.translation = mn.Vector3(1.0, 2.0, 3.0)
        check_articulated_object_root_state(robot, expected_root_state)
        robot.rotation = expected_root_state.rotation = mn.Quaternion.rotation(
            mn.Rad(0.5), mn.Vector3(-5.0, 1.0, 9.0).normalized()
        )
        check_articulated_object_root_state(robot, expected_root_state)
        # transform property:
        test_matrix_rotation = mn.Quaternion.rotation(
            mn.Rad(0.75), mn.Vector3(4.0, 3.0, -1.0).normalized()
        )
        test_matrix_translation = mn.Vector3(3.0, 2.0, 1.0)
        transform_test_matrix = mn.Matrix4.from_(
            test_matrix_rotation.to_matrix(), test_matrix_translation
        )
        robot.transformation = transform_test_matrix
        expected_root_state = habitat_sim.RigidState(
            test_matrix_rotation, test_matrix_translation
        )
        # looser epsilon for quat->matrix conversions
        check_articulated_object_root_state(robot, expected_root_state, epsilon=1.0e-3)
        # rigid_state property:
        expected_root_state = habitat_sim.RigidState()
        robot.rigid_state = expected_root_state
        check_articulated_object_root_state(robot, expected_root_state)
        # state modifying functions:
        robot.translate(test_matrix_translation)
        expected_root_state.translation = test_matrix_translation
        check_articulated_object_root_state(robot, expected_root_state)
        # rotate the robot 180 degrees
        robot.rotate(mn.Rad(math.pi), mn.Vector3(0, 1.0, 0.0))
        expected_root_state.rotation = mn.Quaternion.rotation(
            mn.Rad(math.pi), mn.Vector3(0, 1.0, 0.0)
        )
        check_articulated_object_root_state(robot, expected_root_state)
        # not testing local transforms at this point since using the same mechanism
        # object should have some degrees of freedom
        num_dofs = len(robot.joint_forces)
        assert num_dofs > 0
        # default zero joint states
        assert np.allclose(robot.joint_positions, getRestPositions(robot))
        assert np.allclose(robot.joint_velocities, np.zeros(num_dofs))
        assert np.allclose(robot.joint_forces, np.zeros(num_dofs))
        # test joint state get/set
        # generate vectors with num_dofs evenly spaced samples in a range
        target_pose = getRandomPositions(robot)
        # positions
        robot.joint_positions = target_pose
        assert np.allclose(robot.joint_positions, target_pose)
        # velocities
        target_joint_vel = np.linspace(1.1, 2.0, num_dofs)
        robot.joint_velocities = target_joint_vel
        assert np.allclose(robot.joint_velocities, target_joint_vel)
        # forces
        target_joint_forces = np.linspace(2.1, 3.0, num_dofs)
        robot.joint_forces = target_joint_forces
        assert np.allclose(robot.joint_forces, target_joint_forces)
        # absolute, not additive setter
        robot.joint_forces = target_joint_forces
        assert np.allclose(robot.joint_forces, target_joint_forces)
        # test additive method
        robot.add_joint_forces(target_joint_forces)
        assert np.allclose(robot.joint_forces, 2 * target_joint_forces)
        # clear all positions, velocities, forces to zero
        robot.clear_joint_states()
        assert np.allclose(robot.joint_positions, getRestPositions(robot))
        assert np.allclose(robot.joint_velocities, np.zeros(num_dofs))
        assert np.allclose(robot.joint_forces, np.zeros(num_dofs))
        # test joint limits and clamping
        joint_limits = robot.joint_position_limits
        lower_pos_limits = joint_limits[0]
        upper_pos_limits = joint_limits[1]
        # setup joint positions outside of the limit range
        invalid_joint_positions = getRestPositions(robot)
        for pos in range(len(invalid_joint_positions)):
            # skip unbounded dofs (infinite limit means any value is valid)
            if not math.isinf(upper_pos_limits[pos]):
                invalid_joint_positions[pos] = upper_pos_limits[pos] + 0.1
        robot.joint_positions = invalid_joint_positions
        # allow these to be set
        assert np.allclose(robot.joint_positions, invalid_joint_positions, atol=1.0e-4)
        # then clamp back into valid range
        robot.clamp_joint_limits()
        assert np.all(robot.joint_positions <= upper_pos_limits)
        assert np.all(robot.joint_positions <= invalid_joint_positions)
        # repeat with lower limits
        invalid_joint_positions = getRestPositions(robot)
        for pos in range(len(invalid_joint_positions)):
            if not math.isinf(lower_pos_limits[pos]):
                invalid_joint_positions[pos] = lower_pos_limits[pos] - 0.1
        robot.joint_positions = invalid_joint_positions
        # allow these to be set
        assert np.allclose(robot.joint_positions, invalid_joint_positions, atol=1.0e-4)
        # then clamp back into valid range
        robot.clamp_joint_limits()
        assert np.all(robot.joint_positions >= lower_pos_limits)
        # test auto-clamping (only occurs during step function BEFORE integration)
        robot.joint_positions = invalid_joint_positions
        assert np.allclose(robot.joint_positions, invalid_joint_positions, atol=1.0e-4)
        # taking a single step should not clamp positions by default
        # NOTE(review): dt=-1 presumably requests one default-length physics
        # step - confirm against Simulator.step_physics semantics
        sim.step_physics(-1)
        assert np.allclose(robot.joint_positions, invalid_joint_positions, atol=1.0e-3)
        assert robot.auto_clamp_joint_limits == False
        robot.auto_clamp_joint_limits = True
        assert robot.auto_clamp_joint_limits == True
        # taking a single step should clamp positions when auto clamp enabled
        sim.step_physics(-1)
        assert np.all(robot.joint_positions >= lower_pos_limits)
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/apartment_1.glb"),
    reason="Requires the habitat-test-scenes",
)
@pytest.mark.skipif(
    not habitat_sim.built_with_bullet,
    reason="ArticulatedObject API requires Bullet physics.",
)
@pytest.mark.parametrize(
    "test_asset",
    [
        "data/test_assets/urdf/kuka_iiwa/model_free_base.urdf",
        "data/test_assets/urdf/fridge/fridge.urdf",
        "data/test_assets/urdf/prim_chain.urdf",
        "data/test_assets/urdf/amass_male.urdf",
    ],
)
def test_articulated_object_dynamics(test_asset):
    """Test dynamic simulation of ArticulatedObjects: falling under gravity,
    root linear/angular velocity set and integration, KINEMATIC motion type,
    fixed vs free base behavior, and sleep/wake states.

    :param test_asset: path of the URDF file to load (parametrized).
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "data/scene_datasets/habitat-test-scenes/apartment_1.glb"
    cfg_settings["enable_physics"] = True
    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        art_obj_mgr = sim.get_articulated_object_manager()
        robot_file = test_asset
        # parse URDF and add an ArticulatedObject to the world
        robot = art_obj_mgr.add_articulated_object_from_urdf(filepath=robot_file)
        assert robot.is_alive
        # object should be initialized with dynamics
        assert robot.motion_type == habitat_sim.physics.MotionType.DYNAMIC
        sim.step_physics(0.2)
        # after a short fall the root should have downward (-y) velocity
        assert robot.root_linear_velocity[1] < -1.0
        sim.step_physics(2.8)
        # the robot should fall to the floor under gravity and stop
        assert robot.translation[1] < -0.5
        assert robot.translation[1] > -1.7
        # test linear and angular root velocity
        robot.translation = mn.Vector3(100)
        robot.rotation = mn.Quaternion()
        target_lin_vel = np.linspace(1.1, 2.0, 3)
        target_ang_vel = np.linspace(2.1, 3.0, 3)
        robot.root_linear_velocity = target_lin_vel
        robot.root_angular_velocity = target_ang_vel
        assert np.allclose(robot.root_linear_velocity, target_lin_vel, atol=1.0e-4)
        assert np.allclose(robot.root_angular_velocity, target_ang_vel, atol=1.0e-4)
        # take a single step and expect the velocity to be applied
        current_time = sim.get_world_time()
        sim.step_physics(-1)
        timestep = sim.get_world_time() - current_time
        # finite-difference the translation to recover the applied velocity
        lin_finite_diff = (robot.translation - mn.Vector3(100)) / timestep
        assert np.allclose(robot.root_linear_velocity, lin_finite_diff, atol=1.0e-3)
        expected_rotation = mn.Quaternion.rotation(
            mn.Rad(np.linalg.norm(target_ang_vel * timestep)),
            target_ang_vel / np.linalg.norm(target_ang_vel),
        )
        angle_error = mn.math.angle(robot.rotation, expected_rotation)
        assert angle_error < mn.Rad(0.0005)
        assert not np.allclose(robot.root_linear_velocity, mn.Vector3(0), atol=0.1)
        assert not np.allclose(robot.root_angular_velocity, mn.Vector3(0), atol=0.1)
        # reset root transform and switch to kinematic
        robot.translation = mn.Vector3(0)
        robot.clear_joint_states()
        robot.motion_type = habitat_sim.physics.MotionType.KINEMATIC
        assert robot.motion_type == habitat_sim.physics.MotionType.KINEMATIC
        sim.step_physics(1.0)
        # kinematic objects are not integrated by the simulation
        assert robot.translation == mn.Vector3(0)
        assert robot.root_linear_velocity == mn.Vector3(0)
        assert robot.root_angular_velocity == mn.Vector3(0)
        # set forces and velocity and check no simulation result
        current_positions = robot.joint_positions
        robot.joint_velocities = np.linspace(1.1, 2.0, len(robot.joint_velocities))
        robot.joint_forces = np.linspace(2.1, 3.0, len(robot.joint_forces))
        sim.step_physics(1.0)
        assert np.allclose(robot.joint_positions, current_positions, atol=1.0e-4)
        assert robot.translation == mn.Vector3(0)
        # positions can be manually changed
        target_joint_positions = getRandomPositions(robot)
        robot.joint_positions = target_joint_positions
        assert np.allclose(robot.joint_positions, target_joint_positions, atol=1.0e-4)
        # instance fresh robot with fixed base
        art_obj_mgr.remove_object_by_id(robot.object_id)
        robot = art_obj_mgr.add_articulated_object_from_urdf(
            filepath=robot_file, fixed_base=True
        )
        assert robot.translation == mn.Vector3(0)
        assert robot.motion_type == habitat_sim.physics.MotionType.DYNAMIC
        # perturb the system dynamically
        robot.joint_velocities = np.linspace(5.1, 8.0, len(robot.joint_velocities))
        sim.step_physics(1.0)
        # root should remain fixed
        assert robot.translation == mn.Vector3(0)
        assert robot.root_linear_velocity == mn.Vector3(0)
        assert robot.root_angular_velocity == mn.Vector3(0)
        # positions should be dynamic and perturbed by velocities
        assert not np.allclose(robot.joint_positions, getRestPositions(robot), atol=0.1)
        # instance fresh robot with free base
        art_obj_mgr.remove_object_by_id(robot.object_id)
        robot = art_obj_mgr.add_articulated_object_from_urdf(filepath=robot_file)
        # put object to sleep
        assert robot.can_sleep
        assert robot.awake
        robot.awake = False
        assert not robot.awake
        sim.step_physics(1.0)
        # a sleeping object is not simulated, so it stays put
        assert not robot.awake
        assert robot.translation == mn.Vector3(0)
        # add a new object to drop onto the first, waking it up
        robot2 = art_obj_mgr.add_articulated_object_from_urdf(filepath=robot_file)
        sim.step_physics(0.5)
        assert robot.awake
        assert robot2.awake
@pytest.mark.skipif(
    not habitat_sim.built_with_bullet,
    reason="ArticulatedObject API requires Bullet physics.",
)
def test_articulated_object_fixed_base_proxy():
    """Test contact_test behavior for a fixed-base ArticulatedObject against a
    cube under different cube MotionTypes, before and after moving the fixed
    base. Iteration 0 runs at the origin; iteration 1 repeats the same checks
    after translating the base.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "NONE"
    cfg_settings["enable_physics"] = True
    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        art_obj_mgr = sim.get_articulated_object_manager()
        rigid_obj_mgr = sim.get_rigid_object_manager()
        obj_template_mgr = sim.get_object_template_manager()
        robot_file = "data/test_assets/urdf/fixed_base_test.urdf"
        # parse URDF and add an ArticulatedObject to the world with fixed base
        robot = art_obj_mgr.add_articulated_object_from_urdf(
            filepath=robot_file, fixed_base=True
        )
        assert robot.is_alive
        # add a test object to the world
        cube_prim_handle = obj_template_mgr.get_template_handles("cube")[0]
        cube_obj = rigid_obj_mgr.add_object_by_template_handle(cube_prim_handle)
        for it in range(2):
            if it == 1:
                # test updating the fixed base position
                robot.translation = [0.2, 0.3, 0.4]
            # place the cube coincident with the robot base
            cube_obj.translation = robot.translation
            # initial position should be intersecting with all components
            assert robot.contact_test()
            # move the Dynamic link out of the way
            robot.joint_positions = [0.3]
            # should still report contact because cube_obj is Dynamic
            assert robot.contact_test()
            # should not report contact once cube_obj is Static because base link is also Static
            cube_obj.motion_type = habitat_sim.physics.MotionType.STATIC
            assert not robot.contact_test()
            cube_obj.motion_type = habitat_sim.physics.MotionType.KINEMATIC
            assert not robot.contact_test()
            # move the Dynamic link back into the cube: contact again
            robot.joint_positions = [0.0]
            assert robot.contact_test()
            # restore DYNAMIC for the next iteration
            cube_obj.motion_type = habitat_sim.physics.MotionType.DYNAMIC
@pytest.mark.skipif(
    not habitat_sim.built_with_bullet,
    reason="ArticulatedObject API requires Bullet physics.",
)
def test_articulated_object_damping_joint_motors():
    # test automated creation of joint motors from URDF configured joint damping values
    robot_file = "data/test_assets/urdf/kuka_iiwa/model_free_base.urdf"
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "NONE"
    cfg_settings["enable_physics"] = True
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        art_obj_mgr = sim.get_articulated_object_manager()
        # parse URDF and add an ArticulatedObject to the world
        robot = art_obj_mgr.add_articulated_object_from_urdf(filepath=robot_file)
        assert robot.is_alive
        # When URDF joint damping is defined, we generate a set of motors automatically.
        # existing_joint_motor_ids maps motor ids to starting DoF indices.
        motors = robot.existing_joint_motor_ids
        assert len(motors) == 7
        for motor_id, dof_index in motors.items():
            # for this model, should be single dof motors for all dofs
            assert motor_id == dof_index
            settings = robot.get_joint_motor_settings(motor_id)
            # note max impulse is taken directly from the configured damping value
            assert settings.max_impulse == 0.5
            assert settings.motor_type == habitat_sim.physics.JointMotorType.SingleDof
            # should not be controlling position
            assert settings.position_gain == 0
            # should be attempting to maintain 0 velocity
            assert settings.velocity_gain == 1.0
            assert settings.velocity_target == 0.0
def check_joint_positions(robot, target, single_dof_eps=5.0e-3, quat_eps=0.2):
    """Assert each joint's current position matches the target pose vector.

    Spherical joints are compared by quaternion angle error; revolute and
    prismatic joints by absolute scalar difference. Other joint types are
    skipped.
    """
    current = robot.joint_positions
    for link in robot.get_link_ids():
        offset = robot.get_link_joint_pos_offset(link)
        jtype = robot.get_link_joint_type(link)
        if jtype == habitat_sim.physics.JointType.Spherical:
            want = mn.Quaternion(target[offset : offset + 3], target[offset + 3])
            have = mn.Quaternion(current[offset : offset + 3], current[offset + 3])
            # q and -q represent the same rotation but yield different angle
            # errors, so accept either sign
            within_tol = mn.math.angle(want, have) < mn.Rad(quat_eps) or mn.math.angle(
                want, -1 * have
            ) < mn.Rad(quat_eps)
            assert within_tol
        elif jtype in (
            habitat_sim.physics.JointType.Revolute,
            habitat_sim.physics.JointType.Prismatic,
        ):
            assert abs(target[offset] - current[offset]) < single_dof_eps
@pytest.mark.skipif(
    not habitat_sim.built_with_bullet,
    reason="ArticulatedObject API requires Bullet physics.",
)
@pytest.mark.parametrize(
    "test_asset",
    [
        "data/test_assets/urdf/kuka_iiwa/model_free_base.urdf",
        "data/test_assets/urdf/fridge/fridge.urdf",
        "data/test_assets/urdf/prim_chain.urdf",
        "data/test_assets/urdf/amass_male.urdf",
    ],
)
def test_articulated_object_joint_motors(test_asset):
    """Test JointMotor creation and control for ArticulatedObjects: per-link
    motor construction (spherical and single-dof), holding a rest pose,
    motor removal and bulk re-creation, position targets, and velocity
    targets. Optionally records debug video when produce_debug_video is True.

    :param test_asset: path of the URDF file to load (parametrized).
    """
    # set this to output test results as video for easy investigation
    produce_debug_video = False
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "NONE"
    cfg_settings["enable_physics"] = True
    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        art_obj_mgr = sim.get_articulated_object_manager()
        robot_file = test_asset
        # add an ArticulatedObject to the world with a fixed base
        robot = art_obj_mgr.add_articulated_object_from_urdf(
            filepath=robot_file, fixed_base=True
        )
        assert robot.is_alive
        # remove any automatically created motors
        existing_motor_ids = robot.existing_joint_motor_ids
        for motor_id in existing_motor_ids:
            robot.remove_joint_motor(motor_id)
        assert len(robot.existing_joint_motor_ids) == 0
        check_joint_positions(robot, getRestPositions(robot))
        # setup the camera for debug video
        sim.agents[0].scene_node.translation = [0.0, -1.5, 2.0]
        observations = []
        target_time = 0.0
        # let the agent drop
        target_time += 0.2
        while sim.get_world_time() < target_time:
            sim.step_physics(1.0 / 60.0)
            if produce_debug_video:
                observations.append(sim.get_sensor_observations())
        # iterate through links and setup joint motors to hold a rest position
        joint_motor_settings = None
        print(f" L({-1}): name = {robot.get_link_name(-1)}")
        for link_id in robot.get_link_ids():
            print(
                f" L({link_id}): name = {robot.get_link_name(link_id)} | joint_name = {robot.get_link_joint_name(link_id)}"
            )
            if (
                robot.get_link_joint_type(link_id)
                == habitat_sim.physics.JointType.Spherical
            ):
                # construct a spherical JointMotorSettings
                joint_motor_settings = habitat_sim.physics.JointMotorSettings(
                    spherical_position_target=mn.Quaternion(),
                    position_gain=1.0,
                    spherical_velocity_target=mn.Vector3(),
                    velocity_gain=0.1,
                    max_impulse=10000.0,
                )
            elif (
                robot.get_link_joint_type(link_id)
                == habitat_sim.physics.JointType.Prismatic
                or robot.get_link_joint_type(link_id)
                == habitat_sim.physics.JointType.Revolute
            ):
                # construct a single dof JointMotorSettings
                joint_motor_settings = habitat_sim.physics.JointMotorSettings(
                    position_target=0.0,
                    position_gain=1.0,
                    velocity_target=0.0,
                    velocity_gain=0.1,
                    max_impulse=10000.0,
                )
            else:
                # planar or fixed joints are not supported
                continue
            # create the motor from its settings
            robot.create_joint_motor(link_id, joint_motor_settings)
        target_time += 6.0
        while sim.get_world_time() < target_time:
            sim.step_physics(1.0 / 60.0)
            if produce_debug_video:
                observations.append(sim.get_sensor_observations())
        # validate that rest pose is maintained
        # Note: assume all joints for test assets can be actuated
        target_positions = getRestPositions(robot)
        check_joint_positions(robot, target_positions)
        # check removal and auto-creation
        joint_motor_settings = habitat_sim.physics.JointMotorSettings(
            position_target=0.0,
            position_gain=0.8,
            velocity_target=0.0,
            velocity_gain=0.2,
            max_impulse=10000.0,
        )
        num_motors = len(robot.existing_joint_motor_ids)
        existing_motor_ids = robot.existing_joint_motor_ids
        for motor_id in existing_motor_ids:
            robot.remove_joint_motor(motor_id)
        assert len(robot.existing_joint_motor_ids) == 0
        # create_all_motors should re-create one motor per actuated joint
        robot.create_all_motors(joint_motor_settings)
        assert len(robot.existing_joint_motor_ids) == num_motors
        # set new random position targets
        random_position_target = getRandomPositions(robot)
        robot.update_all_motor_targets(random_position_target)
        target_time += 6.0
        while sim.get_world_time() < target_time:
            sim.step_physics(1.0 / 60.0)
            if produce_debug_video:
                observations.append(sim.get_sensor_observations())
        # NOTE: because target is randomly generated, this check can fail probabilistically (try re-running test)
        check_joint_positions(robot, random_position_target)
        # set zero position gains and non-zero velocity target
        new_vel_target = np.ones(len(robot.joint_velocities)) * 0.5
        robot.update_all_motor_targets(new_vel_target, velocities=True)
        for motor_id in robot.existing_joint_motor_ids:
            joint_motor_settings = robot.get_joint_motor_settings(motor_id)
            # first check that velocity target update is reflected in settings
            if (
                joint_motor_settings.motor_type
                == habitat_sim.physics.JointMotorType.SingleDof
            ):
                assert joint_motor_settings.velocity_target == 0.5
            else:
                # spherical
                assert joint_motor_settings.spherical_velocity_target == mn.Vector3(0.5)
            # switch motors from position control to pure velocity control
            joint_motor_settings.position_gain = 0
            joint_motor_settings.velocity_gain = 1.0
            robot.update_joint_motor(motor_id, joint_motor_settings)
        # to ensure joint has enough distance to achieve target velocity, reset positions
        robot.clear_joint_states()
        target_time += 0.5
        while sim.get_world_time() < target_time:
            sim.step_physics(1.0 / 60.0)
            if produce_debug_video:
                observations.append(sim.get_sensor_observations())
        # TODO: spherical joint motor velocities are not working correctly
        if "amass_male" not in test_asset:
            assert np.allclose(new_vel_target, robot.joint_velocities, atol=0.06)
        # produce some test debug video
        if produce_debug_video:
            from habitat_sim.utils import viz_utils as vut
            vut.make_video(
                observations,
                "color_sensor",
                "color",
                "test_articulated_object_joint_motors__" + test_asset.split("/")[-1],
                open_vid=False,
            )
@pytest.mark.skipif(
not habitat_sim.built_with_bullet,
reason="ArticulatedObject API requires Bullet physics.",
)
def test_rigid_constraints():
# set this to output test results as video for easy investigation
produce_debug_video = False
observations = []
cfg_settings = examples.settings.default_sim_settings.copy()
cfg_settings["scene"] = "NONE"
cfg_settings["enable_physics"] = True
# loading the physical scene
hab_cfg = examples.settings.make_cfg(cfg_settings)
with habitat_sim.Simulator(hab_cfg) as sim:
obj_template_mgr = sim.get_object_template_manager()
art_obj_mgr = sim.get_articulated_object_manager()
rigid_obj_mgr = sim.get_rigid_object_manager()
# setup the camera for debug video (looking at 0,0,0)
sim.agents[0].scene_node.translation = [0.0, -1.5, 2.0]
sim.agents[0]._sensors["color_sensor"].specification().clear_color = [
0.5,
0.5,
0.5,
]
# add a visualize reference frame
cube_prim_viz_handle = obj_template_mgr.get_template_handles("cubeWireframe")[0]
cube_viz_obj = rigid_obj_mgr.add_object_by_template_handle(cube_prim_viz_handle)
cube_viz_obj.collidable = False
cube_viz_obj.motion_type = habitat_sim.physics.MotionType.KINEMATIC
viz_cube_bb = cube_viz_obj.collision_shape_aabb
# add a test cube object
cube_prim_handle = obj_template_mgr.get_template_handles("cubeSolid")[0]
cube_obj = rigid_obj_mgr.add_object_by_template_handle(cube_prim_handle)
# ---------------------------
# test rigid P2P constraints
# ---------------------------
# add a constraint to dangle the cube from its corner at the origin
constraint_settings = habitat_sim.physics.RigidConstraintSettings()
constraint_settings.object_id_a = cube_obj.object_id
constraint_settings.pivot_a = cube_obj.collision_shape_aabb.front_top_left
constraint_id = sim.create_rigid_constraint(constraint_settings)
assert constraint_id >= 0
observations += simulate(sim, 1.0, produce_debug_video)
global_pivot_pos = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings.pivot_a
)
assert np.allclose(global_pivot_pos, np.zeros(3), atol=1.0e-4)
# move the cube constraint to a corner of the viz region
constraint_settings.pivot_b = viz_cube_bb.back_bottom_right
sim.update_rigid_constraint(constraint_id, constraint_settings)
observations += simulate(sim, 1.0, produce_debug_video)
global_pivot_pos = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings.pivot_a
)
assert np.allclose(global_pivot_pos, constraint_settings.pivot_b, atol=1.0e-4)
# switch the cube pivot to the opposite corner
constraint_settings.pivot_a = cube_obj.collision_shape_aabb.back_bottom_right
sim.update_rigid_constraint(constraint_id, constraint_settings)
observations += simulate(sim, 2.0, produce_debug_video)
global_pivot_pos = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings.pivot_a
)
assert np.allclose(global_pivot_pos, constraint_settings.pivot_b, atol=1.0e-4)
# add another object and constraint them together
cube_obj_2 = rigid_obj_mgr.add_object_by_template_handle(cube_prim_handle)
constraint_settings_2 = habitat_sim.physics.RigidConstraintSettings()
constraint_settings_2.object_id_a = cube_obj_2.object_id
constraint_settings_2.object_id_b = cube_obj.object_id
constraint_settings_2.pivot_a = (
cube_obj_2.collision_shape_aabb.back_bottom_right
)
constraint_settings_2.pivot_b = cube_obj.collision_shape_aabb.front_top_left
constraint_settings_2.max_impulse = 1000.0
constraint_id_2 = sim.create_rigid_constraint(constraint_settings_2)
assert constraint_id_2 >= 0
observations += simulate(sim, 4.0, produce_debug_video)
global_pivot_pos = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings.pivot_a
)
assert np.allclose(global_pivot_pos, constraint_settings.pivot_b, atol=1.0e-3)
# both pivot corners should be at the same global position
global_connect_a = cube_obj_2.root_scene_node.transformation.transform_point(
constraint_settings_2.pivot_a
)
global_connect_b = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings_2.pivot_b
)
assert np.allclose(global_connect_a, global_connect_b, atol=5.0e-3)
# weaken the world constraint
constraint_settings.max_impulse = 0.2
sim.update_rigid_constraint(constraint_id, constraint_settings)
observations += simulate(sim, 2.0, produce_debug_video)
global_pivot_pos = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings.pivot_a
)
assert not np.allclose(
global_pivot_pos, constraint_settings.pivot_b, atol=1.0e-4
)
# check that the queried settings are reflecting updates
queried_settings = sim.get_rigid_constraint_settings(constraint_id)
assert queried_settings.object_id_a == constraint_settings.object_id_a
assert queried_settings.object_id_b == constraint_settings.object_id_b
assert queried_settings.pivot_a == constraint_settings.pivot_a
assert queried_settings.pivot_b == constraint_settings.pivot_b
assert queried_settings.max_impulse == constraint_settings.max_impulse
queried_settings = sim.get_rigid_constraint_settings(constraint_id_2)
assert queried_settings.object_id_a == constraint_settings_2.object_id_a
assert queried_settings.object_id_b == constraint_settings_2.object_id_b
assert queried_settings.pivot_a == constraint_settings_2.pivot_a
assert queried_settings.pivot_b == constraint_settings_2.pivot_b
assert queried_settings.max_impulse == constraint_settings_2.max_impulse
assert cube_obj.translation[1] > -1.0
assert cube_obj_2.translation[1] > -1.0
# remove the constraint
sim.remove_rigid_constraint(constraint_id)
sim.remove_rigid_constraint(constraint_id_2)
observations += simulate(sim, 2.0, produce_debug_video)
# cubes should fall and separate
assert cube_obj.translation[1] < -1.0
assert cube_obj_2.translation[1] < -1.0
global_connect_a = cube_obj_2.root_scene_node.transformation.transform_point(
constraint_settings_2.pivot_a
)
global_connect_b = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings_2.pivot_b
)
assert not np.allclose(global_connect_a, global_connect_b, atol=0.5)
# -----------------------------
# test rigid fixed constraints
# -----------------------------
# use the same settings, but change the type
constraint_settings.constraint_type = (
habitat_sim.physics.RigidConstraintType.Fixed
)
constraint_settings_2.constraint_type = (
habitat_sim.physics.RigidConstraintType.Fixed
)
constraint_id = sim.create_rigid_constraint(constraint_settings)
constraint_id_2 = sim.create_rigid_constraint(constraint_settings_2)
observations += simulate(sim, 2.0, produce_debug_video)
# pivot relationship should be the same
global_connect_a = cube_obj_2.root_scene_node.transformation.transform_point(
constraint_settings_2.pivot_a
)
global_connect_b = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings_2.pivot_b
)
assert np.allclose(global_connect_a, global_connect_b, atol=5.0e-3)
global_pivot_pos = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings.pivot_a
)
assert np.allclose(global_pivot_pos, constraint_settings.pivot_b, atol=1.0e-3)
# default frames lock identity orientation
assert mn.math.angle(cube_obj.rotation, mn.Quaternion()) < mn.Rad(0.01)
assert mn.math.angle(cube_obj_2.rotation, mn.Quaternion()) < mn.Rad(0.01)
# change the global frame rotation
global_target_frame = mn.Quaternion.rotation(
mn.Rad(mn.math.pi / 4.0), mn.Vector3(1.0, 0, 0)
)
constraint_settings.frame_b = global_target_frame.to_matrix()
sim.update_rigid_constraint(constraint_id, constraint_settings)
# counter rotate object frames pi/4
local_target_frame_1 = mn.Quaternion.rotation(
mn.Rad(mn.math.pi / 4.0), mn.Vector3(0.0, 1.0, 0)
)
local_target_frame_2 = mn.Quaternion.rotation(
-mn.Rad(mn.math.pi / 4.0), mn.Vector3(0.0, 1.0, 0)
)
constraint_settings_2.frame_a = local_target_frame_1.to_matrix()
constraint_settings_2.frame_b = local_target_frame_2.to_matrix()
sim.update_rigid_constraint(constraint_id_2, constraint_settings_2)
observations += simulate(sim, 2.0, produce_debug_video)
# check global frame change of object 1
assert mn.math.angle(cube_obj.rotation, global_target_frame) < mn.Rad(0.01)
# check that relative frames of objects total pi/2
angle_error = mn.math.angle(cube_obj.rotation, cube_obj_2.rotation) - mn.Rad(
mn.math.pi / 4.0
)
assert abs(float(angle_error)) < 0.01
# removing objects will clear constraints
rigid_obj_mgr.remove_object_by_id(cube_obj_2.object_id)
# object should not be moving or in contact, but should not be allowed to sleep with an active constraint
assert cube_obj.awake
simulate(sim, 6.0, False)
assert cube_obj.awake
# -----------------------------------------
# test articulated|rigid mixed constraints
# -----------------------------------------
# add a humanoid to the world
robot_file = "data/test_assets/urdf/amass_male.urdf"
robot = art_obj_mgr.add_articulated_object_from_urdf(
filepath=robot_file, fixed_base=False
)
assert robot.is_alive
# need motors to stabalize the humanoid
joint_motor_settings = habitat_sim.physics.JointMotorSettings()
joint_motor_settings.position_gain = 0.5
robot.create_all_motors(joint_motor_settings)
# for link in range(robot.num_links):
# print(robot.get_link_name(link))
# hang AO from cube with P2P
constraint_settings_2 = habitat_sim.physics.RigidConstraintSettings()
constraint_settings_2.object_id_a = robot.object_id
constraint_settings_2.link_id_a = 9 # lwrist
constraint_settings_2.object_id_b = cube_obj.object_id
constraint_settings_2.pivot_b = cube_obj.collision_shape_aabb.front_top_left
constraint_settings_2.max_impulse = 10000000
constraint_id_2 = sim.create_rigid_constraint(constraint_settings_2)
constraint_settings.max_impulse = 10000000
sim.update_rigid_constraint(constraint_id, constraint_settings)
observations += simulate(sim, 4.0, produce_debug_video)
global_connect_a = robot.get_link_scene_node(
constraint_settings_2.link_id_a
).transformation.transform_point(constraint_settings_2.pivot_a)
global_connect_b = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings_2.pivot_b
)
assert np.allclose(global_connect_a, global_connect_b, atol=0.04)
# hang AO from cube with Fixed constraint
sim.remove_rigid_constraint(constraint_id_2)
constraint_settings_2.constraint_type = (
habitat_sim.physics.RigidConstraintType.Fixed
)
constraint_id_2 = sim.create_rigid_constraint(constraint_settings_2)
observations += simulate(sim, 4.0, produce_debug_video)
# check pivots are aligned
global_connect_a = robot.get_link_scene_node(
constraint_settings_2.link_id_a
).transformation.transform_point(constraint_settings_2.pivot_a)
global_connect_b = cube_obj.root_scene_node.transformation.transform_point(
constraint_settings_2.pivot_b
)
assert np.allclose(global_connect_a, global_connect_b, atol=0.08)
# check that relative frames of objects near 0
assert (
mn.math.angle(
cube_obj.rotation,
robot.get_link_scene_node(constraint_settings_2.link_id_a).rotation,
)
< mn.Rad(0.1)
)
# counter rotate object frames pi/4
local_target_frame_1 = mn.Quaternion.rotation(
mn.Rad(mn.math.pi / 4.0), mn.Vector3(0.0, 0, 1.0)
)
local_target_frame_2 = mn.Quaternion.rotation(
-mn.Rad(mn.math.pi / 4.0), mn.Vector3(0.0, 0, 1.0)
)
constraint_settings_2.frame_a = local_target_frame_1.to_matrix()
constraint_settings_2.frame_b = local_target_frame_2.to_matrix()
sim.update_rigid_constraint(constraint_id_2, constraint_settings_2)
observations += simulate(sim, 4.0, produce_debug_video)
# check frames align
angle_error = (
mn.math.angle(
cube_obj.rotation,
robot.get_link_scene_node(constraint_settings_2.link_id_a).rotation,
)
- mn.Rad(mn.math.pi / 4.0)
)
assert abs(float(angle_error)) < 0.05
# remove cube and AO should fall
assert robot.translation[1] > -2
rigid_obj_mgr.remove_object_by_id(cube_obj.object_id)
observations += simulate(sim, 1.0, produce_debug_video)
assert robot.translation[1] < -3
# hang AO from the world with P2P
constraint_settings_2.object_id_b = -1
constraint_settings_2.constraint_type = (
habitat_sim.physics.RigidConstraintType.PointToPoint
)
constraint_id_2 = sim.create_rigid_constraint(constraint_settings_2)
observations += simulate(sim, 5.0, produce_debug_video)
# check pivots are aligned
global_connect_a = robot.get_link_scene_node(
constraint_settings_2.link_id_a
).transformation.transform_point(constraint_settings_2.pivot_a)
assert np.allclose(global_connect_a, constraint_settings_2.pivot_b, atol=0.04)
# hang AO from world Fixed
sim.remove_rigid_constraint(constraint_id_2)
constraint_settings_2.constraint_type = (
habitat_sim.physics.RigidConstraintType.Fixed
)
# counter rotate object frames pi/4
local_target_frame_1 = mn.Quaternion.rotation(
mn.Rad(mn.math.pi / 4.0), mn.Vector3(0, 1.0, 0)
)
local_target_frame_2 = mn.Quaternion.rotation(
-mn.Rad(mn.math.pi / 4.0), mn.Vector3(0, 1.0, 0)
)
constraint_settings_2.frame_a = local_target_frame_1.to_matrix()
constraint_settings_2.frame_b = local_target_frame_2.to_matrix()
constraint_id_2 = sim.create_rigid_constraint(constraint_settings_2)
observations += simulate(sim, 5.0, produce_debug_video)
# check pivots are aligned
global_connect_a = robot.get_link_scene_node(
constraint_settings_2.link_id_a
).transformation.transform_point(constraint_settings_2.pivot_a)
assert np.allclose(global_connect_a, constraint_settings_2.pivot_b, atol=0.08)
# check frames align
angle_error = (
mn.math.angle(
local_target_frame_2,
robot.get_link_scene_node(constraint_settings_2.link_id_a).rotation,
)
- mn.Rad(mn.math.pi / 4.0)
)
assert abs(float(angle_error)) < 0.2
# hang the object from its base link
constraint_settings_2.link_id_a = -1
sim.remove_rigid_constraint(constraint_id_2)
constraint_id_2 = sim.create_rigid_constraint(constraint_settings_2)
observations += simulate(sim, 5.0, produce_debug_video)
angle_error = mn.math.angle(local_target_frame_2, robot.rotation) - mn.Rad(
mn.math.pi / 4.0
)
# NOTE: This error is a bit high, but constraint is doing its best
assert abs(float(angle_error)) < 0.4
# check pivots are aligned
global_connect_a = robot.get_link_scene_node(
constraint_settings_2.link_id_a
).transformation.transform_point(constraint_settings_2.pivot_a)
assert np.allclose(robot.translation, constraint_settings_2.pivot_b, atol=0.08)
sim.remove_rigid_constraint(constraint_id_2)
# -----------------------------------------
# test articulated constraints
# -----------------------------------------
# AO - AO P2P
robot.clear_joint_states()
robot.translation = [0.775, 0.0, 0.0]
robot.rotation = mn.Quaternion()
# add a new robot with a fixed base
robot2 = art_obj_mgr.add_articulated_object_from_urdf(
filepath=robot_file, fixed_base=True
)
robot2.translation = [-0.775, 0.0, 0.0]
robot2.create_all_motors(joint_motor_settings)
constraint_settings_2 = habitat_sim.physics.RigidConstraintSettings()
constraint_settings_2.object_id_a = robot.object_id
constraint_settings_2.link_id_a = 15 # rwrist
constraint_settings_2.pivot_a = [-0.04, 0.0, 0.0]
constraint_settings_2.object_id_b = robot2.object_id
constraint_settings_2.link_id_b = 9 # lwrist
constraint_settings_2.pivot_b = [0.04, 0.0, 0.0]
constraint_settings_2.max_impulse = 10000000
constraint_id_2 = sim.create_rigid_constraint(constraint_settings_2)
observations += simulate(sim, 5.0, produce_debug_video)
# check pivots are aligned
global_connect_a = robot.get_link_scene_node(
constraint_settings_2.link_id_a
).transformation.transform_point(constraint_settings_2.pivot_a)
global_connect_b = robot2.get_link_scene_node(
constraint_settings_2.link_id_b
).transformation.transform_point(constraint_settings_2.pivot_b)
assert np.allclose(global_connect_a, global_connect_b, atol=0.08)
# switch to fixed constraint
sim.remove_rigid_constraint(constraint_id_2)
constraint_settings_2.constraint_type = (
habitat_sim.physics.RigidConstraintType.Fixed
)
constraint_id_2 = sim.create_rigid_constraint(constraint_settings_2)
observations += simulate(sim, 5.0, produce_debug_video)
# check pivots are aligned
global_connect_a = robot.get_link_scene_node(
constraint_settings_2.link_id_a
).transformation.transform_point(constraint_settings_2.pivot_a)
global_connect_b = robot2.get_link_scene_node(
constraint_settings_2.link_id_b
).transformation.transform_point(constraint_settings_2.pivot_b)
assert np.allclose(global_connect_a, global_connect_b, atol=0.08)
# check frames align
angle_error = mn.math.angle(
robot.get_link_scene_node(constraint_settings_2.link_id_a).rotation,
robot2.get_link_scene_node(constraint_settings_2.link_id_b).rotation,
)
# NOTE: This error is a bit high, but constraint is doing its best
assert abs(float(angle_error)) < 0.37
# produce some test debug video
if produce_debug_video:
from habitat_sim.utils import viz_utils as vut
vut.make_video(
observations,
"color_sensor",
"color",
"test_rigid_constraints",
open_vid=True,
)
@pytest.mark.skipif(
not osp.exists("data/scene_datasets/habitat-test-scenes/apartment_1.glb"),
reason="Requires the habitat-test-scenes",
)
@pytest.mark.skipif(
not habitat_sim.built_with_bullet,
reason="ArticulatedObject API requires Bullet physics.",
)
def test_bullet_collision_helper():
cfg_settings = examples.settings.default_sim_settings.copy()
cfg_settings["scene"] = "data/scene_datasets/habitat-test-scenes/apartment_1.glb"
cfg_settings["enable_physics"] = True
# loading the physical scene
hab_cfg = examples.settings.make_cfg(cfg_settings)
with habitat_sim.Simulator(hab_cfg) as sim:
obj_template_mgr = sim.get_object_template_manager()
cube_prim_handle = obj_template_mgr.get_template_handles("cube")[0]
rigid_obj_mgr = sim.get_rigid_object_manager()
cube_obj = rigid_obj_mgr.add_object_by_template_handle(cube_prim_handle)
cube_obj.translation = [2.5, 1.5, 2.5]
sim.step_physics(0.01)
assert sim.get_physics_num_active_contact_points() == 0
assert sim.get_physics_num_active_overlapping_pairs() == 0
assert (
sim.get_physics_step_collision_summary()
== "(no active collision manifolds)\n"
)
sim.step_physics(0.25)
assert sim.get_physics_num_active_contact_points() == 4
assert sim.get_physics_num_active_overlapping_pairs() == 1
assert (
sim.get_physics_step_collision_summary()
== "[RigidObject, cubeSolid, id 0] vs [Stage, subpart 0], 4 points\n"
)
sim.step_physics(3.0)
assert sim.get_physics_num_active_contact_points() == 0
assert sim.get_physics_num_active_overlapping_pairs() == 0
assert (
sim.get_physics_step_collision_summary()
== "(no active collision manifolds)\n"
)
rigid_obj_mgr.remove_object_by_id(cube_obj.object_id)
art_obj_mgr = sim.get_articulated_object_manager()
robot_file = "data/test_assets/urdf/fridge/fridge.urdf"
# parse URDF and add an ArticulatedObject to the world
robot = art_obj_mgr.add_articulated_object_from_urdf(filepath=robot_file)
assert robot.is_alive
robot.translation = mn.Vector3(2.5, 4.0, 2.5)
robot.rotation = mn.Quaternion()
sim.step_physics(0.01)
assert sim.get_physics_num_active_contact_points() == 0
assert sim.get_physics_num_active_overlapping_pairs() == 0
assert (
sim.get_physics_step_collision_summary()
== "(no active collision manifolds)\n"
)
sim.step_physics(0.75)
assert sim.get_physics_num_active_contact_points() == 2
# lots of overlapping pairs due to various fridge links near the stage
assert sim.get_physics_num_active_overlapping_pairs() == 5
assert (
sim.get_physics_step_collision_summary()
== "[URDF, fridge, link body] vs [Stage, subpart 0], 2 points\n"
)
sim.step_physics(3.0)
assert sim.get_physics_num_active_contact_points() == 0
assert sim.get_physics_num_active_overlapping_pairs() == 0
assert (
sim.get_physics_step_collision_summary()
== "(no active collision manifolds)\n"
)
| [
"noreply@github.com"
] | alstar8.noreply@github.com |
ae7f7ab412f1e0e4ea68463a31e7fa4c908c8bde | 06fa2d4eb930a72fa75f18f64e13ae5508998572 | /middlewares.py | b53e0733af567b00e64cfee3099bd847e8126b74 | [] | no_license | YusraShereen/ScrapyProj | 4a1dc232e0d82d971b99f0aad1aabc74d5152ba6 | 0643aa3da6a0c646b16fd98df5093febb2bed6cf | refs/heads/master | 2023-06-19T10:49:24.164988 | 2021-07-20T16:36:12 | 2021-07-20T16:36:12 | 387,853,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,656 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class ScrapyprojSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, or item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class ScrapyprojDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"yusrashereen@gmail.com"
] | yusrashereen@gmail.com |
94066670c65fcbc3311f8710c8a8089ff1abd2e0 | 76d9f9197480e2d6be9946ffc6339e2d874168e8 | /try_except.py | a93f572f64219386212ea8f854a3321f51a6a6d4 | [] | no_license | Falrym/Python_Projects_CS_120 | dabe5f278b1450f04ff6ee5077cefda4ceba5e2f | 4b691868a28ba5e72ea721f79b21108463adf27e | refs/heads/master | 2023-02-14T18:16:30.916313 | 2021-01-02T18:14:13 | 2021-01-02T18:14:13 | 326,240,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | # while True:
# try:
# numerator = int(input("numerator: "))
# denominator = int(input("denominator: "))
# result = numerator/denominator
# print(result)
# except ZeroDivisionError:
# print("you cannot divied by zero")
# try:
# number = int(input("enter a number: "))
# print(number)
# break
# except ValueError:
# print("you must enter a number")
# try:
# name = tim
# list_ = [1,2,3]
# print(name+list_)
# except TypeError:
# print("can only add list to list not string to list")
# def secret_message():
# try:
# secret = "codingisfun"
# return codingisfun
# except NameError:
# return "NameError occured. Some variable isn't defined."
# print(secret_message()) | [
"derek.hudgens@yahoo.com"
] | derek.hudgens@yahoo.com |
e7909ec595087995f080878c3c686c959150fd13 | 6da29ccfd0349366085095c98c91fc6d39eef670 | /util/mailutil.py | 766141a43084ecf90e263f68b4f76e1e1d4c7bad | [] | no_license | lwk123/Test58 | f92b052609815ab25799e99d7fd775d4ab3a1506 | eb40ac8feb0920fbb34329b4a8ff9bbf1480ca18 | refs/heads/master | 2021-09-03T13:44:45.543370 | 2018-01-09T13:56:06 | 2018-01-09T13:56:06 | 115,908,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# 发送邮件
import ConfigParser
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
config = ConfigParser.ConfigParser()
config.readfp(open("../config.properties", "rb"))
session_name = 'email'
# 第三方 SMTP 服务
mail_host = config.get(session_name,"email.host") # 设置服务器
mail_user = config.get(session_name,"email.user") # 用户名
mail_pass = config.get(session_name,"email.pwd") # 口令
sender = config.get(session_name,"email.user")
##发送邮件,receivers是接受者数组,subject是邮件标题,content是邮件内容
def send_email(receivers,subject,content):
message = MIMEText(content, 'plain', 'utf-8')
message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP()
smtpObj.connect(mail_host, 25) # 25 为 SMTP 端口号
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
print "邮件发送成功"
except smtplib.SMTPException:
print "Error: 无法发送邮件"
##发送邮件,receivers是接受者数组,subject是邮件标题,content是邮件内容,from是发件人名称,to是收件人名称
def send_email_two(receivers,subject,content,fromwhere,to):
message = MIMEText(content, 'plain', 'utf-8')
message['From'] = Header(fromwhere, 'utf-8')
message['To'] = Header(to, 'utf-8')
message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP()
smtpObj.connect(mail_host, 25) # 25 为 SMTP 端口号
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
print "邮件发送成功"
except smtplib.SMTPException:
print "Error: 无法发送邮件"
| [
"1985636414@qq.com"
] | 1985636414@qq.com |
3cd264ce5cf91b24270e303157172e16a52098ba | b50f0d3b4a7cac4ad33c23654c330655ed740930 | /pythonx/python_function_expander/vendors/astroid/tests/unittest_nodes.py | b831c9bde31a19bfc925909403f92b18f68204b4 | [
"MIT"
] | permissive | ColinKennedy/vim-python-function-expander | 869c8025a948ee48ca8aac758001c4c2b8765218 | 7fbaac0bdb6316177c85284052694175a2819638 | refs/heads/master | 2020-04-04T13:04:28.664537 | 2019-10-07T00:15:57 | 2019-10-07T00:15:57 | 155,947,699 | 22 | 0 | MIT | 2018-11-07T18:36:40 | 2018-11-03T03:50:14 | Python | UTF-8 | Python | false | false | 30,227 | py | # Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2013-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""tests for specific behaviour of astroid nodes
"""
import os
import sys
import textwrap
import unittest
import warnings
import six
import astroid
from astroid import bases
from astroid import builder
from astroid import context as contextmod
from astroid import exceptions
from astroid import node_classes
from astroid import nodes
from astroid import parse
from astroid import util
from astroid import test_utils
from astroid import transforms
from astroid.tests import resources
abuilder = builder.AstroidBuilder()
BUILTINS = six.moves.builtins.__name__
class AsStringTest(resources.SysPathSetup, unittest.TestCase):
def test_tuple_as_string(self):
def build(string):
return abuilder.string_build(string).body[0].value
self.assertEqual(build('1,').as_string(), '(1, )')
self.assertEqual(build('1, 2, 3').as_string(), '(1, 2, 3)')
self.assertEqual(build('(1, )').as_string(), '(1, )')
self.assertEqual(build('1, 2, 3').as_string(), '(1, 2, 3)')
@test_utils.require_version(minver='3.0')
def test_func_signature_issue_185(self):
code = textwrap.dedent('''
def test(a, b, c=42, *, x=42, **kwargs):
print(a, b, c, args)
''')
node = parse(code)
self.assertEqual(node.as_string().strip(), code.strip())
def test_as_string_for_list_containing_uninferable(self):
node = builder.extract_node('''
def foo():
bar = [arg] * 1
''')
binop = node.body[0].value
inferred = next(binop.infer())
self.assertEqual(inferred.as_string(), '[Uninferable]')
self.assertEqual(binop.as_string(), '([arg]) * (1)')
def test_frozenset_as_string(self):
ast_nodes = builder.extract_node('''
frozenset((1, 2, 3)) #@
frozenset({1, 2, 3}) #@
frozenset([1, 2, 3,]) #@
frozenset(None) #@
frozenset(1) #@
''')
ast_nodes = [next(node.infer()) for node in ast_nodes]
self.assertEqual(ast_nodes[0].as_string(), 'frozenset((1, 2, 3))')
self.assertEqual(ast_nodes[1].as_string(), 'frozenset({1, 2, 3})')
self.assertEqual(ast_nodes[2].as_string(), 'frozenset([1, 2, 3])')
self.assertNotEqual(ast_nodes[3].as_string(), 'frozenset(None)')
self.assertNotEqual(ast_nodes[4].as_string(), 'frozenset(1)')
def test_varargs_kwargs_as_string(self):
ast = abuilder.string_build('raise_string(*args, **kwargs)').body[0]
self.assertEqual(ast.as_string(), 'raise_string(*args, **kwargs)')
def test_module_as_string(self):
"""check as_string on a whole module prepared to be returned identically
"""
module = resources.build_file('data/module.py', 'data.module')
with open(resources.find('data/module.py'), 'r') as fobj:
self.assertMultiLineEqual(module.as_string(), fobj.read())
def test_module2_as_string(self):
"""check as_string on a whole module prepared to be returned identically
"""
module2 = resources.build_file('data/module2.py', 'data.module2')
with open(resources.find('data/module2.py'), 'r') as fobj:
self.assertMultiLineEqual(module2.as_string(), fobj.read())
def test_as_string(self):
"""check as_string for python syntax >= 2.7"""
code = '''one_two = {1, 2}
b = {v: k for (k, v) in enumerate('string')}
cdd = {k for k in b}\n\n'''
ast = abuilder.string_build(code)
self.assertMultiLineEqual(ast.as_string(), code)
@test_utils.require_version('3.0')
def test_3k_as_string(self):
"""check as_string for python 3k syntax"""
code = '''print()
def function(var):
nonlocal counter
try:
hello
except NameError as nexc:
(*hell, o) = b'hello'
raise AttributeError from nexc
\n'''
ast = abuilder.string_build(code)
self.assertEqual(ast.as_string(), code)
@test_utils.require_version('3.0')
@unittest.expectedFailure
def test_3k_annotations_and_metaclass(self):
code_annotations = textwrap.dedent('''
def function(var:int):
nonlocal counter
class Language(metaclass=Natural):
"""natural language"""
''')
ast = abuilder.string_build(code_annotations)
self.assertEqual(ast.as_string(), code_annotations)
def test_ellipsis(self):
ast = abuilder.string_build('a[...]').body[0]
self.assertEqual(ast.as_string(), 'a[...]')
def test_slices(self):
for code in ('a[0]', 'a[1:3]', 'a[:-1:step]', 'a[:,newaxis]',
'a[newaxis,:]', 'del L[::2]', 'del A[1]', 'del Br[:]'):
ast = abuilder.string_build(code).body[0]
self.assertEqual(ast.as_string(), code)
def test_slice_and_subscripts(self):
code = """a[:1] = bord[2:]
a[:1] = bord[2:]
del bree[3:d]
bord[2:]
del av[d::f], a[df:]
a[:1] = bord[2:]
del SRC[::1,newaxis,1:]
tous[vals] = 1010
del thousand[key]
del a[::2], a[:-1:step]
del Fee.form[left:]
aout.vals = miles.of_stuff
del (ccok, (name.thing, foo.attrib.value)), Fee.form[left:]
if all[1] == bord[0:]:
pass\n\n"""
ast = abuilder.string_build(code)
self.assertEqual(ast.as_string(), code)
class _NodeTest(unittest.TestCase):
    """base class: parse CODE once per subclass and cache the module"""
    CODE = None

    @property
    def astroid(self):
        # Cache the parsed module in the subclass __dict__ so each CODE
        # string is only parsed once per test class.
        try:
            return self.__class__.__dict__['CODE_Astroid']
        except KeyError:
            module = builder.parse(self.CODE)
            self.__class__.CODE_Astroid = module
            return module
class IfNodeTest(_NodeTest):
    """test transformation of If Node"""
    # blank lines between the statements matter: the block_range assertions
    # below refer to absolute line numbers in this snippet
    CODE = """
        if 0:
            print()

        if True:
            print()
        else:
            pass

        if "":
            print()
        elif []:
            raise

        if 1:
            print()
        elif True:
            print()
        elif func():
            pass
        else:
            raise
    """

    def test_if_elif_else_node(self):
        """test transformation for If node"""
        self.assertEqual(len(self.astroid.body), 4)
        for statement in self.astroid.body:
            self.assertIsInstance(statement, nodes.If)
        self.assertFalse(self.astroid.body[0].orelse)  # simple If
        self.assertIsInstance(self.astroid.body[1].orelse[0], nodes.Pass)  # If / else
        self.assertIsInstance(self.astroid.body[2].orelse[0], nodes.If)  # If / elif
        self.assertIsInstance(self.astroid.body[3].orelse[0].orelse[0], nodes.If)

    def test_block_range(self):
        # XXX ensure expected values
        self.assertEqual(self.astroid.block_range(1), (0, 22))
        self.assertEqual(self.astroid.block_range(10), (0, 22))  # XXX (10, 22) ?
        self.assertEqual(self.astroid.body[1].block_range(5), (5, 6))
        self.assertEqual(self.astroid.body[1].block_range(6), (6, 6))
        self.assertEqual(self.astroid.body[1].orelse[0].block_range(7), (7, 8))
        self.assertEqual(self.astroid.body[1].orelse[0].block_range(8), (8, 8))
class TryExceptNodeTest(_NodeTest):
    """block_range over a try/except/else statement"""
    CODE = """
        try:
            print ('pouet')
        except IOError:
            pass
        except UnicodeError:
            print()
        else:
            print()
    """

    def test_block_range(self):
        # XXX ensure expected values
        statement = self.astroid.body[0]
        self.assertEqual(statement.block_range(1), (1, 8))
        self.assertEqual(statement.block_range(2), (2, 2))
        self.assertEqual(statement.block_range(3), (3, 8))
        self.assertEqual(statement.block_range(4), (4, 4))
        self.assertEqual(statement.block_range(5), (5, 5))
        self.assertEqual(statement.block_range(6), (6, 6))
        self.assertEqual(statement.block_range(7), (7, 7))
        self.assertEqual(statement.block_range(8), (8, 8))
class TryFinallyNodeTest(_NodeTest):
    """block_range over a try/finally statement"""
    CODE = """
        try:
            print ('pouet')
        finally:
            print ('pouet')
    """

    def test_block_range(self):
        # XXX ensure expected values
        statement = self.astroid.body[0]
        self.assertEqual(statement.block_range(1), (1, 4))
        self.assertEqual(statement.block_range(2), (2, 2))
        self.assertEqual(statement.block_range(3), (3, 4))
        self.assertEqual(statement.block_range(4), (4, 4))
class TryExceptFinallyNodeTest(_NodeTest):
    """block_range over a try/except/finally statement"""
    CODE = """
        try:
            print('pouet')
        except Exception:
            print ('oops')
        finally:
            print ('pouet')
    """

    def test_block_range(self):
        # XXX ensure expected values
        statement = self.astroid.body[0]
        self.assertEqual(statement.block_range(1), (1, 6))
        self.assertEqual(statement.block_range(2), (2, 2))
        self.assertEqual(statement.block_range(3), (3, 4))
        self.assertEqual(statement.block_range(4), (4, 4))
        self.assertEqual(statement.block_range(5), (5, 5))
        self.assertEqual(statement.block_range(6), (6, 6))
@unittest.skipIf(six.PY3, "Python 2 specific test.")
class TryExcept2xNodeTest(_NodeTest):
    """the py2-only 'except E, (a, b):' target builds a Tuple node"""
    CODE = """
        try:
            hello
        except AttributeError, (retval, desc):
            pass
    """

    def test_tuple_attribute(self):
        handler = self.astroid.body[0].handlers[0]
        self.assertIsInstance(handler.name, nodes.Tuple)
class ImportNodeTest(resources.SysPathSetup, unittest.TestCase):
    """Import/ImportFrom nodes: resolution, real_name and as_string"""

    def setUp(self):
        super(ImportNodeTest, self).setUp()
        self.module = resources.build_file('data/module.py', 'data.module')
        self.module2 = resources.build_file('data/module2.py', 'data.module2')

    def test_import_self_resolve(self):
        myos = next(self.module2.igetattr('myos'))
        self.assertTrue(isinstance(myos, nodes.Module), myos)
        self.assertEqual(myos.name, 'os')
        self.assertEqual(myos.qname(), 'os')
        self.assertEqual(myos.pytype(), '%s.module' % BUILTINS)

    def test_from_self_resolve(self):
        namenode = next(self.module.igetattr('NameNode'))
        self.assertTrue(isinstance(namenode, nodes.ClassDef), namenode)
        self.assertEqual(namenode.root().name, 'astroid.node_classes')
        self.assertEqual(namenode.qname(), 'astroid.node_classes.Name')
        self.assertEqual(namenode.pytype(), '%s.type' % BUILTINS)
        abspath = next(self.module2.igetattr('abspath'))
        self.assertTrue(isinstance(abspath, nodes.FunctionDef), abspath)
        self.assertEqual(abspath.root().name, 'os.path')
        self.assertEqual(abspath.qname(), 'os.path.abspath')
        self.assertEqual(abspath.pytype(), '%s.function' % BUILTINS)

    def test_real_name(self):
        from_ = self.module['NameNode']
        self.assertEqual(from_.real_name('NameNode'), 'Name')
        imp_ = self.module['os']
        self.assertEqual(imp_.real_name('os'), 'os')
        self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, 'os.path')
        imp_ = self.module['NameNode']
        self.assertEqual(imp_.real_name('NameNode'), 'Name')
        self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, 'Name')
        imp_ = self.module2['YO']
        self.assertEqual(imp_.real_name('YO'), 'YO')
        self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, 'data')

    def test_as_string(self):
        node = self.module['modutils']
        self.assertEqual(node.as_string(), "from astroid import modutils")
        node = self.module['NameNode']
        self.assertEqual(node.as_string(), "from astroid.node_classes import Name as NameNode")
        node = self.module['os']
        self.assertEqual(node.as_string(), "import os.path")
        code = """from . import here
from .. import door
from .store import bread
from ..cave import wine\n\n"""
        module = abuilder.string_build(code)
        self.assertMultiLineEqual(module.as_string(), code)

    def test_bad_import_inference(self):
        # Explication of bug
        '''When we import PickleError from nonexistent, a call to the infer
        method of this From node will be made by unpack_infer.
        inference.infer_from will try to import this module, which will fail and
        raise a InferenceException (by mixins.do_import_module). The infer_name
        will catch this exception and yield and Uninferable instead.
        '''
        code = '''
            try:
                from pickle import PickleError
            except ImportError:
                from nonexistent import PickleError

            try:
                pass
            except PickleError:
                pass
        '''
        module = builder.parse(code)
        handler_type = module.body[1].handlers[0].type
        excs = list(node_classes.unpack_infer(handler_type))
        # The number of returned object can differ on Python 2
        # and Python 3. In one version, an additional item will
        # be returned, from the _pickle module, which is not
        # present in the other version.
        self.assertIsInstance(excs[0], nodes.ClassDef)
        self.assertEqual(excs[0].name, 'PickleError')
        self.assertIs(excs[-1], util.Uninferable)

    def test_absolute_import(self):
        module = resources.build_file('data/absimport.py')
        ctx = contextmod.InferenceContext()
        # will fail if absolute import failed
        ctx.lookupname = 'message'
        next(module['message'].infer(ctx))
        ctx.lookupname = 'email'
        mod = next(module['email'].infer(ctx))
        self.assertFalse(mod.file.startswith(os.path.join('data', 'email.py')))

    def test_more_absolute_import(self):
        module = resources.build_file('data/module1abs/__init__.py', 'data.module1abs')
        self.assertIn('sys', module.locals)
class CmpNodeTest(unittest.TestCase):
    """comparison nodes round-trip through as_string"""

    def test_as_string(self):
        node = abuilder.string_build("a == 2").body[0]
        self.assertEqual(node.as_string(), "a == 2")
class ConstNodeTest(unittest.TestCase):
    """const_factory must proxy each literal to the matching builtin class"""

    def _test(self, value):
        # pylint: disable=no-member; union type in const_factory, this shouldn't happen
        node = nodes.const_factory(value)
        self.assertIsInstance(node._proxied, nodes.ClassDef)
        self.assertEqual(node._proxied.name, value.__class__.__name__)
        self.assertIs(node.value, value)
        self.assertTrue(node._proxied.parent)
        self.assertEqual(node._proxied.root().name, value.__class__.__module__)

    def test_none(self):
        self._test(None)

    def test_bool(self):
        self._test(True)

    def test_int(self):
        self._test(1)

    def test_float(self):
        self._test(1.0)

    def test_complex(self):
        self._test(1.0j)

    def test_str(self):
        self._test('a')

    def test_unicode(self):
        self._test(u'a')
class NameNodeTest(unittest.TestCase):
    def test_assign_to_True(self):
        """test that True and False assignments don't crash"""
        code = """
            True = False
            def hello(False):
                pass
            del True
            """
        if sys.version_info >= (3, 0):
            # assigning to a keyword is a syntax error on python 3
            with self.assertRaises(exceptions.AstroidBuildingError):
                builder.parse(code)
        else:
            module = builder.parse(code)
            assign_true = module['True']
            self.assertIsInstance(assign_true, nodes.AssignName)
            self.assertEqual(assign_true.name, "True")
            del_true = module.body[2].targets[0]
            self.assertIsInstance(del_true, nodes.DelName)
            self.assertEqual(del_true.name, "True")
class AnnAssignNodeTest(unittest.TestCase):
    """annotated assignments (PEP 526) build AnnAssign nodes"""

    @test_utils.require_version(minver='3.6')
    def test_primitive(self):
        code = textwrap.dedent("""
        test: int = 5
        """)
        assign = builder.extract_node(code)
        self.assertIsInstance(assign, nodes.AnnAssign)
        self.assertEqual(assign.target.name, "test")
        self.assertEqual(assign.annotation.name, "int")
        self.assertEqual(assign.value.value, 5)
        self.assertEqual(assign.simple, 1)

    @test_utils.require_version(minver='3.6')
    def test_primitive_without_initial_value(self):
        code = textwrap.dedent("""
        test: str
        """)
        assign = builder.extract_node(code)
        self.assertIsInstance(assign, nodes.AnnAssign)
        self.assertEqual(assign.target.name, "test")
        self.assertEqual(assign.annotation.name, "str")
        self.assertEqual(assign.value, None)

    @test_utils.require_version(minver='3.6')
    def test_complex(self):
        code = textwrap.dedent("""
        test: Dict[List[str]] = {}
        """)
        assign = builder.extract_node(code)
        self.assertIsInstance(assign, nodes.AnnAssign)
        self.assertEqual(assign.target.name, "test")
        self.assertIsInstance(assign.annotation, astroid.Subscript)
        self.assertIsInstance(assign.value, astroid.Dict)

    @test_utils.require_version(minver='3.6')
    def test_as_string(self):
        code = textwrap.dedent("""
        print()
        test: int = 5
        test2: str
        test3: List[Dict[(str, str)]] = []
        """)
        module = abuilder.string_build(code)
        self.assertEqual(module.as_string().strip(), code.strip())
class ArgumentsNodeTC(unittest.TestCase):
    """line numbering and keyword-only detection on Arguments nodes"""

    def test_linenumbering(self):
        module = builder.parse('''
            def func(a,
                b): pass
            x = lambda x: None
        ''')
        self.assertEqual(module['func'].args.fromlineno, 2)
        self.assertFalse(module['func'].args.is_statement)
        xlambda = next(module['x'].infer())
        self.assertEqual(xlambda.args.fromlineno, 4)
        self.assertEqual(xlambda.args.tolineno, 4)
        self.assertFalse(xlambda.args.is_statement)
        if sys.version_info < (3, 0):
            self.assertEqual(module['func'].args.tolineno, 3)
        else:
            self.skipTest('FIXME http://bugs.python.org/issue10445 '
                          '(no line number on function args)')

    @test_utils.require_version(minver='3.0')
    def test_kwoargs(self):
        module = builder.parse('''
            def func(*, x):
                pass
        ''')
        args = module['func'].args
        self.assertTrue(args.is_argument('x'))
class UnboundMethodNodeTest(unittest.TestCase):
    def test_no_super_getattr(self):
        # This is a test for issue
        # https://bitbucket.org/logilab/astroid/issue/91, which tests
        # that UnboundMethod doesn't call super when doing .getattr.
        module = builder.parse('''
            class A(object):
                def test(self):
                    pass
            meth = A.test
        ''')
        node = next(module['meth'].infer())
        with self.assertRaises(exceptions.AttributeInferenceError):
            node.getattr('__missssing__')
        name = node.getattr('__name__')[0]
        self.assertIsInstance(name, nodes.Const)
        self.assertEqual(name.value, 'test')
class BoundMethodNodeTest(unittest.TestCase):
    def test_is_property(self):
        """accessing a property-like decorated method infers its return value"""
        module = builder.parse('''
            import abc

            def cached_property():
                # Not a real decorator, but we don't care
                pass
            def reify():
                # Same as cached_property
                pass
            def lazy_property():
                pass
            def lazyproperty():
                pass
            def lazy(): pass

            class A(object):
                @property
                def builtin_property(self):
                    return 42
                @abc.abstractproperty
                def abc_property(self):
                    return 42
                @cached_property
                def cached_property(self): return 42
                @reify
                def reified(self): return 42
                @lazy_property
                def lazy_prop(self): return 42
                @lazyproperty
                def lazyprop(self): return 42
                def not_prop(self): pass
                @lazy
                def decorated_with_lazy(self): return 42

            cls = A()
            builtin_property = cls.builtin_property
            abc_property = cls.abc_property
            cached_p = cls.cached_property
            reified = cls.reified
            not_prop = cls.not_prop
            lazy_prop = cls.lazy_prop
            lazyprop = cls.lazyprop
            decorated_with_lazy = cls.decorated_with_lazy
        ''')
        property_names = ('builtin_property', 'abc_property', 'cached_p',
                          'reified', 'lazy_prop', 'lazyprop',
                          'decorated_with_lazy')
        for prop in property_names:
            inferred = next(module[prop].infer())
            self.assertIsInstance(inferred, nodes.Const, prop)
            self.assertEqual(inferred.value, 42, prop)
        # a plain method stays a bound method
        inferred = next(module['not_prop'].infer())
        self.assertIsInstance(inferred, bases.BoundMethod)
class AliasesTest(unittest.TestCase):
    """transforms registered on the old (aliased) node classes still apply"""

    def setUp(self):
        self.transformer = transforms.TransformVisitor()

    def parse_transform(self, code):
        module = parse(code, apply_transforms=False)
        return self.transformer.visit(module)

    def test_aliases(self):
        def test_from(node):
            node.names = node.names + [('absolute_import', None)]
            return node

        def test_class(node):
            node.name = 'Bar'
            return node

        def test_function(node):
            node.name = 'another_test'
            return node

        def test_callfunc(node):
            if node.func.name == 'Foo':
                node.func.name = 'Bar'
                return node
            return None

        def test_assname(node):
            if node.name == 'foo':
                return nodes.AssignName('bar', node.lineno, node.col_offset,
                                        node.parent)
            return None

        def test_assattr(node):
            if node.attrname == 'a':
                node.attrname = 'b'
                return node
            return None

        def test_getattr(node):
            if node.attrname == 'a':
                node.attrname = 'b'
                return node
            return None

        def test_genexpr(node):
            if node.elt.value == 1:
                node.elt = nodes.Const(2, node.lineno, node.col_offset,
                                       node.parent)
                return node
            return None

        self.transformer.register_transform(nodes.From, test_from)
        self.transformer.register_transform(nodes.Class, test_class)
        self.transformer.register_transform(nodes.Function, test_function)
        self.transformer.register_transform(nodes.CallFunc, test_callfunc)
        self.transformer.register_transform(nodes.AssName, test_assname)
        self.transformer.register_transform(nodes.AssAttr, test_assattr)
        self.transformer.register_transform(nodes.Getattr, test_getattr)
        self.transformer.register_transform(nodes.GenExpr, test_genexpr)

        string = '''
        from __future__ import print_function

        class Foo: pass

        def test(a): return a

        foo = Foo()
        foo.a = test(42)
        foo.a
        (1 for _ in range(0, 42))
        '''
        module = self.parse_transform(string)

        self.assertEqual(len(module.body[0].names), 2)
        self.assertIsInstance(module.body[0], nodes.ImportFrom)
        self.assertEqual(module.body[1].name, 'Bar')
        self.assertIsInstance(module.body[1], nodes.ClassDef)
        self.assertEqual(module.body[2].name, 'another_test')
        self.assertIsInstance(module.body[2], nodes.FunctionDef)
        self.assertEqual(module.body[3].targets[0].name, 'bar')
        self.assertIsInstance(module.body[3].targets[0], nodes.AssignName)
        self.assertEqual(module.body[3].value.func.name, 'Bar')
        self.assertIsInstance(module.body[3].value, nodes.Call)
        self.assertEqual(module.body[4].targets[0].attrname, 'b')
        self.assertIsInstance(module.body[4].targets[0], nodes.AssignAttr)
        self.assertIsInstance(module.body[5], nodes.Expr)
        self.assertEqual(module.body[5].value.attrname, 'b')
        self.assertIsInstance(module.body[5].value, nodes.Attribute)
        self.assertEqual(module.body[6].value.elt.value, 2)
        self.assertIsInstance(module.body[6].value, nodes.GeneratorExp)

    @unittest.skipIf(six.PY3, "Python 3 doesn't have Repr nodes.")
    def test_repr(self):
        def test_backquote(node):
            node.value.name = 'bar'
            return node

        self.transformer.register_transform(nodes.Backquote, test_backquote)
        module = self.parse_transform('`foo`')
        self.assertEqual(module.body[0].value.value.name, 'bar')
        self.assertIsInstance(module.body[0].value, nodes.Repr)
class DeprecationWarningsTest(unittest.TestCase):
    """deprecated aliases must emit PendingDeprecationWarning"""

    def test_asstype_warnings(self):
        string = '''
        class C: pass
        c = C()
        with warnings.catch_warnings(record=True) as w:
            pass
        '''
        module = parse(string)
        filter_stmts_mixin = module.body[0]
        assign_type_mixin = module.body[1].targets[0]
        parent_assign_type_mixin = module.body[2]
        for mixin in (filter_stmts_mixin, assign_type_mixin,
                      parent_assign_type_mixin):
            with warnings.catch_warnings(record=True) as w:
                with test_utils.enable_warning(PendingDeprecationWarning):
                    mixin.ass_type()
                self.assertIsInstance(w[0].message, PendingDeprecationWarning)

    def test_isinstance_warnings(self):
        msg_format = ("%r is deprecated and slated for removal in astroid "
                      "2.0, use %r instead")
        for cls in (nodes.Discard, nodes.Backquote, nodes.AssName,
                    nodes.AssAttr, nodes.Getattr, nodes.CallFunc, nodes.From):
            with warnings.catch_warnings(record=True) as w:
                with test_utils.enable_warning(PendingDeprecationWarning):
                    isinstance(42, cls)
            self.assertIsInstance(w[0].message, PendingDeprecationWarning)
            actual_msg = msg_format % (cls.__class__.__name__, cls.__wrapped__.__name__)
            self.assertEqual(str(w[0].message), actual_msg)
@test_utils.require_version('3.5')
class Python35AsyncTest(unittest.TestCase):
    """async/await syntax builds the dedicated node types and round-trips"""

    def test_async_await_keywords(self):
        async_def, async_for, async_with, await_node = builder.extract_node('''
        async def func(): #@
            async for i in range(10): #@
                f = __(await i)
            async with test(): #@
                pass
        ''')
        self.assertIsInstance(async_def, nodes.AsyncFunctionDef)
        self.assertIsInstance(async_for, nodes.AsyncFor)
        self.assertIsInstance(async_with, nodes.AsyncWith)
        self.assertIsInstance(await_node, nodes.Await)
        self.assertIsInstance(await_node.value, nodes.Name)

    def _test_await_async_as_string(self, code):
        ast_node = parse(code)
        self.assertEqual(ast_node.as_string().strip(), code.strip())

    def test_await_as_string(self):
        code = textwrap.dedent('''
        async def function():
            await 42
        ''')
        self._test_await_async_as_string(code)

    def test_asyncwith_as_string(self):
        code = textwrap.dedent('''
        async def function():
            async with (42):
                pass
        ''')
        self._test_await_async_as_string(code)

    def test_asyncfor_as_string(self):
        code = textwrap.dedent('''
        async def function():
            async for i in range(10):
                await 42
        ''')
        self._test_await_async_as_string(code)
class ContextTest(unittest.TestCase):
    """expression contexts (Load/Store/Del) are set on built nodes"""

    def test_subscript_load(self):
        node = builder.extract_node('f[1]')
        self.assertIs(node.ctx, astroid.Load)

    def test_subscript_del(self):
        node = builder.extract_node('del f[1]')
        self.assertIs(node.targets[0].ctx, astroid.Del)

    def test_subscript_store(self):
        node = builder.extract_node('f[1] = 2')
        subscript = node.targets[0]
        self.assertIs(subscript.ctx, astroid.Store)

    def test_list_load(self):
        node = builder.extract_node('[]')
        self.assertIs(node.ctx, astroid.Load)

    def test_list_del(self):
        node = builder.extract_node('del []')
        self.assertIs(node.targets[0].ctx, astroid.Del)

    def test_list_store(self):
        with self.assertRaises(exceptions.AstroidSyntaxError):
            builder.extract_node('[0] = 2')

    def test_tuple_load(self):
        node = builder.extract_node('(1, )')
        self.assertIs(node.ctx, astroid.Load)

    def test_tuple_store(self):
        with self.assertRaises(exceptions.AstroidSyntaxError):
            builder.extract_node('(1, ) = 3')

    @test_utils.require_version(minver='3.5')
    def test_starred_load(self):
        node = builder.extract_node('a = *b')
        starred = node.value
        self.assertIs(starred.ctx, astroid.Load)

    @test_utils.require_version(minver='3.0')
    def test_starred_store(self):
        node = builder.extract_node('a, *b = 1, 2')
        starred = node.targets[0].elts[1]
        self.assertIs(starred.ctx, astroid.Store)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"colinvfx@gmail.com"
] | colinvfx@gmail.com |
8b8c2212cf4eaa65d485fcf99a858b2ec72c07c2 | 310dbcf30d39634452251375609b6139ae2aa764 | /backend/plugin.py | d534d968de492ffa1bf7fd6e29596617b5415b6d | [] | no_license | jwcxz/lpctrl | 15b63c63f8738423f7bce1130dce753f252a6f62 | 2fe42e5ae643511ac071bfb1d6e1a74cf672ca8f | refs/heads/master | 2016-09-06T18:01:31.857758 | 2013-05-04T06:31:26 | 2013-05-04T06:31:26 | 3,004,261 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | import threading
from backend.constants import *
# colour shown for each of the 8 slider levels (two entries per colour)
SLIDER_VALS = [0x30, 0x30,
               0x21, 0x21,
               0x12, 0x12,
               0x03, 0x03]
class Plugin(threading.Thread):
    """Base class for Launchpad plugins.

    A plugin is a thread that owns the Launchpad device, can render full
    frames (grid + side column + top row) and receives button input.

    Fixes: replaced py2-only ``xrange`` with ``range``, replaced manual
    ``i = 0; while i < 8: ... i += 2`` loops with ``range(0, 8, 2)`` and
    dropped the redundant semicolons. Behaviour is unchanged.
    """

    enabled = False

    def __init__(self, dev, args):
        """Store the device handle and the plugin's arguments."""
        threading.Thread.__init__(self)
        self.dev = dev
        self.args = args
        self.enabled = False
        self.dbuf = LP_DBC_ENB   # current double-buffer control value
        self.top = [0] * 8       # colours of the top-row buttons

    def run(self):
        # called to start the thread
        self.enabled = True

    def stop(self):
        # called when the thread is stopped
        self.enabled = False

    def showgrid(self, grid, side=[0]*8):
        """Push a full frame via rapid (two-button) writes, then swap buffers.

        NOTE(review): the mutable default for *side* is shared across calls;
        it is only read here, so it is kept for interface compatibility.
        """
        for row in grid:
            for i in range(0, 8, 2):
                self.dev.write_short(LP_ADDR_QWRT, row[i], row[i + 1])
        for i in range(0, 8, 2):
            self.dev.write_short(LP_ADDR_QWRT, side[i], side[i + 1])
        for i in range(0, 8, 2):
            self.dev.write_short(LP_ADDR_QWRT, self.top[i], self.top[i + 1])
        self.dev.write_short(LP_ADDR_CTRL, LP_REGS_DBUF, self.dbuf)
        # toggle which buffer is displayed/updated for the next frame
        self.dbuf = self.dbuf ^ (LP_DBC_B1U | LP_DBC_B1D)

    def handle_input(self, pkt):
        """Hook for subclasses: react to an input packet."""
        pass

    def addr_to_button(self, addr):
        """Decode a button address byte into (x, y) coordinates."""
        x = addr & 0xF
        y = addr >> 4
        return (x, y)

    def button_to_addr(self, button):
        """Encode (x, y) coordinates into a button address byte."""
        return (button[1] << 4) | button[0]

    def set(self, button, color):
        """Light *button* with *color*, copied to both buffers."""
        val = LP_BTN_CLR | LP_BTN_CPY | color
        addr = self.button_to_addr(button)
        self.dev.write_short(LP_ADDR_SETB, addr, val)

    def push(self, button, color, on, color_off=0):
        """Set *button* to *color* when *on* is truthy, else *color_off*."""
        self.set(button, color if on else color_off)

    def slider(self, x, y):
        """Draw a vertical slider in column *x* at level *y* (from the bottom)."""
        for yy in range(8):
            if yy < y:
                self.set((x, yy), 0)
            else:
                # NOTE(review): the colour is indexed by the level *y*, not
                # the row *yy*, so the whole bar is one colour -- confirm
                # this matches SLIDER_VALS' intended layout.
                self.set((x, yy), SLIDER_VALS[y])
| [
"jwc@jwcxz.com"
] | jwc@jwcxz.com |
cc93138d65edba1c5b1b60dce4310eef220e71e8 | 63599fe431ed0e82177a30136fed789eb7a06200 | /posts/models.py | 58dedbf27e09335dd67b1bfab3f58c2998a4bb76 | [] | no_license | yutanakachan/agefree2 | 5a22cfcc40024db4914316573c543a7186dead06 | 686afbc7a64edd02ff9ee039f8b19ad171e9bd33 | refs/heads/master | 2020-04-09T23:17:34.612213 | 2018-12-07T09:26:45 | 2018-12-07T09:26:45 | 160,652,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | from django.db import models
# Create your models here.
class Post(models.Model):
    """A blog post with a title, publication date, header image and body."""
    title = models.CharField(max_length=100)
    published = models.DateTimeField()
    image = models.ImageField(upload_to='media/')
    body = models.TextField()

    def __str__(self):
        # shown in the admin and anywhere the object is rendered as text
        return self.title

    def summary(self):
        # short preview of the body (first 10 characters) for list views
        return self.body[:10]
| [
"y.nakagawa1107@gmail.com"
] | y.nakagawa1107@gmail.com |
c0c262f31ed504fc6eda8edb69f734c6ecd1b643 | 4d3485ac082da067a52758908dc18f0d3dfdec12 | /src/mutualModelling/model.py | 7f7bee917dbeac47c242df2ef68a04b3f8649d43 | [
"ISC"
] | permissive | alexis-jacq/Mutual_Modelling | f8497b1da9b9da536dfec1927c8b44027fced7fa | 2e34cf2b6282341dffdd518584fcbc63df46df68 | refs/heads/master | 2021-01-17T20:28:50.584728 | 2016-11-09T17:19:45 | 2016-11-09T17:19:45 | 56,058,497 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,420 | py | #!/usr/bin/env python
# coding: utf-8
"""
library of functions/algorithms to build, update and compaire models of different agents built by a robot.
"""
import numpy as np
import random
import operator
from bidict import bidict
import copy
""" GLOBAL PARAMETERS """
# hebbian learning:
#==================
FIRE_TIME = 10 # time a event is activated
ETA1 = 0.9 # for EMA of the correlation between intensity of signals
# if delayed hebbian: GAMMA = 0.1 # time discount for learning
# reinforcement learning:
#========================
THETA1 = 10 # chose action (exponent for softmax pulling
THETA2 = 20 # chose perception
ETA2 = 0.8
DISCOUNT = 0.99 # discount for the impact of futur on the temporal diff algo
""" functions for random pulling"""
#----------------------------------
def random_pull_dict(distribution): # dist. is a dictionnary key->value
if distribution:
proba = np.array(distribution.values())*1.
proba = proba/np.sum(proba)
return np.random.choice(distribution.keys(),1,p=proba)[0]
else:
return None
def softmax(distribution):
    """Softmax draw over a 1-D array of values; returns an index, or None
    when the distribution is empty."""
    if not list(distribution):
        return None
    weights = np.exp(THETA1 * distribution)
    weights = weights / np.sum(weights)
    return np.random.choice(len(distribution), 1, p=weights)[0]
""" object Model """
#-------------------
class Model:
""" an object 'Model' representing hebbian-inspired network that encode dynamics between events representing concepts learned by an agent."""
def __init__(self, name, network=None, activateds=None, modifieds=None):
    """Create an empty model called *name*.

    The extra parameters are kept for interface compatibility; all state
    starts empty and grows through add_events/add_actions.
    """
    global FIRE_TIME
    self.name = name
    # --- events encoding concepts -------------------------------------
    self.intensities = {}    # event_id -> intensity in [-1, 1]
    self.nb_events = 0
    self.activateds = []     # recently activated events, most recent last
    self.old_intensities = []
    self.modifieds = set()   # events already modified by the current percept;
                             # separates the flow of reasoning from perception
    self.event_number = bidict()   # event_id <-> event index
    # --- hebbian learning (world's causality) -------------------------
    self.counts = np.zeros([0, 0, 0, 2, 2])   # close-activation counts
    # --- reinforcement learning (action) ------------------------------
    self.action = None
    self.expected = 0
    self.rewards = np.zeros([0, 2])   # reward per goal (0 when no objective)
    self.action_number = bidict()     # action_id <-> action index
    self.nb_actions = 0
    # TD learning with fuzzy states:
    self.Q = np.zeros([0, 0, 2])   # state-action values (QLearning-like TD)
    self.V = np.zeros([0, 0, 2])   # critic values (actor-critic)
    self.n = np.zeros([0, 0, 2])
    self.matter = np.ones([0, 2])  # importance of events (based on V)
    self.R = np.zeros([0, 2])      # estimation of direct reward
    # --- IRL and understandable behavior ------------------------------
    self.EA = -np.ones([0, 2])
    self.ES = np.zeros([0, 2])
    self.EI = np.zeros([0, 2])
""" functions for creating/updating/using models """
#--------------------------------------------------------------
def add_activated(self, event):
    """Append *event* to the activation history (ring of FIRE_TIME items)."""
    self.activateds.append(event)
    if len(self.activateds) > FIRE_TIME:
        # drop the oldest entry so the history keeps at most FIRE_TIME items
        del self.activateds[0]
def add_intensity(self, intensity):
    """Append *intensity* to the intensity history (ring of FIRE_TIME items)."""
    self.old_intensities.append(intensity)
    if len(self.old_intensities) > FIRE_TIME:
        # drop the oldest entry so the history keeps at most FIRE_TIME items
        del self.old_intensities[0]
def add_perceived(self, val):
    """Append *val* to the perception history (ring of FIRE_TIME items).

    Fix: ``self.perceiveds`` is not initialized by ``__init__`` (as far as
    this file shows), so the first call used to raise AttributeError; it is
    now created lazily.
    """
    if not hasattr(self, 'perceiveds'):
        self.perceiveds = []
    self.perceiveds.append(val)
    if len(self.perceiveds) > FIRE_TIME:
        # drop the oldest entry so the history keeps at most FIRE_TIME items
        del self.perceiveds[0]
def add_events(self, events_id):
    """Register new event ids and grow every per-event array accordingly.

    *events_id* must be a list or tuple of hashable ids; ids already known
    are ignored. Every learning matrix is resized to the new number of
    events, preserving the existing values.

    Fix: removed a stray commented-out Q-update line left over among the
    resize statements.
    """
    if isinstance(events_id, (list, tuple)):
        number = self.nb_events
        for event_id in events_id:
            self.intensities.setdefault(event_id, 0)
            if event_id not in self.event_number:
                self.event_number[event_id] = number
                number += 1
        # grow each array to the new event count, keeping old values:
        new_counts = np.zeros([self.nb_actions, number, number, 2, 2])
        new_counts[:, :self.nb_events, :self.nb_events, :, :] = self.counts
        self.counts = new_counts
        new_matter = np.ones([number, 2])
        new_matter[:self.nb_events, :] = self.matter
        self.matter = new_matter
        new_R = np.zeros([number, 2])
        new_R[:self.nb_events, :] = self.R
        self.R = new_R
        new_EA = -np.ones([number, 2])
        new_EA[:self.nb_events, :] = self.EA
        self.EA = new_EA
        new_ES = np.zeros([number, 2])
        new_ES[:self.nb_events, :] = self.ES
        self.ES = new_ES
        new_EI = np.zeros([number, 2])
        new_EI[:self.nb_events, :] = self.EI
        self.EI = new_EI
        new_rewards = np.zeros([number, 2])
        new_rewards[:self.nb_events, :] = self.rewards
        self.rewards = new_rewards
        new_Q = np.zeros([number, self.nb_actions, 2])
        new_Q[:self.nb_events, :self.nb_actions, :] = self.Q
        self.Q = new_Q
        new_V = np.zeros([number, self.nb_actions, 2])
        new_V[:self.nb_events, :self.nb_actions, :] = self.V
        self.V = new_V
        new_n = np.zeros([number, self.nb_actions, 2])
        new_n[:self.nb_events, :self.nb_actions, :] = self.n
        self.n = new_n
        self.nb_events = number
def add_actions(self, events_id):
    """Register new action ids (also registered as events) and grow every
    per-action array accordingly, preserving existing values."""
    if isinstance(events_id, (list, tuple)):
        # every action is also an event of the model
        self.add_events(events_id)
        number = self.nb_actions
        for event_id in events_id:
            if event_id not in self.action_number:
                self.action_number[event_id] = number
                number += 1
        new_counts = np.zeros([number, self.nb_events, self.nb_events, 2, 2])
        new_counts[:self.nb_actions, :, :, :, :] = self.counts
        self.counts = new_counts
        new_Q = np.zeros([self.nb_events, number, 2])
        new_Q[:, :self.nb_actions, :] = self.Q
        self.Q = new_Q
        new_V = np.zeros([self.nb_events, number, 2])
        new_V[:, :self.nb_actions, :] = self.V
        self.V = new_V
        new_n = np.zeros([self.nb_events, number, 2])
        new_n[:, :self.nb_actions, :] = self.n
        self.n = new_n
        self.nb_actions = number
def set_rewards(self, goals):
    """Declare goal events; each goal is a (event_id, value, reward) triple.

    Unknown events are registered first; *value* is clipped to [-1, 1] and
    its sign selects the positive/negative reward slot.
    """
    for event_id, value, reward in goals:
        if event_id not in self.event_number:
            self.add_events([event_id])
        clipped = max(-1., min(1., value))
        self.rewards[self.event_number[event_id], int(clipped > 0)] = reward
def set_instincts(self, obs_actions):
    """Hard-wire a-priori (event, value) -> action associations.

    Each item is a (event_id, value, action) triple; unknown events and
    actions are registered first, *value* is clipped to [-1, 1] and its
    sign selects the slot set to 1 in both Q (actor) and V (critic).
    """
    for event_id, value, action in obs_actions:
        if event_id not in self.event_number:
            self.add_events([event_id])
        if action not in self.action_number:
            self.add_actions([action])
        clipped = max(-1., min(1., value))
        event_num = self.event_number[event_id]
        action_num = self.action_number[action]
        self.Q[event_num, action_num, int(clipped > 0)] = 1.
        # mirrored in the critic values (EMA of TD):
        self.V[event_num, action_num, int(clipped > 0)] = 1.
def think_new_event(self, elligibles, new_intensities):
    """Imagine the next event while no percept arrives ('hopeful' strategy).

    Over all actions, picks the transition most strongly leading to the
    event with the highest estimated direct reward, and makes that event
    elligible for activation with full (+/-1) intensity.
    Returns the updated (elligibles, new_intensities) dicts.

    Fix: removed the dead 'rational' strategy (a large commented-out block
    that argmax'd over counts from the last activation) together with the
    locals that only it used.
    """
    # tiny random noise breaks ties between equal counts
    noise = np.random.rand(len(self.counts[0, 0, :, 0, 0])) / 1000.
    # strongest transition towards the most rewarded event, starting from a
    # positive (resp. negative) activation of each candidate source event:
    dream_act_pos = np.max(self.counts[:, :, np.argmax(self.R[:, 1]), 1, 1], 0)
    dream_act_neg = np.max(self.counts[:, :, np.argmax(self.R[:, 1]), 0, 1], 0)
    if max(dream_act_pos) >= max(dream_act_neg):
        new_event_num = np.argmax(dream_act_pos + noise)
        new_intensity = 1.
    else:
        new_event_num = np.argmax(dream_act_neg + noise)
        new_intensity = -1
    new_event = self.event_number.inv[new_event_num]
    elligibles[new_event] = np.exp(
        THETA2 * self.matter[new_event_num, int(new_intensity > 0)] * abs(new_intensity))
    new_intensities[new_event] = new_intensity
    return elligibles, new_intensities
def perceive_new_event(self, percepts, total_reward, elligibles):
    """Integrate percepts: register unknown events, collect rewards and run
    hebbian learning between the last activated event and each percept.

    Each percept is a (event_id, value) pair; returns the updated
    (total_reward, elligibles).

    Fix: the membership test used the whole (id, value) tuple -- which is
    never a key of event_number -- so add_events (and a full resize of all
    arrays) ran uselessly on every percept; it now tests the event id.
    """
    for percept in percepts:
        percept_id = percept[0]
        percept_val = percept[1]
        if percept_id not in self.event_number:
            self.add_events([percept_id])
        percept_num = self.event_number[percept_id]
        self.intensities[percept_id] = percept_val
        elligibles[percept_id] = np.exp(
            THETA2 * self.matter[percept_num, int(percept_val > 0)])
        if self.action and self.old_intensities:
            # direct reward weighted by the last activation's intensity
            total_reward += self.rewards[percept_num, int(percept_val > 0)] * np.abs(self.old_intensities[-1])
            father = self.activateds[-1]
            son = percept_id
            intensity_father = self.old_intensities[-1]
            intensity_son = percept_val
            action = self.action
            self.hebbian_learning(father, son, action, intensity_father, intensity_son)
    return total_reward, elligibles
    def update(self, possible_actions=None, percepts=None, explore=True, intrinsic=0):
        """
        One perception/reasoning cycle followed by a decision.

        If there are no percepts, the agent 'thinks' (simulates) a new
        event from its internal model; otherwise the percepts drive the
        eligibility competition.  The winner is drawn stochastically,
        reinforcement learning is applied, and an action is returned via
        decision_making().  `intrinsic` seeds the reward accumulator.
        """
        # FIND THE NEXT ACTIVATED:
        elligibles = {}
        new_intensities = {}
        # REASONING:
        #===========
        if self.old_intensities and not percepts:
            elligibles, new_intensities = self.think_new_event(elligibles, new_intensities)
        # PERCEPTION:
        #============
        # could add an action "force_reasoning" where the robot doesnot do the perception loop
        # like someone closing eyes in order to reason
        tot_reward = intrinsic
        if percepts:
            tot_reward, elligibles = self.perceive_new_event(percepts, tot_reward, elligibles)
        # UPDATES:
        #=========
        # stochastic election of incoming active event:
        #print percepts
        next_activated = random_pull_dict(elligibles)
        """if percepts :
            print "obs : "+str(next_activated)
        else:
            print "think : "+str(next_activated)"""
        # clamp the accumulated reward into [-1, 1]
        if tot_reward>1:
            tot_reward=1.
        if tot_reward<-1:
            tot_reward=-1.
        # new intensities:
        # only write an intensity once per episode (guarded by modifieds)
        for event in new_intensities:
            if event not in self.modifieds:
                self.intensities[event] = new_intensities[event]
                self.modifieds.add(event)
        # TODO: loop on the previous percept in past to make reinforcement with delay
        # action learning:
        # learn only from real observations, not from imagined events
        if self.action and percepts:
            self.reinforcement_learning(next_activated,tot_reward)
        # new activated event
        if next_activated:
            self.add_activated(next_activated)
            self.add_intensity(self.intensities[next_activated])
        # DECISION:
        #==========
        if possible_actions:
            return self.decision_making(possible_actions,explore)
        else:
            return self.decision_making(None,explore)
    def hebbian_learning(self, event1, event2, action, I1, I2):
        """
        Update transition statistics for (event1 --action--> event2).

        counts[...] behaves like a running frequency table: the whole row
        for (action, event1, sign(I1)) is rescaled by s/(s+1) BEFORE the
        observed cell is bumped to (s*v+1)/(s+1), so the row keeps summing
        to (roughly) 1.  The order of the two writes matters.
        """
        num_event1 = self.event_number[event1]
        num_event2 = self.event_number[event2]
        num_act = self.action_number[action]
        # s: total mass of the row; v: current value of the observed cell
        s = np.sum(self.counts[num_act,num_event1,:,int(I1>0),:])
        v = self.counts[num_act,num_event1,num_event2,int(I1>0),int(I2>0)]
        self.counts[num_act,num_event1,:,int(I1>0),:] *= s/(s+1.)
        self.counts[num_act,num_event1,num_event2,int(I1>0),int(I2>0)] = (s*v+1.)/(s+1.)
    def decision_making(self, possible_actions=None, explore=True):
        """
        Pick the next action from the current (state, intensity) pair.

        Q-values are scaled by |intensity| and perturbed with tiny noise
        to break ties.  Actions not in `possible_actions` are masked to
        -inf.  With explore=True a softmax draw is used; otherwise the
        agent prefers its remembered 'expected' action unless its learned
        success rate fails a Bernoulli test.  Sets self.expected and
        self.action as side effects and returns the chosen action id.
        """
        state = 0
        I = 0
        if self.activateds:
            state = self.event_number[self.activateds[-1]]
            I = self.old_intensities[-1]
        # TODO exploration based on convergence/difficulty to reach a state
        # tiny random jitter (1e-3 scale) breaks ties between equal Q-values
        values = self.Q[state,:,int(I>0)]*np.abs(I)+np.random.rand(len(self.Q[state,:,int(I>0)]))/1000.
        new_values = -np.Infinity*np.ones(len(values))
        if possible_actions:
            # mask: only the allowed actions keep their scores
            indices = []
            for action in possible_actions:
                indices.append(self.action_number[action])
            new_values[np.array(indices)]=values[np.array(indices)]
        else:
            new_values = values
        # EA == -1 means no expected action has been learned yet
        if explore or self.EA[state,int(I>0)]==-1:
            choice = softmax(new_values)
        else:
            # understandable behavior:
            expected_state = int(self.ES[state,int(I>0)])
            expected_intensity = int(self.EI[state,int(I>0)]>0)
            # follow the remembered action with probability R (its payoff)
            if self.R[expected_state,expected_intensity]>np.random.rand():
                choice = self.EA[state,int(I>0)]
            else:
                # otherwise exclude it and re-sample among the rest
                new_values[int(self.EA[state,int(I>0)])]=-np.Infinity
                #choice = np.argmax(new_values)
                choice = softmax(new_values)
        # EMA:
        #self.expected = np.max(self.V[state,:,int(I>0)]*np.abs(I))
        # remember the value we expect, for the next TD error computation
        self.expected = self.V[state,int(choice),int(I>0)]*np.abs(I)
        # Q:
        # self.expected = self.Q[state,choice,int(I>0)]*np.abs(I)
        self.action = self.action_number.inv[choice]
        return self.action
    def reinforcement_learning(self,new_activated,reward):
        """
        TD/Q-learning step for the transition (last state, action) ->
        new_activated, with the given scalar reward.

        Updates the running-average Q table, the visit counts n, the
        salience table `matter` (average |TD|), the average reward R,
        an exponential moving average V of the TD error, and the
        'understandable behavior' memory (EA/ES/EI).
        """
        if self.activateds and self.action and new_activated:
            # last state:
            action = self.action_number[self.action]
            last_state = self.event_number[self.activateds[-1]]
            last_intensity = self.old_intensities[-1]
            # new state:
            new_state = self.event_number[new_activated]
            new_intensity = self.intensities[new_activated]
            # classic Q:
            new_values = self.Q[new_state,:,int(new_intensity>0)]*np.abs(new_intensity)
            """
            # expect EMA of TD:
            new_values = self.V[new_state,:,int(new_intensity>0)]*np.abs(new_intensity)
            """
            reach = np.max(new_values)
            # TD learning:
            TD = reward + DISCOUNT*reach - self.expected
            # n is the (incremented) visit count used as averaging weight
            n = self.n[last_state,action,int(last_intensity>0)]+1.
            # classic Qlearning
            self.Q[last_state,action,int(last_intensity>0)] = (n*self.Q[last_state,action,int(last_intensity>0)] + TD)/(n+1.)
            self.n[last_state,action,int(last_intensity>0)] += 1.
            # salience: running average of |TD| (surprise magnitude)
            self.matter[new_state,int(new_intensity>0)] = (n*(self.matter[new_state,int(new_intensity>0)]) + abs(TD))/(n+1.)
            self.R[new_state,int(new_intensity>0)] = (n*(self.R[new_state,int(new_intensity>0)]) + reward)/(n+1.)
            # EMA of TD
            self.V[last_state,action,int(last_intensity>0)] = ETA2*self.V[last_state,action,int(last_intensity>0)] + (1-ETA2)*TD
            # understandable behavior
            # remember what was done and what it led to, for exploitation
            self.EA[last_state,int(last_intensity>0)] = action
            self.ES[last_state,int(last_intensity>0)] = new_state
            self.EI[last_state,int(last_intensity>0)] = int(new_intensity>0)
            """
            print "last "+str(self.activateds[-1])+" "+str(last_intensity)
            print "act "+ str(self.action)
            print "new "+str(new_activated)+" "+str(new_intensity)
            print "rew "+str(TD)
            print "======================"
            """
    def update_inverse(self, possible_actions=None, percepts=None, last_action=None):
        """
        Observation-driven variant of update(): instead of choosing an
        action, the agent is TOLD which action was just taken
        (`last_action`, e.g. when imitating another agent) and performs
        inverse reinforcement learning on it.  Returns the total reward
        collected this cycle rather than a decision.
        """
        # FIND THE NEXT ACTIVATED:
        elligibles = {}
        new_intensities = {}
        if last_action:
            # register the demonstrated action if it has never been seen
            if self.action_number:
                if not last_action in set(self.action_number):
                    self.add_actions([last_action])
            else:
                self.add_actions([last_action])
            self.action = last_action
        # REASONING:
        #===========
        if self.old_intensities and not percepts:
            elligibles, new_intensities = self.think_new_event(elligibles, new_intensities)
        # PERCEPTION:
        #============
        # could add an action "force_reasoning" where the robot doesnot do the perception loop
        # like someone closing eyes in order to reason
        tot_reward = 0
        if percepts:
            tot_reward, elligibles = self.perceive_new_event(percepts, tot_reward, elligibles)
        # UPDATES:
        #=========
        # stochastic election of incoming active event:
        next_activated = random_pull_dict(elligibles)
        if len(self.activateds)>0:
            last_activated = self.activateds[-1]
            last_intensity = self.old_intensities[-1]
            # infer the demonstrator's reward function from its choice
            self.inverse_learning(last_activated,last_intensity)
        if self.action:
            self.reinforcement_learning(next_activated,tot_reward)
        # new intensities:
        for event in new_intensities:
            if event not in self.modifieds:
                self.intensities[event] = new_intensities[event]
                self.modifieds.add(event)
        # new activated event
        if next_activated:
            self.add_activated(next_activated)
            self.add_intensity(self.intensities[next_activated])
        # DECISION:
        #==========
        # NOTE(review): both branches return tot_reward; the decision calls
        # are commented out, so possible_actions is currently unused here.
        if possible_actions:
            return tot_reward#self.decision(possible_actions)
        else:
            return tot_reward#self.decision()
    def inverse_learning(self,last_activated,last_intensity):
        """
        Inverse-RL update: nudge the inferred reward of the outcome the
        agent EXPECTED from this state.  If the demonstrated action
        matches the remembered expected action, the expected outcome's
        reward estimate moves up (EMA with factor 0.9/0.1); if a
        different action was demonstrated while an expectation exists
        (EA >= 0), it moves down.
        """
        if self.activateds and self.action:
            # action:
            action = self.action_number[self.action]
            last_state = self.event_number[last_activated]
            # NOTE(review): n and s are computed but never used below —
            # possibly leftovers from a count-weighted update. Confirm.
            n = self.n[last_state,action,int(last_intensity>0)]
            s = np.sum(self.n[last_state,:,int(last_intensity>0)])
            expected_state = int(self.ES[last_state,int(last_intensity>0)])
            expected_intensity = self.EI[last_state,int(last_intensity>0)]
            if action == self.EA[last_state,int(last_intensity>0)]:
                self.rewards[expected_state,int(expected_intensity>0)] = 0.9*self.rewards[expected_state,int(expected_intensity>0)] + 0.1
            elif self.EA[last_state,int(last_intensity>0)]>=0:
                self.rewards[expected_state,int(expected_intensity>0)] = 0.9*self.rewards[expected_state,int(expected_intensity>0)] - 0.1
# static functions (of multiple models):
#---------------------------------------
    def diff_reward(model1, model2):
        """
        L1 distance between two models' reward estimates, restricted to
        the events both models know.  Returns (per-event distances dict,
        total distance).

        NOTE(review): this compares model1.R (observed average reward)
        against model2.rewards (inferred reward) — the asymmetry looks
        intentional for imitation comparison, but confirm with callers.
        Declared without self/@staticmethod: presumably called on an
        instance so that `model1` receives the bound instance.
        """
        tot_dist = 0
        event_diff = {}
        for event_id in set(model1.event_number).intersection(set(model2.event_number)):
            event_num1 = model1.event_number[event_id]
            event_num2 = model2.event_number[event_id]
            # this distance function is arbitrary, could be L2, L3 etc...
            dist = np.sum(np.abs(model1.R[event_num1,:]-model2.rewards[event_num2,:]))#* np.abs(model1.R[event_num1,:]))#*matter
            event_diff.setdefault(event_id,dist)
            tot_dist += dist
        return event_diff,tot_dist
    def diff_knowledge(model1,model2):
        """
        Salience-weighted distance between the two models' current event
        intensities, over events known to both.  Returns (per-event
        distances dict, total distance).

        NOTE(review): `matter` is indexed with the raw intensity I1 here,
        whereas everywhere else it is indexed with int(I>0). If I1 can be
        a float or negative, this indexing is suspect — confirm whether
        int(I1>0) was intended.
        """
        tot_dist = 0
        event_diff = {}
        for event_id in set(model1.event_number).intersection(set(model2.event_number)):
            event_num1 = model1.event_number[event_id]
            I1 = model1.intensities[event_id]
            I2 = model2.intensities[event_id]
            # this distance function is arbitrary, could be L2, L3 etc...
            dist = np.sum(np.abs(I1-I2))*model1.matter[event_num1,I1]
            event_diff.setdefault(event_id,dist)
            tot_dist += dist
        return event_diff,tot_dist
| [
"alexis.jacq@gmail.com"
] | alexis.jacq@gmail.com |
11598bb19f096e2d77de2f92ae6033ae43579ded | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-HealthKit/PyObjCTest/test_hkcdadocumentsample.py | 5bdeb58bb45061daf1ce60c8c7399d94213d8863 | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 724 | py | from PyObjCTools.TestSupport import TestCase
import HealthKit
class TestHKCDADocumentSample(TestCase):
    """Binding checks for the HealthKit CDA document sample API."""

    def test_constants13_0(self):
        # All of these framework constants must be bridged as strings.
        constants = (
            HealthKit.HKPredicateKeyPathCDATitle,
            HealthKit.HKPredicateKeyPathCDAPatientName,
            HealthKit.HKPredicateKeyPathCDAAuthorName,
            HealthKit.HKPredicateKeyPathCDACustodianName,
            HealthKit.HKDetailedCDAValidationErrorKey,
        )
        for value in constants:
            self.assertIsInstance(value, str)

    def test_methods(self):
        # Argument 4 (the NSError**) must be bridged as an out-parameter.
        self.assertArgIsOut(
            HealthKit.HKCDADocumentSample.CDADocumentSampleWithData_startDate_endDate_metadata_validationError_,
            4,
        )
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
86b110ef4c6db36d268997477fa8b0dfd534379e | 5b347340fbcf0cddfa78a214977629000f57b2d3 | /pg2d.py | 3d385843863e03b0452c71bfabe60dc69827be7c | [] | no_license | dougshidong/joukowski_high_order_mesh | 425ef6cb7f5e749eff65832c74d6490a0b54863b | 48bbc1be3f9dab7c5e2d90c38703a0510e0fc512 | refs/heads/main | 2022-12-30T19:15:45.816962 | 2020-10-22T22:17:34 | 2020-10-22T22:17:34 | 305,817,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | from __future__ import division
def writePG2D(filename_base, ref, Q, TriFlag, E, V, nLE, NC, nWK, nWB, nr):
    """
    Write a structured C-mesh around a Joukowski airfoil to a .pg2d file.

    Parameters:
        filename_base -- output path prefix; the suffix encodes tri/quad,
                         refinement level and geometry order
        ref           -- refinement level (only used in the file name)
        Q             -- geometry order; only Q == 1 is supported (asserted)
        TriFlag       -- True for a triangulated mesh, False for quads
        E             -- element array; only E.shape[0] (element count) is used
        V             -- (nnode, 2) array of vertex coordinates
        nLE           -- number of nodes along the airfoil surface
        NC            -- 2-D array of node indices on the structured (i, j) grid
        nWK, nWB, nr  -- wake / wake+body / radial node counts

    Returns nothing; the grid file is written as a side effect.

    Bug fixes vs. the previous version: every boundary-edge record
    concatenated raw ints to strings (TypeError at runtime) — now wrapped
    in str(); the vertex section header was written as 'ndoes' with no
    trailing newline, fusing it with the first coordinate line.
    """
    #=========================#
    # Write out the grid file #
    #=========================#
    assert Q == 1
    filename = filename_base + ('_tri' if TriFlag else '_quad') + '_ref' + str(ref) + '_Q' + str(Q) + '.pg2d'
    print('Writing ' + filename)  # parenthesized: valid in Python 2 and 3
    f = open(filename, 'w')
    nelem = E.shape[0]
    # element counts in each structured direction (true division is active
    # via `from __future__ import division`, hence the explicit int())
    neli = int((NC.shape[0]-1)/Q)
    nelj = int((NC.shape[1]-1)/Q)
    nAf = int((nLE-1)/Q)    # airfoil boundary faces
    nFFi = int((nWB-1)/Q)   # farfield inflow faces
    nFFo = int((nr-1)/Q)    # farfield outflow faces (per side)
    fac = 2 if TriFlag else 1
    nedges = (neli+1)*(nelj+1)*fac + (nelem if TriFlag else 0) - int((nWK-1)/Q)
    nbedges = nAf + nFFi + 2*nFFo
    ntri = fac*nelem + nbedges
    nnode = V.shape[0]
    # header: nNodes nEdges nElems nBoundaryEdges
    f.write(str(nnode) + ' ' + str(nedges) + ' ' + str(ntri) + ' ' + str(nbedges) + '\n')
    #----------------#
    # Boundary edges #
    #----------------#
    nbc = 1
    # Airfoil
    for i in range(nAf):
        f.write(str(NC[nWK-1+Q*i,0]) + ' ' + str(NC[nWK-1+Q*(i+1),0]) + ' '
                + str(int((nWK-1)/Q) + i*fac + 1) + ' ' + str(ntri+nbc) + '\n')
        nbc += 1
    # Farfield inflow
    for i in range(nFFi):
        f.write(str(NC[Q*i,nr-1]) + ' ' + str(NC[Q*(i+1),nr-1]) + ' '
                + str((neli-1)*nelj*fac + i*fac + 1) + ' ' + str(ntri+nbc) + '\n')
        nbc += 1
    # Farfield Outflow
    # NOTE(review): unlike the airfoil/inflow records these carry no
    # interior-element index (3 fields instead of 4) — confirm against the
    # .pg2d format specification before relying on these sections.
    for i in range(nFFo):
        f.write(str(NC[0,Q*i]) + ' ' + str(NC[0,Q*(i+1)]) + ' ' + str(ntri+nbc) + '\n')
        nbc += 1
    for i in range(nFFo):
        f.write(str(NC[nWB-1,Q*i]) + ' ' + str(NC[nWB-1,Q*(i+1)]) + ' ' + str(ntri+nbc) + '\n')
        nbc += 1
    #----------------#
    # Interior Edges #
    #----------------#
    # Wake edges: the wake cut is shared by the cells above and below it
    j = 0
    for i in range(int((nWK-1)/Q)):
        cell1 = i+1
        cell2 = neli - i+1
        f.write(str(NC[Q*i,Q*j]) + ' ' + str(NC[Q*i,Q*(j+1)]) + ' ' + str(cell1) + ' ' + str(cell2) + '\n')
    # i-constant edges
    for j in range(nelj):
        for i in range(neli-1):
            cell1 = i + neli*j + 1
            cell2 = i+1 + neli*j + 1
            f.write(str(NC[Q*i,Q*j]) + ' ' + str(NC[Q*i,Q*(j+1)]) + ' ' + str(cell1) + ' ' + str(cell2) + '\n')
    # j-constant edges
    for j in range(nelj-1):
        for i in range(neli):
            cell1 = i + neli*j + 1
            cell2 = i + neli*(j+1) + 1
            f.write(str(NC[Q*i,Q*j]) + ' ' + str(NC[Q*(i+1),Q*j]) + ' ' + str(cell1) + ' ' + str(cell2) + '\n')
    #----------#
    # Vertices #
    #----------#
    floatformat = "{:3.16e}"
    # section header (was 'ndoes' with no newline — typo fixed)
    f.write('nodes\n')
    for i in range(nnode):
        f.write(floatformat.format(V[i,0]) + ' ' + floatformat.format(V[i,1]) + '\n')
    f.close()
    return
| [
"doug.shidong@gmail.com"
] | doug.shidong@gmail.com |
9aaec30fa6669d03839d6805ec0886ea96c88281 | 8ce8799ec8a89241a756f31ec000f22109206423 | /node_modules/mongoose/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/node_modules/bson-ext/build/config.gypi | dfb16ca53d6be0487c72e02ad6ce896d4214a473 | [
"Apache-2.0",
"MIT"
] | permissive | shaunymca/stackrank | 661d2d615fcf505087ceaec33f1b602520a55e49 | 60afc8ccdc00a0dd9c6b8539e9671f42aed930ac | refs/heads/master | 2021-08-28T13:09:41.239515 | 2021-08-17T14:11:19 | 2021-08-17T14:11:19 | 45,931,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/smcavinney/.node-gyp/0.10.22",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/smcavinney/Documents/My Documents/Fantasy/node_modules/mongoose/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/node_modules/bson-ext/build/Release/bson.node",
"module_name": "bson",
"module_path": "/Users/smcavinney/Documents/My Documents/Fantasy/node_modules/mongoose/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/node_modules/bson-ext/build/Release",
"save_dev": "",
"viewer": "man",
"browser": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"userignorefile": "/Users/smcavinney/.npmignore",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "null",
"long": "",
"ignore": "",
"npat": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"versions": "",
"message": "%s",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"coverage": "",
"pre": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/smcavinney/.npmrc",
"init_module": "/Users/smcavinney/.npm-init.js",
"npaturl": "http://npat.npmjs.org/",
"user": "502",
"node_version": "v0.10.22",
"editor": "vi",
"save": "true",
"tag": "latest",
"global": "",
"username": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "null",
"searchsort": "name",
"rebuild_bundle": "true",
"yes": "",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/smcavinney/.npm",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.10.22 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"init_version": "0.0.0",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/hw/ms8vj2f92935v4_bvh40g2q00000gn/T/",
"onload_script": "",
"prefix": "/usr/local",
"link": ""
}
}
| [
"smcavinney@rjmetrics.com"
] | smcavinney@rjmetrics.com |
1148c0910b91cc03e7d17be5b7ff92f615332024 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03253/s408919725.py | 3b3fd0ae0d573c7704b3717b620af69d006ea589 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | import collections
def prime_factorize(n):
    """Return the prime factorization of n as a non-decreasing list."""
    factors = []
    # strip out all factors of 2 first so we can step odd divisors only
    while n % 2 == 0:
        factors.append(2)
        n //= 2
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor:
            divisor += 2
        else:
            factors.append(divisor)
            n //= divisor
    # whatever remains above 1 is itself prime
    if n != 1:
        factors.append(n)
    return factors
# Precompute factorials and inverse factorials modulo MOD up to U,
# so that comb(n, k) below runs in O(1) per query.
U = 2*10**6
MOD = 10**9+7
fact = [1]*(U+1)      # fact[i] = i! mod MOD
fact_inv = [1]*(U+1)  # fact_inv[i] = (i!)^{-1} mod MOD
for i in range(1,U+1):
    fact[i] = (fact[i-1]*i)%MOD
# Fermat's little theorem gives the modular inverse of U!
fact_inv[U] = pow(fact[U],MOD-2,MOD)
# fill the remaining inverses downward: (i-1)!^{-1} = i!^{-1} * i
for i in range(U,0,-1):
    fact_inv[i-1] = (fact_inv[i]*i)%MOD
def comb(n, k):
    """Binomial coefficient C(n, k) mod MOD; 0 when k is out of range.

    Relies on the module-level fact / fact_inv tables precomputed above.
    """
    if not 0 <= k <= n:
        return 0
    return fact[n] * fact_inv[k] % MOD * fact_inv[n - k] % MOD
# Count sequences of length n whose product is m, modulo MOD:
# for each prime p with exponent e in m, distribute e among n slots
# (stars and bars: C(e+n-1, n-1)); multiply the counts over all primes.
n, m = map(int, input().split())
c = collections.Counter(prime_factorize(m))
ans = 1
for i in c.values():
    ans *= comb(i+n-1, n-1)
    ans %= MOD
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2e629d9cd7127b697ab8a0b883f54987cf0882c9 | d0dc2ddfe1073ceee5d178a48964403f748fdb57 | /Wuziqi_Main.py | a15b928435602a297de12804ed5d58e7dc793704 | [] | no_license | hrb518/AlphaWuziqi_Zero | ebca1084d563d60a5647626eecd402d0cf356ad2 | e5e02cb16b64d8340312fde48308111590535579 | refs/heads/master | 2023-01-02T21:51:37.859080 | 2020-10-29T12:30:13 | 2020-10-29T12:30:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | from Wuziqi_Board import Board
from Wuziqi_Game import Game
import time
from Wuziqi_constant import board_hight
from Wuziqi_constant import board_width
from Wuziqi_constant import n_in_num
from Wuziqi_constant import simu_time
from Wuziqi_constant import simu_num
if __name__ == '__main__':
    # Print the game banner: board size, win condition, move syntax,
    # and the menus for training on/off and game mode selection.
    print('棋盘大小为',board_width,'*',board_hight,'!')
    print('率先完成',n_in_num,'连子的一方获胜!')
    print('玩家下棋的方式eg: i,j 。其中: i的范围:0~',board_width,', j的范围:0~',board_hight,'\n')
    print('训练选项: 1:训练 2:不训练')
    print('模式选项: 1:AI对战 2:人机对战 3:人人对战\n')
    # Ask whether training mode is enabled; loop until a valid choice.
    while (1):
        Islearning = input('请选择是否开始训练:')
        if Islearning == '1':
            Is_learning = True
            print('开启训练模式!\n')
            break
        elif Islearning == '2':
            Is_learning = False
            print('关闭训练模式!\n')
            break
        else:
            print('模式选择错误! 请重新选择!')
            pass
    time.sleep(1)
    # Ask for the play mode: 1 = AI vs AI, 2 = human vs AI, 3 = human vs human.
    while(1):
        model = input('请输入模式选项:')
        if model == '1':
            print('当前模式为AI对战!')
            break
        elif model == '2':
            print('当前模式为人机对战!')
            break
        elif model == '3':
            print('当前模式为人人对战')
            Is_learning = False # mode 3 (human vs human) cannot be trained
            break
        else:
            print('模式选择错误! 请重新选择!')
            pass
        pass
    # NOTE(review): board_width is passed twice; board_hight is imported
    # but unused here — confirm whether Board expects (width, hight, n).
    Wuziqi = Board(board_width,board_width,n_in_num)
    game = Game(Wuziqi , n_in_row = n_in_num , simu_time = simu_time , simu_num = simu_num)
    current_index = 0
    total_VS = 5
    # Play a fixed series of total_VS games back to back.
    for current_index in range(total_VS):
        print('\n第',current_index+1,'局开始!')
        game.start(model,current_index,Is_learning)
        time.sleep(5)
"1084895390@qq.com"
] | 1084895390@qq.com |
2fb19187f21a900af8f5d973bdcdbb7f9f16512e | c67486d820387afa7aae87ef1eb1e4861b0bd570 | /src/utils.py | dd3862b361a00516e174a221e2b42a7bb3aa94ca | [] | no_license | juvekaradheesh/event-entity-coref-with-spanbert | fd1777a58f811cc80bd2fc4ade6a5217ee79eed9 | 491a4cfe1912318a9d1851c43161b0ef7e41ca2c | refs/heads/master | 2022-12-25T00:25:32.300584 | 2020-10-07T18:44:53 | 2020-10-07T18:44:53 | 298,850,898 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,397 | py | import os
import xmltodict
import json
import numpy as np
import torch
def process_ecb_plus(data_path, mention_type_start):
    """
    Walk the ECB+ corpus directory and parse every document.

    data_path          -- root directory containing an 'ECB+' folder of
                          numbered topic subdirectories
    mention_type_start -- prefix of the markable tags to extract
                          (e.g. 'ACTION' for events), passed through to
                          process_ecb_xml

    Topics '15' and '17' are skipped (presumably held out — confirm).
    Returns a flat sentence list, per-document token-id lists, cumulative
    sentence offsets (batch_indices) for splitting the flat list back into
    documents, and per-document mentions / gold spans / clusters.
    """
    all_sentences = []
    all_tokens = []
    batch_indices = [0]
    mentions = []
    gold_starts = []
    gold_ends = []
    clusters = []
    batch_index = 0
    for dir_ in os.listdir(os.path.join(data_path, 'ECB+')):
        if dir_ in ['15', '17']:
            continue
        dir_path = os.path.join(data_path, 'ECB+', str(dir_))
        for file in os.listdir(dir_path):
            # files with 'plus' in the name are ECB+ documents, whose
            # sentence 0 is metadata and gets skipped by the parser
            is_plus = True if 'plus' in file else False
            file_path = os.path.join(dir_path, file)
            sentences, tokens, mens, gold_s, gold_e, clusts = process_ecb_xml(file_path, is_plus=is_plus, mention_type_start=mention_type_start)
            all_sentences.extend(sentences)
            all_tokens.append(tokens)
            # running offset so callers can slice all_sentences per document
            batch_index +=len(sentences)
            batch_indices.append(batch_index)
            mentions.append(mens)
            clusters.append(clusts)
            gold_starts.append(gold_s)
            gold_ends.append(gold_e)
    return all_sentences, all_tokens, batch_indices, mentions, gold_starts, gold_ends, clusters
def process_ecb_xml(file_path, is_plus, mention_type_start):
    """
    Parse one ECB/ECB+ XML document with xmltodict.

    file_path          -- path to the .xml file
    is_plus            -- True for ECB+ files, whose sentence 0 is a
                          metadata line and must be skipped
    mention_type_start -- prefix of the markable tag names to treat as
                          mentions (others become relation targets)

    Returns (sentences, tokens, mentions, gold_starts, gold_ends,
    clusters) where gold_* are token t_ids spanning each mention and
    clusters are lists of mention m_ids grouped by CROSS_DOC_COREF.

    Fixes vs. the previous version: the file handle is now closed (with
    block), and a single-element 'source' appended its m_id as a raw
    string while the multi-element branch converted to int — the string
    could never match the int mention ids used downstream.
    """
    with open(file_path, 'r') as f:
        obj = xmltodict.parse(f.read())
    sentences = []
    tokens = []
    sentence = []
    sent_tokens = []
    curr_sentence = 0
    for item in obj['Document']['token']:
        # ECB+ sentence 0 is a metadata header, not document text
        if is_plus and int(item['@sentence']) == 0:
            continue
        # sentence boundary: flush the accumulated words/token ids
        if int(item['@sentence']) != curr_sentence:
            curr_sentence = int(item['@sentence'])
            sentences.append(sentence)
            tokens.append(sent_tokens)
            sentence = []
            sent_tokens = []
        # xmltodict omits '#text' for empty tokens
        if '#text' in item:
            sentence.append(item['#text'])
            sent_tokens.append(int(item['@t_id']))
    # flush the final sentence
    sentences.append(sentence)
    tokens.append(sent_tokens)
    mentions = []
    gold_starts = []
    gold_ends = []
    relation_mentions = []
    clusters = []
    for mention_type in obj['Document']['Markables']:
        if mention_type.startswith(mention_type_start):
            # xmltodict returns a dict for a single markable and a list
            # for several; both shapes are handled below
            if '@m_id' in obj['Document']['Markables'][mention_type]:
                if 'token_anchor' in obj['Document']['Markables'][mention_type]:
                    mentions.append(int(obj['Document']['Markables'][mention_type]['@m_id']))
                    # single token anchor is a dict; a span is a list
                    if '@t_id' in obj['Document']['Markables'][mention_type]['token_anchor']:
                        gold_starts.append(int(obj['Document']['Markables'][mention_type]['token_anchor']['@t_id']))
                        gold_ends.append(int(obj['Document']['Markables'][mention_type]['token_anchor']['@t_id']))
                    else:
                        gold_starts.append(int(obj['Document']['Markables'][mention_type]['token_anchor'][0]['@t_id']))
                        gold_ends.append(int(obj['Document']['Markables'][mention_type]['token_anchor'][-1]['@t_id']))
                else:
                    # no token anchor: an abstract entity/event instance,
                    # i.e. a coreference-relation target
                    relation_mentions.append(int(obj['Document']['Markables'][mention_type]['@m_id']))
            else:
                for item in obj['Document']['Markables'][mention_type]:
                    if 'token_anchor' in item:
                        mentions.append(int(item['@m_id']))
                        if '@t_id' in item['token_anchor']:
                            gold_starts.append(int(item['token_anchor']['@t_id']))
                            gold_ends.append(int(item['token_anchor']['@t_id']))
                        else:
                            gold_starts.append(int(item['token_anchor'][0]['@t_id']))
                            gold_ends.append(int(item['token_anchor'][-1]['@t_id']))
                    else:
                        relation_mentions.append(int(item['@m_id']))
    for item in obj['Document']['Relations']['CROSS_DOC_COREF']:
        # only relations pointing at one of our abstract targets count
        if int(item['target']['@m_id']) in relation_mentions:
            cluster = []
            if '@m_id' in item['source']:
                # bug fix: convert to int as in the multi-source branch
                cluster.append(int(item['source']['@m_id']))
            else:
                for mention in item['source']:
                    cluster.append(int(mention['@m_id']))
            clusters.append(cluster)
    return sentences, tokens, mentions, gold_starts, gold_ends, clusters
def fix_tokens_with_offsets(tokens, offset_mapping, batch_indices):
    """
    Align original token ids with subword offset mappings.

    For each sentence, a label array is initialized to -100, the first
    subword of every real word (offset starting at 0 with nonzero end)
    receives the corresponding token id, and every continuation subword
    (offset starting past 0) inherits the id of the subword before it.
    Special tokens with (0, 0) offsets keep -100.

    tokens         -- per-document lists of per-sentence token-id lists
    offset_mapping -- flat list of per-sentence (start, end) offset pairs
    batch_indices  -- cumulative sentence offsets splitting offset_mapping
                      into documents

    Returns the per-document, per-sentence aligned token-id lists.
    """
    split_offsets = [offset_mapping[lo:hi]
                     for lo, hi in zip(batch_indices, batch_indices[1:])]
    all_tokens = []
    for doc_tokens, doc_offsets in zip(tokens, split_offsets):
        encoded = []
        for sent_tokens, sent_offsets in zip(doc_tokens, doc_offsets):
            labels = np.full(len(sent_offsets), -100, dtype=int)
            offs = np.array(sent_offsets)
            # word-initial subwords: offset begins at 0 and is non-empty
            labels[(offs[:, 0] == 0) & (offs[:, 1] != 0)] = sent_tokens
            encoded.append(labels.tolist())
        # propagate each word's id across its continuation subwords
        for si, sent_offsets in enumerate(doc_offsets):
            for j, off in enumerate(sent_offsets):
                if off[0] != 0:
                    encoded[si][j] = encoded[si][j - 1]
        all_tokens.append(encoded)
    return all_tokens
def create_unmasked_sentence_map(offset_mapping, batch_indices):
    """
    Build a sentence index for every subword position.

    offset_mapping -- flat list of per-sentence offset-pair lists
    batch_indices  -- cumulative sentence offsets splitting the flat list
                      into documents

    Returns, per document and per sentence, a list repeating that
    sentence's index once for each of its subword positions.
    """
    doc_offsets = [offset_mapping[lo:hi]
                   for lo, hi in zip(batch_indices, batch_indices[1:])]
    return [[[idx] * len(sent) for idx, sent in enumerate(doc)]
            for doc in doc_offsets]
def process_gold_mentions(all_tokens, all_gold_starts, all_gold_ends, all_mentions, attention_mask, batch_indices):
    """
    Convert gold spans from corpus t_ids to flattened token positions.

    For each document: padding is stripped from the subword-aligned token
    ids via the attention mask, mentions (and their spans) are sorted by
    mention id, and every gold start/end t_id is mapped to its position
    in the flattened token sequence — the FIRST occurrence for starts and
    the LAST occurrence for ends, since each word id repeats across its
    subwords.  Returns per-document tensors of tokens, starts, ends and
    sorted mention ids.
    """
    # split the flat attention mask back into documents
    attention_masks = [attention_mask[batch_indices[i]: batch_indices[i+1]] for i in range(len(batch_indices)-1)]
    new_tokens = []
    full_gold_starts = []
    full_gold_ends = []
    new_mentions = []
    for tokens, gold_starts, gold_ends, mentions, attention_mask in zip(all_tokens, all_gold_starts, all_gold_ends, all_mentions, attention_masks):
        # Flatten Tokens and remove padding
        tokens = torch.Tensor(tokens)
        gold_starts = torch.Tensor(gold_starts)
        gold_ends = torch.Tensor(gold_ends)
        mentions = torch.Tensor(mentions)
        attention_mask = torch.tensor(attention_mask)
        # keep only real (unmasked) subword positions, flattened to 1-D
        tokens = torch.masked_select(tokens, attention_mask>0)
        # reorder spans so they line up with mentions sorted by id
        sort_indices = mentions.sort().indices
        mentions = mentions[sort_indices]
        gold_starts = gold_starts[sort_indices]
        gold_ends = gold_ends[sort_indices]
        new_gold_starts = []
        for item in gold_starts:
            # first subword carrying this t_id marks the span start
            new_gold_starts.append(torch.nonzero(tokens==item)[0][0])
        new_gold_ends = []
        for item in gold_ends:
            # last subword carrying this t_id marks the span end
            new_gold_ends.append(torch.nonzero(tokens==item)[-1][0])
        if len(new_gold_starts) > 0:
            gold_starts = torch.stack(new_gold_starts)
            gold_ends = torch.stack(new_gold_ends)
        else:
            # document without mentions: empty span tensors
            gold_starts = torch.Tensor([])
            gold_ends = torch.Tensor([])
        new_tokens.append(tokens)
        full_gold_starts.append(gold_starts)
        full_gold_ends.append(gold_ends)
        new_mentions.append(mentions)
    return new_tokens, full_gold_starts, full_gold_ends, new_mentions
def get_cluster_ids(all_mentions, all_clusters):
    """
    Map every mention to the 1-based index of the first cluster that
    contains it; mentions outside all clusters (singletons) get 0.

    all_mentions -- per-document iterables of mention ids
    all_clusters -- per-document lists of clusters (lists of mention ids)

    Returns one float tensor of cluster ids per document.
    """
    result = []
    for mentions, clusters in zip(all_mentions, all_clusters):
        ids = []
        for mention in mentions:
            assigned = 0  # 0 marks a singleton mention
            for idx, cluster in enumerate(clusters):
                if mention in cluster:
                    assigned = idx + 1
                    break
            ids.append(assigned)
        result.append(torch.Tensor(ids))
    return result
"juvekaradheesh@gmail.com"
] | juvekaradheesh@gmail.com |
adf6026f41cbd09fc1199cc5147629f4a195d096 | bee54127ab5a184f3e1f2fda633c63c1aa98d59b | /brain/fuzzy.py | 95f74cc9320d4105a41f4e2c85aed679a878c208 | [] | no_license | nttrungmt/pyro_v5.0.0 | 7de2312619c237c0ef66dadab59e6e171429a153 | a2fcc0fe37b0655028d8a11c8c30174cea12e6cf | refs/heads/master | 2020-06-08T17:53:37.735120 | 2019-06-22T20:32:25 | 2019-06-22T20:32:25 | 193,276,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,950 | py | """
Fuzzy Logic Base Class
E. Jucovy, 2005
based on fuzzy.py by D.S. Blank, 2001
"""
__author__ = "E. Jucovy, Douglas Blank <dblank@brynmawr.edu>"
__version__ = "$Revision: 1.9 $"
from math import exp
class FuzzyOperators:
    """
    Abstract operator set for fuzzy logic.

    Subclasses provide concrete Union (t-conorm), Intersection (t-norm)
    and Complement (negation) definitions over values in [0, 1].
    """
    def Union(self, a, b):
        """Fuzzy OR of a and b; abstract."""
        pass
    def Intersection(self, a, b):
        """Fuzzy AND of a and b; abstract."""
        pass
    def Complement(self, a):
        """Fuzzy NOT of a; abstract."""
        pass
    def __str__(self):
        # report the concrete subclass's name
        return self.__class__.__name__

class StandardFuzzyOperators(FuzzyOperators):
    """
    Zadeh's standard operators: max for union, min for intersection,
    and 1 - a for complement.
    """
    def Union(self, a, b):
        return max(a, b)
    def Intersection(self, a, b):
        return min(a, b)
    def Complement(self, a):
        return 1.0 - a
class FuzzyError(TypeError):
    """TypeError subclass raised for fuzzy-classifier parameter errors."""
    def __init__(self, st=""):
        super(FuzzyError, self).__init__(st)
class FuzzyValue:
    """
    A fuzzy truth value: a float clamped into the closed interval [0, 1].

    Logical operators (&, |, unary -/~) delegate to the attached
    operator set; arithmetic operators build new, re-clamped FuzzyValue
    instances that inherit this value's operator set.
    """
    def __init__(self, val, ops = StandardFuzzyOperators()):
        """
        Store the operator set and the value, clamped into [0, 1].
        """
        self.Ops = ops
        v = float(val)
        if v < 0:
            v = 0.0
        elif v > 1:
            v = 1.0
        self.Value = v
    def __and__(self, other):
        """Fuzzy intersection (AND) of self and other."""
        met = self.Ops.Intersection(self.Value, float(other))
        return FuzzyValue(met, self.Ops)
    def __or__(self, other):
        """Fuzzy union (OR) of self and other."""
        joined = self.Ops.Union(self.Value, float(other))
        return FuzzyValue(joined, self.Ops)
    def __neg__(self):
        """Fuzzy complement (NOT) of self."""
        return FuzzyValue(self.Ops.Complement(self.Value), self.Ops)
    __invert__ = __neg__
    def __add__(self, other):
        return FuzzyValue(self.Value + float(other), self.Ops)
    __radd__ = __add__
    def __sub__(self, other):
        return FuzzyValue(self.Value - float(other), self.Ops)
    def __rsub__(self, other):
        return FuzzyValue(float(other) - self.Value, self.Ops)
    def __mul__(self, other):
        return FuzzyValue(self.Value * float(other), self.Ops)
    __rmul__ = __mul__
    def __div__(self, other):
        # Python 2 division protocol
        return FuzzyValue(self.Value / float(other), self.Ops)
    def __rdiv__(self, other):
        return FuzzyValue(float(other) / self.Value, self.Ops)
    def __cmp__(self, other):
        # Python 2 comparison fallback: sign of the difference
        return self.Value - float(other)
    def __float__(self):
        return self.Value
    defuzzify = __float__
    def __str__(self):
        return "<Fuzzy value " + str(self.Value) + ">"
class FuzzyClassifier:
    """
    Fuzzy classifier class with a membership function and parameters.

    Membership function can be set on initialization or with
    setFunction(function). The membership function should
    return a value between 0 and 1 (values outside that range will be
    automatically set to either 0 or 1).

    All relevant parameters used by the membership function can be set
    on initialization or by setParams().

    NOTE: this class is Python-2 specific (func_code introspection,
    has_key, print statements, indexable dict.keys()).
    """
    def __init__(self, func=None, fName=None, ops=StandardFuzzyOperators(), **kwargs):
        """
        Initialize the FuzzyClassifier
        First argument is a reference to the membership function
        (or another FuzzyClassifier to copy from)
        Second argument is the name of the membership function
        Remaining arguments are parameter names and values
        """
        self.myParams = {}
        # copy constructor: share the function and parameter dict
        if func.__class__ is FuzzyClassifier:
            self.Function = func.Function
            self.myParams = func.myParams
        elif not func is None:
            self.Function = func
        else:
            # default membership: constant 0.5 (maximal uncertainty)
            def Halfway():
                return 0.5
            self.Function = Halfway
        if func.__class__ is FuzzyClassifier:
            self.FunctionName = func.FunctionName
        elif not fName is None:
            self.FunctionName = fName
        else:
            self.FunctionName = self.Function.__name__
        self.__name__ = "FuzzyClassifier:%s" % self.FunctionName
        self.Ops = ops
        for i in kwargs:
            self.myParams[i] = kwargs[i]
    def __call__(self, *args):
        """
        Apply the fuzzy classifier to a set of values
        Return a FuzzyValue with value Function(args)
        """
        # get params and function arguments
        # (Python 2: inspect the membership function's positional
        #  argument names; stored params fill them first, then *args)
        mydict = {}
        args = list(args)
        funcargs = list(self.Function.func_code.co_varnames
                        [:self.Function.func_code.co_argcount])
        for i in funcargs:
            try:
                mydict[i] = self.myParams[i]
            except KeyError:
                # not a stored parameter: consume the next call argument
                try:
                    mydict[i] = args.pop(0)
                except IndexError:
                    raise TypeError("Too few arguments to FuzzyClassifier %s()" \
                                    % (self.FunctionName))
        # x < 0 means some function arguments were never filled
        x = len(mydict) - self.Function.func_code.co_argcount
        if x == -1:
            raise FuzzyError("1 undefined parameter to FuzzyClassifier %s" \
                             % self.FunctionName)
        elif x < 0:
            raise FuzzyError("%d undefined parameters to FuzzyClassifier %s" \
                             % (-x, self.FunctionName))
        return FuzzyValue(self.Function(**mydict), self.Ops)
    def safesetParams(self, **kwargs):
        """
        Set one or more of the classifier's parameters
        without overwriting any predefined parameters.
        If a parameter is already defined safesetParams
        will not overwrite it.
        """
        keys = kwargs.keys()
        for key in keys:
            # has_key: Python 2 dict API
            if not self.myParams.has_key(key):
                self.myParams[key] = kwargs[key]
    def setParams(self, **kwargs):
        """
        Set one or more of the classifier's parameters
        without deleting predefined parameters; but will
        overwrite parameters.
        """
        keys = kwargs.keys()
        for key in keys:
            self.myParams[key] = kwargs[key]
    def resetParams(self, **kwargs):
        """
        Set all the classifier's parameters at once and
        delete all parameters that might already exist
        """
        self.myParams = kwargs
    def getParam(self, *names):
        """
        Return a list with one entry per requested parameter name
        (None for parameters that are not defined)
        """
        retlist = []
        for name in names:
            try:
                retlist.append(self.myParams[name])
            except KeyError:
                retlist.append(None)
        return retlist
    def setFunction(self, func, fName = None):
        """
        Set the classifier's membership function
        First (required) parameter is the membership function itself
        (or another FuzzyClassifier to copy from).
        Second (optional) parameter is a name for the function, recommended,
        e.g., for lambda functions; if this is not set then the function's
        actual name will be used
        """
        if not fName is None:
            self.FunctionName = fName
        elif func.__class__ is FuzzyClassifier:
            self.FunctionName = func.FunctionName
        else:
            self.FunctionName = func.__name__
        if func.__class__ is FuzzyClassifier:
            self.Function = func.Function
            # merge the source classifier's params without clobbering ours
            self.safesetParams(**func.myParams)
        else:
            self.Function = func
        self.__name__ = "FuzzyClassifier:%s" % self.FunctionName
    def __str__(self):
        return "FuzzyClassifier instance with\n\tmembership function " + \
               "%s\n\tparameters %s\n\toperator set %s" \
               % (self.FunctionName, self.myParams, self.Ops)
    def __nonzero__(self):
        # Python 2 truthiness: a classifier is always truthy
        return True
    def __rshift__(self, val):
        """
        Return a FuzzyValue classified under a linear rising
        membership function whose parameters are decided by the
        current FuzzyClassifier's parameters
        Implemented for backwards compatibility

        NOTE(review): relies on myParams.keys() ordering to pick the two
        bounds — nondeterministic before dict insertion order was
        guaranteed; confirm params are set in a fixed order.
        """
        keys = self.myParams.keys()
        if len(keys) > 2:
            print "This may not do what you expect."
        a = self.myParams[keys[0]]
        b = self.myParams[keys[1]]
        # normalize so the lower bound comes first
        if a > b:
            aFC = RisingFuzzy(b,a)
        else:
            aFC = RisingFuzzy(a,b)
        return aFC(val)
    def __lshift__(self, val):
        """
        Return a FuzzyValue classified under a linear falling
        membership function whose parameters are decided by the
        current FuzzyClassifier's parameters
        Implemented for backwards compatibility

        NOTE(review): same keys()-ordering caveat as __rshift__.
        """
        keys = self.myParams.keys()
        if len(keys) > 2:
            print "This may not do what you expect."
        a = self.myParams[keys[0]]
        b = self.myParams[keys[1]]
        # normalize so the lower bound comes first
        if a > b:
            aFC = FallingFuzzy(b,a)
        else:
            aFC = FallingFuzzy(a,b)
        return aFC(val)
def Fuzzy(a,b):
"""
Create a new FuzzyClassifier with two parameters and
default membership function
Implemented for backwards compatibility
"""
return FuzzyClassifier(a=a,b=b)
def RisingFuzzy(a,b):
"""
Create a new FuzzyClassifier with a linear rising membership
function and parameters a,b
a: lower bound, mu(a) = 0.0
b: upper bound, mu(b) = 1.0
"""
def __upMF(x0,a,b):
"""
A linear rising membership function
"""
if x0 < a:
return 0.0
elif x0 > b:
return 1.0
else:
return float(x0 - a) / (b - a)
return FuzzyClassifier(__upMF, "Rising", a=a, b=b)
def FallingFuzzy(a,b):
"""
Create a new FuzzyClassifier with a linear falling membership
function and parameters a,b
a: lower bound, mu(a) = 1.0
b: upper bound, mu(b) = 0.0
"""
def __downMF(x0,a,b):
"""
A linear falling membership function
"""
if x0 < a:
return 1.0
elif x0 > b:
return 0.0
else:
return float(b - x0) / (b - a)
return FuzzyClassifier(__downMF, "Falling", a=a, b=b)
def TriangleFuzzy(a,b,c):
"""
Create a new FuzzyClassifier with a linear triangular membership
function and parameters a,b,c
a: lower bound, mu(a) = 0.0
b: midpoint, mu(b) = 1.0
c: upper bound, mu(c) = 0.0
"""
def __triMF(x0,a,b,c):
"""
A linear triangular membership function
"""
if x0 < a:
return 0.0
elif x0 < b:
return float(x0 - a) / (b - a)
elif x0 < c:
return float(c - x0) / (c - b)
else:
return 0.0
return FuzzyClassifier(__triMF, "Triangle", a=a, b=b, c=c)
def TrapezoidFuzzy(a,b,c,d):
"""
Create a new FuzzyClassifier with a linear trapezoidal membership
function and parameters a,b,c,d
a: lower bound, mu(a) = 0.0
b: start of top, mu(b) = 1.0
c: end of top, mu(c) = 1.0
d: upper bound, mu(d) = 0.0
"""
def __trapMF(x0,a,b,c,d):
"""
A linear trapezoidal membership function
"""
if x0 < a:
return 0.0
elif x0 < b:
return float(x0 - a) / (b - a)
elif x0 < c:
return 1.0
elif x0 < d:
return float(d - x0) / (d - c)
else:
return 0.0
return FuzzyClassifier(__trapMF, "Trapezoid", a=a, b=b, c=c, d=d)
def GaussianFuzzy(c,s):
"""
Create a new FuzzyClassifier with a gaussian membership function
and parameters c,s
c: center (mean), mu(c) = 1.0
s: spread (standard deviation)
"""
def __GaussMF(x0,c,s):
"""
A Gaussian membership function
"""
return exp(pow((float(x0) - c) / s, 2.0) / -2.0)
return FuzzyClassifier(__GaussMF, "Gaussian", c=c, s=s)
# needs comment
def BellFuzzy(a,b,c):
"""
All values will effectively be mapped to either 0, 0.5, or 1.
(Not quite, since it's continuous, but close.)
"""
def __BellMF(x,a,b,c):
return 1.0 / (1.0 + pow((x - c) / a, 2.0*b))
return FuzzyClassifier(__BellMF, "BellCurve", a=a,b=b,c=c)
# NOT YET
def SigmoidFuzzy(a,c):
"""
Create a new FuzzyClassifier with a sigmoid membership function
and parameters a,c
I wouldn't use this yet if I were you.
"""
def __SigmoidMF():
"""
I wouldn't use this yet if I were you
"""
return 1.0 / (1.0 + exp(-a * (x - c)))
return FuzzyClassifier(__SigmoidMF, "Sigmoid", a=a, c=c)
# NOT YET TESTED
def LRFuzzy(f,g,c,a,b):
"""
Create a new FuzzyClassifier with a left-right membership
function and parameters f,g,c,a,b
f: left-side function (or FuzzyClassifier)
g: right-side function (or FuzzyClassifier)
c: switching point
"""
def __LRMF():
"""
I wouldn't use this yet if I were you
"""
if x <= c:
return f((c - x) / a)
return g((x - c) / b)
return FuzzyClassifier(__LRMF, "Left"+f.__name__+"Right"+g.__name__,
f=f,g=g,c=c,a=a,b=b)
if __name__ == '__main__': # some tests
f = BellFuzzy(10,20,30)
for i in range(100):
print str(i) + ", " + str(float(f(i)))
| [
"nttrungmt@gmail.com"
] | nttrungmt@gmail.com |
ec5ea35c4ffead8f683b6f064e189c45313204ce | 0c911f90939d18c5f50f8ae116e636d7e8d7602e | /DJANGO/SIMPLE_SOCIAL_CLONE/simplesocial/groups/migrations/0001_initial.py | 79daa94e01d1000840aa65fba3299bda343516e4 | [] | no_license | riyadRafiq/Python-and-Django-Full-Stack-Web-Developer-Bootcamp-Projects | 98dcc3cc82e3db250d66d97430dbb210fcce394b | a995519bfdaaf9e65460a0e8e94c749853d149ba | refs/heads/master | 2021-02-08T01:55:22.904980 | 2020-11-20T17:57:32 | 2020-11-20T17:57:32 | 244,095,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2020-03-07 19:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('slug', models.SlugField(allow_unicode=True, unique=True)),
('description', models.TextField(blank=True, default='')),
('description_html', models.TextField(blank=True, default='', editable=False)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='GroupMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='groups.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_groups', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='group',
name='members',
field=models.ManyToManyField(through='groups.GroupMember', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='groupmember',
unique_together=set([('group', 'user')]),
),
]
| [
"riyadrafiq@gmail.com"
] | riyadrafiq@gmail.com |
0ab3c451a8741658f7109ca97f6f9867bfef9e66 | 2ef84b5d5ffd723b72473a483a1b91b50ef3c260 | /se/Config/app.py | 5ade6b5f1be4c37b6de0708877c0c722b1007033 | [] | no_license | kuldeep-rawani/lead_management_app | 003e962a9724bc253a07d18266acfa7dffe33690 | c8bd6b18f5c733df9e544690e4aabacc87e6e632 | refs/heads/master | 2021-08-15T01:53:42.286645 | 2017-02-22T07:43:14 | 2017-02-22T07:43:14 | 111,065,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from se.Models.Lead import lead_management
from se.Models.Attachment import lead_attachment
if __name__ == '__main__':
app.run()
| [
"kuldeep@sourceeasy.com"
] | kuldeep@sourceeasy.com |
95cad932e8c7577777a588f35369823d4b232749 | 9632b2cbdf25bb3b788db392ae3ea503008909c6 | /node.py | fef7629274b4bfdaacbe2e6a081649f10df2296b | [] | no_license | riking/trafficsimgame-js | 0f610a3dc44c857aec23161c69955795cf80255d | 06254bba10e35d287d791651e51b1acad13132a4 | refs/heads/master | 2021-01-18T14:02:59.283992 | 2012-04-06T20:59:34 | 2012-04-06T20:59:34 | 2,811,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,388 | py | import math
import vectors
import world
import cars
class Node:
# self.connections = {node: road, node: road}
def __init__(self, position, connect=None, manager=None):
self.pos = vectors.Vector(position)
self.connections = {}
if connect:
for n in connect:
self.addConnection(n)
if manager:
self.imanager = manager
else:
self.imanager = ZeroWaitIntersection(self)
def __eq__(self, other):
if isinstance(other, Node):
return self.pos == other.pos
if isinstance(other, vectors.Vector):
return self.pos == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, Node):
return self.pos != other.pos
if isinstance(other, vectors.Vector):
return self.pos != other
return NotImplemented
def __hash__(self):
return self.pos.__hash__()
def __str__(self):
return str(self.pos)
def __repr__(self):
return "Node(%s,%s)" % (str(self.pos), len(self.connections))
def getPos(self):
return self.pos
def addConnection(self, other):
r = world.Road(self, other)
if other in self.connections.keys():
return
self.connections[other] = r
other.addConnectionCB(self, r)
def addConnectionCB(self, other, road):
self.connections[other] = road
def getRoad(self, other):
if other in self.connections:
return self.connections[other]
return None
def tick(self, map, rand):
self.onTick(map, rand)
for r in self.connections.values():
r.tick(self, map, rand)
def onTick(self, map, rand):
pass # Reserved for subclasses
def carArrived(self, car, road, map):
if self == car.destination():
print("car arrived")
print(car)
car.cleanup()
del car
return # hopefully this will kill the car
self.imanager.carArrived(self, car, road, map)
def dist(self,other):
return math.sqrt((self.pos[0] - other.pos[0]) ** 2 + (self.pos[1] - other.pos[1]) ** 2)
def coordstr(self):
return str(self.pos)
def fullDescription(self):
cstr = ""
for c,r in self.connections.items():
cstr += c.coordstr()
return "Node at %s \nConnected to: %s" % (self.pos, cstr)
def cleanup(self): # on map exit. clean up circular references
try:
for n, r in self.connections.items():
r.cleanup()
del self.connections
except (NameError, AttributeError) as exc:
pass # dump all exceptions
class CarGenNode (Node):
def __init__(self, position, cartype = None, delay=20):
Node.__init__(self, position)
self.delay = delay
if not cartype:
cartype = cars.baseCarType
self.car = cartype
self.tickcount = 1
def onTick(self, map, rand):
self.tickcount -= 1
if self.tickcount <= 0:
self.tickcount = self.delay
dest = rand.choice(map.nodelist)
if dest == self:
return
c = self.car(self, map, rand, dest)
if not c.routeInit(self, map, rand):
del c
return
self.carArrived(c, None, map)
# Lane Data format
# lanedata = dict ( Road, *1* )
# len(lanedata) = connection number
# *1* = list [*2*]
#len(*1*) = number of lanes
# *2* = binary flags
# *2* & (1>>lanedata.index(*1*))
#
#############
# A
# /|\
# --/ \---
#B --- ---- D
# --\ /---
# \\|//
# ||||
# ||||
# ||||
# C
# { A : [1>>B & 1>>C & 1>>D], B:[1>>A,1>>D,1>>C], C:[1>>B,1>>B & 1>>A,1>>D,1>>D], D:[1>>C,1>>B,1>>A] }
class Intersection:
def __init__(self, node, lanedata = None):
self.parent = node
if lanedata:
pass # TODO: Unpack lane data
def tick(self, node, map, rand):
self.onTick(node, map, rand)
def onTick(self, node, map, rand):
pass # Reserved for subclasses
def laneSort(self, car, road, map):
raise NotImplementedError()
def carArrived(self, parent, car, road, map):
raise NotImplementedError()
def passCar(self, parent, car, roadfrom, map):
nn = car.getNextNode(roadfrom, parent, map)
if nn not in node.connections.keys():
nn = car.emergency_reroute(roadfrom, parent, map)
if nn not in node.connections.keys():
#print("car.emergency_rereoute failed to give a valid next node, deleting car")
car.cleanup()
del car
return
node.connections[nn].addCarFrom(self, car)
car.notifyRoadChange(parent, nn, roadfrom, parent.connections[nn], map)
class ZeroWaitIntersection (Intersection):
def __init__(self, node):
Intersection.__init__(self, node)
def carArrived(self, parent, car, road, map):
self.passCar(parent, car, road, map)
def laneSort(self,car,road,map):
return 0
| [
"rikingcoding@gmail.com"
] | rikingcoding@gmail.com |
1700253dd5ee98c3964c2addb337c32f3cfcbcf8 | 4478cbb2288457d649477fefdf96d710be4ff4bc | /scripts/dvxplorer_test.py | bc61df74a727a9536be9a52f5ea62eadc1c4a08d | [
"MIT"
] | permissive | duguyue100/pyaer | bd0f08363623403e0dc72f62d8bf4dd70205cdc6 | d5b923e1564be06a5e771e7f6b72db755563c2e7 | refs/heads/master | 2023-05-01T22:14:39.133760 | 2023-03-25T13:04:52 | 2023-03-25T13:04:52 | 105,263,648 | 27 | 5 | MIT | 2019-06-20T20:00:18 | 2017-09-29T11:11:22 | Python | UTF-8 | Python | false | false | 1,868 | py | """DVXplorer Test.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function, absolute_import
import numpy as np
import cv2
from pyaer.dvxplorer import DVXPLORER
device = DVXPLORER()
print("Device ID:", device.device_id)
print("Device Serial Number:", device.device_serial_number)
print("Device USB bus Number:", device.device_usb_bus_number)
print("Device USB device address:", device.device_usb_device_address)
print("Device String:", device.device_string)
print("Device Firmware Version:", device.firmware_version)
print("Logic Version:", device.logic_version)
print("Device Chip ID:", device.chip_id)
if device.device_is_master:
print("Device is master.")
else:
print("Device is slave.")
print("MUX has statistics:", device.mux_has_statistics)
print("Device size X:", device.dvs_size_X)
print("Device size Y:", device.dvs_size_Y)
print("DVS has statistics:", device.dvs_has_statistics)
print("IMU Type:", device.imu_type)
print("EXT input has generator:", device.ext_input_has_generator)
clip_value = 3
histrange = [(0, v) for v in (device.dvs_size_Y, device.dvs_size_X)]
device.start_data_stream()
# load new config
device.set_bias_from_json("./scripts/configs/dvxplorer_config.json")
print(device.get_bias())
while True:
try:
(pol_events, num_pol_event,
special_events, num_special_event,
imu_events, num_imu_event) = \
device.get_event("events_hist")
print("Number of events:", num_pol_event)
if num_pol_event != 0:
img = pol_events[..., 1]-pol_events[..., 0]
img = np.clip(img, -clip_value, clip_value)
img = (img+clip_value)/float(clip_value*2)
cv2.imshow("image", img)
cv2.waitKey(1)
except KeyboardInterrupt:
device.shutdown()
cv2.destroyAllWindows()
break
| [
"duguyue100@gmail.com"
] | duguyue100@gmail.com |
e7122065f5ca2cf762f86e2343251e9ccb8b3c92 | cb21c8ea795bbb2be306865ba7ff295e45c7fc32 | /htn/assemble.py | 95b139fe70c9ab66d85361bcdccb147e5a0a9b17 | [] | no_license | dloti/pddl-to-htn | 675e72688449d461bdf6598158008cc5b4dec68c | 135eac34cf7eac6610a8eb08bc29b2df5c3284c6 | refs/heads/master | 2022-12-01T06:59:28.980945 | 2020-05-08T17:14:13 | 2020-05-08T17:14:13 | 259,025,143 | 4 | 0 | null | 2020-08-02T17:29:02 | 2020-04-26T12:29:51 | Java | UTF-8 | Python | false | false | 1,663 | py | import subprocess
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help='input domain name')
parser.add_argument('-o', '--output', help='output path for generated htns')
args = parser.parse_args()
out_path = os.path.normpath(args.output)
spec_file_path = os.path.join("./domains",args.input,args.input+".spec")
prob_list_path = os.path.normpath(args.output+'/prob_list.txt')
with open(spec_file_path) as f:
content = f.readlines()
domain = content[0].rstrip()
path = content[1].rstrip()
domain_file = content[2].rstrip()
rep_instance = content[3].rstrip()
problems = content[4:]
with open(os.path.join(out_path,domain),'w') as domain_out:
domain_path = path+domain_file
subprocess.call(["./bin/create_htn", domain, domain_path, path+rep_instance], stdout=domain_out)
with open(prob_list_path,'w') as prob_list:
for problem in problems:
problem = problem.rstrip()
problem_path = path+problem
tmp = problem.rsplit('/')
if len(tmp) > 1:
problem = '-'.join(tmp)
prob_list.write(problem.rstrip('.pddl')+'\n')
if problem == '':
continue
problem_out = open(args.output+'/'+problem.rstrip('.pddl'),'w')
subprocess.call(["./bin/initialize", domain, domain_path, problem_path], stdout=problem_out)
problem_out.close()
prob_list.close()
#subprocess.call(["sbcl", '--script', 'script.txt','>','result.txt'])
if __name__ == "__main__":
main()
| [
"¨dlotinac@gmail.com¨"
] | ¨dlotinac@gmail.com¨ |
79f7beec7930893d1bdb2e735e40c9ec6f6acbc8 | 5dd93a32e9803cbe29de37434d9bd1209f871b84 | /majiang2/src/majiang2/dealer/dealer_all_color.py | 218363eebcaef53c18c2002b1d505d9f45c74634 | [] | no_license | cnbcloud/mjserver | 71e9448478d6b6c04e852fc74968b3b2cb75f51c | b5b08a85d49c3bed460255a62dc5201b998d88d4 | refs/heads/master | 2021-01-21T17:46:29.073368 | 2017-07-27T09:25:49 | 2017-07-27T09:25:49 | 98,517,509 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,873 | py | # -*- coding=utf-8
'''
Created on 2016年9月24日
@author: zhaol
'''
from majiang2.dealer.dealer import Dealer
import random
from majiang2.tile.tile import MTile
from freetime.util import log as ftlog
"""
麻将手牌编码
万 1-9
筒 11-19
条 21-29
东 31
南 32
西 33
北 34
中 35
发 36
白 37
"""
class AllColorDealer(Dealer):
def __init__(self):
"""初始化
子类在自己的初始化方法里,初始化麻将牌池范围,准备发牌
包含所有的牌
"""
super(AllColorDealer, self).__init__()
# 本玩法包含的花色
self.__card_colors = [MTile.TILE_WAN, MTile.TILE_TONG, MTile.TILE_TIAO, MTile.TILE_FENG]
# 花色数量
self.__card_count = len(self.__card_colors)
# 初始化本玩法包含的牌
self.setCardTiles(MTile.getTiles(self.__card_colors))
ftlog.debug(self.cardTiles)
@property
def cardColors(self):
return self.__card_colors
def setCardColors(self, cardColors):
self.__card_colors = cardColors
@property
def cardCount(self):
return self.__card_count
def setCardCount(self, cardCount):
self.__card_count = cardCount
"""洗牌/发牌
子类必须实现
"""
def shuffle(self, goodPointCount, cardCountPerHand):
"""参数说明
goodPointCount : 好牌点的人数
cardCountPerHand : 每手牌的麻将牌张数
"""
# 初始化一下cardTiles,因为每设置一次好牌点都会从里面弹出13张牌
self.setCardTiles(MTile.getTiles(self.__card_colors))
left_tiles = []
for color in self.__card_colors:
left_tiles.extend(self.cardTiles[color])
# 对剩余的牌洗牌
random.shuffle(left_tiles)
self.addTiles(left_tiles)
return self.tiles
def getGoodCard(self, cardCountPerHand):
"""发一个人的好牌
"""
count = self.getGoodCardCount(cardCountPerHand)
ftlog.debug( 'count:', count )
color = random.randint(0, self.__card_count -1)
cards = []
cLen = len(self.cardTiles[color])
if count > cLen:
count = cLen
# 发好牌
for _ in range(count):
cards.append(self.cardTiles[color].pop(0))
# 发第二门
count1 = (cardCountPerHand - count) / 2
color = (color + 1) % self.__card_count
for _ in range(count1):
cards.append(self.cardTiles[color].pop(0))
# 发最后一门
left = cardCountPerHand - count - count1
color = (color + 1) % self.__card_count
for _ in range(left):
cards.append(self.cardTiles[color].pop(0))
return cards
def getGoodCardCount(self, count):
"""好牌一门的数量
"""
middle = count / 2
choice = random.randint(0, 99)
if choice > 90:
middle += 2;
elif choice > 60:
middle += 1;
return middle
if __name__ == "__main__":
dealer = AllColorDealer()
# 山东清一色
# {"seat1": [13, 13, 14, 14, 15, 15, 16, 16, 17, 11, 11, 12, 12], "seat2": [1, 11, 2, 21, 3, 21, 19, 19, 19, 21, 22, 23, 24], "seat3": [1, 2, 3, 4, 5, 7, 8, 9, 4, 12, 6, 16, 27], "seat4": [1, 2, 3, 4, 5, 7, 8, 9, 17, 17, 18, 18, 27], "jing": [], "pool": [6, 9, 21, 17, 6, 18, 18, 6, 16, 15, 15, 13, 14, 13, 14, 19, 1, 2, 3, 4, 5, 12, 8, 7, 5, 8, 9, 11, 22, 22, 22, 23, 23, 23, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 28, 28, 28, 28, 29, 29, 29, 29, 35, 35, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 7], "laizi": []}
# 山东混一色
# {"seat1": [31, 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34, 1], "seat2": [13, 9, 27, 24, 27, 5, 28, 34, 35, 11, 9, 29, 24], "seat3": [3, 13, 14, 22, 19, 8, 18, 19, 36, 29, 26, 18, 17], "seat4": [8, 7, 21, 7, 25, 4, 8, 17, 1, 12, 16, 25, 13], "pool": [2, 11, 22, 3, 1, 5, 24, 36, 22, 37, 4, 15, 2, 27, 12, 15, 35, 19, 23, 24, 14, 6, 11, 15, 9, 32, 21, 6, 16, 14, 37, 13, 36, 8, 15, 12, 35, 37, 23, 23, 3, 14, 1, 28, 37, 18, 2, 29, 17, 3, 26, 23, 6, 7, 11, 27, 28, 4, 16, 25, 6, 25, 18, 5, 36, 35, 12, 26, 17, 2, 7, 21, 21, 9, 29, 31, 19, 26, 5, 33, 28, 22, 16, 4], "jing": [], "laizi": []}
# dealer.generateTiles({
# "seat1": [31,31,31,32,32,32,33,33,33,34,34,34,1],
# "seat2": [],
# "seat3": [],
# "seat4": [],
# "pool": [2,0,0,0,1],
# "magics": []
# })
# 山东风一色
# {"seat1": [31, 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34, 35], "seat2": [21, 24, 3, 18, 21, 5, 1, 7, 5, 17, 8, 12, 7], "seat3": [4, 13, 14, 22, 29, 1, 23, 37, 25, 29, 15, 28, 26], "seat4": [5, 22, 9, 4, 37, 35, 16, 8, 12, 33, 22, 12, 4], "pool": [2, 16, 14, 4, 35, 23, 15, 27, 19, 23, 16, 11, 1, 16, 13, 25, 8, 26, 28, 14, 2, 22, 27, 15, 13, 2, 8, 23, 28, 35, 15, 21, 11, 28, 29, 36, 12, 36, 11, 9, 19, 19, 6, 1, 17, 24, 18, 6, 19, 7, 9, 3, 24, 3, 17, 31, 2, 37, 6, 14, 26, 36, 18, 27, 26, 9, 5, 36, 25, 17, 32, 6, 24, 18, 27, 37, 34, 21, 11, 7, 3, 13, 29, 25], "jing": [], "laizi": []}
# dealer.generateTiles({
# "seat1": [31,31,31,32,32,32,33,33,33,34,34,34,35],
# "seat2": [],
# "seat3": [],
# "seat4": [],
# "pool": [2,0,0,0,35],
# "magics": []
# })
# 山东碰碰胡
# dealer.generateTiles({
# "seat1": [31,31,31,12,12,12,23,23,23,34,34,34,35],
# "seat2": [],
# "seat3": [],
# "seat4": [],
# "pool": [2,0,0,0,35],
# "magics": []
# })
# 山东七对
# dealer.generateTiles({
# "seat1": [31,32,33,34,35,36,37,1,9,11,19,21,29],
# "seat2": [],
# "seat3": [],
# "seat4": [],
# "pool": [2,0,0,0,31],
# "magics": []
# })
dealer.generateTiles({
"seat1": [1,1,1,2,2,2,3,3,3,4,4,6,7],
"seat2": [11,11,11,12,12,12,13,13,13,14,14,6,7],
"seat3": [21,21,21,22,22,22,23,23,23,24,24,6,7],
"seat4": [31,31,31,32,32,32,33,33,33,34,34,6,7],
"pool": [5,5,5,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"magics": []
}) | [
"gamedev@jundeMac-mini.local"
] | gamedev@jundeMac-mini.local |
c86f6cdc19992ca81228b9b59009f6e334b6b5e3 | 40c735de998571d77318d85f8cd0ee74555b4acf | /reaction/data/makeData.py | f1a0c94ad06def4ad7522b61b2bb80696ee98a69 | [] | no_license | cam1681/ODENet | 682e6ed5a4d6c7711170f49a7efac0e61bb50ea3 | d853eccb83b7c1cae58aef059e80e989a6e06ccc | refs/heads/master | 2020-12-27T03:59:43.631650 | 2020-02-02T08:38:57 | 2020-02-02T08:38:57 | 237,741,647 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,953 | py | import numpy as np
import matplotlib.pylab as plt
from itertools import chain, product
N = 5;
rawdata = np.loadtxt('actin{}.txt'.format(N),unpack=False)
rawdata[:,0] = rawdata[:,0]/3600
indices, = np.where((np.diff(rawdata[:,0])==0)*1 == 1)
x = np.delete(rawdata[:,0],indices)
y = np.delete(rawdata[:,1],indices)
maxindex = x.shape[0]
diffx = np.sign(np.diff(x))
for i in range(len(diffx)):
if diffx[i] == 0:
diffx[i] = diffx[i-1]
#diffx = [_df if _df != 0 else 1 for _df in diffx]
shift = (np.diff(diffx) !=0 )*1
index, = np.where(shift == 1)
index[1::2] += 1
xs = index.shape[0] + 1
xdata = [[]]*xs
ydata = [[]]*xs
for i in range(xs):
if i == 0:
xdata[i] = x[:index[i]]
ydata[i] = y[:index[i]]
elif i == xs-1:
xdata[i] = x[index[i-1]+1:]
ydata[i] = y[index[i-1]+1:]
else:
xdata[i] = x[index[i-1]+1:index[i]]
ydata[i] = y[index[i-1]+1:index[i]]
if i%2 != 0:
xdata[i] = xdata[i][:0:-1]
ydata[i] = ydata[i][:0:-1]
vx = list(chain.from_iterable(xdata))
vy = list(chain.from_iterable(ydata))
vdata = np.array([list(_a) for _a in zip(vx,vy)])
dvx = np.sign(np.diff(vx))
indexv, = np.where(dvx==-1)
indexv = np.append(indexv,len(vx)-1)
if N == 3:
vc = [7.4, 9.6, 12.4, 14.2, 16.2, 18.4, 20.5]
vc.reverse()
else:
vc = [6.7, 8.5, 11.5, 14.9, 17.3, 20.3, 22.9]
vc.reverse()
vcd = list(list(np.repeat(vc[i],indexv[i]+1)) if i==0 else list(np.repeat(vc[i],indexv[i]-indexv[i-1])) for i in range(len(indexv)))
vcd = np.array(list(chain.from_iterable(vcd)))
vdata = np.array([[vdata[i,0],vdata[i,1],vcd[i]-vdata[i,1]] for i in range(len(vcd))])
np.savetxt('vdata{}.txt'.format(N), vdata)
np.savetxt('indexv{}.txt'.format(N), indexv)
#print(xdata)
#print(ydata)
#
for i in range(len(indexv)):
if i == 0:
plt.plot(vdata[:indexv[i],1])
else:
plt.plot(vdata[indexv[i-1]+1:indexv[i]+1,1])
plt.show()
| [
"pihu@pi-macbook.local"
] | pihu@pi-macbook.local |
5d6510bdbc3df2050efb95441d37867be34df818 | 37083ca6c0a2142fc069c59f426931da82636639 | /src/Decision_Tree.py | 2de9f58bdbdbbb981b5b9528f7f5837bed7082da | [] | no_license | burfel/ML-project | c0fd5a28b539edec61decaf4c71f77fbb35626a6 | 5917b3b5c4bffb25fc7943185405d937a4a9a6c1 | refs/heads/master | 2021-09-04T19:59:46.703574 | 2018-01-22T00:05:48 | 2018-01-22T00:05:48 | 118,189,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,661 | py | import numpy as np
import time
from Decision_Node import DecisionNode#, FastDecisionNode
from Attribute_Selector import AttributeSelector
from Leaf_Decider import LeafDecider
"""
Based on C4.5 - the classifier (it is not for regression)
X contains the attributes that the algorithm can learn from per example
Y are the target values per example
"""
class DecisionTree:
def __init__(self, maximum_depth=10**10, min_examples_per_leaf=1):
self.n_exp = 0 # nr of examples
self.n_attr = 0 # nr of attributes
self.ld = LeafDecider(maximum_depth=maximum_depth, min_examples_per_leaf=min_examples_per_leaf)
self.attrs = AttributeSelector("standard decision tree").get_attribute_selector()
def fit(self, X=np.zeros((0,0)), Y=np.zeros(0)):
"""
Similar to sklearn's fit method. Takes x and y and creates the tree which
models the relationship between x and y
Args:
X (np.ndarray): input of the function
Y (np.ndarray): labels of the input
Returns: None
"""
# assertions should check whether the tree can be built
# assertions concerning X
assert type(X) is np.ndarray, "attribute values X is not ndarray"
assert len(X.shape)==2, "attribute values X doesn't have two dimensions"
assert X.shape[0]>0, "attribute values X has no examples"
assert X.shape[1]>0, "attribute values X has no attributes"
# assertions concerning Y
assert type(Y) is np.ndarray, "target values Y is not ndarray"
assert len(Y.shape)==1, "target values Y isn't one dimensional"
assert Y.shape[0]==X.shape[0], "target values Y has an invalid amount of examples"
# make sure the types are good
if X.dtype!=np.dtype('float64'): X = X.astype('float64')
if Y.dtype!=np.dtype('int32'): Y = Y.astype('int32')
self.n_exp = X.shape[0] # nr of examples
self.n_attr = X.shape[1] # nr of attributes
#self.root = FastDecisionNode(self, X, Y, depth=0)
self.root = DecisionNode(self, X, Y, depth=0)
def classify(self, example):
# essentially sklearn's predict method
return self.root.classify(example)
def print_me(self):
self.root.print_me()
def test_DT_1():
"""
-creates a tree with 100 classes and one example per class
-this leads to 100 leaves
-when classifying an arange in the bottom, arange(cols) + q*cols
leads to class q
"""
rows = 20
cols = 5
classes = 4
X = np.arange(rows*cols).reshape(rows,cols)
Y = np.arange(rows)
X[:,-1] = X[:,-1] % classes
Y = Y % classes
clf = DecisionTree()
clf.fit(X,Y)
example = np.arange(cols)+12*cols
example[-1] = example[-1] % classes
assert clf.classify(example)==0, "Error is test_DT_1"
example = np.arange(cols)+13*cols
example[-1] = example[-1] % classes
assert clf.classify(example)==1, "Error is test_DT_1"
def test_DT_2():
"""
-creates a tree with 100 classes and one example per class
-this leads to 100 leaves
-when classifying an arange in the bottom, arange(cols) + q*cols
leads to class q
"""
rows = 20
cols = 5
classes = 2
X = np.arange(rows*cols).reshape(rows,cols)
Y = np.arange(rows)
clf = DecisionTree(maximum_depth=40, min_examples_per_leaf=1)
clf.fit(X,Y)
example = np.arange(cols)+12*cols
assert clf.classify(example)==12, "Error in test_DT_2"
example = np.arange(cols)+(rows+2)*cols
assert clf.classify(example)==19, "Error in test_DT_2"
def test_DT_3():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data[:,:]
Y = iris.target
clf = DecisionTree(maximum_depth=5, min_examples_per_leaf=1)
st = time.clock()
clf.fit(X,Y)
print("time: ", time.clock()-st)
for i in range(len(X)):
assert clf.classify(X[i]) == Y[i], "The decision tree should get all right, something's wrong."
clf.print_me()
def test_DT_4():
n, m = 100, 100
X = np.random.random_sample(size=(n,m))
Y = np.random.randint(low=0, high=7,size=n)
clf = DecisionTree(maximum_depth=5, min_examples_per_leaf=1)
st = time.clock()
clf.fit(X,Y)
print("time: ", time.clock()-st)
clf.print_me()
if __name__=="__main__":
for i in range(100):
print("counter: ", i)
test_DT_1()
test_DT_2()
test_DT_3()
test_DT_4()
| [
"felicia.burtscher@hotmail.com"
] | felicia.burtscher@hotmail.com |
bf0e73cc661d93997ddf0586dd16a1920b74feee | 64ae9e59e387aa219183f6748f07ede3533c14b2 | /lib/hardware/switches/force10.py | 801a288bde842162510f99d2c59a73519ba2a7ab | [
"BSD-2-Clause"
] | permissive | sohonet/HEN | fe0168816d908c9c5d3180e90e67b12e4724c7be | 47575028a6f3d3fe04d6839dd779b2b1b991accc | refs/heads/master | 2021-01-19T07:30:22.849260 | 2012-02-20T19:41:17 | 2012-02-20T19:41:17 | 87,548,153 | 0 | 0 | null | 2017-04-07T13:25:40 | 2017-04-07T13:25:40 | null | UTF-8 | Python | false | false | 69,551 | py | ####################
# TODO
# Add roll back support for adding ports
# Add roll back support for creating vlans
# Add more debugging to telnet interface
# Clean up code
# More comments, and fix comments
# Add exception throwing
##################################################################################################################
# force10.py: contains the switch subclass for Force10 switches
#
# CLASSES
# --------------------------------------------------------------------
# Force10Switch The class used to support Force10 switches (derived from the Switch superclass). This class
# contains all operations relating to proprietary Force10 SNMP mibs, such as VLAN operations.
# Force10E1200Switch The class used to support Force10 superstack switches. This class contains information
# specific to this model of switch (number of ports, etc)
#
##################################################################################################################
import commands, os, string
from hardware.switches.switch import Switch
from auxiliary.hen import VLAN, Port, MACTableEntry, SimplePort, SimpleVlan
from auxiliary.snmp import SNMP
from auxiliary.oid import OID
from pysnmp.proto import rfc1902
from pysnmp.entity.rfc3413 import cmdgen, mibvar
from pysnmp.entity import engine, config
from pysnmp.smi import view
from pyasn1.codec.ber import decoder
from pyasn1.type import univ
from struct import *
from array import *
import time
import telnetlib
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
###########################################################################################
# CLASSES
###########################################################################################
class Force10Switch(Switch):
"""\brief Subclass for any Force10 switch in the testbed
This subclass implements methods to retrieve and set information from a switch using SNMP version 1 that
is proprietary to Force10 (vlans, etc).
"""
functions = []
#INTERNAL_ID_OFFSET = 1107787776
broken_firmware=True
DEFAULT_VLAN="Vlan 1"
###########################################################################
# Set VLAN Name
###########################################################################
def __setVLANName(self, vlan):
# there are currently issues with the force10 firmware
# that means the vlan name needs to be set via telnet
if not Force10Switch.broken_firmware:
# use snmp to set the vlan name
self.__setVLANNameSNMP(vlan)
else:
# use telnet to set the vlan name
self.__setVLANNameTelnet(vlan.getID(),vlan.getName())
# work around method to set the vlan name
def __setVLANNameTelnet(self, vlan_id,vlan_name):
tn = telnetlib.Telnet(self.getIPAddress())
user = "admin"
password = "admin"
TIMEOUT=2
tn.set_debuglevel(1)
tn.read_until("Login: ")
tn.write(user + "\n")
if password:
tn.read_until("Password: ",TIMEOUT)
tn.write(password + "\n")
tn.write("configure\n")
tn.read_until("Force10(conf)#",TIMEOUT)
tn.write("interface vlan "+str(vlan_id)+"\n")
tn.read_until("Force10(conf-if-vl-"+str(vlan_id)+")#",TIMEOUT)
tn.write("name "+str(vlan_name)+"\n")
tn.read_until("Force10(conf-if-vl-"+str(vlan_id)+")#",TIMEOUT)
tn.write("exit\n")
tn.read_until("Force10(conf)#",TIMEOUT)
tn.write("exit\n")
tn.read_until("Force10#",TIMEOUT)
tn.write("exit\n")
def __setVLANNameSNMP(self,vlan):
# use snmp to set the vlan name
self.snmp.set(\
OID.dot1qVlanStaticName + (vlan.getInternalID(),),\
rfc1902.OctetString(str(vlanName)))
if self.snmp.getErrorStatus():
log.debug("Error with "+str(mylist[int(self.snmp.getErrorIndex())-1]))
return -1
return 0
###########################################################################
# Delete VLAN
###########################################################################
def deleteVLAN(self, vlan_name):
# Check whether the vlan exists
if (not self.__vlanExistsForName(vlan_name)):
log.debug("trying to delete a vlan that doesn't exist")
return -1
vlan_ifindex = self.__getVlanIfIndexFromName(vlan_name)
if (vlan_ifindex == -1):
log.debug("can't get ifindex for vlan "+str(vlan_name))
return -1
log.debug("vlan ifindex "+str(vlan_ifindex))
vlan_id = self.__getVlanIdFromIfIndex(vlan_ifindex)
if (vlan_id == -1):
log.debug("can't get vlan id for vlan "+str(vlan_name)+" for ifindex "+str(vlan_ifindex))
return -1
log.debug("vlan id "+str(vlan_id))
return self.__deleteVLAN(vlan_name,vlan_id)
def __deleteVLAN(self,vlan_name,vlan_id):
log.debug("__deleteVLAN "+str(vlan_name)+" "+str(vlan_id))
if not Force10Switch.broken_firmware:
return self.__deleteVLANSNMP(vlan_name)
else:
return self.__deleteVLANTelnet(vlan_id)
    # workaround method: deletes a vlan through the telnet CLI instead of SNMP
    def __deleteVLANTelnet(self, vlan_id):
        """\brief Deletes a vlan via the switch's telnet CLI
        ("no interface vlan <id>"). Workaround for broken SNMP firmware.
        \param vlan_id (\c int) 802.1Q id of the vlan to delete
        \return (\c int) always 0 (CLI output is not checked for errors)
        """
        tn = telnetlib.Telnet(self.getIPAddress())
        user = "admin"
        password = "admin"
        TIMEOUT=2
        tn.set_debuglevel(1)
        # log in
        tn.read_until("Login: ")
        tn.write(user + "\n")
        if password:
            tn.read_until("Password: ",TIMEOUT)
            tn.write(password + "\n")
        # enter config mode and remove the vlan interface
        tn.write("configure\n")
        tn.read_until("Force10(conf)#",TIMEOUT)
        tn.write("no interface vlan "+str(vlan_id)+"\n")
        tn.read_until("Force10(conf)#",TIMEOUT)
        # back out of config mode and close the session
        tn.write("exit\n")
        tn.read_until("Force10#",TIMEOUT)
        tn.write("exit\n")
        return 0
def __deleteVLANSNMP(self, vlan_name):
"""\brief Creates a vlan given an name
\param vlan_name (\c string) vlan name to delete
\return (\int) 0 if the operation is successful, -1 if it failed.
"""
# you delete a vlan by using its ifindex.
# get vlan ifindex from vlan_name
vlan_ifindex = self.__getVLANInternalID(vlan_name)
log.debug("Deleting vlan with name "+str(vlan_name)+" and ifindex"+str(vlan_ifindex)+" on switch "+str(self.getSwitchName()))
# check that we got a sane ifindex
if vlan_ifindex == -1:
log.debug("When deleting vlan with name "+str(vlan_id)+" couldn't get ifindex on switch "+str(self.getSwitchName()))
return -1
# use snmp to delete the vlan
self.snmp.set( \
OID.dot1qVlanStaticRowStatus+(vlan_ifindex,) ,\
rfc1902.Integer32(6) )
# check to see whether the set worked
if self.snmp.getErrorStatus():
log.debug("Error deleting vlan with name "+str(vlan_name)+" and ifindex"+str(vlan_ifindex)+" on switch "+str(self.getSwitchName()))
return -1
return 0
    def __deleteVLANByIfindex(self, vlan_ifindex):
        """\brief Deletes a vlan given its ifindex.
        (docstring corrected: previous text said "Creates a vlan given an name")
        \param vlan_ifindex (\c int) ifindex of the vlan to delete
        \return (\c int) 0 if the operation is successful, -1 if it failed.
        """
        # you delete a vlan by using its ifindex.
        log.debug("Deleting vlan with ifindex "+str(vlan_ifindex)+" on switch "+str(self.getSwitchName()))
        # use snmp to delete the vlan (RowStatus 6 == destroy)
        self.snmp.set( \
            OID.dot1qVlanStaticRowStatus+(vlan_ifindex,) ,\
            rfc1902.Integer32(6) )
        # check to see whether the set worked
        if self.snmp.getErrorStatus():
            log.debug("Error deleting vlan with and ifindex"+str(vlan_ifindex)+" on switch "+str(self.getSwitchName()))
            return -1
        return 0
###########################################################################
# Create VLAN
###########################################################################
def __createVLAN2(self, vlan_id, vlan_name):
"""\brief Creates a vlan given an id
\param vlan_id (\c int) vlan id to add
\return (\int) 0 if the operation is successful, -1 if it failed.
"""
log.debug( str(OID.dot1qVlanStaticRowStatus+(vlan_id,)) )
# you create a vlan using its tagged id
snmp_list = []
snmp_list.append( (OID.dot1qVlanStaticName+(vlan_id,) , rfc1902.OctetString(str(vlan_name)) ))
snmp_list.append( (OID.dot1qVlanStaticEgressPorts+(vlan_id,) , rfc1902.OctetString(self.getEmptyBitMap(64)) ))
snmp_list.append( (OID.dot1qVlanForbiddenEgressPorts+(vlan_id,) , rfc1902.OctetString(self.getEmptyBitMap(64)) ))
snmp_list.append( (OID.dot1qVlanStaticUntaggedPorts+(vlan_id,) , rfc1902.OctetString(self.getEmptyBitMap(64))))
snmp_list.append( (OID.dot1qVlanStaticRowStatus+(vlan_id,) , rfc1902.Integer32(4) ))
self.snmp.complex_set(snmp_list)
if self.snmp.getErrorStatus():
log.debug("Error creating vlan with id "+str(vlan_id)+" on switch " +str(self.getSwitchName()))
return -1
return 0
def __createVLAN(self, vlan_id):
"""\brief Creates a vlan given an id
\param vlan_id (\c int) vlan id to add
\return (\int) 0 if the operation is successful, -1 if it failed.
"""
log.debug( str(OID.dot1qVlanStaticRowStatus+(vlan_id,)) )
# you create a vlan using its tagged id
self.snmp.set( \
OID.dot1qVlanStaticRowStatus+(vlan_id,) ,\
rfc1902.Integer32(4) )
if self.snmp.getErrorStatus():
log.debug("Error creating vlan with id "+str(vlan_id)+" on switch "+str(self.getSwitchName()))
return -1
return 0
def __setVLANName2(self, vlan_name, vlan_ifindex):
# use snmp to set the vlan name
print "vlan_name "+str(vlan_name)
print "vlan_ifindex "+str(vlan_ifindex)
self.snmp.set(OID.dot1qVlanStaticName + (vlan_ifindex,),\
rfc1902.OctetString(str(vlan_name)))
if self.snmp.getErrorStatus():
log.debug("Error with "+str(mylist[int(self.snmp.getErrorIndex())-1]))
return -1
return 0
def addUntaggedPort(self,port_str,vlan_str):
self.refreshVlanInfo()
vlan_obj = self.getVlanByName(vlan_str)
port_obj = self.getPortByName(port_str)
if vlan_obj == None or port_obj == None:
print "error with addUntaggedPort in force10.py"
print "port_str",port_str
print "vlan_str",vlan_str
print "vlan_obj",str(vlan_obj)
print "port_obj",str(port_obj)
print "vlan_obj id",str(vlan_obj.getLocalId())
print "port_obj id",str(port_obj.getId())
return -1
log.debug("port obj"+str(port_obj))
log.debug("vlan obj"+str(vlan_obj))
snmp_list = []
snmp_list.append(self.__modUntaggedPort(vlan_obj.getLocalId(),port_obj.getId(),True))
snmp_list.append(self.__modTaggedPort(vlan_obj.getLocalId(),port_obj.getId(),True))
self.snmp.complex_set(snmp_list)
if self.snmp.getErrorStatus():
log.debug("Problem with adding port "+str(port_obj.getId())+" to vlan")
return -1
return 0
def addTaggedPort(self,port_str,vlan_str):
self.refreshVlanInfo()
vlan_obj = self.getVlanByName(vlan_str)
port_obj = self.getPortByName(port_str)
if vlan_obj == None or port_obj == None:
print "error with addUntaggedPort in force10.py"
print "port_str",port_str
print "vlan_str",vlan_str
print "vlan_obj",str(vlan_obj)
print "port_obj",str(port_obj)
print "vlan_obj id",str(vlan_obj.getLocalId())
print "port_obj id",str(port_obj.getId())
return -1
log.debug("port obj"+str(port_obj))
log.debug("vlan obj"+str(vlan_obj))
snmp_list = []
#snmp_list.append(self.__modUntaggedPort(vlan_obj.getLocalId(),port_obj.getId(),True))
snmp_list.append(self.__modTaggedPort(vlan_obj.getLocalId(),port_obj.getId(),True))
self.snmp.complex_set(snmp_list)
if self.snmp.getErrorStatus():
log.debug("Problem with adding port "+str(port_obj.getId())+" to vlan")
return -1
return 0
def removeUntaggedPort(self,port_str,vlan_str):
#self.refreshVlanInfo()
vlan_ifindex = self.__getVlanIfIndexFromName(vlan_str)
vlan_id = self.__getVlanIdFromIfIndex(vlan_ifindex)
return self.__deleteUntaggedPortTelnet(port_str,vlan_id)
def removeTaggedPort(self,port_str,vlan_str):
#self.refreshVlanInfo()
vlan_ifindex = self.__getVlanIfIndexFromName(vlan_str)
vlan_id = self.__getVlanIdFromIfIndex(vlan_ifindex)
return self.__deleteTaggedPortTelnet(port_str,vlan_id)
    def rubbish(self):
        # NOTE(review): dead/broken code -- this method takes no arguments but
        # references 'vlan_str' and 'port_str', so it raises NameError if ever
        # called. Kept byte-for-byte pending confirmation nothing calls it;
        # it appears to be an abandoned experiment in port removal (see the
        # commented-out "move to default vlan" attempts below).
        vlan_obj = self.getVlanByName(vlan_str)
        vlan_default_obj = self.getVlanByName("Vlan 1")
        port_obj = self.getPortByName(port_str)
        if vlan_obj == None or port_obj == None:
            print "error with addUntaggedPort in force10.py"
            print "port_str",port_str
            print "vlan_str",vlan_str
            print "vlan_default_obj",str(vlan_default_obj)
            print "vlan_obj",str(vlan_obj)
            print "port_obj",str(port_obj)
            print "vlan_obj id",str(vlan_obj.getLocalId())
            print "port_obj id",str(port_obj.getId())
            return -1
        log.debug("port obj : "+str(port_obj))
        log.debug("vlan obj : "+str(vlan_obj))
        log.debug("vlan_default_obj : "+str(vlan_default_obj))
        snmp_list = []
        snmp_list.append(self.__modTaggedPort(vlan_obj.getLocalId(),port_obj.getId(),False))
        snmp_list.append(self.__modUntaggedPort(vlan_obj.getLocalId(),port_obj.getId(),False))
        # try moving deleted vlan to the default one as we delete it.
        #snmp_list.append(self.__modUntaggedPort(vlan_default_obj.getLocalId(),port_obj.getId(),True))
        #snmp_list.append(self.__modTaggedPort(vlan_default_obj.getLocalId(),port_obj.getId(),True))
        # alternative thing to try, is just moving it to the defualt.
        self.snmp.complex_set(snmp_list)
        print "SNMP LIST "+str(snmp_list)
        if self.snmp.getErrorStatus():
            log.debug("Problem with removing port "+str(port_obj.getId())+" to vlan")
            return -1
        return 0
    def __addUntaggedPort(self,port_name,vlan_ifindex):
        """\brief Adds a port to a vlan's untagged and egress port maps via SNMP.
        NOTE(review): this method is redefined twice further down the class
        body; Python keeps only the last definition, so this copy is inert.
        \param port_name (\c string) name of the port to add
        \param vlan_ifindex (\c int) ifindex of the vlan
        \return (\c int) 0 on success, -1 on failure
        """
        port_ifindex = self.__getPortInternalID(port_name)
        if (port_ifindex == -1):
            log.debug("Can't get port ifindex for port name "+str(port_name))
            return -1
        print vlan_ifindex
        # get Static Untagged Ports Map
        upl = self.snmp.get(OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug("unable to get untagged ports")
            return -1
        #log.debug(self.printPortMap(upl,False))
        # get Static Egress Ports Map
        epl = self.snmp.get(OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug("unable to get tagged ports")
            return -1
        #log.debug( self.printPortMap(epl,True) )
        # set the port's bit in both maps and write them back together
        upl = self.__setPortList(upl,port_ifindex,True)
        epl = self.__setPortList(epl,port_ifindex,True)
        snmp_list = []
        snmp_list.append((OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,), upl))
        snmp_list.append((OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,) , epl))
        self.snmp.complex_set(snmp_list)
        if self.snmp.getErrorStatus():
            log.debug("Problem with adding port "+str(port_name)+" to vlan")
            return -1
        return 0
    def __modUntaggedPort(self,vid,pid,mod):
        """\brief Builds an SNMP set entry for a vlan's untagged-port map.
        \param vid (\c int) vlan's local id (table index)
        \param pid port id, translated through portIfIndexMapRev
        \param mod True to set the port's bit, False to clear it,
                   None to leave the map unchanged (status read)
        \return (\c tuple) (oid, port map) entry for complex_set, or -1 on error
        """
        # if mod is True, turn the port on
        # False, turn the port off
        # None, get the status
        # NOTE(review): this first get exists only for the debug line; the
        # string concatenation assumes the SNMP value concatenates with str
        log.debug("full "+self.snmp.get(OID.dot1qVlanStaticUntaggedPorts \
            + (int(vid),))[0][1])
        upl = self.snmp.get(OID.dot1qVlanStaticUntaggedPorts \
            + (int(vid),))[0][1]
        log.debug("upl "+str(upl))
        if self.snmp.getErrorStatus():
            log.debug("unable to get untagged ports")
            return -1
        if mod != None:
            log.debug("ifmap "+str(self.portIfIndexMapRev[pid]))
            upl = self.__setPortList(upl,self.portIfIndexMapRev[pid],mod)
            log.debug("new upl "+str(upl))
        snmp_entry = (OID.dot1qVlanStaticUntaggedPorts + (int(vid),), upl)
        print "SNMP ENTRY "+str(snmp_entry)
        return snmp_entry
    def __modTaggedPort(self,vid,pid,mod):
        """\brief Builds an SNMP set entry for a vlan's egress (tagged) port map.
        \param vid (\c int) vlan's local id (table index)
        \param pid port id, translated through portIfIndexMapRev
        \param mod True to set the port's bit, False to clear it
                   (NOTE(review): unlike __modUntaggedPort, mod=None is not
                   handled here and would be passed straight to __setPortList)
        \return (\c tuple) (oid, port map) entry for complex_set, or -1 on error
        """
        # get Static Egress Ports Map
        epl = self.snmp.get(OID.dot1qVlanStaticEgressPorts + (int(vid),))[0][1]
        if self.snmp.getErrorStatus():
            log.debug("unable to get tagged ports")
            return -1
        epl = self.__setPortList(epl,self.portIfIndexMapRev[pid],mod)
        snmp_entry = (OID.dot1qVlanStaticEgressPorts + (int(vid),) , epl)
        return snmp_entry
    def refreshVlanInfo(self):
        """\brief Refreshes both the port list and vlan list for the switch
        NOTE(review): this method is redefined later in the class body (with
        the getters called without the True argument); Python keeps only the
        last definition, so this copy is inert.
        """
        untaggedPorts = self.getDot1qVlanStaticUntaggedPorts(True)
        taggedPorts = self.getDot1qVlanStaticEgressPorts(True)
        pvidPorts = self.getDot1qPvid(True)
        self.resetPortsVlanInfo()
        self.getVlanList()
        # put untagged ports into list
        for up in untaggedPorts:
            vlan = self.getVlan(str(up[0][0][-1]))
            plp = self.__simpleParsePortList(up[0][1])
            vlan.setUntagged(plp)
            for p in plp:
                port = self.getPort(p)
                port.setUntagged((vlan.getName(),vlan.getId()))
        # put tagged ports into list
        for tp in taggedPorts:
            vlan = self.getVlan(str(tp[0][0][-1]))
            plp = self.__simpleParsePortList(tp[0][1])
            vlan.setTagged(plp)
            for p in plp:
                port = self.getPort(p)
                a = port.getTagged()
                a.append((vlan.getName(),vlan.getId()))
                port.setTagged(a)
        # record each port's pvid (skipping indices beyond the physical ports)
        for pp in pvidPorts:
            vlan = self.getVlan(str(pp[0][1]))
            if (pp[0][0][-1] <= self.getNumberofPorts()):
                pvid_list = vlan.getPvid()
                pvid_list.append(pp[0][0][-1])
                vlan.setPvid(pvid_list)
                port = self.getPort(pp[0][0][-1])
                #port.setPvid((pp[0][1],vlan_list[vid][0]))
                port.setUntagged((vlan.getName(),vlan.getId()))
    def __addUntaggedPort(self,port_name,vlan_ifindex):
        """\brief Adds a port to a vlan's untagged and egress port maps via SNMP.
        NOTE(review): second of three definitions of this name in the class
        body; only the last one (further down) is effective at runtime.
        \param port_name (\c string) name of the port to add
        \param vlan_ifindex (\c int) ifindex of the vlan
        \return (\c int) 0 on success, -1 on failure
        """
        port_ifindex = self.__getPortInternalID(port_name)
        log.debug("PORT IF INDEX "+str(port_ifindex))
        if (port_ifindex == -1):
            log.debug("Can't get port ifindex for port name "+str(port_name))
            return -1
        print vlan_ifindex
        # get Static Untagged Ports Map
        upl = self.snmp.get(OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug("unable to get untagged ports")
            return -1
        #log.debug(self.printPortMap(upl,False))
        # get Static Egress Ports Map
        epl = self.snmp.get(OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug("unable to get tagged ports")
            return -1
        #log.debug( self.printPortMap(epl,True) )
        # set the port's bit in both maps and write them back together
        upl = self.__setPortList(upl,port_ifindex,True)
        epl = self.__setPortList(epl,port_ifindex,True)
        snmp_list = []
        snmp_list.append((OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,) , epl))
        snmp_list.append((OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,), upl))
        self.snmp.complex_set(snmp_list)
        if self.snmp.getErrorStatus():
            log.debug("Problem with adding port "+str(port_name)+" to vlan")
            return -1
        return 0
    def _creatVLAN(self, vlan_id, vlan_name):
        """\brief Creates a vlan based on its id and name.
        NOTE(review): the method name carries a typo ('_creatVLAN'); kept
        unchanged because callers may depend on it.
        \param vlan_id (\c int) 802.1Q id for the vlan
        \param vlan_name (\c string) name of the vlan
        \return (\c int) 0 if the operation is sucessful, negative otherwise
        """
        # Check whether the vlan exists
        if (self.__vlanExistsForName(vlan_name)):
            log.debug("trying to add a vlan that exists")
            return -1
        print "success 1"
        if (vlan_name == None or vlan_name == "None"):
            log.debug("can't create a vlan with a None name")
            return -1
        print "success 2"
        # Create the vlan
        if (self.__createVLAN2(vlan_id,vlan_name) == -1):
            log.debug("Failed to create vlan for id "+str(vlan_id))
            return -1
        print "success 3"
        # Get ifindex for vlan ID
        vlan_ifindex = self.__getIfindexForVLANID(vlan_id)
        if (vlan_ifindex == -1):
            log.debug("Error getting vlan ifindex for vlan id "+str(vlan_id))
            return -1
        print "success 4"
        # Name the vlan
        # NOTE(review): __createVLAN2 already wrote the name; presumably this
        # second naming pass is a firmware workaround -- confirm
        if (self.__setVLANName2(vlan_name,vlan_ifindex) == -1):
            log.debug("Failed to name vlan for id "+str(vlan_id)+" with "+str(vlan_name))
            # roll back earlier creation.
            if (vlan_ifindex != -1):
                if (self.__deleteVLANByIfindex(vlan_ifindex) == -1):
                    log.critical("failed to rolled back vlan creation of vlan "+str(vlan_name)+" with id "+str(vlan_id))
                else:
                    log.critical("successfully rolled back vlan creation of vlan "+str(vlan_name)+" with id "+str(vlan_id))
            else:
                log.critical("failed to get ifindex when rolling back")
            print "success 5"
            return -1
        print "success 6"
        return 0
    def createVLAN(self, vlan):
        """\brief Creates a vlan as specified by a vlan object. See addPorts for rules on how ports are
                  added. The function returns the following error codes:
                  -1: if the operation failed
                   0: if the operation succeeds
        \param vlan (\c VLAN) A VLAN object representing the vlan to be added
        \return (\c int) 0 if the operation is sucessful, negative otherwise
        """
        # Check whether the vlan exists
        if (self.__vlanExistsForName(vlan.getName())):
            log.debug("trying to add a vlan that exists")
            return -1
        if (vlan.getName() == None or vlan.getName() == "None"):
            log.debug("can't create a vlan with a None name")
            return -1
        # Create the vlan
        if (self.__createVLAN(vlan.getID()) == -1):
            log.debug("Failed to create vlan for id "+str(vlan.getID()))
            return -1
        # Get ifindex for vlan ID
        vlan_ifindex = self.__getIfindexForVLANID(vlan.getID())
        if (vlan_ifindex == -1):
            log.debug("Error getting vlan ifindex for vlan id (b)"+str(vlan.getID()))
            return -1
        vlan.setInternalID(vlan_ifindex)
        # Name the vlan; on failure, roll the creation back by ifindex
        if (self.__setVLANName(vlan) == -1):
            log.debug("Failed to name vlan for id "+str(vlan.getID())+" with "+str(vlan.getName()))
            # roll back earlier creation.
            if (vlan_ifindex != -1):
                if (self.__deleteVLANByIfindex(vlan_ifindex) == -1):
                    log.critical("failed to rolled back vlan creation of vlan "+str(vlan.getName())+" with id "+str(vlan.getID()))
                else:
                    log.critical("successfully rolled back vlan creation of vlan "+str(vlan.getName())+" with id "+str(vlan.getID()))
            else:
                log.critical("failed to get ifindex when rolling back")
            return -1
        # need to add roll back support
        # add every requested port on this switch, tagged or untagged
        for port in vlan.getPortsOnSwitch(self.getSwitchName()):
            if not port.getTagged():
                if (self.__addUntaggedPort(port.getPortNumber(),vlan_ifindex) == -1 ):
                    log.debug("Error added untagged port "+str(port.getPortNumber())+" to vlan "+str(vlan.getName()))
                    return -1
                else:
                    log.debug("Successfully added untagged port "+str(port.getPortNumber())+" to vlan "+str(vlan.getName()))
            else:
                if (self.__addTaggedPort(port.getPortNumber(),vlan_ifindex) == -1 ):
                    log.debug("Error added tagged port "+str(port.getPortNumber())+" to vlan "+str(vlan.getName()))
                    return -1
                else:
                    log.debug("Successfully added tagged port "+str(port.getPortNumber())+" to vlan "+str(vlan.getName()))
        return 0
###########################################################################
# Add ports
###########################################################################
def addPorts(self,vlan_name, ports):
vlan_ifindex = self.__getVlanIfIndexFromName(vlan_name)
if (vlan_ifindex == -1):
log.debug("Unable to get vlan ifindex for vlan name "+str(vlan_name))
return -1
for port in ports:
if not port.getTagged():
if (self.__addUntaggedPort(port.getPortNumber(),vlan_ifindex) == -1 ):
log.debug("Error added untagged port "+str(port.getPortNumber())+" to vlan "+str(vlan_name))
return -1
else:
log.debug("Successfully added untagged port "+str(port.getPortNumber())+" to vlan "+str(vlan_name))
else:
if (self.__addTaggedPort(port.getPortNumber(),vlan_ifindex) == -1 ):
log.debug("Error added tagged port "+str(port.getPortNumber())+" to vlan "+str(vlan_name))
return -1
else:
log.debug("Successfully added tagged port "+str(port.getPortNumber())+" to vlan "+str(vlan_name))
return 0
###########################################################################
# Delete ports
###########################################################################
def deletePorts(self,vlan_name, ports):
vlan_ifindex = self.__getVlanIfIndexFromName(vlan_name)
if (vlan_ifindex == -1):
log.debug("Unable to get vlan ifindex for vlan name "+str(vlan_name))
return -1
for port in ports:
if not port.getTagged():
if (self.__deleteUntaggedPort(port.getPortNumber(),vlan_ifindex) == -1 ):
log.debug("Error deleting untagged port "+str(port.getPortNumber())+" to vlan "+str(vlan_name))
return -1
else:
log.debug("Successfully deleted untagged port "+str(port.getPortNumber())+" to vlan "+str(vlan_name))
else:
if (self.__deleteTaggedPort(port.getPortNumber(),vlan_ifindex) == -1 ):
log.debug("Error deleting tagged port "+str(port.getPortNumber())+" to vlan "+str(vlan_name))
return -1
else:
log.debug("Successfully deleted tagged port "+str(port.getPortNumber())+" to vlan "+str(vlan_name))
return 0
###########################################################################
# Add untagged ports
###########################################################################
    def __addUntaggedPort(self,port_name,vlan_ifindex):
        """\brief Adds a port to a vlan's untagged and egress port maps via SNMP.
        NOTE(review): third definition of this name in the class body; as the
        last one, this is the copy Python actually uses at runtime.
        \param port_name (\c string) name of the port to add
        \param vlan_ifindex (\c int) ifindex of the vlan
        \return (\c int) 0 on success, -1 on failure
        """
        port_ifindex = self.__getPortInternalID(port_name)
        if (port_ifindex == -1):
            log.debug("Can't get port ifindex for port name "+str(port_name))
            return -1
        # get Static Untagged Ports Map
        upl = self.snmp.get(OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug("unable to get untagged ports")
            return -1
        #log.debug(self.printPortMap(upl,False))
        # get Static Egress Ports Map
        epl = self.snmp.get(OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug("unable to get tagged ports")
            return -1
        #log.debug( self.printPortMap(epl,True) )
        # set the port's bit in both maps and write them back together
        upl = self.__setPortList(upl,port_ifindex,True)
        epl = self.__setPortList(epl,port_ifindex,True)
        snmp_list = []
        snmp_list.append((OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,) , epl))
        snmp_list.append((OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,), upl))
        self.snmp.complex_set(snmp_list)
        if self.snmp.getErrorStatus():
            log.debug("Problem with adding port "+str(port_name)+" to vlan")
            return -1
        return 0
###########################################################################
# Remove untagged ports
###########################################################################
def __deleteUntaggedPort(self,port_name,vlan_ifindex):
# there are currently issues with the force10 firmware
# that means the vlan name needs to be set via telnet
if not Force10Switch.broken_firmware:
# use snmp to set the vlan name
self.__deleteUntaggedPortSNMP(port_name,vlan_ifindex)
else:
# use telnet to set the vlan name
# need to lookup vlan index for vlan ifindex
vlan_id = self.__getVlanIdFromIfIndex(vlan_ifindex)
if vlan_id == -1:
log.debug("Couldn't get vlan id for vlan ifindex "+str(vlan_ifindex))
return -1
self.__deleteUntaggedPortTelnet(port_name,vlan_id)
    def __deleteUntaggedPortTelnet(self,port_name,vlan_id):
        """\brief Removes an untagged port from a vlan via the telnet CLI
        ("no untagged <port>"). Workaround for broken SNMP firmware.
        \param port_name (\c string) port to remove
        \param vlan_id (\c int) 802.1Q id of the vlan
        \return (\c int) always 0 (CLI output is not checked for errors)
        """
        tn = telnetlib.Telnet(self.getIPAddress())
        user = "admin"
        password = "admin"
        TIMEOUT=2
        tn.set_debuglevel(1)
        # log in
        tn.read_until("Login: ")
        tn.write(user + "\n")
        if password:
            tn.read_until("Password: ",TIMEOUT)
            tn.write(password + "\n")
        # enter config mode, select the vlan interface and remove the port
        tn.write("configure\n")
        tn.read_until("Force10(conf)#",TIMEOUT)
        tn.write("interface vlan "+str(vlan_id)+"\n")
        tn.read_until("Force10(conf-if-vl-"+str(vlan_id)+")#",TIMEOUT)
        tn.write("no untagged "+str(port_name)+"\n")
        tn.read_until("Force10(conf-if-vl-"+str(vlan_id)+")#",TIMEOUT)
        # back out of config mode and close the session
        tn.write("exit\n")
        tn.read_until("Force10(conf)#",TIMEOUT)
        tn.write("exit\n")
        tn.read_until("Force10#",TIMEOUT)
        tn.write("exit\n")
        return 0
    def __deleteUntaggedPortSNMP(self,port_name,vlan_ifindex):
        """\brief Removes a port from a vlan's untagged and egress port maps
        via SNMP.
        \param port_name (\c string) port to remove
        \param vlan_ifindex (\c int) ifindex of the vlan
        \return (\c int) 0 on success, -1 on failure
        """
        port_ifindex = self.__getPortInternalID(port_name)
        if (port_ifindex == -1):
            log.debug("Can't get port ifindex for port name "+str(port_name))
            return -1
        # get Static Untagged Ports Map
        upl = self.snmp.get(OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug( "unable to get untagged ports")
            return -1
        #log.debug(self.printPortMap(upl,False))
        # get Static Egress Ports Map
        epl = self.snmp.get(OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug( "unable to get tagged ports" )
            return -1
        #log.debug( self.printPortMap(epl,True) )
        # clear the port's bit in both maps and write them back together
        upl = self.__setPortList(upl,port_ifindex,False)
        epl = self.__setPortList(epl,port_ifindex,False)
        snmp_list = []
        snmp_list.append((OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,), upl))
        snmp_list.append((OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,) , epl))
        self.snmp.complex_set(snmp_list)
        if self.snmp.getErrorStatus():
            log.debug("Problem with deleting port "+str(port_name)+" to vlan")
            return -1
        return 0
###########################################################################
# Add tagged ports
###########################################################################
    def __addTaggedPort(self,port_name,vlan_ifindex):
        """\brief Adds a port as a tagged member of a vlan via SNMP: the
        port's bit is set in the egress map and cleared in the untagged map.
        \param port_name (\c string) port to add
        \param vlan_ifindex (\c int) ifindex of the vlan
        \return (\c int) 0 on success, -1 on failure
        """
        port_ifindex = self.__getPortInternalID(port_name)
        if (port_ifindex == -1):
            log.debug("Can't get port ifindex for port name "+str(port_name))
            return -1
        # get Static Untagged Ports Map (comment corrected: this OID is the
        # untagged map, not the tagged one)
        upl = self.snmp.get(OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug("unable to get tagged ports")
            return -1
        #log.debug(self.printPortMap(upl,False))
        # get Static Egress Ports Map
        epl = self.snmp.get(OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,))[0][1]
        if self.snmp.getErrorStatus():
            log.debug("unable to get tagged ports")
            return -1
        #log.debug( self.printPortMap(epl,True) )
        # tagged membership: remove from the untagged map, add to the egress map
        upl = self.__setPortList(upl,port_ifindex,False)
        epl = self.__setPortList(epl,port_ifindex,True)
        snmp_list = []
        snmp_list.append((OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,) , epl))
        snmp_list.append((OID.dot1qVlanStaticUntaggedPorts + (vlan_ifindex,), upl))
        self.snmp.complex_set(snmp_list)
        if self.snmp.getErrorStatus():
            log.debug("Problem with adding port "+str(port_name)+" to vlan")
            return -1
        return 0
###########################################################################
# Delete Tagged ports
###########################################################################
def __deleteTaggedPort(self,port_name,vlan_ifindex):
# there are currently issues with the force10 firmware
# that means the vlan name needs to be set via telnet
if not Force10Switch.broken_firmware:
# use snmp to set the vlan name
self.__deleteTaggedPortSNMP(port_name,vlan_ifindex)
else:
# use telnet to set the vlan name
# need to lookup vlan index for vlan ifindex
vlan_id = self.__getVlanIdFromIfIndex(vlan_ifindex)
if vlan_id == -1:
log.debug("Couldn't get vlan id for vlan ifindex "+str(vlan_ifindex))
return -1
self.__deleteTaggedPortTelnet(port_name,vlan_id)
    def __deleteTaggedPortTelnet(self,port_name,vlan_id):
        """\brief Removes a tagged port from a vlan via the telnet CLI
        ("no tagged <port>"). Workaround for broken SNMP firmware.
        \param port_name (\c string) port to remove
        \param vlan_id (\c int) 802.1Q id of the vlan
        \return (\c int) always 0 (CLI output is not checked for errors)
        """
        tn = telnetlib.Telnet(self.getIPAddress())
        user = "admin"
        password = "admin"
        TIMEOUT=2
        tn.set_debuglevel(1)
        # log in
        tn.read_until("Login: ")
        tn.write(user + "\n")
        if password:
            tn.read_until("Password: ",TIMEOUT)
            tn.write(password + "\n")
        # enter config mode, select the vlan interface and remove the port
        tn.write("configure\n")
        tn.read_until("Force10(conf)#",TIMEOUT)
        tn.write("interface vlan "+str(vlan_id)+"\n")
        tn.read_until("Force10(conf-if-vl-"+str(vlan_id)+")#",TIMEOUT)
        tn.write("no tagged "+str(port_name)+"\n")
        tn.read_until("Force10(conf-if-vl-"+str(vlan_id)+")#",TIMEOUT)
        # back out of config mode and close the session
        tn.write("exit\n")
        tn.read_until("Force10(conf)#",TIMEOUT)
        tn.write("exit\n")
        tn.read_until("Force10#",TIMEOUT)
        tn.write("exit\n")
        return 0
def __deleteTaggedPortSNMP(self,port_name,vlan_ifindex):
port_ifindex = self.__getPortInternalID(port_name)
if (port_ifindex == -1):
log.debug("Can't get port ifindex for port name "+str(port_name))
return -1
if self.snmp.getErrorStatus():
log.debug( "unable to get tagged ports")
return -1
#log.debug(self.printPortMap(upl,False))
# get Static Egress Ports Map
epl = self.snmp.get(OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,))[0][1]
if self.snmp.getErrorStatus():
log.debug( "unable to get tagged ports" )
return -1
#log.debug( self.printPortMap(epl,True) )
epl = self.__setPortList(epl,port_ifindex,False)
snmp_list = []
snmp_list.append((OID.dot1qVlanStaticEgressPorts + (vlan_ifindex,) , epl))
self.snmp.complex_set(snmp_list)
if self.snmp.getErrorStatus():
log.debug("Problem with deleting port "+str(port_name)+" to vlan")
return -1
return 0
###########################################################################
# VLAN Information
###########################################################################
def getVlanList(self):
vlan_names = self.getVLANNames()
vlan_ids_table = self.getDot1qVlanFdbId()
self.clearVlans()
for vlan_id in vlan_ids_table:
vid = vlan_id[0][0][-1]
sv = SimpleVlan(vlan_names[vid],str(vlan_id[0][1]),str(vid),self.getNodeID())
self.addVlan(sv)
    def refreshVlanInfo(self):
        """\brief Refreshes both the port list and vlan list for the switch
        NOTE(review): this is the second definition of refreshVlanInfo in the
        class body and therefore the one Python uses; it differs from the
        earlier copy only in calling the SNMP getters without arguments.
        """
        untaggedPorts = self.getDot1qVlanStaticUntaggedPorts()
        taggedPorts = self.getDot1qVlanStaticEgressPorts()
        pvidPorts = self.getDot1qPvid()
        self.resetPortsVlanInfo()
        self.getVlanList()
        # put untagged ports into list
        for up in untaggedPorts:
            vlan = self.getVlan(str(up[0][0][-1]))
            plp = self.__simpleParsePortList(up[0][1])
            vlan.setUntagged(plp)
            for p in plp:
                port = self.getPort(p)
                port.setUntagged((vlan.getName(),vlan.getId()))
        # put tagged ports into list
        for tp in taggedPorts:
            vlan = self.getVlan(str(tp[0][0][-1]))
            plp = self.__simpleParsePortList(tp[0][1])
            vlan.setTagged(plp)
            for p in plp:
                port = self.getPort(p)
                a = port.getTagged()
                a.append((vlan.getName(),vlan.getId()))
                port.setTagged(a)
        # record each port's pvid (skipping indices beyond the physical ports)
        for pp in pvidPorts:
            vlan = self.getVlan(str(pp[0][1]))
            if (pp[0][0][-1] <= self.getNumberofPorts()):
                pvid_list = vlan.getPvid()
                pvid_list.append(pp[0][0][-1])
                vlan.setPvid(pvid_list)
                port = self.getPort(pp[0][0][-1])
                #port.setPvid((pp[0][1],vlan_list[vid][0]))
                port.setUntagged((vlan.getName(),vlan.getId()))
def getVLANNames(self):
"""\brief Gets the names of the vlans on the switch, returning a dictionary whose keys are the objects' internal ids and whose values are the actual vlan names (see \getSNMPResultTable for more info)
\return (\c dictionary) A dictionary with the names of the vlans
"""
vlans = {}
dot1qVlanStaticNameTable = self.getDot1qVlanStaticName()
for dot1qVlanStaticNameTableRow in dot1qVlanStaticNameTable:
#log.debug( dot1qVlanStaticNameTableRow[0][1] )
vlans[dot1qVlanStaticNameTableRow[0][0][len(dot1qVlanStaticNameTableRow[0][0])-1]] = dot1qVlanStaticNameTableRow[0][1]
# default vlan does not show up in linksys tables, hard code it here
#vlans['1'] = rfc1902.OctetString('Default VLAN')
#for i in vlans:
# log.debug( i,vlans[i] )
return vlans
    def getFullVLANInfo(self, theVLANName=None):
        """\brief Returns a list of VLAN objects consisting of all of the vlans in the switch. If the theVLANName parameter is set, the function returns a single VLAN object corresponding to the requested vlan
        \param theVLANName (\c string) The name of the VLAN to retrieve
        \return (\c list of VLAN objects) A list of VLAN objects with the requested information or a VLAN object if a vlan name is specified
        """
        vlans = []
        # fetch all the SNMP tables we need to join up front
        portIfIndexTable = self.getDot1dBasePortIfIndex()
        ifDescrTable = self.getIfDescr()
        untaggedPorts = self.getDot1qVlanStaticUntaggedPorts()
        taggedPorts = self.getDot1qVlanStaticEgressPorts()
        pvidPorts = self.getDot1qPvid()
        vlan_names = self.getVLANNames()
        vlan_ids = self.getDot1qVlanFdbId()
        #log.debug( untaggedPorts )
        #log.debug( taggedPorts )
        #log.debug( pvidPorts )
        vlan_list = {} # key -> (name, untagged list, tagged list, pvid list)
        for vid in vlan_names:
            if not vlan_list.has_key(vid):
                vlan_list[vid] = (None,[],[],[])
            vlan_list[vid] = (vlan_names[vid],[],[],[])
        # put untagged ports into list
        for up in untaggedPorts:
            pl = self.__parsePortList(up[0][1],False)
            vid = up[0][0][len(up[0][0])-1]
            if not vlan_list.has_key(vid):
                vlan_list[vid] = (None,[],[],[])
            vlan_list[vid] = (vlan_list[vid][0],pl, vlan_list[vid][2], vlan_list[vid][3])
        # put tagged ports into list
        for tp in taggedPorts:
            pl = self.__parsePortList(tp[0][1],True)
            vid = tp[0][0][len(tp[0][0])-1]
            if not vlan_list.has_key(vid):
                vlan_list[vid] = (None,[],[],[])
            vlan_list[vid] = (vlan_list[vid][0],vlan_list[vid][1],pl, vlan_list[vid][3])
        # put pvid ports into list
        for pp in pvidPorts:
            pl = self.__parsePortList(pp[0][1],False)
            vid = pp[0][0][len(pp[0][0])-1]
            if not vlan_list.has_key(vid):
                vlan_list[vid] = (None,[],[],[])
            vlan_list[vid] = (vlan_list[vid][0],vlan_list[vid][1],vlan_list[vid][2],pl)
        # translate each port's bridge-port index into its interface
        # description/ifindex by joining dot1dBasePortIfIndex with ifDescr
        for v in vlan_list:
            #if v == 1107787777:
            #    continue
            #log.debug( "vlan ",v," ",vlan_list[v][0] )
            #log.debug( "\t untagged" )
            for port in vlan_list[v][1]:
                found = False
                for portIfIndexTableRow in portIfIndexTable:
                    if portIfIndexTableRow[0][0][len(portIfIndexTableRow[0][0])-1] == port.getInternalID():
                        #print portIfIndexTableRow[0][0][len(portIfIndexTableRow[0][0])-1],port.getInternalID()
                        for ifDescrTableRow in ifDescrTable:
                            if ifDescrTableRow[0][0][len(ifDescrTableRow[0][0])-1] == portIfIndexTableRow[0][1]:
                                port.setPortNumber(ifDescrTableRow[0][1])
                                port.setInternalID(portIfIndexTableRow[0][1])
                                found = True
                                break
                    if found : break
                #log.debug( "\t\t",port, )
            #log.debug( "\t tagged" )
            for port in vlan_list[v][2]:
                found = False
                for portIfIndexTableRow in portIfIndexTable:
                    if portIfIndexTableRow[0][0][len(portIfIndexTableRow[0][0])-1] == port.getInternalID():
                        for ifDescrTableRow in ifDescrTable:
                            if ifDescrTableRow[0][0][len(ifDescrTableRow[0][0])-1] == portIfIndexTableRow[0][1]:
                                port.setPortNumber(ifDescrTableRow[0][1])
                                port.setInternalID(portIfIndexTableRow[0][1])
                                found = True
                                break
                    if found : break
                #log.debug( "\t\t",port,)
            #log.debug( "\t pvid")
            for port in vlan_list[v][3]:
                found = False
                for portIfIndexTableRow in portIfIndexTable:
                    if portIfIndexTableRow[0][0][len(portIfIndexTableRow[0][0])-1] == port.getInternalID():
                        for ifDescrTableRow in ifDescrTable:
                            if ifDescrTableRow[0][0][len(ifDescrTableRow[0][0])-1] == portIfIndexTableRow[0][1]:
                                port.setPortNumber(ifDescrTableRow[0][1])
                                port.setInternalID(portIfIndexTableRow[0][1])
                                found = True
                                break
                    if found : break
                #log.debug( "\t\t",port,)
        # consider what to do with ports in both tagged and untagged state
        # build VLAN objects: attach the 802.1Q id from the fdb table and all
        # ports (untagged + tagged + pvid) found on this switch
        for v in vlan_list:
            switches = {}
            tmp_vlan = VLAN(vlan_list[v][0])
            tmp_vlan.setInternalID(v)
            for vid in vlan_ids:
                if vid[0][0][len(vid[0][0])-1] == v:
                    tmp_vlan.setID(vid[0][1])
                    tmp_vlan.setTaggedID(vid[0][1])
                    break
            switches[self.getSwitchName()] = vlan_list[v][1] + vlan_list[v][2] + vlan_list[v][3]
            tmp_vlan.setSwitches(switches)
            vlans.append(tmp_vlan)
        # if a specific vlan was requested, return just that one
        for vlan in vlans:
            if (vlan.getName() == theVLANName):
                return [vlan]
        return vlans
###########################################################################
# Get Mac table
###########################################################################
def getFullMACTable(self, simple=False):
    """Return the switch's learned MAC table.

    When *simple* is True, return a list of (mac, port-name) tuples;
    otherwise return a list of MACTableEntry objects.
    """
    # Dispatch to the private helper matching the requested output format.
    if simple:
        return self.__getSimpleMACTable()
    return self.__getFullMACTable()
def __getFullMACTable(self):
    """\brief Gets the full learned mac table from the switch, returning a list of MACTableEntry objects.
    \return (\c list) A list of MACTableEntry objects with the results.
    """
    # Pull the SNMP tables needed for the join: FDB port map, FDB entry
    # status, bridge-port -> ifindex map, and ifindex -> port-name map.
    portsTable = self.getDot1qTpFdbPort()
    learnedTypeTable = self.getDot1qTpFdbStatus()
    portIfIndexTable = self.getDot1dBasePortIfIndex()
    ifDescrTable = self.getPortNames()
    # get mac table
    result = []
    for learnedTypeTableRow in learnedTypeTable:
        for learnedname, learnedval in learnedTypeTableRow:
            # Status value '3' marks learned FDB entries (dot1qTpFdbStatus).
            if learnedval == rfc1902.Integer32('3'):
                # Rewrite the status OID into the matching dot1qTpFdbPort OID
                # so the two tables can be joined by comparing object names.
                learnedname = rfc1902.ObjectName((learnedname.prettyPrint()).replace(OID.dot1qTpFdbStatus.prettyPrint(),OID.dot1qTpFdbPort.prettyPrint()))
                for portTableRow in portsTable:
                    for portname, portval in portTableRow:
                        if learnedname == portname:
                            # OID sub-identifiers 14-19 carry the six MAC octets.
                            result.append(MACTableEntry(("%02x:%02x:%02x:%02x:%02x:%02x" % (int(learnedname[14]),int(learnedname[15]),int(learnedname[16]),int(learnedname[17]),int(learnedname[18]),int(learnedname[19]))).replace("0x","").upper(),portval,'3',self.getSwitchName()))
    # Translate ids to names
    for port in result:
        #log.debug( port.getPort())
        # Map bridge port -> ifindex -> human-readable port name.
        for portIfIndexTableRow in portIfIndexTable:
            if portIfIndexTableRow[0][0][len(portIfIndexTableRow[0][0])-1] == port.getPort():
                for ifDescrTableRow in ifDescrTable:
                    if ifDescrTableRow[0][0][len(ifDescrTableRow[0][0])-1] == portIfIndexTableRow[0][1]:
                        port.setPort(ifDescrTableRow[0][1])
    return result
def addMacsToPorts(self):
    """\brief Attaches each learned MAC address to its Port object.

    Walks the switch's learned FDB entries and appends each MAC address
    to the mac list of the Port object that learned it.  The Port
    objects are updated in place; nothing is returned.
    """
    portsTable = self.getDot1qTpFdbPort()
    learnedTypeTable = self.getDot1qTpFdbStatus()
    portIfIndexTable = self.getDot1dBasePortIfIndex()
    ifDescrTable = self.getPortNames()
    # Clear any MAC info left over from a previous walk before re-populating.
    self.resetPortsMacInfo()
    # get mac table
    # NOTE(review): `result` is unused in this method (left over from
    # __getFullMACTable, which this code was adapted from).
    result = []
    for learnedTypeTableRow in learnedTypeTable:
        for learnedname, learnedval in learnedTypeTableRow:
            # Status value '3' marks learned FDB entries.
            if learnedval == rfc1902.Integer32('3'):
                for portTableRow in portsTable:
                    for portname, portval in portTableRow:
                        # Join the two FDB tables on the trailing six OID
                        # sub-identifiers (the MAC-address portion of the OID).
                        if learnedname[-6:] == portname[-6:]:
                            #print portname, portval#', rubbish
                            mac = ("%02x:%02x:%02x:%02x:%02x:%02x" %
                                   (int(learnedname[14]),
                                    int(learnedname[15]),
                                    int(learnedname[16]),
                                    int(learnedname[17]),
                                    int(learnedname[18]),
                                    int(learnedname[19])))
                            mac = mac.replace("0x","").upper()
                            # Map bridge port number -> ifindex -> Port object.
                            port = self.getPort(self.portIfIndexMap[portval])
                            mac_list = port.getMacs()
                            mac_list.append(mac)
                            port.setMacs(mac_list)
def __getSimpleMACTable(self):
    """\brief Gets the full learned mac table from the switch in a simple format.
    \return (\c list) A list of (mac-address-string, port-name-string) tuples.
    """
    portsTable = self.getDot1qTpFdbPort()
    learnedTypeTable = self.getDot1qTpFdbStatus()
    portIfIndexTable = self.getDot1dBasePortIfIndex()
    ifDescrTable = self.getPortNames()
    # get mac table
    result = []
    for learnedTypeTableRow in learnedTypeTable:
        for learnedname, learnedval in learnedTypeTableRow:
            # Status value '3' marks learned FDB entries.
            if learnedval == rfc1902.Integer32('3'):
                # Rewrite the status OID into the matching dot1qTpFdbPort OID
                # so the two tables can be joined by comparing object names.
                learnedname = rfc1902.ObjectName((learnedname.prettyPrint()).replace(OID.dot1qTpFdbStatus.prettyPrint(),OID.dot1qTpFdbPort.prettyPrint()))
                for portTableRow in portsTable:
                    for portname, portval in portTableRow:
                        if learnedname == portname:
                            # OID sub-identifiers 14-19 carry the six MAC octets.
                            mac = ("%02x:%02x:%02x:%02x:%02x:%02x" %
                                   (int(learnedname[14]),
                                    int(learnedname[15]),
                                    int(learnedname[16]),
                                    int(learnedname[17]),
                                    int(learnedname[18]),
                                    int(learnedname[19])))
                            mac = mac.replace("0x","").upper()
                            result.append((mac,portval))
    # Translate ids to names
    for i in range(0,len(result)):
        #log.debug( port.getPort())
        for portIfIndexTableRow in portIfIndexTable:
            if portIfIndexTableRow[0][0][len(portIfIndexTableRow[0][0])-1] == result[i][1]:
                for ifDescrTableRow in ifDescrTable:
                    if ifDescrTableRow[0][0][len(ifDescrTableRow[0][0])-1] == portIfIndexTableRow[0][1]:
                        # Replace the numeric bridge port with its port name.
                        result[i] = (result[i][0],str(ifDescrTableRow[0][1]))
    return result
###########################################################################
# Index and Name Manipulation
###########################################################################
def __vlanExistsForName(self, vlan_name):
    """\brief Returns True if a VLAN with the given name exists on the switch.
    \param vlan_name (\c string) The VLAN name to look for
    \return (\c bool) True if a VLAN with that name exists, False otherwise
    """
    log.debug("Getting vlan id from name "+str(vlan_name))
    vlanNames = self.getVLANNames()
    # Identity comparison with None is the idiomatic (and correct) check.
    if vlan_name is None:
        return False
    # String-compare so rfc1902.OctetString values match plain strings.
    return any(str(name) == str(vlan_name) for name in vlanNames.values())
def __getIfindexForVLANID(self, vlan_id):
    """\brief Returns the dot1qVlanFdbId table ifindex for an 802.1Q VLAN id.
    \param vlan_id (\c int/string) The 802.1Q VLAN id to look up
    \return (\c int) The ifindex, or -1 on SNMP error or if not found
    """
    dot1qVlanFdbIdTable = self.getDot1qVlanFdbId()
    # A failed SNMP walk leaves the error status set; bail out early.
    if self.snmp.getErrorStatus():
        log.debug("Error getting ifindex for vlan id (a)"+str(vlan_id))
        return -1
    for dot1qVlanFdbIdRow in dot1qVlanFdbIdTable:
        #print ">",str(dot1qVlanFdbIdRow[0][1]),str(vlan_id)
        if (int(dot1qVlanFdbIdRow[0][1]) == int(vlan_id)):
            # The ifindex is the last sub-identifier of the row's OID.
            #print " > FOUND ",str(dot1qVlanFdbIdRow[0][0][len(dot1qVlanFdbIdRow[0][0])-1])
            return int(dot1qVlanFdbIdRow[0][0][len(dot1qVlanFdbIdRow[0][0])-1])
    return -1
def __getVlanIfIndexFromName(self, vlan_name):
    """\brief Returns the static-table ifindex for the VLAN named vlan_name.
    \param vlan_name (\c string) The VLAN name to look up
    \return The ifindex (last OID sub-identifier of the matching row), or -1 if not found
    """
    dot1qVlanStaticNameTable = self.getDot1qVlanStaticName()
    for dot1qVlanStaticNameRow in dot1qVlanStaticNameTable:
        #print ">>> ",str(dot1qVlanStaticNameRow[0][1]),str(vlan_name)
        # String-compare so rfc1902.OctetString values match plain strings.
        if (str(dot1qVlanStaticNameRow[0][1]) == str(vlan_name)):
            #print ">>>> found ",str(dot1qVlanStaticNameRow[0][0][len(dot1qVlanStaticNameRow[0][0])-1])
            return dot1qVlanStaticNameRow[0][0][len(dot1qVlanStaticNameRow[0][0])-1]
    return -1
def __getVlanIdFromIfIndex(self,ifindex):
    """\brief Returns the 802.1Q VLAN id for the given FDB table ifindex.
    \param ifindex The ifindex to look up (compared as a string)
    \return (\c int) The VLAN id, or -1 if no row matches
    """
    dot1qVlanFdbIdTable = self.getDot1qVlanFdbId()
    for dot1qVlanFdbIdRow in dot1qVlanFdbIdTable :
        # The row's last OID sub-identifier is the ifindex being matched.
        if (str(dot1qVlanFdbIdRow[0][0][len(dot1qVlanFdbIdRow[0][0])-1]) == str(ifindex)):
            return int(dot1qVlanFdbIdRow[0][1])
    return -1
def __getPortInternalID(self, port):
    """\brief Given a port's name, returns its internal id number. This overrides the definition found
    in the Switch super class
    \param port (\c string) The name of the port
    \return (\c int) The port's internal id number, or -1 if the port is not found
    """
    ifDescrTable = self.getIfDescr()
    portIfIndexTable = self.getDot1dBasePortIfIndex()
    # First resolve the port name to its ifindex...
    for ifDescrTableRow in ifDescrTable:
        if (str(ifDescrTableRow[0][1]) == str(port)):
            ifindex=ifDescrTableRow[0][0][len(ifDescrTableRow[0][0])-1]
            # ...then map the ifindex back to the bridge (internal) port id,
            # which is the last sub-identifier of the matching row's OID.
            for portIfIndexTableRow in portIfIndexTable:
                if (str(ifindex) == str(portIfIndexTableRow[0][1])):
                    return int(portIfIndexTableRow[0][0][len(portIfIndexTableRow[0][0])-1])
    return -1
def __getVLANInternalID(self, vlanName):
    """\brief Retrieves a vlan's internal id given its name. The function returns
    the following codes:
    -1: if the vlan does not exist on the switch
    0: if successful
    \param vlanName (\c string) The name of the vlan
    \return (\c string) The internal id of the vlan if found, negative otherwise

    NOTE(review): the return type is inconsistent -- "1" (a str) for the
    default VLAN, an OID sub-identifier object for other VLANs, and the
    int -1 on a miss.  Callers appear to rely on this, so it is documented
    rather than changed.
    """
    ## Optimisation could be done here to ensure lookups are faster.
    ## Use a dictionary of id to name.
    internalID = None
    # no default vlan in the snmp tables for the linksys
    if (vlanName == "Default VLAN"):
        return "1"
    dot1qVlanStaticNameTable = self.getDot1qVlanStaticName()
    # NOTE(review): no break after a match, so the LAST matching row wins
    # if the same name appears more than once.
    for dot1qVlanStaticNameTableRow in dot1qVlanStaticNameTable:
        if dot1qVlanStaticNameTableRow[0][1] == rfc1902.OctetString(vlanName):
            internalID = dot1qVlanStaticNameTableRow[0][0][len(dot1qVlanStaticNameTableRow[0][0])-1]
    if (internalID == None):
        return -1
    return internalID
###########################################################################
# Manipuate Port list
###########################################################################
def __setPortList(self,pl,internal_port,enable):
    """\brief Sets or clears a single port's bit inside an SNMP PortList octet string.
    \param pl (\c str) The current PortList value (Python 2 byte string; one bit per port, MSB first within each octet)
    \param internal_port The 1-based bridge port number whose bit to change
    \param enable (\c bool) True to set the port's bit, False to clear it
    \return (\c rfc1902.OctetString) The updated PortList
    """
    #log.debug("internal_port "+str(internal_port)+" enabled "+str(enable))
    #log.debug("start "+str(self.printPortMap(pl,enable)) )
    # Unpack the byte string into a mutable array of unsigned bytes.
    # (Python 2 semantics: pl[i] is a 1-character str, hence unpack('B', ...).)
    raw_ports = array('B')
    for i in range(0,len(pl)):
        raw_ports.extend(unpack('B',pl[i]))
    # Walk the bitmap; port numbers increase MSB-first within each octet.
    port_number = 0
    for i in range(0,len(raw_ports)):
        for slot in range(0,8):
            port_number = port_number + 1
            if (port_number == int(internal_port)):
                if enable:
                    #log.debug("setting port "+str(i)+" " +str(slot)+" to on")
                    raw_ports[i] = (raw_ports[i] | Switch.mask[slot])
                else:
                    #log.debug("setting port "+str(i)+" " +str(slot)+" to off")
                    raw_ports[i] = (raw_ports[i] & Switch.inv_mask[slot])
    # Repack the byte array back into a string.
    s = ""
    for i in range(0,len(raw_ports)):
        s = s + pack('B',raw_ports[i])
    #log.debug( "end "+str(self.printPortMap(s,enable)) )
    return rfc1902.OctetString(str(s))
def __parsePortList(self,pl,tagged):
    """\brief Decodes an SNMP PortList octet string into Port objects.

    Each octet of *pl* is a bitmask covering eight consecutive ports
    (MSB = lowest-numbered port).  A Port object is created for every
    set bit; *tagged* marks whether the ports are 802.1Q tagged.
    \param pl (\c str) The PortList value (Python 2 byte string)
    \param tagged (\c bool) Tagged/untagged flag passed to each Port
    \return (\c list) A list of Port objects
    """
    ports = []
    port_number = 0
    for i in range(0,len(pl)):
        # Python 2 semantics: pl[i] is a 1-character str, hence unpack('B', ...).
        octet = (unpack('B',pl[i]))[0]
        if octet != 0:
            # Test each of the eight bits, MSB first (Switch.mask order),
            # replacing the original eight copy-pasted if-statements.
            for slot in range(0,8):
                if (octet & Switch.mask[slot]) == Switch.mask[slot]:
                    ports.append(Port(port_number+slot+1,tagged,port_number+slot+1))
        port_number = port_number + 8
    return ports
def __simpleParsePortList(self,pl):
    """\brief Decodes an SNMP PortList octet string into ifindex values.

    Like __parsePortList, but returns the ifindex (via self.portIfIndexMap)
    for each set bit instead of constructing Port objects.
    \param pl (\c str) The PortList value (Python 2 byte string)
    \return (\c list) A list of ifindex values
    """
    ports = []
    port_number = 0
    for i in range(0,len(pl)):
        # Python 2 semantics: pl[i] is a 1-character str, hence unpack('B', ...).
        octet = (unpack('B',pl[i]))[0]
        if octet != 0:
            # Test each of the eight bits, MSB first (Switch.mask order),
            # replacing the original eight copy-pasted if-statements.
            for slot in range(0,8):
                if (octet & Switch.mask[slot]) == Switch.mask[slot]:
                    ports.append(self.portIfIndexMap[port_number+slot+1])
        port_number = port_number + 8
    return ports
def __slowparsePortList(self,pl,tagged):
    """\brief Slow reference implementation of PortList decoding.

    Unpacks the whole octet string into a byte array, then creates a Port
    object for each set bit.  Kept alongside the faster __parsePortList.
    \param pl (\c str) The PortList value (Python 2 byte string)
    \param tagged (\c bool) Tagged/untagged flag passed to each Port
    \return (\c list) A list of Port objects
    """
    ports = []
    raw_ports = array('B')
    for i in range(0,len(pl)):
        # NOTE(review): resetting ports here is redundant -- it is rebuilt
        # from scratch by the loop over raw_ports below anyway.
        ports = []
        raw_ports.extend(unpack('B',pl[i]))
    # NOTE(review): this local mask list is unused; Switch.mask is used below.
    mask = [128,64,32,16,8,4,2,1,0]
    port_number = 0
    for port in raw_ports:
        if port != 0:
            # Test each bit MSB-first; port numbering advances per slot.
            for slot in range(0,8):
                port_number = port_number + 1
                if (port & Switch.mask[slot] == Switch.mask[slot]):
                    ports.append(Port(port_number,tagged,port_number))
        else:
            # Whole octet empty: skip its eight port numbers at once.
            port_number = port_number + 8
    return ports
###########################################################################
# Monitoring
###########################################################################
def getPortsPacketCount(self, ports):
    """\brief Returns the packet counts for the given ports
    \param ports (\c list of string) The names of the ports
    \return (\c list of tuples of string, int) A list of tuples, each consisting of the port's name and its
    its packet count in bytes
    """
    ifDescrTable = self.getIfDescr()
    # Walk the interface in-octet counters (ifInOctets) for all interfaces.
    ifPortCountTable = self.snmp.walk(OID.ifInOctects)
    results = []
    for port in ports:
        for ifDescrTableRow in ifDescrTable:
            if (ifDescrTableRow[0][1] == port.getPortNumber()):
                for ifPortCountTableRow in ifPortCountTable:
                    # Join the two tables on OID sub-identifier position 10
                    # -- assumes the ifindex sits there for both walks;
                    # TODO(review): confirm for this MIB layout.
                    if (ifDescrTableRow[0][0][10] == ifPortCountTableRow[0][0][10]):
                        results.append((port.getPortNumber(), int(ifPortCountTableRow[0][1])))
    return results
def getPortsPacketCountByInternalID(self, portInternalIDs):
    """\brief Returns unicast packet counters for the given internal port ids.
    \param portInternalIDs (\c list of int) The ports' internal id numbers
    \return (\c list of tuples) (id, in-unicast-packets, out-unicast-packets) per port
    """
    counters = []
    for ifindex in portInternalIDs:
        # One SNMP get per direction, indexed by the port's internal id.
        rx = int(self.snmp.get(OID.ifInUcastPkts + (ifindex,))[0][1])
        tx = int(self.snmp.get(OID.ifOutUcastPkts + (ifindex,))[0][1])
        counters.append((ifindex, rx, tx))
    return counters
def getPortsSpeedByInternalID(self, portInternalIDs):
    """\brief Returns the ifSpeed value for each given internal port id.
    \param portInternalIDs (\c list of int) The ports' internal id numbers
    \return (\c list of tuples of int, int) (id, speed) pairs
    """
    # Query ifSpeed per port and pair each reading with the id it belongs to.
    return [
        (ifindex, int(self.snmp.get(OID.ifSpeed + (ifindex,))[0][1]))
        for ifindex in portInternalIDs
    ]
def getPortStatus(self, portNumber):
    """\brief Gets the operational status of a port: up (1), down (2)
    \param portNumber (\c string) The port whose status is to be retrieved
    \return (\c string) The port status, or "unknown" if the port was not found
    """
    ifDescrTable = self.getIfDescr()
    for ifDescrTableRow in ifDescrTable:
        if (str(ifDescrTableRow[0][1]) == str(portNumber)):
            # The row's last OID sub-identifier is the interface's ifindex.
            ifindex=ifDescrTableRow[0][0][len(ifDescrTableRow[0][0])-1]
            return self.snmp.get(OID.ifOperStatus+(ifindex,))[0][1]
    return "unknown"
def getPortTdr(self, port_name):
    """\brief Runs a TDR (cable) test on the given port via telnet.

    Logs in, starts `tdr-cable-test`, waits for the test to complete,
    then reconnects and parses `show tdr` output.
    \param port_name (\c string) The port to test
    \return (\c tuple) (True, report) when all four pairs read OK/Terminated,
            (False, report) otherwise
    """
    TIMEOUT = 2

    def _login():
        # Open a telnet session to the switch and authenticate with the
        # default credentials (previously duplicated inline, twice).
        tn = telnetlib.Telnet(self.getIPAddress())
        user = "admin"
        password = "admin"
        #tn.set_debuglevel(1)
        tn.read_until("Login: ")
        tn.write(user + "\n")
        if password:
            tn.read_until("Password: ",TIMEOUT)
            tn.write(password + "\n")
        return tn

    # Kick off the TDR test, then give the switch time to run it.
    tn = _login()
    tn.write("tdr-cable-test "+str(port_name)+"\n")
    tn.read_until("Force10#",TIMEOUT)
    tn.write("exit\n")
    time.sleep(10)

    # Reconnect and collect the results.
    tn = _login()
    tn.write("show tdr "+str(port_name)+"\n\n")
    (code,obj,string_return) = tn.expect(["Error","Time"],TIMEOUT)
    output = tn.read_until("Force10#",TIMEOUT)
    tn.write("exit\n")

    s = "TDR result :"
    if code != 1:
        s = "\nError running TDR test"
    if code == 1:
        # Keep only the per-pair result lines from the command output.
        for i in output.split('\n'):
            if i.find("Pair") != -1:
                s = s + "\n" + i.lstrip()
    # 4 OK's and 4 Terminated means success
    if s.count("OK") == 4 and s.count("Terminated") == 4:
        return (True,s)
    return (False,s)
###########################################################################
# Other
##############y#############################################################
def getPortInternalID(self, port):
    """\brief Public wrapper around the private name-to-internal-id lookup.
    \param port (\c string) The name of the port
    \return (\c int) The port's internal id number, or -1 if not found
    """
    return self.__getPortInternalID(port)
class Force10E1200Switch(Force10Switch):
    """\brief Sub-subclass used to support Force10 E1200 switches. This class contains information
    specific to this model of switch (number of ports, etc)
    """
    functions = ["switch"]
    # Critical thresholds per sensor name; every temperature sensor alarms
    # at 75.0 (units match the SNMP readings returned by getSensorReadings).
    SENSOR_DESCRIPTIONS = {'temperature':{ \
        'fantray0':75.000, \
        'fantray1':75.000, \
        'card0':75.000, \
        'card1':75.000, \
        'card2':75.000, \
        'card3':75.000, \
        'card4':75.000, \
        'card5':75.000, \
        'card6':75.000, \
        'card7':75.000, \
        'card8':75.000, \
        'card9':75.000, \
        'card10':75.000, \
        'card11':75.000, \
        'card12':75.000, \
        'card13':75.000, \
        'card14':75.000 \
        }}
    def __init__(self, switchNode):
        """\brief Initializes the class
        \param switchNode (\c SwitchNode) The SwitchNode object to obtain information from to initialize the class with
        """
        Force10Switch.__init__(self, switchNode, 2, 4094, 10000, 100000)
        # Build bridge-port <-> ifindex lookup maps once, at construction
        # time, so later PortList decoding does not need repeated SNMP walks.
        portIfIndexTable = self.getDot1dBasePortIfIndex()
        self.portIfIndexMap = {}
        self.portIfIndexMapRev = {}
        for portIfIndexTableRow in portIfIndexTable:
            self.portIfIndexMap[portIfIndexTableRow[0][0][-1]] = portIfIndexTableRow[0][1]
            self.portIfIndexMapRev[portIfIndexTableRow[0][1]] = portIfIndexTableRow[0][0][-1]
    def getSensorDescriptions(self):
        """\brief Returns the dictionary of sensorname:critical-value pairs.
        """
        return self.SENSOR_DESCRIPTIONS
    def getSensorReadings(self):
        """\brief Returns a dictionary of the form:
        {sensorclass:{sensorname:reading}}.
        The reading will either be a numeric value (no units of measurements are
        given in the value) or -1 for sensors that could not be read.
        """
        sensorResults = self.getEmptySensorDictionary()
        temperatureTable = self.snmp.walk(OID.force10chSysCardUpperTemp)
        if not self.snmp.getErrorStatus():
            for temperatureTableRow in temperatureTable:
                # Physical slot number is the last OID sub-identifier.
                phySlotNum = temperatureTableRow[0][0][\
                    len(temperatureTableRow[0][0])-1]
                phySlotTemp = temperatureTableRow[0][1]
                sensorName = ""
                # Map physical slots onto sensor names: slots 8 and 9 are the
                # fan trays; line cards occupy the slots below and above them.
                if (phySlotNum == 8):
                    sensorName = "fantray0"
                elif (phySlotNum == 9):
                    sensorName = "fantray1"
                elif (phySlotNum < 8):
                    sensorName = "card"+str(phySlotNum -1)
                elif (phySlotNum > 9):
                    sensorName = "card"+str(phySlotNum -3)
                sensorClass = self.getSensorClassFromName( \
                    self.SENSOR_DESCRIPTIONS, sensorName)
                if sensorClass:
                    (sensorResults[sensorClass])[sensorName] = int(phySlotTemp)
        return sensorResults
    def getSerialNumber(self):
        """\brief Returns the chassis serial number, or "unknown" on SNMP error."""
        serial = self.snmp.get(OID.force10chSerialNumber)[0][1]
        if self.snmp.getErrorStatus():
            log.debug( "error getting serial number" )
            return "unknown"
        return serial
    def getNumberofPorts(self):
        # NOTE(review): unconditionally raises, making the return below
        # unreachable -- looks like a deliberate placeholder until the
        # port count is actually computed from the chassis inventory.
        raise Exception("Should calculate this")
        return 480
| [
"ben@darkworks.net"
] | ben@darkworks.net |
5a20a19b367e40fc5885c801478fb301d77671ae | 86e6975cf041756b613da2521026862825ddd6c8 | /hyperglass/log.py | 8791a2e44eb87a92c44790d308e74c1b2d832132 | [
"BSD-3-Clause-Clear"
] | permissive | GeeZeeS/hyperglass | e2f6a7c92421c353574f0d238a3cba4be81596cd | 90c179f46ecc58562dbcd9ec6d761075a8699f79 | refs/heads/main | 2023-07-14T10:48:24.273421 | 2021-08-20T11:18:44 | 2021-08-20T11:18:44 | 397,539,831 | 0 | 0 | BSD-3-Clause-Clear | 2021-08-18T09:11:57 | 2021-08-18T09:11:57 | null | UTF-8 | Python | false | false | 3,536 | py | """Logging instance setup & configuration."""
# Standard Library
import os
import sys
import logging
from datetime import datetime
# Third Party
from loguru import logger as _loguru_logger
# Loguru sink format: level badge, date/time, source location
# ({name}:{line} {function}), then the message (loguru markup tags).
_FMT = (
    "<lvl><b>[{level}]</b> {time:YYYYMMDD} {time:HH:mm:ss} <lw>|</lw> {name}<lw>:</lw>"
    "<b>{line}</b> <lw>|</lw> {function}</lvl> <lvl><b>→</b></lvl> {message}"
)
# strftime-style date format; not referenced by the sinks configured
# in this module.
_DATE_FMT = "%Y%m%d %H:%M:%S"
# Bare message format, used for the syslog sink (metadata is added by
# the syslog daemon itself).
_FMT_BASIC = "{message}"
# Color markup per level, passed to loguru's configure(levels=...).
_LOG_LEVELS = [
    {"name": "TRACE", "color": "<m>"},
    {"name": "DEBUG", "color": "<c>"},
    {"name": "INFO", "color": "<le>"},
    {"name": "SUCCESS", "color": "<g>"},
    {"name": "WARNING", "color": "<y>"},
    {"name": "ERROR", "color": "<y>"},
    {"name": "CRITICAL", "color": "<r>"},
]
def setup_lib_logging() -> None:
    """Override the logging handlers for dependency libraries."""
    # NOTE(review): loguru's bind() returns a *new* bound logger and the
    # result is discarded here, so this loop appears to have no lasting
    # effect -- confirm whether attaching an intercept handler to each
    # stdlib logger was intended instead.
    for name in (
        "gunicorn",
        "gunicorn.access",
        "gunicorn.error",
        "uvicorn",
        "uvicorn.access",
        "uvicorn.error",
        "uvicorn.asgi",
        "netmiko",
        "scrapli",
        "httpx",
    ):
        _loguru_logger.bind(logger_name=name)
def base_logger(level: str = "INFO"):
    """Initialize hyperglass logging instance.

    Removes any previously-registered sinks, attaches a stdout sink
    using the module-level format at *level*, registers the custom
    level colors, and returns the shared loguru logger.
    """
    _loguru_logger.remove()
    _loguru_logger.add(sys.stdout, format=_FMT, level=level, enqueue=True)
    _loguru_logger.configure(levels=_LOG_LEVELS)
    return _loguru_logger
# Module-level logger instance shared by the rest of the package.
log = base_logger()
# Register the custom SUCCESS level (25) with the stdlib logging module.
logging.addLevelName(25, "SUCCESS")
def _log_success(self, message, *a, **kw):
    """Add custom builtin logging handler for the success level."""
    # Mirror stdlib Logger conventions: emit only when level 25 is enabled.
    if not self.isEnabledFor(25):
        return
    self._log(25, message, a, **kw)
# Expose the SUCCESS-level helper as Logger.success on all stdlib loggers.
logging.Logger.success = _log_success
def set_log_level(logger, debug):
    """Set log level based on debug state.

    When *debug* is truthy: records the state in the environment
    (HYPERGLASS_LOG_LEVEL), rebuilds the global logger at DEBUG
    verbosity, and emits a confirmation message.  Always returns True.
    """
    # The original checked `if debug:` twice in a row; merged into one block.
    if debug:
        os.environ["HYPERGLASS_LOG_LEVEL"] = "DEBUG"
        base_logger("DEBUG")
        logger.debug("Debugging enabled")
    return True
def enable_file_logging(logger, log_directory, log_format, log_max_size):
    """Set up file-based logging from configuration parameters.

    logger: loguru-style logger (must support .add() / .debug()).
    log_directory: directory for the log file -- assumed to be a
        pathlib.Path (uses the `/` operator and .open()); TODO confirm.
    log_format: "json" for structured (serialized) output, otherwise
        plain text with a session banner.
    log_max_size: passed to loguru as the rotation threshold.
    Returns True.
    """
    # JSON output uses loguru's serialize mode and a distinct filename.
    if log_format == "json":
        log_file_name = "hyperglass.log.json"
        structured = True
    else:
        log_file_name = "hyperglass.log"
        structured = False
    log_file = log_directory / log_file_name
    if log_format == "text":
        # Append a banner marking the start of this logging session, e.g.:
        # ############################
        # #                          #
        # # hyperglass logs for ...  #
        # #                          #
        # ############################
        now_str = "hyperglass logs for " + datetime.utcnow().strftime(
            "%B %d, %Y beginning at %H:%M:%S UTC"
        )
        now_str_y = len(now_str) + 6
        now_str_x = len(now_str) + 4
        log_break = (
            "#" * now_str_y,
            "\n#" + " " * now_str_x + "#\n",
            "# ",
            now_str,
            " #",
            "\n#" + " " * now_str_x + "#\n",
            "#" * now_str_y,
        )
        with log_file.open("a+") as lf:
            lf.write(f'\n\n{"".join(log_break)}\n\n')
    logger.add(
        log_file,
        format=_FMT,
        rotation=log_max_size,
        serialize=structured,
        enqueue=True,
    )
    logger.debug("Logging to {} enabled", str(log_file))
    return True
def enable_syslog_logging(logger, syslog_host, syslog_port):
    """Set up syslog logging from configuration parameters."""
    # Standard Library
    from logging.handlers import SysLogHandler
    # Forward bare messages to the syslog daemon; it adds its own metadata.
    handler = SysLogHandler(address=(str(syslog_host), syslog_port))
    logger.add(handler, format=_FMT_BASIC, enqueue=True)
    logger.debug(
        "Logging to syslog target {}:{} enabled", str(syslog_host), str(syslog_port),
    )
    return True
| [
"matt@allroads.io"
] | matt@allroads.io |
955e9db5dcd9fd12689827e63be265a21501d1e9 | 614c4ee8f256dcc0d2e4b489fe9d89e816692695 | /L_32_processing_each_image.py | dda98de7d4a54bf95b7d102c9b8d89be0aa4897c | [] | no_license | SherylHohman/CarND-Advanced-Lane_Lines-Exercises | 861bbbb52eea76427759a5766212bf86d2182f8a | 274e9396cecc3f8878227419d35d4ac3471bf6af | refs/heads/master | 2021-06-19T07:22:27.828587 | 2017-06-11T21:04:01 | 2017-06-11T21:04:01 | 92,852,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,703 | py |
# coding: utf-8
# # L_32 Processing Each Image
# #### Steps 1 - 3:
# In[1]:
get_ipython().magic('matplotlib inline')
# In[2]:
# Some Helper Functions
def create_folder_if_not_exists(folder_name):
    """Create *folder_name* (and any missing parents) if it does not exist."""
    import os
    # exist_ok avoids the check-then-create race of the original
    # isdir() + makedirs() sequence, and is a no-op if the dir exists.
    os.makedirs(folder_name, exist_ok=True)
# ## 1 - Camera Calibration matrix and Distortion Coefficients
# ###### (See L_11 for more details)
# Determine camera calibration matrix and distortion coefficients for the camera lens.
# Done Once.
# This will be used to undistort each video image frame, before passing it on.
# ### Part 1-1: objpoints & imgpoints
# - Library Imports
#
#
# - Prepare arrays to store imgpoints (for img), and gridpattern (objpoints) (of chessboard grid pattern)
# - Use **mgrid** to generate numpy array representing **objpoints**, each corner of the chessboard
# .
#
# - Read in Image,
# - Turn in into grayscale
# .
#
# - Use **findChessBoardCorners** to obtain **imgpoints** (measurements) for the (objpoint locations) in the image
# - (takes in grayscale image)
# - (returns imgpoints)
# - Store imgpoints and corresponding objpoints
# #### Pickle the objpoints and imgpoints for each image
# In[3]:
# Here are the steps to pickle the objpoints and imgpoints for each image:
# detect chessboard corners in every calibration photo, accumulate the
# (objpoints, imgpoints) pairs, and pickle them for later calibration.
import numpy as np
import cv2
import glob
import pickle
import matplotlib.pyplot as plt

# Chessboard has 8 inner corners across
#                6 inner corners vertical
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*8,3), np.float32)
objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1,2)

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.

# Make a list of calibration images
images = glob.glob('calibration_wide/GO*.jpg')

# Get and Store chessboard corners for each image as it's Read in
for idx, fname in enumerate(images):
    # read in image
    img = cv2.imread(fname)
    # turn it into grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners for the image
    ret, corners = cv2.findChessboardCorners(gray, (8,6), None)
    # If (corners) found, add object points, image points to array of all obj ang img points
    if ret == True:
        # same set of (grid) values for each image -- it's the chessboard pattern
        objpoints.append(objp)
        # unique set of values for each image processed (..from findChessboardCorners)
        imgpoints.append(corners)
        # Draw and display the corners (not required)
        cv2.drawChessboardCorners(img, (8,6), corners, ret)
        cv2.imshow('img', img)
        # cv2.waitKey(30)
cv2.destroyAllWindows()

# save objpoints and imgpoints to pickle file
objpoints_pickle = {}
objpoints_pickle["objpoints"] = objpoints
objpoints_pickle["imgpoints"] = imgpoints
folder_name = "l32/"
file_name = "wide_objpoints_pickle.p"
create_folder_if_not_exists(folder_name)
with open( folder_name+file_name, "wb" ) as f:
    pickle.dump( objpoints_pickle, f )
print("Imgpoints and Objpoints have been saved to", folder_name+file_name)
# #### Read objpoints and imgpoints from pickle file
# In[4]:
# import pickle

# Read objpoints and imgpoints from pickle file
pickled_path = "l32/wide_objpoints_pickle.p"
# NOTE(review): the file handle from open() is never closed here;
# a with-block would be safer.
objpoints_pickle = pickle.load( open( pickled_path, "rb" ) )
objpoints = objpoints_pickle["objpoints"]
imgpoints = objpoints_pickle["imgpoints"]
print("Imgpoints and Objpoints have been read in from " + pickled_path)
# #### Part 1-2: Callibrate and Undistort Images
# Calibrate the image (from objpoints and imgpoints - found from findChessboardCorners)
# - **cv2.calibrateCamera**
#
# Undistort the image
# - **cv2.undistort**
#
# In[5]:
import cv2
import pickle
# takes in: an image, chessboard grid object points, image's image points (as meassured by findChessboardCorners)
# - performs the camera calibration (mathematically correlate imgpoints to objpoints),
# - undistorts the image (image distortion correction)
# returns: the undistorted image
def cal_undistort(img, objpoints, imgpoints):
    """Calibrate the camera from (objpoints, imgpoints) and undistort *img*.

    Side effect: pickles the camera matrix (mtx) and distortion
    coefficients (dist) to l32/wide_dist_mtx_pickle.p.

    Returns the undistorted image.
    """
    # cv2.calibrateCamera expects imageSize as (width, height).
    # BUG FIX: img.shape[0:2] is (height, width) for a numpy image,
    # so reverse the first two dims instead.
    shape = img.shape[1::-1]
    # Calibrate imgpoints to the objpoints
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, shape, None, None)

    # save mtx and dist to pickle file
    dist_mtx_pickle = {}
    dist_mtx_pickle["dist"] = dist
    dist_mtx_pickle["mtx"] = mtx
    folder_name = "l32/"
    file_name = "wide_dist_mtx_pickle.p"
    create_folder_if_not_exists(folder_name)
    with open( folder_name+file_name, "wb" ) as f:
        pickle.dump( dist_mtx_pickle, f )
    print("\n calibration 'dist' and 'mtx' have been saved to", folder_name+file_name, "\n")

    # Undistort the image
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    return undist
# #### Part 1-3: Display Images: Before (distorted) and After (Undistorted)
#
# Read an image (whose imgpoints are stored in imgpoints)
# Calibrate the image (uses imgpoints and objpoints prev found for that image)
# Undistort the image
#
# .
# Display Distorted and Undistorted image
#
# .
# This *could* be re-written as a loop, to see before and after for all images that have been processed
#
# .
# Currently, it displays a before and after only for a single image (==first image that was stored in imgpoints array)
#
# In[6]:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
get_ipython().magic('matplotlib inline')
# Could loop through all images to show before and after.
# Here, we only display before and after for a single image..

# Read in an Example image == First Image in Pickled objpoints, imgpoints
# NOTE(review): cv2.imread returns BGR while matplotlib's imshow expects
# RGB, so the displayed colors will look channel-swapped -- confirm.
img = cv2.imread('calibration_wide/test_image.jpg')
# calibrate and undistort the image
undistorted = cal_undistort(img, objpoints, imgpoints)
# display an Example undistorted image
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Before
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
# After
ax2.imshow(undistorted)
ax2.set_title('Undistorted Image', fontsize=50)
# Show the images
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# In[ ]:
# ## 2 - Thresholding
# ###### (See L_21, L_22, L_24, L_29 for more details)
# In[7]:
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def plot_results(orig_image, bw_result, color_result):
    """Plot the color threshold result, the B&W union mask, and the
    original image in a 3-row column.

    NOTE(review): the second subplot's title reads s_thresh and sx_thresh
    from module-level globals rather than parameters -- it shows whatever
    those globals hold at call time.
    """
    # how many images across and how many image rows
    nrows = 3 #1
    ncols = 1 #2
    # figsize must be recalculated when change nrows and ncols
    figsize = (12, 18) #1x2: (24,9)
    # fontsize is dependent on size of subplot
    fontsize = 10 #40
    # f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(24, 9))
    f, (ax1, ax2, ax3) = plt.subplots(nrows, ncols, figsize=figsize)
    f.tight_layout()

    ax1.imshow(color_result)
    ax1.set_title('BLUE: S-channel | GREEN: Sobel x-dir | CYAN: both', fontsize=fontsize)
    ax1.axis('off')

    ax2.imshow(bw_result, cmap='gray')
    # Title reports the threshold settings (module globals -- see note above).
    ax2.set_title('S_thresh:'+str(s_thresh)+' | sobelx_thresh: '+str(sx_thresh), fontsize=fontsize)
    ax2.axis('off')

    ax3.imshow(orig_image)
    ax3.set_title('Original Image', fontsize=fontsize)
    ax3.axis('off')

    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
def save_results(bw_result, color_result):
    """Save the B&W and color threshold results as PNGs whose filenames
    encode the threshold settings.

    NOTE(review): reads s_thresh and sx_thresh from module-level globals
    rather than parameters.
    """
    # NOTE(review): this import appears unused -- directory creation is
    # delegated to create_folder_if_not_exists below.
    import os
    # Save Images to File so can compare results of various threshold values
    settings = '_' + 's-thresh-' +str(s_thresh[0]) +'-'+str(s_thresh[1]) + '_' + 'sobel-x-thresh-'+str(sx_thresh[0])+'-'+str(sx_thresh[1])
    folder_name = './l32-my-outputs-from-color_and_gradient/'
    create_folder_if_not_exists(folder_name)
    bw_binary_fullpath = folder_name+'l32_bw_binary' +settings+'.png'
    color_binary_fullpath = folder_name+'l32_color_binary'+settings+'.png'
    # convert BW to 3 channels for saving
    # NOTE(review): the zeros_like result is immediately overwritten by the
    # dstack on the next line; the first assignment is dead.
    bw_3channel = np.zeros_like(color_result)
    bw_3channel = np.dstack((bw_result, bw_result, bw_result))
    # COULD DO: only resave files if they don't already exist!
    # save files (use mpimg, since color_result is in RGB order)
    mpimg.imsave(bw_binary_fullpath, bw_3channel)
    mpimg.imsave(color_binary_fullpath, color_result)
    print('Binary Thresholds images saved as: \n' + bw_binary_fullpath + '\n' + color_binary_fullpath)
def pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    """Threshold an RGB road image by S-channel value and x-gradient.

    Returns (color_binary, bw_binary): a 3-channel visualization
    (green = gradient hits, blue = saturation hits) and a single-channel
    union mask of the two.
    """
    # Work in HLS: lightness drives the gradient, saturation the color mask.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]

    # Horizontal Sobel on lightness; absolute value accentuates lines away
    # from horizontal, then rescale to the 0-255 range.
    grad_x = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)
    abs_grad = np.absolute(grad_x)
    scaled = np.uint8(255 * abs_grad / np.max(abs_grad))

    # Binary mask of pixels whose scaled gradient falls inside sx_thresh.
    gradient_mask = np.zeros_like(scaled)
    gradient_mask[(scaled >= sx_thresh[0]) & (scaled <= sx_thresh[1])] = 1

    # Binary mask of pixels whose saturation falls inside s_thresh.
    color_mask = np.zeros_like(s_channel)
    color_mask[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

    # Stack into an RGB visualization; the red channel stays all-black.
    blank = np.zeros_like(color_mask)
    color_binary = np.dstack((blank, gradient_mask, color_mask))

    # Single-channel union of the two masks.
    bw_binary = np.zeros_like(color_mask)
    bw_binary[(gradient_mask == 1) | (color_mask == 1)] = 1

    return color_binary, bw_binary
# image = mpimg.imread('bridge_shadow.jpg')
image = mpimg.imread('l32-color-shadow-example.jpg')

# SET THRESHOLD VALUES HERE
s_thresh = (180, 255) #Blue #(140, 255) # (90, 255) were good values from L_29
sx_thresh = ( 50, 150) #Green

# calculate binary images, based on threshold values above
# BUG FIX: sx_thresh was previously not passed, so pipeline silently used
# its default (20, 100) even though the plot titles reported sx_thresh.
color_result, bw_result = pipeline(image, s_thresh, sx_thresh)

# Plot the results
plot_results(image, bw_result, color_result)

# Save Images to File so can compare results of various threshold values
save_results(bw_result, color_result)
# In[ ]:
# ## 3 - Perspective Transform
# ###### (See L_17 for more details)
# In[8]:
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in the saved camera matrix and distortion coefficients.
# These are the arrays calculated above from cv2.calibrateCamera().
# NOTE(review): the open() handle is never closed (a with-block would be
# cleaner), and pickle.load must only ever see trusted files.
dist_pickle = pickle.load( open( "l32/wide_dist_mtx_pickle.p", "rb" ) )
dist = dist_pickle["dist"]  # distortion coefficients
mtx = dist_pickle["mtx"]    # camera intrinsic matrix
# Read in an image (cv2.imread returns BGR channel order)
img = cv2.imread('test_image2.png')
# store orig image's pixel dimensions (for use in warpPerspective)
height = img.shape[0]
width = img.shape[1]
image_size = (width, height)
# Chessboard grid "dimensions"
nx = 8 # the number of inside corners in x
ny = 6 # the number of inside corners in y
def corners_unwarp(img, nx, ny, mtx, dist):
    """Undistort a chessboard image and warp it to a top-down view.

    Args:
        img: BGR chessboard image (as returned by cv2.imread).
        nx: number of inside corners in x.
        ny: number of inside corners in y.
        mtx: camera intrinsic matrix from cv2.calibrateCamera.
        dist: distortion coefficients from cv2.calibrateCamera.

    Returns:
        (warped, M, annotated):
            warped: top-down warp of the corner-annotated image; on corner
                detection failure, the grayscale undistorted image (stands
                out visually as an error indicator).
            M: perspective transform matrix, or 0 on failure.
            annotated: undistorted image with corners drawn, or the plain
                undistorted image on failure.
    """
    # 1) Undistort using mtx and dist
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    # 2) Convert to grayscale
    gray_undistorted = cv2.cvtColor(undistorted, cv2.COLOR_BGR2GRAY)
    # 3) Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray_undistorted, (nx, ny), None)
    # 4) If corners found:
    if ret == True:
        # a) Draw corners on the undistorted image.
        undistorted_with_corners = cv2.drawChessboardCorners(undistorted, (nx, ny), corners, ret)

        # b) Define 4 source points: the four outermost automatically
        #    detected corners, ordered TL, TR, BL, BR.
        print(corners.shape)
        print('4 coordinates chosen from undistorted image:')
        src = np.float32([corners[nx*(0)][0], corners[(nx*1) - 1][0],
                          corners[nx*(ny-1)][0], corners[(nx*ny) - 1][0]])
        print(src, '\n')

        # c) Define 4 destination points on a regular grid of the output.
        width, height = (undistorted.shape[1], undistorted.shape[0])
        print(width, height)
        grid_width_x = width / (nx + 1)   # nx corners ==> nx+1 equally spaced squares
        grid_height_y = height / (ny + 1)
        print(grid_width_x, grid_height_y, 'grid width, height in undistorted, transformed image')
        margin = 0  # px
        dst = np.float32([[0 + margin + grid_width_x, 0 + margin + grid_height_y],
                          [width - margin - grid_width_x, 0 + margin + grid_height_y],
                          [0 + margin + grid_width_x, height - margin - grid_height_y],
                          [width - margin - grid_width_x, height - margin - grid_height_y]])
        print('4 corresponding coordinates on unWarped image:')
        print(dst, '\n')

        # d) Use cv2.getPerspectiveTransform() to get M, the transform matrix.
        M = cv2.getPerspectiveTransform(src, dst)

        # e) Use cv2.warpPerspective() to warp the image to a top-down view.
        image_size = (undistorted.shape[1], undistorted.shape[0])
        print(image_size)
        warped = cv2.warpPerspective(undistorted_with_corners, M, image_size)  # default: flags=cv2.INTER_LINEAR
    else:
        print("corners not found, could not perform warpPerspective on undistorted img")
        print(ret, corners)
        print(nx, ny, undistorted.shape)
        # Send back the grayscale undistorted image; its odd colors stand out.
        warped = gray_undistorted
        M = 0
        # BUG FIX: undistorted_with_corners was unbound on this path, so the
        # return below raised NameError whenever corner detection failed.
        undistorted_with_corners = undistorted

    return warped, M, undistorted_with_corners
# Get Unwarped Image
top_down, perspective_M, undist = corners_unwarp(img, nx, ny, mtx, dist)
# Plot Images:
# NOTE(review): img came from cv2.imread (BGR) but plt.imshow expects RGB,
# so the "Original" panel will show swapped red/blue channels.
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
f.tight_layout()
# Original
ax1.imshow(img)
ax1.set_title('Original', fontsize=50)
# Undistorted
ax2.imshow(undist)
ax2.set_title('Undistort', fontsize=50)
# Perspective Warp Correction on Undistorted
ax3.imshow(top_down)
ax3.set_title('Perspective Warp', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# In[ ]:
# ## L_32 Notes and Instructions
# ### 2 - Thresholding Notes
# #### Thresholded Image Should look something like :
#
# <img src= "l32-binary-combo-img-example-result.jpg" width=700 />
#
# In[ ]:
# ### 3 - Perspective Transform Notes
#
# Next, you want to identify four source points for your perspective transform.
# In this case, you **can assume the road is a flat plane**.
# This isn't strictly true, but it can serve as an approximation for this project.
# You would like to pick four points in a trapezoidal shape (similar to region masking)
# that would represent a rectangle when looking down on the road from above.
#
# The easiest way to do this is to:
# - investigate an image where the **lane lines are straight**, and
# - find **four points** lying along the lines that, after perspective transform, would make
#   the lines look **straight and vertical** from a bird's eye view perspective.
#
# Here's an example of the result you are going for with straight lane lines:
#
# <img src="l32-warped-straight-lines.jpg" />
# In[ ]:
| [
"SherylHohman@users.noreply.github.com"
] | SherylHohman@users.noreply.github.com |
2bf74226a17840f642af1c200eaf75b60fee3328 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_cuckold.py | 02ae87e466b124319f1f538d3d0c656319e5c7f3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py |
#calss header
class _CUCKOLD():
def __init__(self,):
self.name = "CUCKOLD"
self.definitions = [u'a man whose wife deceives him by having a sexual relationship with another man']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7caed470f07428ee6ecc9f422d867e1662fe2a59 | d58a90ea4c2ff3af32eeea40db95b69d8b0955de | /extensions.py | 0c18fbab78fc76e8feb94462569b16d6c6742182 | [] | no_license | marsalan06/flask_restful_api_tutorial | 626bb2434201e292bbc5b9c9dcc7a3be6377a797 | 4106ec5d0f4bfb19717fe162e486a2236529107c | refs/heads/main | 2023-04-05T17:06:04.951563 | 2021-04-21T23:52:08 | 2021-04-21T23:52:08 | 360,338,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | from db_config import VideoModel | [
"arsalan.9798@gmail.com"
] | arsalan.9798@gmail.com |
470a2ba41670de0bcaf3f71ad21f29748ec55cfe | 29736bc4633244f845b2e9defbadbdf5a3dd4309 | /object_detection/utils/label_map_util.py | 8bc87ab062ae2f5b121e9918ff67ee08cb4828b1 | [] | no_license | Abhishek98765/Emergency-Vehicle-Detection-using-Image-Processing | 045df875bfdf1883e79953bbde35de18967acafd | 748af81176ed4f762f337f5e4ea94e2306c299df | refs/heads/master | 2022-12-14T11:09:34.187442 | 2020-09-06T17:09:57 | 2020-09-06T17:09:57 | 293,274,871 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,701 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from six import string_types
from six.moves import range
import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2
def _validate_label_map(label_map):
"""Checks if a label map is valid"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
  """Build a COCO-compatible category index keyed by category id.

  Args:
    categories: list of dicts, each with at least an 'id' key.

  Returns:
    dict mapping each category's id to the category dict itself.
  """
  return {category['id']: category for category in categories}
def get_max_label_map_index(label_map):
  """Return the largest item id present in the label map."""
  return max(entry.id for entry in label_map.item)
def convert_label_map_to_categories(label_map,
                                    max_num_classes,
                                    use_display_name=True):
  """Given label map proto returns categories list compatible with eval.

  Args:
    label_map: a StringIntLabelMapProto, or a falsy value to synthesize
      generic category names.
    max_num_classes: items whose id is outside (0, max_num_classes] are
      dropped (and logged).
    use_display_name: prefer an item's `display_name` over `name` when set.

  Returns:
    list of {'id': ..., 'name': ...} dicts, de-duplicated by id in
    first-seen order.
  """
  label_id_offset = 1
  if not label_map:
    # No label map supplied: make up 1-based placeholder names.
    return [{
        'id': class_id + label_id_offset,
        'name': 'category_{}'.format(class_id + label_id_offset)
    } for class_id in range(max_num_classes)]

  categories = []
  seen_ids = set()
  for item in label_map.item:
    if not 0 < item.id <= max_num_classes:
      logging.info(
          'Ignore item %d since it falls outside of requested '
          'label range.', item.id)
      continue
    if use_display_name and item.HasField('display_name'):
      category_name = item.display_name
    else:
      category_name = item.name
    if item.id in seen_ids:
      continue
    seen_ids.add(item.id)
    categories.append({'id': item.id, 'name': category_name})
  return categories
def load_labelmap(path):
  """Loads label map proto.

  Args:
    path: path to a StringIntLabelMap file (pbtxt or binary).

  Returns:
    the parsed StringIntLabelMap proto, validated by _validate_label_map.
  """
  # tf.io.gfile replaces the older tf.gfile API used in earlier versions.
  with tf.io.gfile.GFile(path, "r") as reader:
    text = reader.read()
    label_map = string_int_label_map_pb2.StringIntLabelMap()
    try:
      # First try the human-readable text (pbtxt) format ...
      text_format.Merge(text, label_map)
    except text_format.ParseError:
      # ... and fall back to the binary wire format.
      label_map.ParseFromString(text)
  _validate_label_map(label_map)
  return label_map
def get_label_map_dict(label_map_path_or_proto,
                       use_display_name=False,
                       fill_in_gaps_and_background=False):
  """Reads a label map and returns a dictionary of label names to id.

  Args:
    label_map_path_or_proto: path to a label map file, or an already-parsed
      StringIntLabelMap proto.
    use_display_name: key the result by `display_name` instead of `name`.
    fill_in_gaps_and_background: also add a 'background' -> 0 entry when id 0
      is absent, and fill missing ids between 1 and max(id) with string keys.

  Returns:
    dict mapping label names to integer ids.

  Raises:
    ValueError: if gap-filling is requested but the ids are not all
      non-negative integers.
  """
  if isinstance(label_map_path_or_proto, string_types):
    label_map = load_labelmap(label_map_path_or_proto)
  else:
    _validate_label_map(label_map_path_or_proto)
    label_map = label_map_path_or_proto

  label_map_dict = {}
  for item in label_map.item:
    if use_display_name:
      label_map_dict[item.display_name] = item.id
    else:
      label_map_dict[item.name] = item.id

  if fill_in_gaps_and_background:
    values = set(label_map_dict.values())

    if 0 not in values:
      label_map_dict['background'] = 0
    if not all(isinstance(value, int) for value in values):
      # BUG FIX: the two implicitly-concatenated string literals lacked a
      # separating space ("...in order tofill_in_gaps_and_background.").
      raise ValueError('The values in label map must be integers in order to '
                       'fill_in_gaps_and_background.')
    if not all(value >= 0 for value in values):
      raise ValueError('The values in the label map must be positive.')

    if len(values) != max(values) + 1:
      # There are gaps in the labels; fill them in. max(values) itself is
      # always present, so only 1..max-1 need checking.
      for value in range(1, max(values)):
        if value not in values:
          # TODO(rathodv): Add a prefix 'class_' here once the tool to generate
          # teacher annotation adds this prefix in the data.
          label_map_dict[str(value)] = value
  return label_map_dict
def create_categories_from_labelmap(label_map_path, use_display_name=True):
  """Reads a label map and returns categories list compatible with eval."""
  label_map = load_labelmap(label_map_path)
  # The category list must cover every id present in the label map.
  largest_id = max(entry.id for entry in label_map.item)
  return convert_label_map_to_categories(label_map, largest_id,
                                         use_display_name)
def create_category_index_from_labelmap(label_map_path, use_display_name=True):
  """Reads a label map and returns a category index keyed by category id."""
  return create_category_index(
      create_categories_from_labelmap(label_map_path, use_display_name))
def create_class_agnostic_category_index():
  """Create category index with a single `object` class."""
  object_category = {'id': 1, 'name': 'object'}
  return {object_category['id']: object_category}
| [
"abhishek.april16@gmail.com"
] | abhishek.april16@gmail.com |
c7e48d978b5fe1639d5759f2c11b49831e3f7ab3 | 90a4b469f41b02aeb430c378ca513a58ea6bd0b5 | /Tools/STM32H7_SWO_Output_Solution.py | a3beb7588cbf40c8ee58f32168b3f6bbd0aedfd8 | [] | no_license | znfgnu/stm32h7xx-cubemx-makefile-openocd-gdbgui-example | a734d4c47571a258a2c39fc096b0ae1fb59a9996 | 388e17af8fb595379d93e4206eb553a710064724 | refs/heads/master | 2022-04-06T20:34:55.574158 | 2020-01-20T13:48:49 | 2020-01-20T13:48:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,811 | py | #!/usr/bin/env python3
"""
# STM32H7 SWO Output Solution
Author: Brian Khuu 2020
Main Copy: https://gist.github.com/mofosyne/178ad947fdff0f357eb0e03a42bcef5c
This generates codes that enables SWO output for STM32H7 and was tested on NUCLEO-H743ZI2.
This is required because stm32h7 changed the registers that OpenOCD expects for SWO output.
Best to use 400Mhz, tried lower... but had issue with getting stable uart output
You have the choice of setting up SWO in firmware.
You also have the choice of gdbinit based register code setup.
## Reference:
Based on this solution (clive1): https://community.st.com/s/question/0D50X00009ce0vWSAQ/nucleoh743zi-board-and-printf-swo-not-working
Here is some useful note about how openocd command for 'tpiu' and 'itm' works (Since last time I checked).
Along with equivalent code as well, shown below:
```
// Open OCD armv7m_trace_tpiu_config()
// TPIU_CSPSR = 1 << trace_config->port_size
// TPIU_ACPR = prescaler - 1
// TPIU_SPPR = trace_config->pin_protocol
// TPIU_FFCR = ffcr
// TPIU : Trace Port Interface Unit
TPI->CSPSR |= 1; /* TPIU_CSPSR : Current Parallel Port Size Register */
TPI->ACPR |= ((SystemCoreClock / 2000000) - 1); /* TPIU_ACPR.SWOSCALER : Set prescaler */
TPI->SPPR |= 0x00000002; /* TPIU_SPPR.TXMODE : trace output protocol (2: SWO NRZ, 1: SWO Manchester encoding) */
TPI->FFCR &= ~TPI_FFCR_EnFCont_Msk; /* TPIU_FFCR.EnFCont : Clear Continuous TPIU Formatting (Using ITM only) */
//TPI->FFCR |= TPI_FFCR_EnFCont_Msk; /* TPIU_FFCR.EnFCont : Enable Continuous TPIU Formatting (Using ITM & ETM) */
```
```
// Open OCD armv7m_trace_itm_config()
// ITM_LAR = ITM_LAR_KEY;
// ITM_TCR = (1 << 0) | (1 << 3) | ...
// ITM_TERx = trace_config->itm_ter[x])
// ITM : Instrumented Trace Macrocell
ITM->LAR |= 0xC5ACCE55; /* ITM_LAR : "Lock Access Register", C5ACCE55 enables more write access to Control Register 0xE00 : 0xFFC */
ITM->TCR |= ITM_TCR_ITMENA_Msk | ITM_TCR_DWTENA_Msk | ((0x7FUL & 1) << ITM_TCR_TraceBusID_Pos); /* ITM_TCR : ITM Trace Control Register */
ITM->TER |= 0x1; /* ITM_TER : "ITM Trace Enable Register" Enabled tracing on stimulus ports. One bit per stimulus port. */
```
## Viewing SWO stream on openocd
Found this works best for me
https://github.com/robertlong13/SWO-Parser
"""
SWO_BASE =0x5C003000
SWO_CODR = SWO_BASE + 0x010
SWO_SPPR = SWO_BASE + 0x0F0
SWO_FFSR = SWO_BASE + 0x300
SWO_CLAIMSET = SWO_BASE + 0xFA0
SWO_CLAIMCLR = SWO_BASE + 0xFA4
SWO_LAR = SWO_BASE + 0xFB0
SWO_LSR = SWO_BASE + 0xFB4
SWO_AUTHSTAT = SWO_BASE + 0xFB8
SWO_DEVID = SWO_BASE + 0xFC8
SWO_DEVTYPE = SWO_BASE + 0xFCC
SWO_PIDR4 = SWO_BASE + 0xFD0
SWTF_BASE=0x5C004000
SWTF_CTRL = SWTF_BASE + 0x000
SWTF_PRIORITY = SWTF_BASE + 0x004
SWTF_CLAIMSET = SWTF_BASE + 0xFA0
SWTF_CLAIMCLR = SWTF_BASE + 0xFA4
SWTF_LAR = SWTF_BASE + 0xFB0
SWTF_LSR = SWTF_BASE + 0xFB4
SWTF_AUTHSTAT = SWTF_BASE + 0xFB8
SWTF_DEVID = SWTF_BASE + 0xFC8
SWTF_DEVTYPE = SWTF_BASE + 0xFCC
SWTF_PIDR4 = SWTF_BASE + 0xFD0
SWTF_PIDR0 = SWTF_BASE + 0xFE0
SWTF_PIDR1 = SWTF_BASE + 0xFE4
SWTF_PIDR2 = SWTF_BASE + 0xFE8
SWTF_PIDR3 = SWTF_BASE + 0xFEC
SWTF_CIDR0 = SWTF_BASE + 0xFF0
SWTF_CIDR1 = SWTF_BASE + 0xFF4
SWTF_CIDR2 = SWTF_BASE + 0xFF8
SWTF_CIDR3 = SWTF_BASE + 0xFFC
SystemCoreClock = 400000000 # Note: Urge you to use 400Mhz for stable SWO output
SWOSpeed_Hz = 2000000
SWOPrescaler = round(SystemCoreClock / SWOSpeed_Hz) - 1
print(SWOPrescaler)
c_code = f"""\
extern uint32_t SystemCoreClock;
void SWO_ITM_enable(void)
{{
/*
This functions recommends system speed of {SystemCoreClock}Hz and will
use SWO clock speed of {SWOSpeed_Hz}Hz
# GDB OpenOCD commands to connect to this:
monitor tpiu config internal - uart off {SystemCoreClock}
monitor itm port 0 on
Code Gen Ref: https://gist.github.com/mofosyne/178ad947fdff0f357eb0e03a42bcef5c
*/
/* Setup SWO and SWO funnel (Note: SWO_BASE and SWTF_BASE not defined in stm32h743xx.h) */
// DBGMCU_CR : Enable D3DBGCKEN D1DBGCKEN TRACECLKEN Clock Domains
DBGMCU->CR = DBGMCU_CR_DBG_CKD3EN | DBGMCU_CR_DBG_CKD1EN | DBGMCU_CR_DBG_TRACECKEN; // DBGMCU_CR
// SWO_LAR & SWTF_LAR : Unlock SWO and SWO Funnel
*((uint32_t *)({SWO_LAR :#08x})) = 0xC5ACCE55; // SWO_LAR
*((uint32_t *)({SWTF_LAR :#08x})) = 0xC5ACCE55; // SWTF_LAR
// SWO_CODR : {SystemCoreClock}Hz -> {SWOSpeed_Hz}Hz
// Note: SWOPrescaler = ((sysclock_Hz / SWOSpeed_Hz) - 1) --> {SWOPrescaler:#08x} = {SWOPrescaler} = ({SystemCoreClock} / {SWOSpeed_Hz}) - 1)
*((uint32_t *)({SWO_CODR :#08x})) = ((SystemCoreClock / {SWOSpeed_Hz}) - 1); // SWO_CODR
// SWO_SPPR : (2: SWO NRZ, 1: SWO Manchester encoding)
*((uint32_t *)({SWO_SPPR :#08x})) = 0x00000002; // SWO_SPPR
// SWTF_CTRL : enable SWO
*((uint32_t *)({SWTF_CTRL:#08x})) |= 0x1; // SWTF_CTRL
/* SWO GPIO Pin Setup */
//RCC_AHB4ENR enable GPIOB clock
*(__IO uint32_t*)(0x580244E0) |= 0x00000002;
// Configure GPIOB pin 3 as AF
*(__IO uint32_t*)(0x58020400) = (*(__IO uint32_t*)(0x58020400) & 0xffffff3f) | 0x00000080;
// Configure GPIOB pin 3 Speed
*(__IO uint32_t*)(0x58020408) |= 0x00000080;
// Force AF0 for GPIOB pin 3
*(__IO uint32_t*)(0x58020420) &= 0xFFFF0FFF;
}}
"""
gdbinit_openocd = f"""\
#*****************************************************************************
# Enable ITM support (SWO Output)
# This is a workaround for openocd STM32H7 SWO support.
# Expects Core Clock of {SystemCoreClock}Hz for SWO speed of {SWOSpeed_Hz}Hz
# Code Gen Ref: https://gist.github.com/mofosyne/178ad947fdff0f357eb0e03a42bcef5c
#*****************************************************************************
# DBGMCU_CR : Enable D3DBGCKEN D1DBGCKEN TRACECLKEN Clock Domains
set *0x5C001004 = 0x00700000
# SWO_LAR & SWTF_LAR : Unlock SWO and SWO Funnel
set *{SWO_LAR:#08x} = 0xC5ACCE55
set *{SWTF_LAR:#08x} = 0xC5ACCE55
# SWO_CODR : systemCoreClock -> SWO_Hz == {SystemCoreClock}Hz -> {SWOSpeed_Hz}Hz
# SWO_CODR = {SWOPrescaler:#08x} = {SWOPrescaler} = ({SystemCoreClock} / {SWOSpeed_Hz}) - 1)
set *{SWO_CODR:#08x} = {SWOPrescaler:#08x}
# SWO_SPPR : (2: SWO NRZ, 1: SWO Manchester encoding)
set *{SWO_SPPR:#08x} = 0x00000002
# SWTF_CTRL : enable SWO
set *{SWTF_CTRL:#08x} = (*{SWTF_CTRL:#08x}) | 0x1
monitor tpiu config internal - uart off {SystemCoreClock}
monitor itm port 0 on
"""
print_solution = f"""
# STM32H7 SWO Output Solution
## C
```.c
{c_code}\
```
## GDBINIT
```.gdbinit
{gdbinit_openocd}\
```
"""
print(print_solution) | [
"mofosyne@gmail.com"
] | mofosyne@gmail.com |
0cd3055eda98921d51bcc30e87b903e4a577e640 | c4fe6807966f20d67930d6deb12e9159e225c298 | /modeling.py | ad4e1a00e2c0d485b1344d09851cb59238d910ae | [] | no_license | liuyang0711/classifier_multi_label_seq2seq_attention | f9d283823d374a3d7ce70d5c375b4fdb4f2ccd28 | cd0214a55a8408582574f5a6425429d41cbd9ce2 | refs/heads/main | 2023-07-07T19:50:11.407782 | 2021-08-20T03:31:54 | 2021-08-20T03:31:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,493 | py | """The main ALBERT model and related functions.
For a description of the algorithm, see https://arxiv.org/abs/1909.11942.
"""
import collections
import copy
import json
import math
import re
import numpy as np
import six
from six.moves import range
import tensorflow as tf
from tensorflow.contrib import layers as contrib_layers
class AlbertConfig(object):
  """Configuration container for `AlbertModel`.

  The default settings match the configuration of model `albert_xxlarge`.
  Every constructor argument is stored verbatim as an attribute of the
  same name.
  """

  def __init__(self,
               vocab_size,
               embedding_size=128,
               hidden_size=4096,
               num_hidden_layers=12,
               num_hidden_groups=1,
               num_attention_heads=64,
               intermediate_size=16384,
               inner_group_num=1,
               down_scale_factor=1,
               hidden_act="gelu",
               hidden_dropout_prob=0,
               attention_probs_dropout_prob=0,
               max_position_embeddings=512,
               type_vocab_size=2,
               initializer_range=0.02):
    """Constructs AlbertConfig.

    Args:
      vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`.
      embedding_size: size of voc embeddings.
      hidden_size: Size of the encoder layers and the pooler layer.
      num_hidden_layers: Number of hidden layers in the Transformer encoder.
      num_hidden_groups: Number of groups for the hidden layers; parameters
        in the same group are shared.
      num_attention_heads: Number of attention heads per attention layer.
      intermediate_size: Size of the "intermediate" (feed-forward) layer.
      inner_group_num: number of inner repetitions of attention and ffn.
      down_scale_factor: the scale to apply.
      hidden_act: Non-linear activation function (function or string).
      hidden_dropout_prob: Dropout probability for fully connected layers.
      attention_probs_dropout_prob: Dropout ratio for attention probs.
      max_position_embeddings: Maximum supported sequence length.
      type_vocab_size: Vocabulary size of `token_type_ids`.
      initializer_range: Stdev of the truncated_normal_initializer used for
        initializing all weight matrices.
    """
    # Copy every constructor argument onto the instance under its own name.
    constructor_args = dict(locals())
    del constructor_args['self']
    for arg_name, arg_value in constructor_args.items():
      setattr(self, arg_name, arg_value)

  @classmethod
  def from_dict(cls, json_object):
    """Constructs a `AlbertConfig` from a Python dictionary of parameters."""
    config = AlbertConfig(vocab_size=None)
    for key, value in json_object.items():
      config.__dict__[key] = value
    return config

  @classmethod
  def from_json_file(cls, json_file):
    """Constructs a `AlbertConfig` from a json file of parameters."""
    with tf.io.gfile.GFile(json_file, "r") as reader:
      serialized = reader.read()
    return cls.from_dict(json.loads(serialized))

  def to_dict(self):
    """Serializes this instance to a Python dictionary."""
    return copy.deepcopy(self.__dict__)

  def to_json_string(self):
    """Serializes this instance to a JSON string."""
    return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class AlbertModel(object):
  """BERT model ("Bidirectional Encoder Representations from Transformers").

  Example usage:

  ```python
  # Already been converted from strings into ids
  input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
  input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
  token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])

  config = modeling.AlbertConfig(vocab_size=32000, hidden_size=512,
    num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)

  model = modeling.AlbertModel(config=config, is_training=True,
    input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)

  label_embeddings = tf.get_variable(...)
  pooled_output = model.get_pooled_output()
  logits = tf.matmul(pooled_output, label_embeddings)
  ...
  ```
  """

  def __init__(self,
               config,
               is_training,
               input_ids,
               input_mask=None,
               token_type_ids=None,
               use_one_hot_embeddings=False,
               scope=None):
    """Constructor for AlbertModel.

    Args:
      config: `AlbertConfig` instance.
      is_training: bool. true for training model, false for eval model.
        Controls whether dropout will be applied.
      input_ids: int32 Tensor of shape [batch_size, seq_length].
      input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
      token_type_ids: (optional) int32 Tensor of shape [batch_size,
        seq_length].
      use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
        embeddings or tf.embedding_lookup() for the word embeddings.
      scope: (optional) variable scope. Defaults to "bert".

    Raises:
      ValueError: The config is invalid or one of the input tensor shapes
        is invalid.
    """
    config = copy.deepcopy(config)
    if not is_training:
      # Evaluation/inference: disable all dropout.
      config.hidden_dropout_prob = 0.0
      config.attention_probs_dropout_prob = 0.0

    input_shape = get_shape_list(input_ids, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]

    if input_mask is None:
      # Default mask: attend to every position.
      input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)

    if token_type_ids is None:
      # Default segment ids: a single segment (all zeros).
      token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)

    with tf.variable_scope(scope, default_name="bert"):
      with tf.variable_scope("embeddings"):
        # Perform embedding lookup on the word ids.
        (self.word_embedding_output,
         self.output_embedding_table) = embedding_lookup(
             input_ids=input_ids,
             vocab_size=config.vocab_size,
             embedding_size=config.embedding_size,
             initializer_range=config.initializer_range,
             word_embedding_name="word_embeddings",
             use_one_hot_embeddings=use_one_hot_embeddings)

        # Add positional embeddings and token type embeddings, then layer
        # normalize and perform dropout.
        self.embedding_output = embedding_postprocessor(
            input_tensor=self.word_embedding_output,
            use_token_type=True,
            token_type_ids=token_type_ids,
            token_type_vocab_size=config.type_vocab_size,
            token_type_embedding_name="token_type_embeddings",
            use_position_embeddings=True,
            position_embedding_name="position_embeddings",
            initializer_range=config.initializer_range,
            max_position_embeddings=config.max_position_embeddings,
            dropout_prob=config.hidden_dropout_prob)

      with tf.variable_scope("encoder"):
        # Run the stacked transformer.
        # `sequence_output` shape = [batch_size, seq_length, hidden_size].
        self.all_encoder_layers = transformer_model(
            input_tensor=self.embedding_output,
            attention_mask=input_mask,
            hidden_size=config.hidden_size,
            num_hidden_layers=config.num_hidden_layers,
            num_hidden_groups=config.num_hidden_groups,
            num_attention_heads=config.num_attention_heads,
            intermediate_size=config.intermediate_size,
            inner_group_num=config.inner_group_num,
            intermediate_act_fn=get_activation(config.hidden_act),
            hidden_dropout_prob=config.hidden_dropout_prob,
            attention_probs_dropout_prob=config.attention_probs_dropout_prob,
            initializer_range=config.initializer_range,
            do_return_all_layers=True)

      self.sequence_output = self.all_encoder_layers[-1]
      # The "pooler" converts the encoded sequence tensor of shape
      # [batch_size, seq_length, hidden_size] to a tensor of shape
      # [batch_size, hidden_size]. This is necessary for segment-level
      # (or segment-pair-level) classification tasks where we need a fixed
      # dimensional representation of the segment.
      with tf.variable_scope("pooler"):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token. We assume that this has been pre-trained
        first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
        self.pooled_output = tf.layers.dense(
            first_token_tensor,
            config.hidden_size,
            activation=tf.tanh,
            kernel_initializer=create_initializer(config.initializer_range))

  def get_pooled_output(self):
    # [batch_size, hidden_size] pooled representation of the first token.
    return self.pooled_output

  def get_sequence_output(self):
    """Gets final hidden layer of encoder.

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size]
      corresponding to the final hidden of the transformer encoder.
    """
    return self.sequence_output

  def get_all_encoder_layers(self):
    # List of per-layer encoder outputs (last element == sequence output).
    return self.all_encoder_layers

  def get_word_embedding_output(self):
    """Get output of the word(piece) embedding lookup.

    This is BEFORE positional embeddings and token type embeddings have been
    added.

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size]
      corresponding to the output of the word(piece) embedding layer.
    """
    return self.word_embedding_output

  def get_embedding_output(self):
    """Gets output of the embedding lookup (i.e., input to the transformer).

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size]
      corresponding to the output of the embedding layer, after summing the
      word embeddings with the positional embeddings and the token type
      embeddings, then performing layer normalization. This is the input to
      the transformer.
    """
    return self.embedding_output

  def get_embedding_table(self):
    # [vocab_size, embedding_size] word embedding table.
    return self.output_embedding_table
def gelu(x):
  """Gaussian Error Linear Unit.

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` with the GELU activation applied.
  """
  # tanh approximation of the Gaussian CDF (Hendrycks & Gimpel, 2016).
  inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
  cdf = 0.5 * (1.0 + tf.tanh(inner))
  return x * cdf
def get_activation(activation_string):
  """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.

  Args:
    activation_string: String name of the activation function.

  Returns:
    A Python function corresponding to the activation function. If
    `activation_string` is None, empty, or "linear", this will return None.
    If `activation_string` is not a string, it will return
    `activation_string` unchanged.

  Raises:
    ValueError: The `activation_string` does not correspond to a known
      activation.
  """
  # Anything that's not a string is assumed to already be an activation
  # function, so it is passed through untouched.
  if not isinstance(activation_string, str):
    return activation_string
  if not activation_string:
    return None

  act = activation_string.lower()
  if act == "linear":
    return None
  if act not in ("relu", "gelu", "tanh"):
    raise ValueError("Unsupported activation: %s" % act)
  if act == "relu":
    return tf.nn.relu
  if act == "gelu":
    return gelu
  return tf.tanh
def get_assignment_map_from_checkpoint(tvars, init_checkpoint, num_of_group=0):
  """Compute the union of the current variables and checkpoint variables.

  Args:
    tvars: list of trainable variables in the current graph.
    init_checkpoint: path of the checkpoint to initialize from.
    num_of_group: number of parameter-sharing groups. 0 disables grouping;
      when > 1, graph variables in group_k / ffn_k / attention_k may map back
      to the checkpoint's group_0 / ffn_1 / attention_1 weights (ALBERT
      cross-layer parameter sharing).

  Returns:
    (assignment_map, initialized_variable_names): assignment_map is a dict
    (or, when num_of_group > 0, a list of per-group OrderedDicts) mapping
    checkpoint variable names to graph variable names.
  """
  assignment_map = {}
  initialized_variable_names = {}

  # Map graph variable names (with the trailing ":<device>" suffix removed)
  # to the variables themselves, preserving graph order.
  name_to_variable = collections.OrderedDict()
  for var in tvars:
    name = var.name
    m = re.match("^(.*):\\d+$", name)
    if m is not None:
      name = m.group(1)
    name_to_variable[name] = var
  init_vars = tf.train.list_variables(init_checkpoint)
  init_vars_name = [name for (name, _) in init_vars]

  if num_of_group > 0:
    # One assignment map per group.
    assignment_map = []
    for gid in range(num_of_group):
      assignment_map.append(collections.OrderedDict())
  else:
    assignment_map = collections.OrderedDict()

  for name in name_to_variable:
    # Find the checkpoint name this graph variable maps to: exact match
    # first, otherwise rewrite group/ffn/attention indices to the shared
    # checkpoint copies.
    if name in init_vars_name:
      tvar_name = name
    elif (re.sub(r"/group_\d+/", "/group_0/",
                 six.ensure_str(name)) in init_vars_name and
          num_of_group > 1):
      tvar_name = re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name))
    elif (re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
          in init_vars_name and num_of_group > 1):
      tvar_name = re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
    elif (re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name))
          in init_vars_name and num_of_group > 1):
      tvar_name = re.sub(r"/attention_\d+/", "/attention_1/",
                         six.ensure_str(name))
    else:
      tf.logging.info("name %s does not get matched", name)
      continue
    tf.logging.info("name %s match to %s", name, tvar_name)
    if num_of_group > 0:
      # Route the mapping into the group this variable belongs to
      # (group 0 is the fallback when no group index appears in the name).
      group_matched = False
      for gid in range(1, num_of_group):
        if (("/group_" + str(gid) + "/" in name) or
            ("/ffn_" + str(gid) + "/" in name) or
            ("/attention_" + str(gid) + "/" in name)):
          group_matched = True
          tf.logging.info("%s belongs to %dth", name, gid)
          assignment_map[gid][tvar_name] = name
      if not group_matched:
        assignment_map[0][tvar_name] = name
    else:
      assignment_map[tvar_name] = name
    # Record both the bare name and the ":0"-suffixed form as initialized.
    initialized_variable_names[name] = 1
    initialized_variable_names[six.ensure_str(name) + ":0"] = 1
  return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
  """Perform dropout.

  Args:
    input_tensor: float Tensor.
    dropout_prob: Python float. The probability of dropping out a value (NOT
      of *keeping* a dimension as in `tf.nn.dropout`).

  Returns:
    A version of `input_tensor` with dropout applied.
  """
  # A missing or zero rate means dropout is disabled: return input untouched.
  disabled = dropout_prob is None or dropout_prob == 0.0
  if disabled:
    return input_tensor
  return tf.nn.dropout(input_tensor, rate=dropout_prob)
def layer_norm(input_tensor, name=None):
  """Run layer normalization on the last dimension of the tensor.

  Args:
    input_tensor: float Tensor to normalize.
    name: Optional variable scope name for the layer-norm parameters.

  Returns:
    A Tensor of the same shape, normalized over the last axis.
  """
  # begin_norm_axis=-1 / begin_params_axis=-1: normalize, and learn the
  # gain/bias parameters, over the last dimension only.
  return contrib_layers.layer_norm(
      inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
  """Run layer normalization on `input_tensor`, then apply dropout.

  Args:
    input_tensor: float Tensor to normalize.
    dropout_prob: float dropout probability applied after normalization.
    name: optional variable-scope name forwarded to `layer_norm`.

  Returns:
    The normalized, dropout-applied tensor.
  """
  normalized = layer_norm(input_tensor, name)
  return dropout(normalized, dropout_prob)
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range.

  Args:
    initializer_range: float, standard deviation of the truncated normal
      distribution used to initialize weights.

  Returns:
    A TensorFlow initializer object.
  """
  return tf.truncated_normal_initializer(stddev=initializer_range)
def get_timing_signal_1d_given_position(channels,
                                        position,
                                        min_timescale=1.0,
                                        max_timescale=1.0e4):
  """Get sinusoids of diff frequencies, with timing position given.

  Adapted from add_timing_signal_1d_given_position in
  //third_party/py/tensor2tensor/layers/common_attention.py

  Args:
    channels: scalar, size of timing embeddings to create. The number of
      different timescales is equal to channels / 2.
    position: a Tensor with shape [batch, seq_len]
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor of timing signals [batch, seq_len, channels]
  """
  num_timescales = channels // 2
  # The timescales form a geometric progression from min_timescale to
  # max_timescale: the ratio between consecutive timescales is constant in
  # log space.
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  # Outer product of positions and inverse timescales:
  # [batch, seq_len, num_timescales].
  scaled_time = (
      tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
          tf.expand_dims(inv_timescales, 0), 0))
  # First half of the channels carries sin, second half carries cos.
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
  # Zero-pad one extra channel when `channels` is odd.
  signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
  return signal
def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
  """Looks up words embeddings for id tensor.

  Args:
    input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
      ids.
    vocab_size: int. Size of the embedding vocabulary.
    embedding_size: int. Width of the word embeddings.
    initializer_range: float. Embedding initialization range.
    word_embedding_name: string. Name of the embedding table.
    use_one_hot_embeddings: bool. If True, use one-hot method for word
      embeddings. If False, use `tf.nn.embedding_lookup()`.

  Returns:
    A (output, embedding_table) tuple: `output` is a float Tensor of shape
    [batch_size, seq_length, embedding_size]; `embedding_table` is the
    [vocab_size, embedding_size] variable used for the lookup.
  """
  # This function assumes that the input is of shape [batch_size, seq_length,
  # num_inputs].
  #
  # If the input is a 2D tensor of shape [batch_size, seq_length], we
  # reshape to [batch_size, seq_length, 1].
  if input_ids.shape.ndims == 2:
    input_ids = tf.expand_dims(input_ids, axis=[-1])
  embedding_table = tf.get_variable(
      name=word_embedding_name,
      shape=[vocab_size, embedding_size],
      initializer=create_initializer(initializer_range))
  if use_one_hot_embeddings:
    # One-hot matmul path: an alternative to gather-style lookup.
    flat_input_ids = tf.reshape(input_ids, [-1])
    one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
    output = tf.matmul(one_hot_input_ids, embedding_table)
  else:
    output = tf.nn.embedding_lookup(embedding_table, input_ids)
  input_shape = get_shape_list(input_ids)
  # Collapse the trailing num_inputs dimension into the embedding dimension:
  # [batch_size, seq_length, num_inputs * embedding_size].
  output = tf.reshape(output,
                      input_shape[0:-1] + [input_shape[-1] * embedding_size])
  return (output, embedding_table)
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
  """Performs various post-processing on a word embedding tensor.

  Optionally adds token-type (segment) embeddings and position embeddings,
  then applies layer normalization followed by dropout.

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length,
      embedding_size].
    use_token_type: bool. Whether to add embeddings for `token_type_ids`.
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      Must be specified if `use_token_type` is True.
    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
    token_type_embedding_name: string. The name of the embedding table variable
      for token type ids.
    use_position_embeddings: bool. Whether to add position embeddings for the
      position of each token in the sequence.
    position_embedding_name: string. The name of the embedding table variable
      for positional embeddings.
    initializer_range: float. Range of the weight initialization.
    max_position_embeddings: int. Maximum sequence length that might ever be
      used with this model. This can be longer than the sequence length of
      input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.

  Returns:
    float tensor with same shape as `input_tensor`.

  Raises:
    ValueError: One of the tensor shapes or input values is invalid.
  """
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  width = input_shape[2]

  output = input_tensor

  if use_token_type:
    if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if"
                       "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings

  if use_position_embeddings:
    # Graph-level guard: fail at run time if the sequence is longer than the
    # position table.
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
      # Since the position embedding table is a learned variable, we create it
      # using a (long) sequence length `max_position_embeddings`. The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())

      # Only the last two dimensions are relevant (`seq_length` and `width`), so
      # we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings

  output = layer_norm_and_dropout(output, dropout_prob)
  return output
def dense_layer_3d(input_tensor,
                   num_attention_heads,
                   head_size,
                   initializer,
                   activation,
                   name=None):
  """A dense layer with a 3D kernel.

  Projects [batch, seq_length, hidden_size] inputs to
  [batch, seq_length, num_attention_heads, head_size].

  Args:
    input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
    num_attention_heads: Number of attention heads.
    head_size: The size per attention head.
    initializer: Kernel initializer.
    activation: Activation function, or None for a linear layer.
    name: The name scope of this layer.

  Returns:
    float Tensor of shape [batch, seq_length, num_attention_heads, head_size].
  """
  hidden_size = get_shape_list(input_tensor)[2]
  with tf.variable_scope(name):
    kernel = tf.get_variable(
        name="kernel",
        shape=[hidden_size, num_attention_heads * head_size],
        initializer=initializer)
    # Expose the per-head structure so einsum can split heads in one op.
    kernel = tf.reshape(kernel, [hidden_size, num_attention_heads, head_size])
    bias = tf.get_variable(
        name="bias",
        shape=[num_attention_heads * head_size],
        initializer=tf.zeros_initializer)
    bias = tf.reshape(bias, [num_attention_heads, head_size])
    # B=batch, F=seq_length, H=hidden, N=heads, D=head size.
    projected = tf.einsum("BFH,HND->BFND", input_tensor, kernel) + bias
    return activation(projected) if activation is not None else projected
def dense_layer_3d_proj(input_tensor,
                        hidden_size,
                        head_size,
                        initializer,
                        activation,
                        name=None):
  """A dense layer with 3D kernel for projection.

  Projects per-head attention outputs of shape [batch, from_seq_length,
  num_attention_heads, head_size] back to [batch, from_seq_length,
  hidden_size].

  Args:
    input_tensor: float Tensor of shape [batch, from_seq_length,
      num_attention_heads, size_per_head].
    hidden_size: The size of hidden layer.
    head_size: The size of head.
    initializer: Kernel initializer.
    activation: Activation function, or None for a linear projection.
    name: The name scope of this layer.

  Returns:
    float logits Tensor of shape [batch, from_seq_length, hidden_size].
  """
  input_shape = get_shape_list(input_tensor)
  # The number of heads is read from the input shape rather than passed in.
  num_attention_heads = input_shape[2]
  with tf.variable_scope(name):
    w = tf.get_variable(
        name="kernel",
        shape=[num_attention_heads * head_size, hidden_size],
        initializer=initializer)
    # Expose the head structure so einsum can contract over (N, D) at once.
    w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
    b = tf.get_variable(
        name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
    # B=batch, F=from_seq_length, N=heads, D=head size, H=hidden size.
    ret = tf.einsum("BFND,NDH->BFH", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret
def dense_layer_2d(input_tensor,
                   output_size,
                   initializer,
                   activation,
                   num_attention_heads=1,
                   name=None):
  """A dense layer with a 2D kernel.

  Args:
    input_tensor: Float tensor with rank 3.
    output_size: The size of output dimension.
    initializer: Kernel initializer.
    activation: Activation function, or None for a linear layer.
    num_attention_heads: number of attention head in attention layer;
      accepted for interface symmetry but unused here.
    name: The name scope of this layer.

  Returns:
    float logits Tensor of shape [batch, seq_length, output_size].
  """
  del num_attention_heads  # unused
  hidden_size = get_shape_list(input_tensor)[2]
  with tf.variable_scope(name):
    kernel = tf.get_variable(
        name="kernel",
        shape=[hidden_size, output_size],
        initializer=initializer)
    bias = tf.get_variable(
        name="bias", shape=[output_size], initializer=tf.zeros_initializer)
    # B=batch, F=seq_length, H=hidden size, O=output size.
    projected = tf.einsum("BFH,HO->BFO", input_tensor, kernel) + bias
    return activation(projected) if activation is not None else projected
def dot_product_attention(q, k, v, bias, dropout_rate=0.0):
  """Dot-product attention.

  Args:
    q: Tensor with shape [..., length_q, depth_k].
    k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
      match with q.
    v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
      match with q.
    bias: bias Tensor (see attention_bias())
    dropout_rate: a float.

  Returns:
    Tensor with shape [..., length_q, depth_v].
  """
  logits = tf.matmul(q, k, transpose_b=True)  # [..., length_q, length_kv]
  # Scale scores by 1/sqrt(depth_k), as in "Attention is All You Need".
  logits = tf.multiply(logits, 1.0 / math.sqrt(float(get_shape_list(q)[-1])))
  if bias is not None:
    # `attention_mask` = [B, T]
    from_shape = get_shape_list(q)
    if len(from_shape) == 4:
      broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], 1], tf.float32)
    elif len(from_shape) == 5:
      # from_shape = [B, N, Block_num, block_size, depth]#
      broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], from_shape[3],
                                1], tf.float32)
    # Broadcast the low-rank mask up to the rank of `logits`.
    # NOTE(review): if q is neither rank 4 nor rank 5, `broadcast_ones` is
    # unbound here and the next line raises; callers pass rank-4 tensors.
    bias = tf.matmul(broadcast_ones,
                     tf.cast(bias, tf.float32), transpose_b=True)

    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    adder = (1.0 - bias) * -10000.0

    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    logits += adder
  else:
    adder = 0.0  # NOTE(review): unused; kept only for symmetry with the if-branch

  attention_probs = tf.nn.softmax(logits, name="attention_probs")
  attention_probs = dropout(attention_probs, dropout_rate)
  return tf.matmul(attention_probs, v)
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
  """Performs multi-headed attention from `from_tensor` to `to_tensor`.

  Args:
    from_tensor: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions in
      the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    query_act: (optional) Activation function for the query transform.
    key_act: (optional) Activation function for the key transform.
    value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
    initializer_range: float. Range of the weight initializer.
    batch_size: (Optional) int. If the input is 2D, this might be the batch size
      of the 3D version of the `from_tensor` and `to_tensor`.
    from_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `from_tensor`.
    to_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `to_tensor`.

  Returns:
    float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
    size_per_head].

  Raises:
    ValueError: Any of the arguments or tensor shapes are invalid.
  """
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
  # Head size is derived by evenly splitting the input width across heads.
  size_per_head = int(from_shape[2] / num_attention_heads)

  if len(from_shape) != len(to_shape):
    raise ValueError(
        "The rank of `from_tensor` must match the rank of `to_tensor`.")

  if len(from_shape) == 3:
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
  elif len(from_shape) == 2:
    if (batch_size is None or from_seq_length is None or to_seq_length is None):
      raise ValueError(
          "When passing in rank 2 tensors to attention_layer, the values "
          "for `batch_size`, `from_seq_length`, and `to_seq_length` "
          "must all be specified.")

  # Scalar dimensions referenced here:
  #   B = batch size (number of sequences)
  #   F = `from_tensor` sequence length
  #   T = `to_tensor` sequence length
  #   N = `num_attention_heads`
  #   H = `size_per_head`

  # `query_layer` = [B, F, N, H]
  q = dense_layer_3d(from_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), query_act, "query")

  # `key_layer` = [B, T, N, H]
  k = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), key_act, "key")

  # `value_layer` = [B, T, N, H]
  v = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), value_act, "value")
  # Move the heads axis before the sequence axis: [B, N, seq, H].
  q = tf.transpose(q, [0, 2, 1, 3])
  k = tf.transpose(k, [0, 2, 1, 3])
  v = tf.transpose(v, [0, 2, 1, 3])
  if attention_mask is not None:
    # Collapse the mask to [B, 1, T, 1]; dot_product_attention broadcasts it
    # back up to the full logits rank.
    attention_mask = tf.reshape(
        attention_mask, [batch_size, 1, to_seq_length, 1])
  # 'new_embeddings = [B, N, F, H]'
  new_embeddings = dot_product_attention(q, k, v, attention_mask,
                                         attention_probs_dropout_prob)

  # Back to [B, F, N, H] for the output projection.
  return tf.transpose(new_embeddings, [0, 2, 1, 3])
def attention_ffn_block(layer_input,
                        hidden_size=768,
                        attention_mask=None,
                        num_attention_heads=1,
                        attention_head_size=64,
                        attention_probs_dropout_prob=0.0,
                        intermediate_size=3072,
                        intermediate_act_fn=None,
                        initializer_range=0.02,
                        hidden_dropout_prob=0.0):
  """A network with attention-ffn as sub-block.

  One Transformer sub-block: self-attention with residual + layer norm,
  followed by a position-wise feed-forward network with residual + layer
  norm. The variable scopes ("attention_1", "ffn_1") define the checkpoint
  names that get_assignment_map_from_checkpoint maps against.

  Args:
    layer_input: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    hidden_size: (optional) int, size of hidden layer.
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions in
      the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    attention_head_size: int. Size of attention head.
    attention_probs_dropout_prob: float. dropout probability for attention_layer
    intermediate_size: int. Size of intermediate hidden layer.
    intermediate_act_fn: (optional) Activation function for the intermediate
      layer.
    initializer_range: float. Range of the weight initializer.
    hidden_dropout_prob: (optional) float. Dropout probability of the hidden
      layer.

  Returns:
    layer output
  """
  with tf.variable_scope("attention_1"):
    with tf.variable_scope("self"):
      attention_output = attention_layer(
          from_tensor=layer_input,
          to_tensor=layer_input,
          attention_mask=attention_mask,
          num_attention_heads=num_attention_heads,
          attention_probs_dropout_prob=attention_probs_dropout_prob,
          initializer_range=initializer_range)

    # Run a linear projection of `hidden_size` then add a residual
    # with `layer_input`.
    with tf.variable_scope("output"):
      attention_output = dense_layer_3d_proj(
          attention_output,
          hidden_size,
          attention_head_size,
          create_initializer(initializer_range),
          None,
          name="dense")
      attention_output = dropout(attention_output, hidden_dropout_prob)
  # Residual connection + post-attention layer norm.
  attention_output = layer_norm(attention_output + layer_input)
  with tf.variable_scope("ffn_1"):
    with tf.variable_scope("intermediate"):
      intermediate_output = dense_layer_2d(
          attention_output,
          intermediate_size,
          create_initializer(initializer_range),
          intermediate_act_fn,
          num_attention_heads=num_attention_heads,
          name="dense")
      with tf.variable_scope("output"):
        ffn_output = dense_layer_2d(
            intermediate_output,
            hidden_size,
            create_initializer(initializer_range),
            None,
            num_attention_heads=num_attention_heads,
            name="dense")
      ffn_output = dropout(ffn_output, hidden_dropout_prob)
  # Residual connection + post-FFN layer norm.
  ffn_output = layer_norm(ffn_output + attention_output)
  return ffn_output
def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_hidden_groups=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      inner_group_num=1,
                      intermediate_act_fn="gelu",
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False):
  """Multi-headed, multi-layer Transformer from "Attention is All You Need".

  This is almost an exact implementation of the original Transformer encoder.

  See the original paper:
  https://arxiv.org/abs/1706.03762

  Also see:
  https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
    attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
      seq_length], with 1 for positions that can be attended to and 0 in
      positions that should not be.
    hidden_size: int. Hidden size of the Transformer.
    num_hidden_layers: int. Number of layers (blocks) in the Transformer.
    num_hidden_groups: int. Number of group for the hidden layers, parameters
      in the same group are shared.
    num_attention_heads: int. Number of attention heads in the Transformer.
    intermediate_size: int. The size of the "intermediate" (a.k.a., feed
      forward) layer.
    inner_group_num: int, number of inner repetition of attention and ffn.
    intermediate_act_fn: function. The non-linear activation function to apply
      to the output of the intermediate/feed-forward layer.
    hidden_dropout_prob: float. Dropout probability for the hidden layers.
    attention_probs_dropout_prob: float. Dropout probability of the attention
      probabilities.
    initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    do_return_all_layers: Whether to also return all layers or just the final
      layer.

  Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer.

  Raises:
    ValueError: A Tensor shape or parameter is invalid.
  """
  if hidden_size % num_attention_heads != 0:
    raise ValueError(
        "The hidden size (%d) is not a multiple of the number of attention "
        "heads (%d)" % (hidden_size, num_attention_heads))

  attention_head_size = hidden_size // num_attention_heads
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  input_width = input_shape[2]

  all_layer_outputs = []
  if input_width != hidden_size:
    # ALBERT factorization: map the (smaller) embedding width up to the
    # Transformer's hidden size before the first layer.
    prev_output = dense_layer_2d(
        input_tensor, hidden_size, create_initializer(initializer_range),
        None, name="embedding_hidden_mapping_in")
  else:
    prev_output = input_tensor
  # AUTO_REUSE + per-group variable scopes implement ALBERT's cross-layer
  # parameter sharing: layers mapped to the same group reuse one set of
  # weights.
  with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
    for layer_idx in range(num_hidden_layers):
      # Map layer index -> group index proportionally, so each group serves
      # num_hidden_layers / num_hidden_groups consecutive layers.
      group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups)
      with tf.variable_scope("group_%d" % group_idx):
        with tf.name_scope("layer_%d" % layer_idx):
          layer_output = prev_output
          for inner_group_idx in range(inner_group_num):
            with tf.variable_scope("inner_group_%d" % inner_group_idx):
              layer_output = attention_ffn_block(
                  layer_output, hidden_size, attention_mask,
                  num_attention_heads, attention_head_size,
                  attention_probs_dropout_prob, intermediate_size,
                  intermediate_act_fn, initializer_range, hidden_dropout_prob)
              prev_output = layer_output
              all_layer_outputs.append(layer_output)
  if do_return_all_layers:
    return all_layer_outputs
  else:
    return all_layer_outputs[-1]
def get_shape_list(tensor, expected_rank=None, name=None):
  """Return the shape of `tensor` as a list, preferring static dimensions.

  Static dimensions are returned as Python integers; each dynamic (unknown)
  dimension is replaced by the matching scalar from `tf.shape(tensor)`.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int or list of ints. When given, the tensor's
      rank is first validated with `assert_rank`.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions: ints where static, scalar Tensors where dynamic.
  """
  if name is None:
    name = tensor.name

  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()

  dynamic_indexes = [i for i, dim in enumerate(shape) if dim is None]
  if not dynamic_indexes:
    return shape

  dynamic_shape = tf.shape(tensor)
  for i in dynamic_indexes:
    shape[i] = dynamic_shape[i]
  return shape
def reshape_to_matrix(input_tensor):
  """Reshape a rank >= 2 tensor into a rank-2 tensor (a matrix).

  The trailing dimension is preserved; all leading dimensions are flattened
  together. Rank-2 inputs are returned unchanged.

  Raises:
    ValueError: if the input has rank < 2.
  """
  ndims = input_tensor.shape.ndims
  if ndims < 2:
    raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                     (input_tensor.shape))
  if ndims == 2:
    return input_tensor
  return tf.reshape(input_tensor, [-1, input_tensor.shape[-1]])
def reshape_from_matrix(output_tensor, orig_shape_list):
  """Reshape a rank-2 tensor back to its original rank >= 2 shape.

  The leading dimensions come from `orig_shape_list`; the trailing width is
  taken from `output_tensor` itself, since a projection may have changed it.
  Inputs whose original shape was already rank 2 are returned unchanged.
  """
  if len(orig_shape_list) == 2:
    return output_tensor

  width = get_shape_list(output_tensor)[-1]
  return tf.reshape(output_tensor, orig_shape_list[0:-1] + [width])
def assert_rank(tensor, expected_rank, name=None):
  """Raise ValueError unless `tensor` has one of the expected ranks.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer or list of integers, expected rank(s).
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the actual rank matches none of the expected ranks.
  """
  if name is None:
    name = tensor.name

  # Normalize the expected rank(s) into a membership table.
  if isinstance(expected_rank, six.integer_types):
    allowed_ranks = {expected_rank: True}
  else:
    allowed_ranks = {rank: True for rank in expected_rank}

  actual_rank = tensor.shape.ndims
  if actual_rank not in allowed_ranks:
    scope_name = tf.get_variable_scope().name
    raise ValueError(
        "For the tensor `%s` in scope `%s`, the actual rank "
        "`%d` (shape = %s) is not equal to the expected rank `%s`" %
        (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| [
"noreply@github.com"
] | liuyang0711.noreply@github.com |
05abc1222c4d6787e03d129f59fe59e47c85d30b | 2b55f8db7c5b713d3c6df6bd67c438caa6e9ebdd | /Python/CCC '12 S5 - Mouse Journey.py | 4f58a7294d8101d0fcb178941f8b3673c04d6e4b | [
"MIT"
] | permissive | RobinNash/Solutions-to-Competition-Problems | c68bf8a487b4d003fb7955b1f2d7e297ab39a518 | c29f4498e806ce16078934caa9b0975d63c88a79 | refs/heads/main | 2023-06-18T14:22:39.554912 | 2021-07-22T02:58:27 | 2021-07-22T02:58:27 | 367,485,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | # Mouse Journey #
# September 29, 2019
# By Robin Nash
# CCC '12 S5 - Mouse Journey: count the right/down lattice paths from
# (1, 1) to (r, c) that avoid cells containing cages.

rows, cols = map(int, input().split())

# cage[y][x] is True when cell (y, x) holds a cage the mouse must avoid.
cage = [[False] * (cols + 1) for _ in range(rows + 1)]
# paths[y][x] counts distinct routes from the start to cell (y, x).
paths = [[0] * (cols + 1) for _ in range(rows + 1)]

# Read the cage positions.
for _ in range(int(input())):
    y, x = map(int, input().split())
    cage[y][x] = True

# Seed one virtual path "entering" the start cell so paths[1][1] becomes 1.
paths[1][0] = 1
for y in range(1, rows + 1):
    for x in range(1, cols + 1):
        if cage[y][x]:
            continue
        # Each cell is reachable only from above or from the left.
        paths[y][x] = paths[y - 1][x] + paths[y][x - 1]

# Output number of paths to destination
print(paths[rows][cols])
#1569785186.0
| [
"noreply@github.com"
] | RobinNash.noreply@github.com |
dafd124fb00c29dca97d60c09735878fd6622765 | fc1e575351c6142244892261cf5d95db2dd5aab9 | /Полезные методы Selenium/Основные методы Selenium/Theory/lesson4_step3.py | b4bbe357597284a386586ec8d1c159c41ed5a635 | [] | no_license | genakoganovich/stepik-python-selenium | f8471e8463e20ae5026df08c7a178595c8ce36f7 | 8c8c4ca3cb24d7cb8bacfbe43ea014c09d593267 | refs/heads/master | 2023-01-08T22:49:20.753290 | 2020-11-13T18:11:54 | 2020-11-13T18:11:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from selenium import webdriver
browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/wait1.html")
button = browser.find_element_by_id("verify")
button.click()
message = browser.find_element_by_id("verify_message")
assert "successful" in message.text
| [
"genakoganovich@gmail.com"
] | genakoganovich@gmail.com |
e9c9d5026bc592946cbec9fb06e1a0f42089d088 | cd2cb47552a5ee914898bb27e5a7eb02362c4e0d | /projecteRestaurant/urls.py | e781eaf7d9de35eb8d6aab1bfab3b45ece5918b8 | [] | no_license | Jap9/Django_Restaurant | 02bcfb46d24bd83ea10bbc4c9db6c9b17a851396 | 9f9e0d36a1368f8fb7d9faf7a3be0c1586e69544 | refs/heads/master | 2021-01-10T16:33:50.811446 | 2016-04-30T12:44:07 | 2016-04-30T12:44:07 | 54,205,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from Restaurantapp.views import *
from Restaurantapp import views
# URL routing table: maps request paths to the views imported from
# Restaurantapp above.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; on newer Django, urlpatterns should be a plain list of
# url()/path() entries.
urlpatterns = patterns('',
    url(r'^$',mainpage),  # site root
    url(r'^admin/', include(admin.site.urls)),  # Django admin
    url(r'^post/new/$', views.new_restaurant, name='new_restaurant'),
    url(r'^post/reservar/$', views.reservar_restaurant, name='reservar_restaurant'),
    # rest_pk: digits captured from the URL, passed to the view as a kwarg.
    url(r'^delete_restaurant/(?P<rest_pk>\d+)/$', views.delete_restaurant, name='delete_restaurant')
)
| [
"josepalbertpifarre@gmail.com"
] | josepalbertpifarre@gmail.com |
b4ff6d5b6215e6bf92673e7ecde69612b402be36 | 32de904cf7a14d5838fee0206600d5627b0cd0c8 | /EXIF/EXIF.py | 8dba1ce8b34950d0de757ece93e0b06c6a101461 | [] | no_license | Wildog/pythonista-toys | 087f4214eb6be61f35072829cae228bf684dab70 | 692579f7cb863b685afa4efd67f5ec29993330ec | refs/heads/master | 2020-04-18T10:11:15.914277 | 2016-09-19T18:29:11 | 2016-09-19T18:29:11 | 67,137,601 | 14 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,904 | py | import appex
from PIL import Image
from PIL.ExifTags import TAGS
from console import hud_alert
from datetime import datetime
from geopy.geocoders import Nominatim
import photos
import ui
import io
import os
def get_exif(img):
    """Extract EXIF metadata from a PIL image as a {tag_name: value} dict.

    Numeric EXIF tag ids are translated to human-readable names via
    PIL.ExifTags.TAGS; ids without a known name keep the raw integer key.

    Args:
        img: a PIL Image instance.

    Returns:
        dict mapping tag names to EXIF values; empty when the image has no
        EXIF data or does not support _getexif().
    """
    ret = {}
    try:
        info = img._getexif()
    except Exception:
        # Image types without EXIF support raise here (e.g. AttributeError).
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    else:
        # _getexif() returns None (not {}) for files with no EXIF block;
        # without this guard the loop below would crash on None.items().
        if info:
            for tag, value in info.items():
                decoded = TAGS.get(tag, tag)
                ret[decoded] = value
    return ret
def get_histogram(img):
    """Render an RGB histogram of a PIL image as a 430x180 ui.Image.

    The red, green and blue channel counts are drawn as overlapping bar
    charts, all normalized against the single tallest bin across the three
    channels.
    """
    if not img.mode.startswith('RGB'):
        img = img.convert('RGB')
    # PIL returns the concatenated per-channel histograms: 256 bins for R,
    # then 256 for G, then 256 for B.
    hist = img.histogram()
    max_h = float(max(hist))  # tallest bin; shared vertical scale
    height = 180
    with ui.ImageContext(430, height) as ctx:
        a = 1  # alpha for each channel color
        rgb = [(1, 0, 0, a), (0, 1, 0, a), (0, 0, 1, a)]
        for i, color in enumerate(rgb):
            ui.set_color(color)
            # One 2px-wide bar per bin, growing up from the bottom edge.
            for j, count in enumerate(hist[i*256:i*256+256]):
                bar_height = count / max_h * (height - 5)
                ui.fill_rect(2*j, height-bar_height, 2, bar_height)
        return ctx.get_image()
def pil2ui(imgIn):
    """Convert a PIL image to a Pythonista ui.Image via an in-memory JPEG."""
    with io.BytesIO() as buffer:
        imgIn.save(buffer, 'JPEG')
        return ui.Image.from_data(buffer.getvalue())
def size_fmt(num, suffix='B'):
    """Format a byte count as a human-readable string (binary prefixes).

    Values below 1 GiB are shown with two decimals ('2.00 KiB'); anything
    larger falls through to one decimal with the 'Gi' prefix.
    """
    value = num
    for prefix in ('', 'Ki', 'Mi'):
        if abs(value) < 1024.0:
            return f'{value:.2f} {prefix}{suffix}'
        value /= 1024.0
    return f'{value:.1f} Gi{suffix}'
def generate():
    """Populate the EXIF viewer UI from the selected or shared photo.

    The image comes either from the share-sheet extension (which also gives
    access to the file path for the size field) or, when run standalone,
    from the photo picker. All widgets written here (`img_view`,
    `size_field`, ...) are bound at module level in the `__main__` block.

    Returns:
        True when a valid image was processed, False otherwise.
    """
    # --- acquire the image -------------------------------------------------
    if not appex.is_running_extension():
        img = photos.pick_image(show_albums=True)
        size_field.text = 'Open in Extension to view file size'
    else:
        img_path = appex.get_attachments()[0]
        size_field.text = size_fmt(os.path.getsize(img_path))
        img = appex.get_image()
    if img:
        exif = get_exif(img)
        width, height = img.size
        # Shrink very large images before display to keep the UI responsive.
        if width > 3000 or height > 3000:
            img.thumbnail((1000, 1000))
        # EXIF Orientation tag -> rotation (degrees counter-clockwise).
        orientations = {
            1: 0,
            2: 0,
            3: 180,
            4: 0,
            5: 0,
            6: 270,
            7: 0,
            8: 90
        }
        if exif.get('Orientation'):
            img = img.rotate(orientations.get(exif['Orientation'], 0))
        img_view.image = pil2ui(img)
        hist = get_histogram(img)
        hist_view.image = hist
        # --- exposure highlights -------------------------------------------
        if exif.get('FocalLength'):
            # EXIF rationals arrive as (numerator, denominator) pairs.
            focal_field.text = '%d' % (exif['FocalLength'][0] / exif['FocalLength'][1])
        else:
            focal_field.text = '--'
        if exif.get('FNumber'):
            f = exif['FNumber']
            aperture_field.text = '%.1f' % (float(f[0]) / f[1])
        else:
            aperture_field.text = '--'
        if exif.get('ExposureTime'):
            shutter_field.text = '%d/%d' % exif['ExposureTime']
        else:
            shutter_field.text = '----'
        iso_field.text = str(exif.get('ISOSpeedRatings', '--'))
        # --- general metadata ----------------------------------------------
        if exif.get('DateTimeOriginal'):
            date = datetime.strptime(exif['DateTimeOriginal'], '%Y:%m:%d %H:%M:%S')
            date_field.text = date.strftime('%B %d, %Y at %H:%M')
        else:
            date_field.text = 'No date and time information'
        wh_field.text = '%d x %d (%.1fMP)' % (width, height, width * height / 1000000.0)
        camera_field.text = exif.get('Model', 'Unknown')
        lens = exif.get('LensModel', 'Unknown')
        lens_field.text = lens
        artist_field.text = exif.get('Artist', 'Unknown')
        # EXIF ExposureProgram tag values -> display names.
        programs = {
            0: 'Unknown',
            1: 'Manual',
            2: 'Program AE',
            3: 'Aperture-priority AE',
            4: 'Shutter-priority AE',
            5: 'Sleep speed',
            6: 'High speed',
            7: 'Portrait',
            8: 'Landscape',
            9: 'Bulb'}
        program_field.text = programs.get(exif.get('ExposureProgram', 0), 'Unknown')
        # EXIF Flash tag bit patterns -> display names (0x60 = no flash
        # function, also used as the fallback).
        flashes = {
            0x0: 'No Flash',
            0x1: 'Fired',
            0x5: 'Fired, Return not detected',
            0x7: 'Fired, Return detected',
            0x8: 'On, Did not fire',
            0x9: 'On, Fired',
            0xd: 'On, Return not detected',
            0xf: 'On, Return detected',
            0x10: 'Off, Did not fire',
            0x14: 'Off, Did not fire, Return not detected',
            0x18: 'Auto, Did not fire',
            0x19: 'Auto, Fired',
            0x1d: 'Auto, Fired, Return not detected',
            0x1f: 'Auto, Fired, Return detected',
            0x20: 'No flash function',
            0x30: 'Off, No flash function',
            0x41: 'Fired, Red-eye reduction',
            0x45: 'Fired, Red-eye reduction, Return not detected',
            0x47: 'Fired, Red-eye reduction, Return detected',
            0x49: 'On, Red-eye reduction',
            0x4d: 'On, Red-eye reduction, Return not detected',
            0x4f: 'On, Red-eye reduction, Return detected',
            0x50: 'Off, Red-eye reduction',
            0x58: 'Auto, Did not fire, Red-eye reduction',
            0x59: 'Auto, Fired, Red-eye reduction',
            0x5d: 'Auto, Fired, Red-eye reduction, Return not detected',
            0x5f: 'Auto, Fired, Red-eye reduction, Return detected',
            0x60: 'Unknown'}
        flash_field.text = flashes.get(exif.get('Flash', 0x60))
        # --- zoom: special-case the iPhone 7 Plus telephoto lens, then let
        # an explicit digital zoom ratio override it.
        zoom_text = 'Unknown'
        if (lens == 'iPhone 7 Plus back iSight Duo camera 6.6mm f/2.8'):
            zoom_text = '2x Optical on Duo Camera'
        if exif.get('DigitalZoomRatio'):
            z = exif['DigitalZoomRatio']
            zoom_text = '%.2gx Digital' % (float(z[0]) / z[1])
        zoom_field.text = zoom_text
        software_field.text = exif.get('Software', 'Unknown')
        # EXIF MeteringMode tag values -> icon file names bundled with the app.
        meterings = {
            0: 'average.png',
            1: 'average.png',
            2: 'center-weighted.png',
            3: 'spot.png',
            4: 'spot.png',
            5: 'average.png',
            6: 'partial.png',
            255: 'average.png'
        }
        metering_view.image = ui.Image(meterings.get(exif.get('MeteringMode', 0), 'average.png'))
        # WhiteBalance == 1 means manual white balance.
        if exif.get('WhiteBalance') == 1:
            balance_field.text = 'MWB'
        # --- GPS: convert (deg, min, sec) rationals to decimal degrees and
        # reverse-geocode them to a human-readable address.
        if exif.get('GPSInfo'):
            try:
                # GPSInfo sub-tags: 1 = latitude ref (N/S), 2 = latitude,
                # 3 = longitude ref (E/W), 4 = longitude.
                lat = [float(x)/float(y) for x, y in exif['GPSInfo'][2]]
                latref = exif['GPSInfo'][1]
                lon = [float(x)/float(y) for x, y in exif['GPSInfo'][4]]
                lonref = exif['GPSInfo'][3]
                lat = lat[0] + lat[1]/60 + lat[2]/3600
                lon = lon[0] + lon[1]/60 + lon[2]/3600
                if latref == 'S':
                    lat = -lat
                if lonref == 'W':
                    lon = -lon
                geolocator = Nominatim()
                loc_str = '%f, %f' % (lat, lon)
                # Show raw coordinates immediately; the network lookup below
                # replaces them once it completes.
                location_field.text = loc_str + '\nDecoding location...'
                location = geolocator.reverse(loc_str)
                location_field.text = location.address
            except KeyError:
                location_field.text = 'No location data found'
        else:
            location_field.text = 'No location data found'
        return True
    else:
        hud_alert('No valid photo selected', icon='error')
        return False
if __name__ == '__main__':
    # Load the 'exif' UI layout and bind every widget that generate() writes
    # into. These names are intentionally module-level globals.
    v = ui.load_view('exif')
    scroll_view = v['scrollview']
    img_view = scroll_view['imageview1']  # photo preview
    img_view.content_mode = ui.CONTENT_SCALE_ASPECT_FIT
    hist_view = scroll_view['imageview2']  # RGB histogram
    hist_view.content_mode = ui.CONTENT_SCALE_ASPECT_FIT
    container = scroll_view['container']
    highlights = container['view1']  # exposure summary strip
    focal_field = highlights['focal']
    aperture_field = highlights['aperture']
    shutter_field = highlights['shutter']
    iso_field = highlights['iso']
    date_field = container['date']
    location_field = container['location']
    size_field = container['filesize']
    wh_field = container['wh']
    camera_field = container['camera']
    lens_field = container['lens']
    artist_field = container['artist']
    program_field = container['program']
    flash_field = container['flash']
    zoom_field = container['zoom']
    software_field = container['software']
    metering_view = scroll_view['metering']
    balance_field = scroll_view['balance']
    # Present the sheet first so the fields are visible while they fill in.
    v.present('sheet')
    generate()
| [
"i@wil.dog"
] | i@wil.dog |
d2ac8ca8d2e56b54a1ed6cdcd97c0b628f951c63 | 5182897b2f107f4fd919af59c6762d66c9be5f1d | /.history/src/Simulador_20200712170607.py | 1de1eda53add0e93321c63b8d3811c1a3c54cdde | [
"MIT"
] | permissive | eduardodut/Trabalho_final_estatistica_cd | 422b7e702f96291f522bcc68d2e961d80d328c14 | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | refs/heads/master | 2022-11-23T03:14:05.493054 | 2020-07-16T23:49:26 | 2020-07-16T23:49:26 | 277,867,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,199 | py | import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix, lil_matrix
class Simulador():
SADIO = 0
INFECTADO_TIPO_1 = 1 #assintomáticos e o infectado inicial
INFECTADO_TIPO_2 = 2 #sintomático
CURADO = 3
MORTO = 4
def __init__(
self,
tamanho_matriz, #numero de linhas e colunas da matriz esférica
percentual_inicial_tipo1, #percentual inicial da população que será infectada tipo 1
percentual_inicial_tipo2, #percentual inicial da população que será infectada tipo 2
chance_infeccao, #chance que um infectado tipo 2 tem de infectar um indivíduo saudável
chance_infeccao_tipo2, #chance de um indivíduo infectado se tornar contagioso
chance_morte, #chance de um indivíduo tipo 2 morrer ao fim de uma atualização
atualizacoes_cura): #número de atualizações necessárias para a cura de um indivíduo tipo 1 ou 2
self.num_atualizacoes = 0
self.lista_infectados_tipo_2 = []
self.lista_infectados_tipo_1 = []
self.num_curados = 0
self.num_mortos = 0
self.chance_infeccao = chance_infeccao
self.chance_infeccao_tipo2 = chance_infeccao_tipo2
self.chance_morte = chance_morte
self.atualizacoes_cura = atualizacoes_cura
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = 1 + int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
self.matriz_status = np.zeros((tamanho_matriz, tamanho_matriz),dtype= np.uint8)#lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8) #
self.matriz_atualizacoes_cura = np.zeros((tamanho_matriz, tamanho_matriz),dtype= np.uint8)#lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)#
#self.matriz_status = self.df_individuos.to_numpy()
self.popular(tamanho_matriz)
self.lista_matrizes_status = []
#objeto que é responsável por validar a movimentação no grid n x n
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
#dataframe que guardará os resultados de cada atualização
self.dataframe = pd.DataFrame(dict,index = [0])
self.salvar_posicionamento()
def criar_individuo(self, status, posicao):
self.matriz_status[posicao[0], posicao[1]] = status
if status == self.INFECTADO_TIPO_1 or status == self.INFECTADO_TIPO_2:
self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = self.atualizacoes_cura
else:
self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = 0
def salvar_posicionamento(self):
self.lista_matrizes_status.append(self.matriz_status)
def verificar_infeccao(self, lista_infectantes):
lista_novos_infectados_tipo1 = []
lista_novos_infectados_tipo2 = []
#itera sobre sobre a lista de individuos que infectam e cada um realiza a tividade de infectar
for indice_infectante in lista_infectantes:
#busca os vizinhos do infectante atual
lista_vizinhos = self.matriz_esferica.get_vizinhos(indice_infectante)
#Para cada vizinho, se ele for sadio, é gerado um número aleatório para verificar se foi infectado
for indice_vizinho in lista_vizinhos:
#verificação de SADIO
if self.verifica_status(indice_vizinho) == self.SADIO:
#verificação do novo status
novo_status = self.infectar(chance_infeccao, chance_infeccao_tipo2)
#se for um infectado tipo 1
if novo_status == Individuo.INFECTADO_TIPO_1:
#adiciona na lista de novos tipo 1
lista_novos_infectados_tipo1.append(indice_vizinho)
if novo_status == Individuo.INFECTADO_TIPO_2:
#adiciona na lista de novos tipo 2
lista_novos_infectados_tipo2.append(indice_vizinho)
return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2
def checagem_morte_individual(self, chance_morte):
rng_morte = random.random()
if rng_morte <= chance_morte:
return self.MORTO
else:
return self.INFECTADO_TIPO_2
def checar_cura_individual(self, indice):
self.matriz_atualizacoes_cura[indice[0], indice[1]] = self.matriz_atualizacoes_cura[indice[0], indice[1]] - 1
if self.matriz_atualizacoes_cura[indice[0], indice[1]] == 0:
return self.CURADO
else:
return self.matriz_status[indice[0], indice[1]]
def checagem_morte_lista(self, lista_infectantes):
lista_mortos = []
for indice_infectante in lista_infectantes:
novo_status = self.checagem_morte_individual(self.chance_morte)
if novo_status == Individuo.MORTO:
lista_mortos.append(indice_infectante)
return lista_mortos
def checagem_cura_lista(self, lista_infectantes):
lista_curados = []
for indice_infectante in lista_infectantes:
novo_status = self.checar_cura_individual(indice_infectante)
if novo_status == Individuo.CURADO:
lista_curados.append(indice_infectante)
return lista_curados
def iterar(self):
#Verifica os novos infectados por infectantes do tipo 1 e 2
#print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
lista_novos_infectados_tipo1, lista_novos_infectados_tipo2 = self.verificar_infeccao(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
for indice in lista_novos_infectados_tipo1:
self.criar_individuo(self.INFECTADO_TIPO_1, indice)
for indice in lista_novos_infectados_tipo2:
self.criar_individuo(self.INFECTADO_TIPO_2, indice)
#Verifica morte dos infectados tipo 2
lista_mortos = self.checagem_morte_lista(self.lista_infectados_tipo_2)
#retira os indices dos individuos mortos da lista de infectados
self.lista_infectados_tipo_2 = [indice for indice in self.lista_infectados_tipo_2 if indice not in lista_mortos]
#Instancia individuos mortos na matriz
for indice in lista_mortos:
self.criar_individuo(self.MORTO, indice)
#atualiza o número de mortos na matriz
self.num_mortos = self.num_mortos + len(lista_mortos)
#Verifica cura dos infectados tipo 1
lista_curados_t1 = self.checagem_cura_lista(self.lista_infectados_tipo_1)
#Verifica cura dos infectados tipo 2
lista_curados_t2 = self.checagem_cura_lista(self.lista_infectados_tipo_2 )
#Instancia individuos mortos na matriz
for indice in lista_curados_t1+lista_curados_t2:
self.criar_individuo(self.CURADO, indice)
#atualiza o número de curados na matriz
self.num_curados = self.num_curados + len(lista_curados_t1 + lista_curados_t2)
#Atualiza a lista de infectados após a cura dos individuos
self.lista_infectados_tipo_1 = [indice for indice in self.lista_infectados_tipo_1 if indice not in lista_curados_t1]
self.lista_infectados_tipo_2 = [indice for indice in self.lista_infectados_tipo_2 if indice not in lista_curados_t2]
#movimentação
nova_lista_t1 = []
for indice in self.lista_infectados_tipo_1:
nova_lista_t1.append(self.mover_infectante(indice))
self.lista_infectados_tipo_1 = nova_lista_t1
#print(self.lista_infectados_tipo_1)
nova_lista_t2 = []
for indice in self.lista_infectados_tipo_2:
nova_lista_t2.append(self.mover_infectante(indice))
self.lista_infectados_tipo_2 = nova_lista_t2
#print(self.lista_infectados_tipo_2)
# matriz_infectantes = matriz_infectantes[matriz_infectantes < 3]
indices_infectados = list(zip(*np.where((self.matriz_status == 1) + (self.matriz_status == 2))))
# indices_infectados = list(zip(*self.matriz_status.nonzero()))
#indices_infectados = [indice for indice in indices_infectados if indice not in self.lista_infectados_tipo_1 + self.lista_infectados_tipo_2]
# self.num_curados = 0
#self.num_mortos = 0
self.lista_infectados_tipo_1 = []
self.lista_infectados_tipo_2 = []
#novos_t1 = []
#novos_t2 = []
for indice in indices_infectados:
#if indice not in self.lista_infectados_tipo_1 and indice not in self.lista_infectados_tipo_2:
# print(indice)
# print(self.matriz_status.shape)
status = self.matriz_status[indice[0], indice[1]]
if status == self.INFECTADO_TIPO_1:
self.lista_infectados_tipo_1.append(indice)
#novos_t1.append(indice)
if status == self.INFECTADO_TIPO_2:
self.lista_infectados_tipo_2.append(indice)
#novos_t2.append(indice)
#self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + novos_t1
#self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + novos_t2
dict = {'num_sadios': self.populacao_inicial - len(self.lista_infectados_tipo_1) -len(self.lista_infectados_tipo_2) -self.num_curados-self.num_mortos,
'num_infect_t1': len(self.lista_infectados_tipo_1),
'num_infect_t2': len(self.lista_infectados_tipo_2),
'num_curados': self.num_curados,
'num_mortos': self.num_mortos}
self.dataframe = self.dataframe.append(dict, ignore_index=True)
self.salvar_posicionamento()
#adiciona 1 ao número de atualizações realizadas na matriz
self.num_atualizacoes +=1
def infectar(self, chance_infeccao, chance_infeccao_tipo2):
saida = Individuo.SADIO
#número aleatório para chance de infectar o vizinho
rng_infeccao = random.random()
if rng_infeccao <= chance_infeccao:
#número aleatório para chance de infecção tipo 1 ou 2
rng_infeccao_tipo2 = random.random()
if rng_infeccao_tipo2 <= chance_infeccao_tipo2:
saida = Individuo.INFECTADO_TIPO_2
else:
saida = Individuo.INFECTADO_TIPO_1
return saida
def popular(self, tamanho_matriz):
#lista de possíveis combinações de índices da matriz de dados
permutacoes = permutations(list(range(tamanho_matriz)),2)
#conversão para lista de tuplas(x,y)
lista_indices = list(permutacoes)
#embaralhamento dos índices
random.shuffle(lista_indices)
#cria o primeiro tipo1:
indice = lista_indices.pop()
self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice)
self.lista_infectados_tipo_1.append(indice)
#cria o restante dos tipos 1
for i in range(self.num_inicial_tipo1-1):
indice = lista_indices.pop()
self.criar_individuo(Individuo.INFECTADO_TIPO_1,indice)
self.lista_infectados_tipo_1.append(indice)
#cria o restante dos tipo 2:
for indice in range(self.num_inicial_tipo2):
indice = lista_indices.pop()
self.criar_individuo(Individuo.INFECTADO_TIPO_2,indice)
self.lista_infectados_tipo_2.append(indice)
def trocar(self,matriz,ponto_ini,ponto_final):
x_ini = ponto_ini[0]
y_ini = ponto_ini[1]
x_fin = ponto_final[0]
y_fin = ponto_final[1]
aux = matriz[x_fin,y_fin]
matriz[x_fin,y_fin] = matriz[x_ini,y_ini]
matriz[x_ini,y_ini] = aux
def verifica_status(self, indice):
return self.matriz_status[indice[0], indice[1]]
def mover_infectante(self, posicao_inicial):
pos_x, pos_y = posicao_inicial[0], posicao_inicial[1]
rng_posicao = random.random()
if rng_posicao <=0.25:
#move pra cima
pos_x -= 1
elif rng_posicao <=0.5:
#move pra baixo
pos_x += 1
elif rng_posicao <=0.75:
#move para esquerda
pos_y -= 1
else:
#move para direita
pos_y += 1
posicao_final= self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
self.trocar(self.matriz_status, posicao_inicial, posicao_final)
self.trocar(self.matriz_atualizacoes_cura, posicao_inicial, posicao_final)
return posicao_final
proporcao_inicial_infectados = random.random()
proporcao_t1 = random.random()
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.5
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0#proporcao_t1*proporcao_inicial_infectados
percentual_inicial_tipo2 = 0#(1-proporcao_t1)*proporcao_inicial_infectados
#print("% inicial t1: ",percentual_inicial_tipo1)
#print("% inicial t2: ",percentual_inicial_tipo2)
sim = Simulador(
10,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.lista_infectados_tipo_2)
#print(sim.lista_infectados_tipo_1)
cmap = ListedColormap(['w', 'y', 'r', 'blue', 'black'])
while (sim.dataframe.iloc[-1]['num_infect_t1']+sim.dataframe.iloc[-1]['num_infect_t2']) > 0:
#plt.matshow(sim.matriz_status.toarray(), cmap = cmap, vmin= 0, vmax = 4)
# print(sim.dataframe.iloc[-1])
sim.iterar()
# print(sim.num_atualizacoes)
#print("xxxxxxxxxxxxxxxxxTipo: ",type(sim.lista_matrizes_posicionamento[len(sim.lista_matrizes_posicionamento)-1].toarray()))
#plt.matshow(sim.matriz_status.toarray(), cmap = cmap, vmin= 0, vmax = 4)
print(sim.dataframe)
plt.show()
# for i in range(30):
# #plt.matshow(sim.lista_matrizes_status[i].toarray(), cmap = cmap, vmin= 0, vmax = 4)
# sim.iterar()
# print(sim.dataframe)
# plt.show()
| [
"eduardo_dut@edu.unifor.br"
] | eduardo_dut@edu.unifor.br |
32b1c34d9bfe6e0492cae5e46eb37a434046e163 | fcee849921db7153446b2fcef5cc698b530c8c8b | /backend/tracker/migrations/0002_alter_route_timing.py | b97d828d24b8d4f7aaedff7aa2a3a63b1895e9fb | [] | no_license | josiahphua/climbdjango | 6a0cca8c66e73d114cc7a671eed43881b7b8933a | 175050853891772f1ab832df1f58a3758202d0b3 | refs/heads/main | 2023-06-29T15:48:44.416455 | 2021-07-16T04:17:48 | 2021-07-16T04:17:48 | 392,588,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # Generated by Django 3.2.5 on 2021-07-14 03:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracker', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='timing',
field=models.DecimalField(decimal_places=2, default=0, max_digits=5),
),
]
| [
"twangxh@Twangs-MBP.lan"
] | twangxh@Twangs-MBP.lan |
39ac8c429fe95711c58d28d502807484f6054868 | 58e810c96b5be58d681520744784f69d79d02f47 | /mf_module/run.py | f2dd26b62348053715714690914892226665c5f2 | [] | no_license | littleso-so/Recommender | 7e391db9878796cc89d5d6f74ab7756263e2f61f | 7ea0e475580557d91e3939f67ca0da6064eca6dc | refs/heads/master | 2020-04-02T09:16:27.610479 | 2018-09-04T03:24:53 | 2018-09-04T03:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,255 | py | '''
Created on Dec 9, 2015
@author: donghyun
'''
import argparse
import sys
from data_manager import Data_Factory
parser = argparse.ArgumentParser()
# Option for pre-processing data
parser.add_argument("-c", "--do_preprocess", type=bool,
help="True or False to preprocess raw data for ConvMF (default = False)", default=False)
parser.add_argument("-r", "--raw_rating_data_path", type=str,
help="Path to raw rating data. data format - user id::item id::rating")
parser.add_argument("-m", "--min_rating", type=int,
help="Users who have less than \"min_rating\" ratings will be removed (default = 1)", default=1)
parser.add_argument("-t", "--split_ratio", type=float,
help="Ratio: 1-ratio, ratio/2 and ratio/2 of the entire dataset (R) will be training, valid and test set, respectively (default = 0.2)", default=0.2)
# Option for pre-processing data and running ConvMF
parser.add_argument("-d", "--data_path", type=str,
help="Path to training, valid and test data sets")
parser.add_argument("-a", "--aux_path", type=str, help="Path to R, D_all sets")
# Option for running ConvMF
parser.add_argument("-k", "--dimension", type=int,
help="Size of latent dimension for users and items (default: 50)", default=100)
parser.add_argument("-u", "--lambda_u", type=float,
help="Value of user regularizer")
parser.add_argument("-v", "--lambda_v", type=float,
help="Value of item regularizer")
parser.add_argument("-P", "--lambda_p", type=float,
help="Value of l2_loss regularizer")
parser.add_argument("-Q", "--lambda_q", type=float,
help="Value of l1_loss regularizer")
parser.add_argument("-n", "--max_iter", type=int,
help="Value of max iteration (default: 200)", default=500)
parser.add_argument("-F","--flag",type=str,help="class flag",default="PMF")
parser.add_argument("-G","--momentum_flag",type=int,help="momentum_flag",default=1)
args = parser.parse_args()
do_preprocess = args.do_preprocess
data_path = args.data_path
aux_path = args.aux_path
if data_path is None:
sys.exit("Argument missing - data_path is required")
if aux_path is None:
sys.exit("Argument missing - aux_path is required")
data_factory = Data_Factory()
if do_preprocess:
path_rating = args.raw_rating_data_path
min_rating = args.min_rating
split_ratio = args.split_ratio
print "=================================Preprocess Option Setting================================="
print "\tsaving preprocessed aux path - %s" % aux_path
print "\tsaving preprocessed data path - %s" % data_path
print "\trating data path - %s" % path_rating
print "\tmin_rating: %d\n\t split_ratio: %.1f"% (min_rating, split_ratio)
print "==========================================================================================="
R =data_factory.preprocess(path_rating, min_rating)
data_factory.save(aux_path,R)
data_factory.generate_train_valid_test_file_from_R(data_path, R, split_ratio)
else:
methods = args.flag
dimension = args.dimension
lambda_u = args.lambda_u
lambda_v = args.lambda_v
lambda_p=args.lambda_p
lambda_q=args.lambda_q
max_iter = args.max_iter
momentum_flag=args.momentum_flag
if lambda_u is None:
sys.exit("Argument missing - lambda_u is required")
if lambda_v is None:
sys.exit("Argument missing - lambda_v is required")
print "===================================%s Option Setting==================================="%(methods)
print "\t approach -%s"%methods
print "\taux path - %s" % aux_path
print "\tdata path - %s" % data_path
print "\tdimension: %d\n\tlambda_u: %.4f\n\tlambda_v: %.4f\n\tmax_iter: %d\n\t" \
% (dimension, lambda_u, lambda_v, max_iter)
print "==========================================================================================="
R = data_factory.load(aux_path)
train_user = data_factory.read_rating(data_path + '/train_user.dat')
train_item = data_factory.read_rating(data_path + '/train_item.dat')
valid_user = data_factory.read_rating(data_path + '/valid_user.dat')
test_user = data_factory.read_rating(data_path + '/test_user.dat')
if methods=="PMF":
from models.PMF import PMF
PMF(max_iter=max_iter, lambda_u=lambda_u, lambda_v=lambda_v, dimension=dimension,
train_user=train_user, train_item=train_item, valid_user=valid_user, test_user=test_user, R=R)
elif methods == "BiasMF":
from models.BiasMF import BiasMF
BiasMF(max_iter=max_iter, lambda_u=lambda_u, lambda_v=lambda_v, dimension=dimension,
train_user=train_user, train_item=train_item, valid_user=valid_user, test_user=test_user, R=R,momentum_flag=momentum_flag)
elif methods == "BiasMF_Constant":
from models.BiasMF_Constant import BiasMF_Constant
BiasMF_Constant(max_iter=max_iter, lambda_u=lambda_u, lambda_v=lambda_v, dimension=dimension,
train_user=train_user, train_item=train_item, valid_user=valid_user, test_user=test_user, R=R)
elif methods == "WNMF":
from models.WNMF import WNMF
WNMF(max_iter=max_iter, train_user=train_user, train_item=train_item,valid_user=valid_user,test_user=test_user,R=R,dimension=dimension)
elif methods == "JMF-S":
from models.JMF_S import JMF_S
print "######### Test start lambda_u={},lambda_v={},lambda_p=-,lambda_q={}############".format(lambda_u,lambda_v,lambda_q)
JMF_S(max_iter=max_iter, lambda_u=lambda_u, lambda_v=lambda_v, dimension=dimension, train_user=train_user,
train_item=train_item, valid_user=valid_user, test_user=test_user, R=R,lambda_p=lambda_p,lambda_q=lambda_q,momentum_flag=momentum_flag)
elif methods == "JMF-SU":
from models.JMF_SU import JMF_SU
print "######### Test start lambda_u={},lambda_v={},lambda_p=-,lambda_q={}############".format(lambda_u,lambda_v,lambda_q)
JMF_SU(max_iter=max_iter, lambda_u=lambda_u, lambda_v=lambda_v, dimension=dimension, train_user=train_user,
train_item=train_item, valid_user=valid_user, test_user=test_user, R=R,lambda_p=lambda_p,lambda_q=lambda_q,momentum_flag=momentum_flag)
elif methods == "JONMF-P":
from models.JONMF_P import JONMF_P
print "######### Test start lambda_u={},lambda_v={},lambda_p={},lambda_q={}############".format(lambda_u,lambda_v,lambda_p,lambda_q)
JONMF_P(max_iter=max_iter,lambda_u=lambda_u, lambda_v=lambda_v, dimension=dimension, train_user=train_user,
train_item=train_item, valid_user=valid_user, test_user=test_user, R=R,lambda_p=lambda_p,lambda_q=lambda_q)
elif methods =="JMF-Double":
from models.JMF_Double import JMF_Double
JMF_Double(max_iter=max_iter,lambda_u=lambda_u, lambda_v=lambda_v, dimension=dimension, train_user=train_user,
train_item=train_item, valid_user=valid_user, test_user=test_user, R=R,lambda_p=lambda_p,lambda_q=lambda_q)
print "###### method {} end lambda_u={},lambda_v={},lambda_p={},lambda_q={} ########".format(methods,lambda_u,lambda_v,lambda_p,lambda_q)
| [
"lmm6895071@126.com"
] | lmm6895071@126.com |
120b3991e75b3590f66f073c15ed29eeaf93108e | 6a6b1ca96853bb6b49bb66329b72bf6c33472f3a | /web/django/mysite/board/models.py | 6398e1198a588cac77e5727809c3ac88b106f355 | [] | no_license | gghotted/TIL | 70e33622b142b1a0ee8cbcd8bca010cd09477cd4 | 5de1e664ec5caa1a0e769e1043f4ddabe63870a5 | refs/heads/master | 2020-11-29T19:43:09.017234 | 2020-04-25T18:07:13 | 2020-04-25T18:07:13 | 230,201,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | from django.db import models
from django.utils import timezone
class Board(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
views = models.PositiveIntegerField(default=0)
image = models.CharField(max_length=200, null=True, blank=True)
category = models.CharField(max_length=10, default='common')
class Meta:
ordering = ['-created_date']
def __str__(self):
return self.title
| [
"gghotted2@naver.com"
] | gghotted2@naver.com |
7420b28bdb41ab90ac3ba325620d4e04886c8617 | 4665d9e55f056f2f82785f4f458627c104b12d1a | /stats.py | c7b8c933db9971c9f32b2cd43799757160fe853a | [] | no_license | merfill/text_normalization | eeb1dc59fa733212d042c23f37a8f52d7a217c72 | 4175072151c0cf9467d9132be75c75b399215b43 | refs/heads/master | 2021-10-14T09:45:20.111327 | 2019-02-04T08:12:31 | 2019-02-04T08:12:31 | 108,391,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py |
import os
import csv
# read data into memory
classes = {}
print 'start reading data into memory...'
with open('data/en_train.csv', 'rb') as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if row['class'] not in classes:
classes[row['class']] = 0
classes[row['class']] += 1
for cl in classes.keys():
print '{0}: {1}'.format(cl, classes[cl])
| [
"mefrill@yandex.ru"
] | mefrill@yandex.ru |
b0dfaf8ef3d0bae96e9be256f13702376ed147c3 | d231b042ec27012e1949be74e199afadbb20396f | /gym_minigrid/envs/putnear.py | b93de26afd2c92a5de80020f58f5b3424c54852c | [
"BSD-3-Clause"
] | permissive | planetceres/gym-minigrid | 090ea02952b1ccdc49c0b6c055dd7bb4e152f6da | 5e4d517a057d11f8b9eb599398f37563654ba946 | refs/heads/master | 2021-04-15T18:55:13.339789 | 2018-04-14T15:50:49 | 2018-04-14T15:50:49 | 126,549,762 | 1 | 0 | BSD-3-Clause | 2018-04-14T15:50:50 | 2018-03-23T23:50:20 | Python | UTF-8 | Python | false | false | 3,734 | py | from gym_minigrid.minigrid import *
from gym_minigrid.register import register
class PutNearEnv(MiniGridEnv):
"""
Environment in which the agent is instructed to place an object near
another object through a natural language string.
"""
def __init__(
self,
size=6,
numObjs=2
):
self.numObjs = numObjs
super().__init__(
grid_size=size,
max_steps=5*size,
# Set this to True for maximum speed
see_through_walls=True
)
def _gen_grid(self, width, height):
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.horz_wall(0, 0)
self.grid.horz_wall(0, height-1)
self.grid.vert_wall(0, 0)
self.grid.vert_wall(width-1, 0)
# Types and colors of objects we can generate
types = ['key', 'ball', 'box']
objs = []
objPos = []
def near_obj(env, p1):
for p2 in objPos:
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
if abs(dx) <= 1 and abs(dy) <= 1:
return True
return False
# Until we have generated all the objects
while len(objs) < self.numObjs:
objType = self._rand_elem(types)
objColor = self._rand_elem(COLOR_NAMES)
# If this object already exists, try again
if (objType, objColor) in objs:
continue
if objType == 'key':
obj = Key(objColor)
elif objType == 'ball':
obj = Ball(objColor)
elif objType == 'box':
obj = Box(objColor)
pos = self.place_obj(obj, reject_fn=near_obj)
objs.append((objType, objColor))
objPos.append(pos)
# Randomize the agent start position and orientation
self.place_agent()
# Choose a random object to be moved
objIdx = self._rand_int(0, len(objs))
self.move_type, self.moveColor = objs[objIdx]
self.move_pos = objPos[objIdx]
# Choose a target object (to put the first object next to)
while True:
targetIdx = self._rand_int(0, len(objs))
if targetIdx != objIdx:
break
self.target_type, self.target_color = objs[targetIdx]
self.target_pos = objPos[targetIdx]
self.mission = 'put the %s %s near the %s %s' % (
self.moveColor,
self.move_type,
self.target_color,
self.target_type
)
def step(self, action):
preCarrying = self.carrying
obs, reward, done, info = super().step(action)
u, v = self.get_dir_vec()
ox, oy = (self.agent_pos[0] + u, self.agent_pos[1] + v)
tx, ty = self.target_pos
# If we picked up the wrong object, terminate the episode
if action == self.actions.pickup and self.carrying:
if self.carrying.type != self.move_type or self.carrying.color != self.moveColor:
done = True
# If successfully dropping an object near the target
if action == self.actions.drop and preCarrying:
if self.grid.get(ox, oy) is preCarrying:
if abs(ox - tx) <= 1 and abs(oy - ty) <= 1:
reward = 1
done = True
return obs, reward, done, info
class PutNear8x8N3(PutNearEnv):
def __init__(self):
super().__init__(size=8, numObjs=3)
register(
id='MiniGrid-PutNear-6x6-N2-v0',
entry_point='gym_minigrid.envs:PutNearEnv'
)
register(
id='MiniGrid-PutNear-8x8-N3-v0',
entry_point='gym_minigrid.envs:PutNear8x8N3'
)
| [
"maximechevalierb@gmail.com"
] | maximechevalierb@gmail.com |
baa70e074451135b2ba2795acc1426ea1a7845dd | c0ad33c6e01062582f3a4d04c9140b54915938b5 | /week-11/Day_76/same_tree.py | 07648f3f35661592d7237064b453558441ec243e | [] | no_license | ShahriarCodes/100_days_LeetCode_challenge | 9574a464b7f2c623630899bf32051c31ab7a6532 | ade6196695fa2ef3214cd79a4d4d6d8adb4fc508 | refs/heads/master | 2023-02-03T05:34:41.285159 | 2020-12-25T17:59:04 | 2020-12-25T17:59:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
if p is None or q is None:
return p == q
if p.val != q.val:
return False
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
| [
"shahriar.hasan100@gmail.com"
] | shahriar.hasan100@gmail.com |
e41a5132f505cecaa25c00592368e3e0c1418a4d | 06180bf039f24ab3d6e557b9393728a1f10c1c4e | /allocation.py | e525ef82072dd4b4a254bf6f979dde2d29bb1fb1 | [] | no_license | YwM3829/allocationbot | 4a26fb9694a18c7c0f7e775f1d5abe8f1b711a6e | 2af981b41ddbba67dc143e2d525b69c2ccd758e2 | refs/heads/master | 2020-03-18T12:46:58.452075 | 2018-05-28T13:59:16 | 2018-05-28T13:59:16 | 134,743,834 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | py | #Allocation discord bot by YwM#3829
import os
import discord
from discord.ext import commands
from discord.ext.commands import bot
import asyncio
client = discord.Client()
bot = commands.Bot(command_prefix = "#")
bot.remove_command('help')
my_user_id = "312615388854878208"
@bot.event
async def on_ready():
print("Allocation is now running!")
print("It's ID is" + (bot.user.id))
await bot.change_presence(game=discord.Game(name="https://allocationbot.weebly.com/"))
@bot.command(pass_context = True)
async def version(ctx):
await bot.say("This is Allocation pre release.")
@bot.command(pass_context = True)
@commands.has_role("Bot Admin")
async def kick(ctx, userName: discord.User):
await bot.kick(userName)
@bot.command(pass_context = True)
async def info(ctx):
await bot.say("Allocation bot pre release, made by me (YwM#3829). Only to be used in my server.")
@bot.command(pass_context = True)
@commands.has_role("Bot Owner")
async def dm(ctx, member : discord.Member, *, content: str):
await bot.send_message(member, content)
@bot.command(pass_context = True)
@commands.has_role("Bot Owner")
async def shutdown(ctx):
await bot.logout()
@bot.command(pass_context = True)
@commands.has_role("Bot Owner")
async def say(ctx, *args):
mesg = ' '.join(args)
await bot.delete_message(ctx.message)
return await bot.say(mesg)
await bot.delete_message(ctx.message)
@bot.command(pass_context=True)
@commands.has_role("Bot Owner")
async def ask(ctx, member : discord.Member, *, content: str):
await bot.send_message(member, content)
await bot.send_message(member, "Please respond with your answer to the question with #answer YOUR ANSWER")
@bot.command(pass_context=True)
async def answer(ctx, *, response):
owner = await bot.get_user_info(my_user_id)
await bot.send_message(owner, "{} responded: {}".format(ctx.message.author.name, response))
#@bot.command(pass_context=True)
#async def buy(ctx):
# await bot.send_message(ctx.message.author, "Please respond with '#sendkey YOUR GIFT CARD CODE'")
#@bot.command(pass_context=True)
#async def sendkey(ctx, *, response):
# owner = await bot.get_user_info(my_user_id)
# await bot.send_message(owner, "{} responded: {}".format(ctx.message.author.name, response))
bot.run(os.environ["TOKEN"])
#fin
| [
"noreply@github.com"
] | YwM3829.noreply@github.com |
5bc3d3b181019fbd4be3e1dd8b566cd998e88813 | c5480a7d900b1415d498701eb364ccca8ea5a862 | /ML Concepts implementations/SVM/SVM-kernel-trick-linearly-inseparable-data-part-2.py | 55ef3864a7f2226b359c6671a72cb6082f1468a4 | [] | no_license | activeskygate/Machine-Learning-Examples | 61b37e9bf2ca89e21a119da223a16a85b588fa57 | f9d7f8af019442656d6333c6289cbb26e51e4fbb | refs/heads/master | 2023-03-14T20:31:54.949612 | 2018-03-09T08:42:19 | 2018-03-09T08:42:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 22 19:27:43 2017
@author: sraghunath
Transforms the data onto a higher dimension and projects it back using the linear trick via RBF Kernel
"""
import matplotlib.pyplot as plt
import numpy as np
from plot_data import plot_decision_regions
from sklearn.svm import SVC
np.random.seed(1)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0,
X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
svm = SVC(C=10.0,kernel='rbf',gamma=0.01,random_state=1)
svm.fit(X_xor,y_xor)
plot_decision_regions(X_xor, y_xor,
classifier=svm)
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_14.png', dpi=300)
plt.show() | [
"siddharth.raghunath@hds.com"
] | siddharth.raghunath@hds.com |
d77eb9b1e235b598f39734d82b125907fdbf8480 | a11ce502842987411d6b8aa047ffa4e712b5efb9 | /ul.py | 32ff13988d4d378e846d73e08993ff7dba523322 | [] | no_license | aryavikas/Kalman-Filter-Based-Tracking-for-Channel-Aging-in-Massive-MIMO-Systems | 6d3a714192a9d579e437775a1b8f882d68d4b3fb | 022f83d727a3d97cd769ed021ea61731f43abcf1 | refs/heads/master | 2020-03-10T21:36:06.082554 | 2018-04-27T06:08:52 | 2018-04-27T06:08:52 | 129,598,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | from pylab import *
from matplotlib import rc,rcParams
import matplotlib.pyplot as plt
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
import numpy as np
x = [20,30,40,50,60,70]
datal1=[19.0376,22.5148,25.0996,26.7576,28.6828,31.1158]
datal3=[25.4709,29.7743,33.2772,35.7501,39.031,41.3629]
datal9=[35.3015,41.5615,46.1395,49.108,51.7216,53.1989]
datal15=[42.598,48.7627,52.8551,56.6696,57.6845,60.318]
plt.hold(True)
line1, = plt.plot(x,datal1, 'r^-', label="$l = 1$")
line2, = plt.plot(x,datal3, 'mo-', label="$l = 3$")
line3, = plt.plot(x,datal9, 'g*-', label="$l = 9$")
line4, = plt.plot(x,datal15, 'bv-', label="$l = 15$")
first_legend = plt.legend(handles=[line1,line2,line3,line4], loc='best')
plt.xlabel('Number of antennas')
plt.ylabel('Average Sum Rate [bps/Hz]')
plt.grid()
plt.show()
| [
"noreply@github.com"
] | aryavikas.noreply@github.com |
75e9c141a537af48687fa3c58e33c21199d6dd77 | 5e277a32c166ae45bea28310074dc459a0d99cf6 | /.metadata/.plugins/org.eclipse.core.resources/.history/a2/3054477d7e9b00161299af2c090fd9cc | 0d19a34a5371b728d5025ab1101811006b6ba44d | [] | no_license | vgvcode/pos | 4d7172d7905f60157fcae445c650475d17a9a390 | a9dba2c5c3fc8c4529c6619a3dc92c9608a4c70d | refs/heads/master | 2021-01-13T13:12:37.833510 | 2016-11-02T22:28:42 | 2016-11-02T22:28:42 | 72,686,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,011 | #!/usr/bin/python
from __future__ import print_function # Python 2/3 compatibility
import json
from decimal import *
import time
import uuid
import boto3
from copy import deepcopy
from boto3.dynamodb.conditions import Key
import commonmodule
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders decimal.Decimal values (as returned by
    DynamoDB) as plain JSON numbers: integral values become ints, values
    with a fractional part become floats."""

    def default(self, o):
        if isinstance(o, Decimal):
            # Integral Decimals are emitted without a trailing ".0".
            return float(o) if o % 1 > 0 else int(o)
        # Anything we do not recognise is handed to the base class.
        return super(DecimalEncoder, self).default(o)
class CatalogSchema:
    """DDL operations (create / delete table) for the DynamoDB catalog table."""
    tableName = 'catalog'

    def __init__(self, endpoint="http://localhost:8000"):
        """Connect to DynamoDB at `endpoint` and bind the catalog table.

        BUG FIX: this method was previously named `__init` (missing trailing
        underscores), so it never ran as the constructor and instances were
        left without their __dynamodb/__table attributes.
        """
        self.__endpoint = endpoint
        self.__dynamodb = boto3.resource('dynamodb', endpoint_url=endpoint)
        self.__table = self.__dynamodb.Table(CatalogSchema.tableName)

    def createTable(self):
        """Create the catalog table with partition key CatalogID and sort key
        ItemID.  Returns True on success, False on failure.
        """
        try:
            self.__table = self.__dynamodb.create_table(
                TableName=CatalogSchema.tableName,
                KeySchema=[
                    {
                        'AttributeName': 'CatalogID',
                        'KeyType': 'HASH'  # Partition key
                    },
                    {
                        'AttributeName': 'ItemID',
                        'KeyType': 'RANGE'  # Sort key
                    }
                ],
                AttributeDefinitions=[
                    {
                        'AttributeName': 'CatalogID',
                        'AttributeType': 'S'
                    },
                    {
                        'AttributeName': 'ItemID',
                        'AttributeType': 'S'
                    },
                ],
                ProvisionedThroughput={
                    'ReadCapacityUnits': 10,
                    'WriteCapacityUnits': 10
                }
            )
            return True
        except Exception as e:
            # Robustness fix: only botocore's ClientError carries `.response`;
            # falling back to str(e) keeps the handler itself from raising.
            # (The old `return` inside `finally` also silently swallowed such
            # secondary errors and reported success on failure.)
            print(getattr(e, 'response', {}).get('Error', {}).get('Message', str(e)))
            return False

    def deleteTable(self):
        """Drop the catalog table.  Returns True on success, False on failure."""
        try:
            self.__table.delete()
            return True
        except Exception as e:
            print(getattr(e, 'response', {}).get('Error', {}).get('Message', str(e)))
            return False
class Catalog:
    """CRUD operations for one catalog (one partition of the DynamoDB
    'catalog' table), mirrored in an in-memory dict for fast reads.

    All mutating methods return True on success and False on failure;
    errors are printed rather than raised.
    """
    tableName = 'catalog'

    def __init__(self, catalogId, endpoint="http://localhost:8000"):
        """Bind to the catalog partition `catalogId` on the given endpoint."""
        self.__endpoint = endpoint
        self.__dynamodb = boto3.resource('dynamodb', endpoint_url=endpoint)
        self.__table = self.__dynamodb.Table(Catalog.tableName)
        self.__catalogId = catalogId
        # In-memory mirror of the partition: {ItemID: Info-dict}.
        self.__items = {}

    @staticmethod
    def _error_message(e):
        # Only botocore's ClientError carries `.response`; fall back to
        # str(e) so printing an unexpected exception cannot itself raise.
        # (Previously, a non-boto exception crashed the handler and the
        # `return` in `finally` then reported success on failure.)
        return getattr(e, 'response', {}).get('Error', {}).get('Message', str(e))

    def get(self):
        """Return a summary dict of this catalog's configuration and items."""
        return {
            "catalogId": self.__catalogId,
            "endpoint": self.__endpoint,
            "tableName": Catalog.tableName,
            "items": self.__items
        }

    def load(self, fileName):
        """Bulk-load catalog items from the JSON file `fileName` into DynamoDB.

        Floats are parsed as Decimal because DynamoDB rejects Python floats.
        Returns True on success, False on failure.
        """
        try:
            with open(fileName) as json_file:
                catalog = json.load(json_file, parse_float=Decimal)
            for catItem in catalog:
                CatalogID = catItem['CatalogID']
                ItemID = catItem['ItemID']
                Info = catItem['Info']
                print("Adding item:", CatalogID, ItemID, Info)
                self.__table.put_item(
                    Item={
                        'CatalogID': CatalogID,
                        'ItemID': ItemID,
                        'Info': Info
                    }
                )
            return True
        except Exception as e:
            print(self._error_message(e))
            return False

    def fetchFromDB(self):
        """Replace the in-memory mirror with this partition's current rows.

        Returns True on success, False on failure (mirror left empty).
        """
        self.__items = {}
        try:
            response = self.__table.query(KeyConditionExpression=Key('CatalogID').eq(self.__catalogId))
            for r in response['Items']:
                self.__items[r['ItemID']] = r['Info']
            return True
        except Exception as e:
            print(self._error_message(e))
            return False

    def addItem(self, idy, name, price):
        """Insert (or overwrite) item `idy` with the given name and price.

        Returns True on success, False on failure.
        """
        print('Adding to DB: {}, {}, {}'.format(idy, name, price))
        createdTicks = time.time()
        createdTime = time.asctime(time.localtime(createdTicks))
        info = {
            'ItemId': idy,
            'CreatedTime': createdTime,
            'CreatedTicks': Decimal(createdTicks),
            'UpdatedTime': "0",
            'UpdatedTicks': Decimal(0),
            'Name': name,
            'Price': commonmodule.money(price)
        }
        try:
            self.__table.put_item(
                Item={
                    'CatalogID': self.__catalogId,
                    'ItemID': idy,
                    'Info': info
                }
            )
            # Keep the in-memory mirror in sync.
            self.__items[idy] = info
            return True
        except Exception as e:
            print(self._error_message(e))
            return False

    def getItems(self):
        """Return the in-memory {ItemID: Info} mapping."""
        return self.__items

    def updatePrice(self, itemId, price):
        """Update an item's price and its Updated* timestamps in DynamoDB and
        in the in-memory mirror.  Returns True on success, False on failure.
        """
        updatedTicks = time.time()
        updatedTime = time.asctime(time.localtime(updatedTicks))
        try:
            self.__table.update_item(
                Key={
                    'CatalogID': self.__catalogId,
                    'ItemID': itemId,
                },
                UpdateExpression="set Info.Price = :p, Info.UpdatedTime = :u, Info.UpdatedTicks = :t",
                ExpressionAttributeValues={
                    ':p': commonmodule.money(price),
                    ':u': updatedTime,
                    ':t': Decimal(updatedTicks),
                },
                ReturnValues="UPDATED_NEW"
            )
            print("Item updated")
            # Mirror the update locally.  NOTE: UpdatedTicks is cached as a
            # float here while DynamoDB stores a Decimal; callers comparing
            # the two should convert first.
            self.__items[itemId]['Price'] = commonmodule.money(price)
            self.__items[itemId]['UpdatedTime'] = updatedTime
            self.__items[itemId]['UpdatedTicks'] = updatedTicks
            return True
        except Exception as e:
            print(self._error_message(e))
            return False

    def removeItem(self, itemId):
        """Delete item `itemId` from DynamoDB and from the in-memory mirror.

        BUG FIX: this method previously initialised its result to False and
        never set it to True, so it reported failure even when the delete
        succeeded.  Returns True on success, False on failure.
        """
        try:
            self.__table.delete_item(
                Key={
                    'CatalogID': self.__catalogId,
                    'ItemID': itemId,
                },
            )
            del self.__items[itemId]
            return True
        except Exception as e:
            print(self._error_message(e))
            return False

    def print(self):
        """Dump every cached item and a summary count to stdout."""
        for itm in self.__items:
            print('{}: {}'.format(itm, self.__items[itm]))
        print('There are {} items in the catalog'.format(len(self.__items)))
| [
"vgvcode@gmail.com"
] | vgvcode@gmail.com | |
f37cb3349f2258ce131ece0a60b7c4eee6a8f4bf | cda974da2632b8486fbe8373af337826d73f614b | /src/stse/__init__.py | 1fe5881e1852a07e68576afa848cef476195760d | [] | no_license | sstoma/stse | 0acd03e2c7d2b6ed480ab9843a3206ac16633269 | 6b51cd71b55fb68092fd67deebf08d5e34f1519f | refs/heads/master | 2020-05-17T08:51:27.471813 | 2015-01-04T20:58:02 | 2015-01-04T20:58:02 | 28,810,112 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | import core
import tools
import growth
import gui
import io
import structures
import visu
| [
"sstoma@users.noreply.github.com"
] | sstoma@users.noreply.github.com |
67be99880fd7439a17fda45cd702fd09abb7a76b | 7e43652b347e601b3ff35ea28e838e334d84ee32 | /myenv/bin/django-admin.py | f0f0008dffe66e2458827f5288079961524886de | [] | no_license | PunjabiAI/aivaid | 3b187200750af6503d6d4f4661ce7c1e6489b4d1 | a1e69e7ed1295bf90a965b5dd68f91c9885bf5f6 | refs/heads/master | 2023-06-02T16:19:14.824238 | 2021-06-17T07:45:08 | 2021-06-17T07:45:08 | 377,430,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | #!/home/webtunix/Music/apiwork/aivaid/myenv/bin/python3
from django.core.management import execute_from_command_line

if __name__ == "__main__":
    # Hand the command line straight to Django's management dispatcher.
    execute_from_command_line()
| [
"priyanka@webtunix.com"
] | priyanka@webtunix.com |
0e857fadcc4938ff913f75f2ed8f300b41fd2a8f | 54f0008346411fd389e9b656daa42faf0a3ece3e | /sets/models.py | 23437cefda2f2a6f3f311b705e9a35b01b65d037 | [] | no_license | abitty/altergot | 7fd75dfa414719be44946e15426fe62ccd60180a | 6982d5a564ea0c83e257e50cb317a0a0b65471c8 | refs/heads/master | 2021-12-12T10:24:58.059234 | 2021-09-05T12:14:09 | 2021-09-05T12:14:09 | 105,701,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | from django.db import models
from django.utils.http import urlencode
from django.urls import reverse
# Create your models here.
class Collection(models.Model):
    """A user's collection of either coins ('CN') or banknotes ('BN')."""
    COLLECTION_CHOICES = (
        ('CN','Монеты'),
        ('BN','Купюры'),
    )
    owner = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    kind = models.CharField("Тип", default='CN', max_length=2, choices=COLLECTION_CHOICES, blank=False)
    name = models.CharField("Название", max_length=128, blank=False)
    public = models.BooleanField("Показывать", default=True)

    def __str__(self):
        return self.name

    def coll_by_kind(self, akind):
        """Return all public collections of kind `akind`, ordered by name."""
        return Collection.objects.filter(kind=akind, public=True).order_by('name')

    def url(self):
        """Build the selection URL for this collection (coins vs banknotes).

        NOTE: an unknown `kind` still raises UnboundLocalError, matching the
        original behaviour; only 'CN' and 'BN' are valid choices.
        """
        lq = {}
        lq['coll'] = self.id
        if self.kind == 'CN':
            lc = reverse('sel') + '?' + urlencode(lq) + '&clr=1'
            print("lc=", lc)  # debug output kept from the original
        elif self.kind == 'BN':
            lc = reverse('bsel') + '?' + urlencode(lq) + '&clr=1'
            print("lc=", lc)  # debug output kept from the original
        return lc

    def coins_coll(self):
        """All public coin collections.

        BUG FIX: this previously called self.coll_by_kind(self, 'CN'),
        passing `self` twice and raising TypeError at runtime.
        """
        return self.coll_by_kind('CN')

    def bones_coll(self):
        """All public banknote collections (same fix as coins_coll)."""
        return self.coll_by_kind('BN')

    class Meta:
        verbose_name = "Коллекция"
        verbose_name_plural = "Коллекции"
| [
"abitty@ya.ru"
] | abitty@ya.ru |
a946e22c2ec36535b3110adba013e4c8add97df0 | 4ddb2e0f79e69350179e50cc0cd220dc51de6ca6 | /pages/panel.py | 88a3af56c649cd482e50a53bb7bfe1dbbc1b8bd3 | [] | no_license | 384782946/ProjectWizard | 08ff9db2c95056b32717b6159c203040d73882e5 | 985163fe2d3f611be1997ec27b954d77a3b809d6 | refs/heads/master | 2021-01-09T20:42:32.185239 | 2017-04-24T07:38:15 | 2017-04-24T07:38:15 | 62,059,098 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | # coding:utf-8
'''
作者:张潇健
日期:2016-6-14
概述:列举之前向导的用户设定
'''
import app
from PyQt4.QtGui import QWizardPage,QHBoxLayout,QVBoxLayout,QLabel,QLineEdit,QPushButton,QListWidget,QDateEdit,QFileDialog
from PyQt4.QtCore import QStringList,Qt,QDate
class PanelPage(QWizardPage):
    """Summary wizard page: shows a brief overview of the project
    configuration chosen on the previous pages and the files that are about
    to be generated."""
    def __init__(self):
        super(PanelPage,self).__init__()
        # Title/subtitle strings are user-facing (Chinese UI) and kept as-is.
        self.setTitle('生成信息')
        self.setSubTitle('显示工程配置的简要信息和即将生成的文件')
    def initializePage(self):
        """Build the page UI each time the page is shown, reading the library
        dependencies and target file list from the global configuration."""
        super(PanelPage, self).initializePage()
        rootLayout = QVBoxLayout()
        rootLayout.setContentsMargins(20, 30, 20, 30)
        # Row 0: the list of dependent libraries.
        row0 = QHBoxLayout()
        lable0 = QLabel(' 依赖库:')
        lable0.setAlignment(Qt.AlignTop | Qt.AlignHCenter)
        self.lw_files = QListWidget()
        items0 = QStringList()
        for moudel in app.g_configurations.libs:
            items0.append(moudel['name'])
        self.lw_files.addItems(items0)
        row0.addWidget(lable0)
        row0.addWidget(self.lw_files)
        # Row 1: the list of project files to be generated.
        # NOTE(review): self.lw_files is re-assigned below, so the attribute
        # ends up referencing only the second list widget; the first one stays
        # owned by the layout but is no longer reachable via self.
        row1 = QHBoxLayout()
        lable1 = QLabel('工程文件:')
        lable1.setAlignment(Qt.AlignTop | Qt.AlignHCenter)
        self.lw_files = QListWidget()
        items1 = QStringList()
        for file in app.g_configurations.config['files']:
            items1.append(file['target'])
        self.lw_files.addItems(items1)
        row1.addWidget(lable1)
        row1.addWidget(self.lw_files)
        rootLayout.addLayout(row0)
        rootLayout.addLayout(row1)
        self.setLayout(rootLayout)
    def validatePage(self):
        # Nothing to validate on a read-only summary page.
        return True
"384782946@qq.com"
] | 384782946@qq.com |
61b2cf414d60349c206376834ad4464683e92bd6 | 328c9ba6eccc7ef2c0916be369f8bc0cd954cea8 | /code/run_grid.py | 95df865342e70c448612b08de0d24153ef101c2b | [
"MIT"
] | permissive | caioc2/dengue_release | 933a5bcb7fbbc66d5b092978969d72dcab4fef17 | 316a8da7b166f0a9a985ee1817d79f1ae6ae6e4a | refs/heads/master | 2023-08-29T21:27:25.015560 | 2021-11-12T16:47:49 | 2021-11-12T16:51:16 | 300,068,437 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 27,260 | py | import pandas as pd
from glob import glob
import numpy as np
import os
import sys
import math
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from scipy.spatial.distance import cdist
from sklearn.preprocessing import normalize
from sklearn.base import BaseEstimator, TransformerMixin
from scipy.signal import find_peaks
import warnings
import matplotlib
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
import timeit
import csv
import time
from scipy.stats import entropy
import re
# Silence noisy library warnings for the long batch run.
warnings.filterwarnings("ignore")
# Non-interactive backend so figures can be rendered/saved headlessly.
# NOTE(review): matplotlib.use() is called after pyplot has already been
# imported above; on older matplotlib versions the backend switch may not
# take effect — consider moving it before the pyplot import.
matplotlib.use("Agg")
######################################
# Begin of Utils
######################################
def gaussian_kernel(n):
    """Return a normalized Gaussian window of length 2*n + 1.

    The standard deviation is tied to the support (sigma = (2n+1)/6), so the
    window covers roughly +/- 3 sigma.
    """
    x = np.linspace(-n, n, 2 * n + 1)
    sigma = (2 * n + 1) / 6
    y = 1 / (np.sqrt(np.pi) * sigma) * np.exp(-((x / sigma) ** 2) / 2)
    return y / np.sum(y)

def process_data(temp, precip, base, n):
    """Decompose each yearly series into `n` multi-scale levels.

    Level i holds the difference between two successive Gaussian smooths of
    the original series (kernel radius base**(i+1)); the last level keeps the
    coarsest smooth, so summing a series' levels reconstructs it exactly.

    Parameters
    ----------
    temp, precip : arrays shaped (years, days).
    base : growth factor of the kernel radius per level.
    n : number of levels.

    Returns
    -------
    (filt_t, filt_p) : arrays shaped (years, n, days).

    BUG FIX: the precipitation bands were previously differenced against the
    *temperature* smooths (`old_t`) instead of the precipitation smooths, so
    `filt_p` contained temperature information only.
    """
    nrow, ncol = temp.shape  # (years, days of year)
    filt_t = np.zeros((nrow, n, ncol))
    filt_p = np.zeros((nrow, n, ncol))
    old_t = np.copy(temp)
    old_p = np.copy(precip)
    for i in range(0, n - 1):  # band index; kernel radius grows geometrically
        j = int(np.round(base ** (i + 1)))
        kg = gaussian_kernel(j)
        # When the kernel is wider than the series, 'same' convolution
        # returns the kernel-sized result; displ re-centers it on the series.
        displ = max(((2 * j + 1 - ncol) // 2, 0))
        for l in range(0, nrow):  # year
            c_t = np.convolve(temp[l, :], kg, mode='same')[displ:(displ + ncol)]
            c_p = np.convolve(precip[l, :], kg, mode='same')[displ:(displ + ncol)]
            filt_t[l, i, :] = old_t[l, :] - c_t
            filt_p[l, i, :] = old_p[l, :] - c_p  # FIX: was old_t
            old_t[l, :] = c_t
            old_p[l, :] = c_p
    # Last level keeps the coarsest smooth of each channel.
    for l in range(0, nrow):
        filt_t[l, n - 1, :] = old_t[l, :]
        filt_p[l, n - 1, :] = old_p[l, :]  # FIX: was old_t
    return filt_t, filt_p
def pack_data(x_temp, x_precip, start_date, n_days, l):
    """Flatten a (band-range x day-window) patch of the temperature and
    precipitation features into one row per year.

    The temperature patch fills the first half of each row and the
    precipitation patch the second half; when the window runs past the end
    of the year the remaining slots stay zero.
    """
    n_years = x_temp.shape[0]
    n_all_days = x_temp.shape[2]
    band_lo, band_hi = l[0], l[1]
    half = n_days * (band_hi - band_lo)
    packed = np.zeros((n_years, 2 * half))
    # Clip the day window to the end of the series.
    day_lo = min(start_date, n_all_days)
    day_hi = min(start_date + n_days, n_all_days)
    patch_len = (band_hi - band_lo) * (day_hi - day_lo)
    temp_patch = x_temp[:, band_lo:band_hi, day_lo:day_hi].reshape(n_years, -1)
    packed[:, 0:min(patch_len, half)] = temp_patch
    precip_patch = x_precip[:, band_lo:band_hi, day_lo:day_hi].reshape(n_years, -1)
    packed[:, half:min(patch_len + half, 2 * half)] = precip_patch
    return packed
def make_grid(x_t_train, x_t_val, x_t_test, x_p_train, x_p_val, x_p_test, y_d_train, y_d_val, y_d_test, n_days, l, model):
    """Slide an `n_days` window across the year, refit `model` at every start
    day, and score it on the train / validation / test splits.

    Parameters
    ----------
    x_t_*, x_p_* : arrays shaped (years, levels, days) of temperature and
        precipitation features (output of process_data).
    y_d_* : binary outbreak labels, one per year/row.
    n_days : window width in days.
    l : (lo, hi) band range forwarded to pack_data.
    model : scikit-learn style estimator exposing fit/predict.

    Returns
    -------
    (m_test, m_val, m_train) : dicts mapping metric name ('accuracy', 'f1',
        'recall', 'precision') to an array with one score per start day.

    Refactor: the original computed each of the 12 metric/split combinations
    with its own near-identical statement; they are consolidated here.
    """
    days = x_t_test.shape[2]
    n_starts = days - n_days
    metric_fns = {
        'accuracy': accuracy_score,
        'f1': f1_score,
        'recall': recall_score,
        'precision': precision_score,
    }
    m_train = {name: np.zeros(n_starts) for name in metric_fns}
    m_val = {name: np.zeros(n_starts) for name in metric_fns}
    m_test = {name: np.zeros(n_starts) for name in metric_fns}
    for start in range(n_starts):
        X_train = pack_data(x_t_train, x_p_train, start, n_days, l)
        X_val = pack_data(x_t_val, x_p_val, start, n_days, l)
        X_test = pack_data(x_t_test, x_p_test, start, n_days, l)
        model.fit(X_train, y_d_train)
        # Score every split with every metric at this window position.
        for scores, y_true, X in ((m_train, y_d_train, X_train),
                                  (m_val, y_d_val, X_val),
                                  (m_test, y_d_test, X_test)):
            y_pred = model.predict(X)
            for name, fn in metric_fns.items():
                scores[name][start] = fn(y_true, y_pred)
    return m_test, m_val, m_train
def progress(count, total, suffix=''):
    """Draw an in-place console progress bar (carriage-return overwrite)."""
    width = 30
    # Adding epsilon avoids division by zero when total == 0.
    denom = float(total + sys.float_info.epsilon)
    filled = int(round(width * count / denom))
    percents = round(100.0 * count / denom, 1)
    bar = '=' * filled + '-' * (width - filled)
    sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
    sys.stdout.flush()
def formatTime(t):
    """Render a duration of `t` seconds as 'H:MM:SS'."""
    total_minutes = int(t / 60)
    hours = int(total_minutes / 60)
    seconds = int(math.fmod(t, 60))
    minutes = int(math.fmod(total_minutes, 60))
    return "%d:%02d:%02d" % (hours, minutes, seconds)
"""
Diffusion Maps class scikit-learn style
eps - floating point value, default=None(automatic)
Epsilon power to which the distance matrix is raised.
It controls the ammount of connections between points
features - tuple (a,b) int values, default=None(use all features)
Selects which range of features are returned when tranforming the data.
When None is set, return all available features.
Given data with n samples and m features, ie, X(n,m), the maximun
number of available features is **n**(yes n 'samples' not m 'features'
for this transform)
"""
class DiffusionMaps(BaseEstimator, TransformerMixin):
    """Diffusion-maps embedding, scikit-learn transformer style.

    See the module-level notes above for the meaning of `eps` (kernel
    bandwidth; None means it is found automatically by bisection) and
    `features` (range of embedding coordinates returned by transform).
    """
    def __init__(self, eps=None, features=None, target_std=0.1, min_eps=0.001, max_eps=1000):
        self._eps = eps
        if(features is not None):
            # Expand the (a, b) range into an explicit list of column indices.
            features = [i for i in range(features[0], features[1])]
        self._features = features
        self._target_std = target_std
        self._min_eps = min_eps
        self._max_eps = max_eps
    def fit(self, X, y = None ):
        """Build the row-normalized diffusion kernel over X and keep its
        (real parts of) eigenvalues/eigenvectors, sorted descending."""
        self.X = X.copy()
        # Pairwise squared Euclidean distances between training samples.
        D = cdist(X, X, 'sqeuclidean')
        self.D=D
        if(self._eps is None):
            self._eps = self.__find_eps()
        E = np.exp(-D / (self._eps**2))
        # Row-stochastic transition matrix of the diffusion process.
        self.P = normalize(E, axis=1, norm='l1')
        e, V = np.linalg.eig(self.P)
        # Sort eigenpairs by descending (real) eigenvalue.
        idx = np.argsort(e.real)
        e = e.real[idx[::-1]]
        V = V.real[:, idx[::-1]]
        self.e = e
        self.V = V
        return self
    def transform(self, X, y = None):
        """Nystroem-style out-of-sample embedding of new rows X against the
        fitted training set; returns the selected feature columns."""
        D = cdist(X, self.X, 'sqeuclidean')
        E = np.exp(-D / (self._eps**2))
        P = normalize(E, axis=1, norm='l1')
        if(self._features is None):
            return P.dot(self.V).dot(np.diag(1.0 / self.e))
        else:
            return P.dot(self.V).dot(np.diag(1.0 / self.e))[:, self._features]
    def __find_eps(self):
        """Bisect eps in [min_eps, max_eps] so that the standard deviation of
        the kernel entries approaches (1/n) * target_std / 6."""
        n = self.D.shape[0]
        target = (1 / n) * self._target_std / 6;
        i = 0
        maxit = 100
        a = self._max_eps
        b = self._min_eps
        P = normalize(np.exp(-self.D / (a**2)), axis=1, norm='l1')
        fa = np.std(P.ravel())
        P = normalize(np.exp(-self.D / (b**2)), axis=1, norm='l1')
        fb = np.std(P.ravel())
        # std of the kernel decreases as eps grows, so standard bisection
        # applies; stop when the bracket is tight or maxit is hit.
        while((abs(fa - fb) > (target/50)) and i < maxit):
            c = (a+b)/2
            i = i + 1
            P = normalize(np.exp(-self.D / (c**2)), axis=1, norm='l1')
            fc = np.std(P.ravel())
            if(fc < target):
                a = c
                fa = fc
            else:
                b = c
                fb = fc
        if(i >= maxit):
            print("(Diffusion Maps)Reached max iterations without finding eps")
        return c
    def getPOrdered(self):
        """Return P with rows/columns ordered by the second eigenvector
        (the classic spectral seriation view of the kernel)."""
        idx = np.argsort(self.V[:,1])
        P = self.P[:, idx]
        P = P[idx, :]
        return P
    def getP(self):
        """Return the fitted transition matrix P."""
        return self.P
    def hist(self):
        """Plot the sorted distance distribution (diagnostic for eps)."""
        y = np.sort(self.D.reshape(-1))
        x = np.linspace(0,1, num=len(y))
        plt.plot(x,y)
    def getE(self):
        """Return the sorted (descending) eigenvalues."""
        return self.e
def index2Date(index):
    """Translate a 0-based day offset into a 'Mon D' label, with the year
    starting on June 1 (the month tables are rolled by -5)."""
    month_lengths = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    month_names = ("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    lengths = np.roll(month_lengths, -5)
    names = np.roll(month_names, -5)
    bounds = np.cumsum(lengths)
    for length, bound, name in zip(lengths, bounds, names):
        if index < bound:
            return name + " " + str(1 + length - (bound - index))
    return "error"
def format_plot(ylim=True):
    """Apply the shared axis styling: month tick labels (year starting in
    June), axis titles, a grid, and optionally a fixed [0, 1] score range."""
    month_lengths = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    month_names = ("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    plt.xlabel("Date", fontsize=22)
    plt.ylabel("Score", fontsize=22)
    tick_pos = np.roll(month_lengths, -4)
    tick_pos[0] = 5  # nudge the first tick away from the axis edge
    tick_pos = np.cumsum(tick_pos)
    tick_lab = np.roll(month_names, -5)
    # Only the first nine months fit in the plotted range.
    plt.xticks(tick_pos[0:9], tick_lab[0:9], fontsize=18)
    plt.grid()
    if ylim:
        plt.yticks(np.arange(0, 1.01, step=0.1), fontsize=18)
        plt.ylim((0.0, 1.01))
def plot_confidence(m, s, color):
    """Plot mean curve `m` with a +/- 3-sigma shaded band; `color` is a hex
    string and the band reuses it with alpha suffix '33'."""
    xs = np.linspace(1, len(m), len(m))
    spread = 3 * s
    plt.fill_between(xs, m + spread, m - spread, color=color + '33')
    plt.plot(xs, m, color=color, linewidth=3)
def chi2_distance(A, B):
    """Symmetric chi-squared distance between two histograms/curves."""
    terms = [((a - b) ** 2) / (a + b) for (a, b) in zip(A, B)]
    return 0.5 * np.sum(terms)
def write_line(csv_file, data, name):
    """Append one CSV row to an open file object: the row label `name`
    followed by the values in `data`."""
    row = [name]
    row.extend(data)
    csv.writer(csv_file, delimiter=',').writerow(row)
######################################
# End of Utils
######################################
######################################
# Begin of Grid Model Generation
######################################
# For every city: load daily temperature/precipitation and yearly dengue
# incidence, build multi-scale features, generate a noisy validation set,
# then grid-search window sizes / band ranges and save per-day scores.
#input files
path = "data";
grid_path = os.path.join("results", "intermediate", "grid")
temp_file = "temp_avg.csv"
precip_file = "precip.csv"
dengue_file = "dengue.csv"
#Cities name
states = glob(os.path.join("..", path, "*/"))
##
#Run for each city
##
for which_state in states:
    ##
    #Read csv files
    ##
    temp = pd.read_csv(os.path.join(which_state,temp_file))
    precip = pd.read_csv(os.path.join(which_state,precip_file))
    dengue = pd.read_csv(os.path.join(which_state,dengue_file))
    ##
    #get out of sample test data
    ##
    # First 11 years train, the rest is held out as out-of-sample test.
    precip_out = precip.iloc[11:]
    temp_out = temp.iloc[11:]
    dengue_out = dengue.iloc[11:]
    ##
    #get train data
    ##
    precip = precip.iloc[0:11]
    temp = temp.iloc[0:11]
    dengue = dengue.iloc[0:11]
    # Standardization statistics come from the training years only.
    t_m = np.mean(temp.values)
    t_s = np.std(temp.values)
    p_m = np.mean(precip.values)
    p_s = np.std(precip.values)
    ##
    #create output path
    ##
    state_name = which_state.split(os.sep)[2]
    rpath = os.path.join("..", grid_path, state_name)
    print("Grid search for " + state_name + "\n\n\n")
    if not os.path.exists(rpath):
        os.makedirs(rpath)
        time.sleep(1)
    ##
    #Process the data
    ##
    outbreak_threshold = 100.0 #incidence threshold
    x_temp = (temp.values - t_m) / t_s
    x_precip = (precip.values - p_m) / p_s
    # Binary label: 1 when the yearly incidence crosses the threshold.
    y_dengue = np.zeros((len(dengue['incidence'])), dtype=int)
    y_dengue[dengue['incidence'] >= outbreak_threshold] = 1
    x_temp_out = (temp_out.values - t_m) / t_s
    x_precip_out = (precip_out.values - p_m) / p_s
    y_out = np.zeros((len(dengue_out['incidence'])), dtype=int)
    y_out[dengue_out['incidence'] >= outbreak_threshold] = 1
    #Data processing parameters
    base = np.sqrt(2) #filter suport base
    n_levels = 18 #number of levels to generate
    x_t_train, x_p_train = process_data(x_temp, x_precip, base, n_levels)
    x_t_out, x_p_out = process_data(x_temp_out, x_precip_out, base, n_levels)
    ##
    #Generation of validation noisy data
    ##
    # Each training year is replicated n_rep times with Gaussian noise added
    # in the raw units, then standardized and decomposed like the train set.
    n_rep = 2000 #number of noise data repetitions
    precip_rep = np.repeat(precip.values, n_rep, axis=0)
    temp_rep = np.repeat(temp.values, n_rep, axis=0)
    y_rep = np.repeat(y_dengue, n_rep, axis=0)
    size = precip_rep.shape
    precip_noise = np.random.normal(0.0, 1.0, size=size)
    temp_noise = np.random.normal(0.0, 1.0, size=size)
    s = 2.5 #Noise intensity (sigma)
    x_p_val = ((precip_rep + precip_noise*s) - p_m) / p_s
    x_t_val = ((temp_rep + temp_noise*s) - t_m) / t_s
    x_t_val, x_p_val = process_data(x_t_val, x_p_val, base, n_levels)
    y_d_train = y_dengue
    y_d_out = y_out
    y_d_val = y_rep
    ##
    # Grid Search
    ##
    colors = {'accuracy_val': '#7777FF',
              'accuracy_test': '#FF7777',
              'accuracy_train': '#77FF77',}
    ##
    #Grid parameters
    ##
    best =[(1, 4), (4, 7), (7, 10), (10, 13), (12, 15)] #Set of bands to use
    bdays = [3, 5, 9, 12] #Set of windows (size) to use
    nfeatures = [3] #Set of features to use (2,n), uses feature (2,3) since feature 1 is constant (Diffusion Maps)
    epsilon = ["auto"] #Epsilon for DM, automatic
    #For the set of bands
    for k in range(0, len(bdays)):
        n_days = bdays[k]
        #For the set of windows
        for j in range(0,len(best)):
            l=best[j]
            #For the set of features
            for f in range(0,len(nfeatures)):
                features=(1,nfeatures[f])
                #For the set of epsilon
                for e in range(0, len(epsilon)):
                    eps = epsilon[e]
                    fname = "ndays."+str(n_days)+"_range."+str(l)+"_feat."+str(features)+"_eps."+str(eps)
                    print("N Days = " + str(n_days) + " Range = " + str(l) + " Features = " + str(features) + " Epsilon = " + str(eps))
                    model = Pipeline(steps =[("Scaler", StandardScaler()),
                                             ("DM", DiffusionMaps(features=features)),
                                             ("SVM", SVC(C=10, gamma=1, kernel="rbf"))])
                    start_time = timeit.default_timer()
                    m_test_, m_val_, m_train_ = make_grid(x_t_train, x_t_val, x_t_out, x_p_train, x_p_val, x_p_out, y_d_train, y_d_val, y_d_out, n_days, l, model)
                    elapsed = timeit.default_timer() - start_time
                    # NOTE(review): `s` here is the noise sigma (2.5), so this
                    # always renders a 100% bar — used only to print the time.
                    progress(s, s, " Total time: " + formatTime(elapsed))
                    # Persist one row per metric/split combination.
                    with open(os.path.join(rpath, fname+".csv"), "w") as csv_file:
                        writer = csv.writer(csv_file, delimiter=',')
                        for key in m_test_:
                            line = list(m_test_[key])
                            line.insert(0, str(key)+"_test")
                            writer.writerow(line)
                        for key in m_train_:
                            line = list(m_train_[key])
                            line.insert(0, str(key)+"_train")
                            writer.writerow(line)
                        for key in m_val_:
                            line = list(m_val_[key])
                            line.insert(0, str(key)+"_val")
                            writer.writerow(line)
                    # Accuracy-over-start-day figure for this configuration.
                    fig = plt.figure(figsize=(20,10))
                    acc = m_test_['accuracy']
                    acc2 = m_train_['accuracy']
                    acc3 = m_val_['accuracy']
                    print("\n count: " + str(len(acc3[acc3 > 0.8])) +
                          " mean acc: " + str(np.mean(acc3[acc3 > 0.8])) +
                          " max acc: " + str(max(acc3)))
                    plt.plot(acc, color=colors["accuracy_test"], linewidth=4)
                    plt.plot(acc2, color=colors["accuracy_train"], linewidth=4)
                    plt.plot(acc3, color=colors["accuracy_val"], linewidth=4)
                    plt.legend(["Test acc.", "Train acc.", "Val. acc."], fontsize=22)
                    format_plot()
                    plt.show()
                    fig.savefig(os.path.join(rpath, fname+".png"))
                    plt.close()
######################################
# End of Grid Model Generation
######################################
######################################
# Begin of Smoothing and Functionals
######################################
# Smooth each per-day accuracy curve with a moving average and compute
# windowed functionals (uncertainty, KL divergence, chi2) for every saved
# grid configuration of every city.
colors = {'accuracy_val': '#7777FF',
          'accuracy_test': '#FF7777',
          'accuracy_train': '#77FF77',}
base_size = 15 #Smooth (convolution) filter size
mean_filter = np.ones((base_size,)) * 1/base_size
smoothed_path = os.path.join("results", "intermediate", "smoothed")
#For each city
for state in states:
    state_name = state.split(os.sep)[2]
    dirname = os.path.join("..", smoothed_path, state_name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
        time.sleep(1)
    #For each grid result
    for file in glob(os.path.join("..", grid_path, state_name, "*.csv")):
        ##
        #Read data
        ##
        data = pd.read_csv(file, header=None, index_col=[0]).transpose()
        filename = file.split(os.sep)[-1]
        acc_test = data["accuracy_test"].values
        acc_val = data["accuracy_val"].values
        acc_train = data["accuracy_train"].values
        ##
        #Smooth data
        ##
        mean_acc_test = np.convolve(acc_test, mean_filter, mode="same")
        mean_acc_val = np.convolve(acc_val, mean_filter, mode="same")
        mean_acc_train = np.convolve(acc_train, mean_filter, mode="same")
        # Reflect-padded copies so the windowed functionals below can slide a
        # full base_size window over every position.
        acc_test_pad = np.pad(acc_test, base_size//2, mode="reflect")
        acc_val_pad = np.pad(acc_val, base_size//2, mode="reflect")
        acc_train_pad = np.pad(acc_train, base_size//2, mode="reflect")
        # NOTE(review): despite their names, these three pad the *raw*
        # accuracy series, not the smoothed `mean_acc_*` ones — the entropy
        # and chi2 functionals below therefore compare raw curves.  Confirm
        # whether padding the smoothed curves was intended.
        mean_acc_test_pad = np.pad(acc_test, base_size//2, mode="reflect")
        mean_acc_val_pad = np.pad(acc_val, base_size//2, mode="reflect")
        mean_acc_train_pad = np.pad(acc_train, base_size//2, mode="reflect")
        ##
        #Calculate some other properties for each grid model
        ##
        vsize = len(mean_acc_test)
        un_test = np.zeros((vsize,))
        un_val = np.zeros((vsize,))
        un_train = np.zeros((vsize,))
        div_test = np.zeros((vsize,))
        div_val = np.zeros((vsize,))
        chi2_test = np.zeros((vsize,))
        chi2_val = np.zeros((vsize,))
        for i in range(0, vsize):
            # Windowed variance of the raw curve around its local mean.
            un_test[i] = np.mean((acc_test_pad[i:(i+base_size)] - mean_acc_test[i])**2)
            un_val[i] = np.mean((acc_val_pad[i:(i+base_size)] - mean_acc_val[i])**2)
            un_train[i] = np.mean((acc_train_pad[i:(i+base_size)] - mean_acc_train[i])**2)
            # Windowed KL divergence / chi2 distance of train vs test/val.
            div_test[i] = entropy(mean_acc_train_pad[i:(i+base_size)], mean_acc_test_pad[i:(i+base_size)])
            div_val[i] = entropy(mean_acc_train_pad[i:(i+base_size)], mean_acc_val_pad[i:(i+base_size)])
            chi2_test[i] = chi2_distance(mean_acc_train_pad[i:(i+base_size)], mean_acc_test_pad[i:(i+base_size)])
            chi2_val[i] = chi2_distance(mean_acc_train_pad[i:(i+base_size)], mean_acc_val_pad[i:(i+base_size)])
        # Variance -> standard deviation ("uncertainty").
        un_test = np.sqrt(un_test)
        un_val = np.sqrt(un_val)
        un_train = np.sqrt(un_train)
        ##
        #Save results
        ##
        with open(os.path.join("..", smoothed_path, state_name, filename), "w") as csv_file:
            write_line(csv_file, mean_acc_test, "mean_accuracy_test")
            write_line(csv_file, mean_acc_val, "mean_accuracy_val")
            write_line(csv_file, mean_acc_train, "mean_accuracy_train")
            write_line(csv_file, acc_test, "accuracy_test")
            write_line(csv_file, acc_val, "accuracy_val")
            write_line(csv_file, acc_train, "accuracy_train")
            write_line(csv_file, un_test, "uncertainty_test")
            write_line(csv_file, un_val, "uncertainty_val")
            write_line(csv_file, un_train, "uncertainty_train")
            write_line(csv_file, div_val, "divergence_val")
            write_line(csv_file, div_test, "divergence_test")
            write_line(csv_file, chi2_val, "chi2_val")
            write_line(csv_file, chi2_test, "chi2_test")
        print(file)
        # Confidence-band figure of the smoothed curves.
        fig = plt.figure(figsize=(20,10))
        plot_confidence(mean_acc_test, un_test, color=colors["accuracy_test"])
        plot_confidence(mean_acc_train, un_train, color=colors["accuracy_train"])
        plot_confidence(mean_acc_val, un_val, color=colors["accuracy_val"])
        plt.legend(["Test acc.", "Train acc.", "Val acc."])
        format_plot()
        plt.show()
        fig.savefig(os.path.join("..", smoothed_path, state_name, filename[:-3]+"png"))
        plt.close()
######################################
# End of Smoothing and Functionals
######################################
######################################
# Begin of Result Selection
######################################
# Scan every smoothed curve, pick accuracy peaks, rank them, and keep the
# best configuration per city in `result_data`.
base_size = 10 #distance between peaks to find in the data
rmax = []
rmean = []
selection_path = os.path.join("results", "intermediate", "selection")
result_data = pd.DataFrame(columns=["city", "max_acc_test", "mean_acc_test",
                                    "max_acc_train", "mean_acc_train",
                                    "max_acc_val", "mean_acc_val",
                                    "uncertainty", "divergence", "diff", "chi2",
                                    "index", "date", "file", "sum",
                                    "ndays", "band_a", "band_b", "feat_a", "feat_b", "eps"])
# Raw string avoids invalid-escape warnings; the pattern decodes the file
# names produced by the grid-search stage.
fname_pattern = re.compile(r'ndays\.(\d+)\_range\.\((\d+),\s(\d+)\)\_feat\.\((\d+),\s(\d+)\)\_eps\.(\w+)\.csv')
# NOTE: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# this script targets the older pandas it was written against.
#For each city
for state in states:
    state_name = state.split(os.sep)[2]
    mean_score = pd.DataFrame(columns=["max_acc_test", "mean_acc_test",
                                       "max_acc_train", "mean_acc_train",
                                       "max_acc_val", "mean_acc_val",
                                       "uncertainty", "divergence", "diff", "chi2",
                                       "index", "date", "file"])
    max_score = pd.DataFrame(columns=["max_acc_test", "mean_acc_test",
                                      "max_acc_train", "mean_acc_train",
                                      "max_acc_val", "mean_acc_val",
                                      "uncertainty", "divergence", "diff", "chi2",
                                      "index", "date", "file"])
    #For each grid model
    for file in glob(os.path.join("..", smoothed_path, state_name, "*.csv")):
        data = pd.read_csv(file, header=None, index_col=[0]).transpose()
        ##
        #Read the data
        ##
        acc_test = data["accuracy_test"].values
        acc_val = data["accuracy_val"].values
        acc_train = data["accuracy_train"].values
        uncertainty = data["uncertainty_val"].values
        divergence = data["divergence_val"].values * 100
        chi2 = data["chi2_val"].values
        mean_acc_test = data["mean_accuracy_test"].values
        mean_acc_val = data["mean_accuracy_val"].values
        mean_acc_train = data["mean_accuracy_train"].values
        ##
        #Find peaks based on mean accuracy
        ##
        idx_mean, _ = find_peaks(mean_acc_val, distance=base_size//2)
        for i_mean in idx_mean:
            mean_score = mean_score.append({"max_acc_test": acc_test[i_mean],
                                            "mean_acc_test": mean_acc_test[i_mean],
                                            "max_acc_train": acc_train[i_mean],
                                            "mean_acc_train": mean_acc_train[i_mean],
                                            "max_acc_val": acc_val[i_mean],
                                            "mean_acc_val": mean_acc_val[i_mean],
                                            "uncertainty": uncertainty[i_mean],
                                            "divergence": divergence[i_mean],
                                            "diff": acc_train[i_mean]-acc_val[i_mean],
                                            "chi2": chi2[i_mean],
                                            "index": i_mean,
                                            "date": index2Date(i_mean),
                                            "file": file.split(os.sep)[-1]}, ignore_index=True)
        ##
        #Find peaks based on accuracy
        ##
        idx_max, _ = find_peaks(acc_val, distance=base_size//2)
        for i_max in idx_max:
            # BUG FIX: this previously did `max_score = mean_score.append(...)`,
            # stacking every raw-accuracy peak onto a copy of the mean-score
            # table instead of accumulating them in max_score.
            max_score = max_score.append({"max_acc_test": acc_test[i_max],
                                          "mean_acc_test": mean_acc_test[i_max],
                                          "max_acc_train": acc_train[i_max],
                                          "mean_acc_train": mean_acc_train[i_max],
                                          "max_acc_val": acc_val[i_max],
                                          "mean_acc_val": mean_acc_val[i_max],
                                          "uncertainty": uncertainty[i_max],
                                          "divergence": divergence[i_max],
                                          "diff": acc_train[i_max]-acc_val[i_max],
                                          "chi2": chi2[i_max],
                                          "index": i_max,
                                          "date": index2Date(i_max),
                                          "file": file.split(os.sep)[-1]}, ignore_index=True)
    # Rank peaks by the mean of validation and train smoothed accuracy.
    max_score["sum"] = (max_score["mean_acc_val"]+max_score["mean_acc_train"])/2
    max_score = max_score.sort_values(["sum"], ascending=False)
    mean_score["sum"] = (mean_score["mean_acc_val"]+mean_score["mean_acc_train"])/2
    mean_score = mean_score.sort_values(["sum"], ascending=False)
    rmax.append(max_score)
    rmean.append(mean_score)
    # Keep the best-ranked configuration for this city, decoding the grid
    # parameters back out of the file name.
    if(max_score.size > 0):
        line = max_score.iloc[0]
        values = fname_pattern.match(line['file']).groups()
        line["city"] = state_name
        line["ndays"] = values[0]
        line["band_a"] = values[1]
        line["band_b"] = values[2]
        line["feat_a"] = values[3]
        line["feat_b"] = values[4]
        line["eps"] = values[5]
        result_data = result_data.append(line)
        print("\n\n"+state_name+"\n\n")
        print(line)
    dirname = os.path.join("..", selection_path, state_name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
        time.sleep(1)
    max_score.to_csv(os.path.join(dirname,"max_"+str(base_size)+".csv"), float_format="%.2f", index=False)
    mean_score.to_csv(os.path.join(dirname,"mean_"+str(base_size)+".csv"), float_format="%.2f", index=False)
######################################
# End of Result Selection
######################################
######################################
# Begin Save Final Result
######################################
# Persist the per-city best configurations collected above.
result_path = "results"
dirname = os.path.join("..", result_path)
if not os.path.exists(dirname):
    os.makedirs(dirname)
    time.sleep(1)
result_data.to_csv(os.path.join(dirname, "result.csv"), float_format="%.2f", index=False)
######################################
# End Save Final Result
######################################
print("Done!")
"caioc2bolado@gmail.com"
] | caioc2bolado@gmail.com |
06482a9d416856f3cc042110e582582a828ae569 | 96139fd4b616bccef2401bd3fb8a25d5fd5d33f1 | /tests/conftest.py | b2523f8db482bc99427389c74b1f7e855d0d7d6b | [] | no_license | nathancooperjones/2048-ai | af9d6fec1af4a9d1ac47cba8eee78b8f643ddeec | ad0e35cb684bfb355babce0d70dc0797c22c6136 | refs/heads/master | 2020-08-24T13:36:28.503146 | 2020-01-03T23:41:38 | 2020-01-03T23:41:38 | 216,836,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), 'fixtures'))
| [
"nathancooperjones@gmail.com"
] | nathancooperjones@gmail.com |
51fbf6df906d530407c2aa6d975e24a0544b48d3 | 2d3198d326c695fdbe9d1f7eaed1072908e8d246 | /csv2tvm/csv2tvm.py | 198a54b0c773d78c1ee7cf4b6803cdd8e84a79fa | [] | no_license | pollozhao/Hydro | 8da02e6dae7b1819b296f54fb084a363641341cb | cc0879f78eed9ac3404c2849324fdfceac3f19b3 | refs/heads/master | 2020-04-15T14:11:31.282703 | 2019-01-10T04:55:10 | 2019-01-10T04:55:10 | 164,745,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | # csv format WITHOUT heading: start,node,Kh,Kv,Ss,Sy
# sorted by: start,node
inputfile = '004_total_TVM.csv'
outputfile = inputfile.replace('.csv','.tvm')
total_sp = 108
fout = open(outputfile,'w')
fout.write(' # MODFLOW-USG Time-Variant Materials (TVM) Package \n\
1 0 0 0 0 -1\n\
0 0 0 0 0 Start SP 1\n')
sp_list = []
with open(inputfile) as f:
for l in f:
sp_list.append(l.split(',')[0])
#print(len(sp_list))
outstack = []
write_flag = True
for c_sp in range(1,total_sp + 1):
if str(c_sp) in sp_list:
fin = open(inputfile)
for line in fin:
c_in = line.replace('\n','').split(',')
if c_in[0] == str(c_sp):
c_count = str(sp_list.count(c_in[0]))
if write_flag == True:
outstr = ' '+c_count+' '+c_count+' '+c_count+' '+c_count+' 0 End SP ' + str(c_in[0]) + '\n'
fout.write(outstr)
write_flag = False
outstack.append([c_in[1],c_in[2],c_in[3],c_in[4],c_in[5]])
for item in outstack:
fout.write(' '+ item[0] + ' ' + item[1] +'\n')
for item in outstack:
fout.write(' '+ item[0] + ' ' + item[2] +'\n')
for item in outstack:
fout.write(' '+ item[0] + ' ' + item[3] +'\n')
for item in outstack:
fout.write(' '+ item[0] + ' ' + item[4] +'\n')
fin.close()
outstack = []
else:
outstr = ' 0 0 0 0 0 End SP ' + str(c_sp) + '\n'
fout.write(outstr)
write_flag = True
fout.close()
print('done~')
| [
"noreply@github.com"
] | pollozhao.noreply@github.com |
ceaa02895cc7b072cfdd7f6c27db13f28b462bad | ad5542c705938d1d52ab14e363035b4575e8abcd | /i2b2tools/converters/__init__.py | c113950945eb5caf0f40c62e6e7f27d95128985a | [
"Apache-2.0"
] | permissive | cjliux/i2b2tools | 0811236675ef83277a8784c000d46fff8dd9501a | 305f2555121de4f09780ca13c018644b8759e363 | refs/heads/master | 2020-05-04T15:35:29.498884 | 2019-04-25T08:13:19 | 2019-04-25T08:13:19 | 179,247,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | from .inline import standoff_to_inline, inline_to_standoff
from .common import *
| [
"cjliux@gmail.com"
] | cjliux@gmail.com |
a4e081e2f2324bde8cb985aa063647de3d17c7ee | 59da8c75021610887304acc36fdecad84289847c | /강병수/Etc/210402_python/problem01.py | 1202d39d28a5df6125aca88fd94af80fd59be347 | [] | no_license | kb-ict/20201126_cSharp_java_class | 8e3d041566a74f39911c660c89fd1174a3eec224 | e5be9e11f1d0e2f58fbbdade6c28c6de1fefc023 | refs/heads/main | 2023-05-09T12:29:51.721299 | 2021-05-24T07:25:36 | 2021-05-24T07:25:36 | 340,220,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | import pyperclip, pyautogui, time
pyautogui.hotkey('win','r')
pyautogui.write('mspaint')
time.sleep(1.5)
pyautogui.hotkey('enter')
time.sleep(2)
pyautogui.moveTo(342,92, duration=2)
pyautogui.click(x=342, y=92)
time.sleep(1)
pyautogui.click(x=500, y=400)
pyautogui.write('Well done!') | [
"biscottimeru@gmail.com"
] | biscottimeru@gmail.com |
f3396d3389170eab0fa52c05e0fa2c94ee90fb82 | f473eba3a4a32c8dbbca99d1e25a4715abf5767e | /datasets/demon_dataset.py | 02ec59a0a6134e5f4cd8f914d678af01df940c00 | [
"BSD-2-Clause"
] | permissive | xubin1994/multi_view_stereonet | 1ef82e2bac994ab68b462795ee436959cb790e1d | ab37254ed6c17a9057d0a006dc7ea2dd636cdd2f | refs/heads/main | 2023-06-14T20:56:50.823749 | 2021-05-21T22:41:45 | 2021-07-14T17:13:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,505 | py | # Copyright 2021 Massachusetts Institute of Technology
#
# @file demon_dataset.py
# @author W. Nicholas Greene
# @date 2020-10-09 11:55:58 (Fri)
import os
import glob
import random
from PIL import Image
import numpy as np
import torch.utils.data as tud
from utils import depthmap_utils
class DeMoNDataset(tud.Dataset):
"""Dataset class for DeMoN data based on the implementation in DPSNet.
Assumes data is organized as follows:
root/scene_1/0000000.jpg
root/scene_1/0000001.jpg
...
root/scene_1/cam.txt
root/scene_1/poses.txt
root/scene_2/0000000.jpg
root/scene_2/0000001.jpg
...
root/scene_2/cam.txt
root/scene_2/poses.txt
...
The train text file should list each scene desired:
scene_1
scene_2
...
"""
def __init__(self, data_dir, input_file, num_right_images=1, num_left_images=0, transform=None):
self.data_dir = data_dir
self.input_file = input_file
self.num_right_images = num_right_images
self.num_left_images = num_left_images
self.transform = transform
# Get scenes.
scenes = []
with open(os.path.join(self.data_dir, self.input_file), "r") as stream:
scenes = stream.readlines()
scenes = [os.path.join(self.data_dir, scene.strip()) for scene in scenes]
self.scenes = sorted(scenes)
# Generate samples.
self.samples = self.generate_samples(self.num_right_images)
shuffle_on_read = True
if shuffle_on_read:
random.shuffle(self.samples)
if self.num_left_images > 0:
self.samples = self.samples[:self.num_left_images]
self.left_filename_to_idx = {}
for idx in range(len(self.samples)):
self.left_filename_to_idx[self.samples[idx]["left_filename"]] = idx
return
def generate_samples(self, num_right_images):
samples = []
demi_length = (num_right_images + 1) // 2
for scene in self.scenes:
assert(os.path.exists(os.path.join(scene, "cam.txt")))
assert(os.path.exists(os.path.join(scene, "poses.txt")))
K3 = np.genfromtxt(os.path.join(scene, "cam.txt")).astype(np.float32).reshape((3, 3))
K = np.eye(4, dtype=np.float32)
K[:3, :3] = K3
inv_poses = np.genfromtxt(os.path.join(scene, "poses.txt")).astype(np.float32)
images = sorted(glob.glob(os.path.join(scene, "*.jpg")))
if len(images) < num_right_images + 1:
continue
for left_idx in range(len(images)):
if left_idx < demi_length:
shifts = list(range(0, num_right_images + 1))
shifts.pop(left_idx)
elif left_idx >= len(images) - demi_length:
shifts = list(range(len(images) - (num_right_images + 1), len(images)))
shifts.pop(left_idx - len(images))
else:
shifts = list(range(left_idx - demi_length, left_idx + (num_right_images + 1 + 1) // 2))
shifts.pop(demi_length)
assert(len(shifts) == num_right_images)
left_filename = images[left_idx]
left_depthmap_true_filename = os.path.splitext(left_filename)[0] + ".npy"
T_world_in_left = np.concatenate((inv_poses[left_idx, :].reshape((3, 4)), np.array([[0, 0, 0, 1]])), axis=0)
sample = {"K": K,
"left_filename": left_filename,
"left_depthmap_true_filename": left_depthmap_true_filename,
"right_filename": [],
"right_depthmap_true_filename": [],
"T_right_in_left": []}
for right_idx in shifts:
right_filename = images[right_idx]
sample["right_filename"].append(right_filename)
sample["right_depthmap_true_filename"].append(os.path.splitext(right_filename)[0] + ".npy")
assert(os.path.exists(sample["right_depthmap_true_filename"][-1]))
T_world_in_right = np.concatenate((inv_poses[right_idx, :].reshape((3, 4)), np.array([[0, 0, 0, 1]])), axis=0)
T_right_in_left = T_world_in_left @ np.linalg.inv(T_world_in_right)
T_right_in_left = T_right_in_left.astype(np.float32)
sample["T_right_in_left"].append(T_right_in_left)
assert(len(sample["right_filename"]) == num_right_images)
assert(len(sample["T_right_in_left"]) == num_right_images)
samples.append(sample)
return samples
def __getitem__(self, idx):
raw_sample = self.samples[idx]
# Read in images.
left_image = Image.open(raw_sample["left_filename"])
left_depthmap_true = np.load(raw_sample["left_depthmap_true_filename"]).astype(np.float32)
right_images = []
right_depthmap_true = []
for idx in range(len(raw_sample["right_filename"])):
right_images.append(Image.open(raw_sample["right_filename"][idx]))
right_depthmap_true.append(np.load(raw_sample["right_depthmap_true_filename"][idx]).astype(np.float32))
sample = {"left_filename": raw_sample["left_filename"],
"right_filename": raw_sample["right_filename"],
"left_image": left_image,
"right_image": right_images,
"K": raw_sample["K"],
"T_right_in_left": raw_sample["T_right_in_left"],
"left_depthmap_true": left_depthmap_true,
"right_depthmap_true": right_depthmap_true}
assert(len(sample["right_filename"]) == self.num_right_images)
assert(len(sample["T_right_in_left"]) == self.num_right_images)
if self.transform:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.samples)
class DeMoNStereoDataset(tud.Dataset):
"""Wrapper around DeMoNDataset for stereo.
"""
def __init__(self, data_dir, input_file, num_left_images=0, transform=None):
self.transform = transform
self.demon_dataset = DeMoNDataset(data_dir, input_file, num_right_images=1,
num_left_images=num_left_images, transform=None)
return
def __len__(self):
return len(self.demon_dataset)
def __getitem__(self, idx):
sample = self.demon_dataset[idx]
# Remove outer lists around right image.
assert(len(sample["right_filename"]) == 1)
sample["right_filename"] = sample["right_filename"][0]
sample["right_image"] = sample["right_image"][0]
sample["right_depthmap_true"] = sample["right_depthmap_true"][0]
sample["T_right_in_left"] = sample["T_right_in_left"][0]
if self.transform:
sample = self.transform(sample)
return sample
def get_groundtruth_disparity(self, image_file):
idx = self.demon_dataset.left_filename_to_idx[image_file]
sample = self.__getitem__(idx)
disparity = depthmap_utils.depthmap_to_disparity(
sample["K"][0, :3, :3].cpu().numpy(),
sample["T_right_in_left"][0, ...].cpu().numpy(),
sample["left_depthmap_true"][0, ...].cpu().numpy())
return disparity
| [
"wnick.greene@strio.ai"
] | wnick.greene@strio.ai |
ffc7449e3ebf51010762bd5fb7423dfa78e2f390 | dfcdf18ec829d7926e1fad2df82e6156fdc0c2d0 | /spit/zscale.py | f40490127cd102372df644d17c747f2b374b8591 | [
"BSD-3-Clause"
] | permissive | pypeit/spit | c87d4906ce35c20e68bcaaa536d7e5622c3f5de0 | 77f0687c9aeae11ad56f0c5ac2a2b2ad21eed7fb | refs/heads/master | 2022-01-15T02:37:51.432828 | 2019-08-13T20:58:49 | 2019-08-13T20:58:49 | 112,011,236 | 2 | 2 | BSD-3-Clause | 2019-08-14T05:34:00 | 2017-11-25T14:49:25 | Python | UTF-8 | Python | false | false | 6,513 | py | """
This file is part of the STScI numdisplay package:
https://www.stsci.edu/trac/ssb/stsci_python/browser/stsci_python/trunk/numdisplay/lib/stsci/numdisplay/zscale.py?rev=13225
under the following license:
Copyright (C) 2005 Association of Universities for Research in Astronomy (AURA)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. The name of AURA and its representatives may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import math
import numpy
MAX_REJECT = 0.5
MIN_NPIXELS = 5
GOOD_PIXEL = 0
BAD_PIXEL = 1
KREJ = 2.5
MAX_ITERATIONS = 5
def zscale(image, nsamples=1000, contrast=0.25):
"""Implement IRAF zscale algorithm
nsamples=1000 and contrast=0.25 are the IRAF display task defaults
image is a 2-d numpy array
returns (z1, z2)
"""
# Sample the image
samples = zsc_sample(image, nsamples)
return zscale_samples(samples, contrast=contrast)
def zsc_sample(image, maxpix, bpmask=None, zmask=None):
# Figure out which pixels to use for the zscale algorithm
# Returns the 1-d array samples
# Don't worry about the bad pixel mask or zmask for the moment
# Sample in a square grid, and return the first maxpix in the sample
nc = image.shape[0]
nl = image.shape[1]
stride = max(1.0, math.sqrt((nc - 1) * (nl - 1) / float(maxpix)))
stride = int(stride)
samples = image[::stride,::stride].flatten()
# remove NaN and Inf
samples = samples[numpy.isfinite(samples)]
return samples[:maxpix]
def zscale_samples(samples, contrast=0.25):
npix = len(samples)
samples.sort()
zmin = samples[0]
zmax = samples[-1]
# For a zero-indexed array
center_pixel = int((npix - 1) // 2)
if npix%2 == 1:
median = samples[center_pixel]
else:
median = 0.5 * (samples[center_pixel] + samples[center_pixel + 1])
#
# Fit a line to the sorted array of samples
minpix = max(MIN_NPIXELS, int(npix * MAX_REJECT))
ngrow = max(1, int (npix * 0.01))
ngoodpix, zstart, zslope = zsc_fit_line(samples, npix, KREJ, ngrow,
MAX_ITERATIONS)
#print "slope=%f intercept=%f" % (zslope, zstart)
if ngoodpix < minpix:
z1 = zmin
z2 = zmax
else:
if contrast > 0: zslope = zslope / contrast
z1 = max(zmin, median - (center_pixel - 1) * zslope)
z2 = min(zmax, median + (npix - center_pixel) * zslope)
return z1, z2
def zsc_fit_line(samples, npix, krej, ngrow, maxiter):
if npix <= 1:
return npix, 0, 1
#
# First re-map indices from -1.0 to 1.0
xscale = 2.0 / (npix - 1)
xnorm = numpy.arange(npix)
xnorm = xnorm * xscale - 1.0
ngoodpix = npix
minpix = max(MIN_NPIXELS, int(npix*MAX_REJECT))
last_ngoodpix = npix + 1
# This is the mask used in k-sigma clipping. 0 is good, 1 is bad
badpix = numpy.zeros(npix, dtype="int32")
#
# Iterate
for niter in range(maxiter):
if (ngoodpix >= last_ngoodpix) or (ngoodpix < minpix):
break
# Accumulate sums to calculate straight line fit
goodpixels = numpy.where(badpix == GOOD_PIXEL)
sumx = xnorm[goodpixels].sum()
sumxx = (xnorm[goodpixels]*xnorm[goodpixels]).sum()
sumxy = (xnorm[goodpixels]*samples[goodpixels]).sum()
sumy = samples[goodpixels].sum()
sum = len(goodpixels[0])
delta = sum * sumxx - sumx * sumx
# Slope and intercept
intercept = (sumxx * sumy - sumx * sumxy) / delta
slope = (sum * sumxy - sumx * sumy) / delta
# Subtract fitted line from the data array
fitted = xnorm*slope + intercept
flat = samples - fitted
# Compute the k-sigma rejection threshold
ngoodpix, mean, sigma = zsc_compute_sigma (flat, badpix, npix)
threshold = sigma * krej
# Detect and reject pixels further than k*sigma from the fitted line
lcut = -threshold
hcut = threshold
below = numpy.where(flat < lcut)
above = numpy.where(flat > hcut)
badpix[below] = BAD_PIXEL
badpix[above] = BAD_PIXEL
# Convolve with a kernel of length ngrow
kernel = numpy.ones(ngrow,dtype="int32")
badpix = numpy.convolve(badpix, kernel, mode='same')
ngoodpix = len(numpy.where(badpix == GOOD_PIXEL)[0])
niter += 1
# Transform the line coefficients back to the X range [0:npix-1]
zstart = intercept - slope
zslope = slope * xscale
return ngoodpix, zstart, zslope
def zsc_compute_sigma (flat, badpix, npix):
# Compute the rms deviation from the mean of a flattened array.
# Ignore rejected pixels
# Accumulate sum and sum of squares
goodpixels = numpy.where(badpix == GOOD_PIXEL)
sumz = flat[goodpixels].sum()
sumsq = (flat[goodpixels]*flat[goodpixels]).sum()
ngoodpix = len(goodpixels[0])
if ngoodpix == 0:
mean = None
sigma = None
elif ngoodpix == 1:
mean = sumz
sigma = None
else:
mean = sumz / ngoodpix
temp = sumsq / (ngoodpix - 1) - sumz*sumz / (ngoodpix * (ngoodpix - 1))
if temp < 0:
sigma = 0.0
else:
sigma = math.sqrt (temp)
return ngoodpix, mean, sigma
| [
"xavier@ucolick.org"
] | xavier@ucolick.org |
ce7653c10072b6131971eb3c9e2d75e9fcff2f60 | 45dadbb39beb9d3ddea48fdf088b07d7b8f06f27 | /coroblog/asgi.py | 9d1eed0fea1d1284bc34b25dc8f72d7f57ff5574 | [] | no_license | jod35/Memo | edcf7a00ccdb76fc9525354a8151332aa5082f19 | 53bdbc85f2c4ccfab84c5e91e369cf6136c827ce | refs/heads/master | 2023-01-23T15:07:02.566048 | 2020-11-12T18:13:00 | 2020-11-12T18:13:00 | 305,370,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for coroblog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'coroblog.settings')
application = get_asgi_application()
| [
"jod35@test.com"
] | jod35@test.com |
111dfde5ae2aa71082fdb16043ff83be4fc8d48c | 9f02973cd0b8e7886085b7cff75b0f515ddf1a37 | /PLSA(PLSI)/src/PLSA_slow.py | 41ff4a446e9382892c5e4c84f4ff32ab7acc026b | [] | no_license | damo894127201/MachineLearning | 9c578628936ded8e4c26c232d6adabc58e09bf54 | ca0d43c9ba8ff7d1353606ba893291e3bf10f9e7 | refs/heads/master | 2020-07-23T12:23:48.141435 | 2019-11-20T02:06:48 | 2019-11-20T02:06:48 | 207,554,934 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,333 | py | # -*- coding: utf-8 -*-
# @Time : 2019/11/10 10:14
# @Author : Weiyang
# @File : PLSA_slow.py
#======================================================================================================================
# 概率潜在语义分析(probabilistic latent semantic analysis,PLSA):含有隐变量的模型,学习算法是EM算法
# 也称概率潜在语义索引(probabilistic latent semantic indexing,PLSI),是一种利用概率生成模型对文本集合进行话题分析的无监督学习
# 方法。模型最大的特点是 用隐变量表示话题;整个模型表示 文本生成话题,话题生成单词,从而得到 单词-文本共现数据 的过程;假设每个
# 文本由一个话题分布决定,每个话题由一个单词分布决定。
#
# 概率潜在语义分析受潜在语义分析的启发提出,两者可以通过矩阵分解关联起来。概率潜在语义分析模型中的矩阵U'和V'是非负的、规范化的
# 表示条件概率分布,而潜在语义分析模型中的矩阵U和V是正交的,未必非负,并不表示概率分布。
# 概率潜在语义分析就是发现由隐变量表示的话题,即潜在语义。
# 概率潜在语义分析模型:生成模型 和 共现模型
# 1. 生成模型:
# 1. 概念:生成模型表示文本生成话题,话题生成单词,从而得到 单词-文本共现数据T 的过程;假设每个文本由一个话题分布决定,
# 每个话题由 一个单词分布决定。
# 单词变量 w 和 文本变量 d 是观测变量,话题变量z 是 隐变量,生成模型的定义如下:
# 单词-文本共现数据T,是一个矩阵,其行表示单词,列表示文本,元素表示 单词-文本对(w,d)的出现次数。单词-文本共现数据T
# 的生成概率为所有单词-文本对(w,d)的生成概率的乘积,即
# P(T) = ∏_(w,d) P(w,d)^n(w,d)
# 每个单词-文本对(w,d)的生成概率为:
# P(w,d) = P(d)P(w|d) = P(d) * (∑_{z} P(z|d) * P(w|z))
# P(d) 是文档出现的概率,可以直接统计出来;P(z|d) 是 文本d 生成话题z 的条件概率分布;
# P(w|z)是话题z 生成 单词w 的条件概率分布;n(w,d)表示(w,d)的出现次数
# 2. 属概率有向图模型
# 3. 生成模型假设在话题z给定条件下,单词w与文本d条件独立,即
# P(w,z|d) = p(z|d)P(w|z,d) = p(z|d)P(w|z)
# 2. 共现模型:
# 1. 概念:生成模型描述 单词-文本共现数据T 拥有的模式,共现模型的定义如下:
# P(T) = ∏_(w,d) P(w,d)^n(w,d)
# P(w,d) = ∑_z P(z)P(w|z)P(d|z)
# 2. 属概率有向图模型。
# 3. 共现模型假设在话题z给定条件下,单词w 与 文本d 是条件独立的,即
# P(w,d|z) = P(w|z)P(d|z)
# 3. 生成模型与共现模型在概率公式意义上是等价的,但是拥有不同的性质。生成模型刻画单词-文本共现数据T 生成的过程,共现模型描述
# 单词-文本共现数据T 拥有的模式。
# 4. 学习策略:观测数据的极大似然估计
# 5. 学习算法:EM算法,EM算法是一种迭代算法,每次迭代包括交替的两步:E步,求期望;M步,求极大。E步是计算Q函数,即完全数据的对
# 数似然函数对不完全数据的条件分布的期望。M步是对Q函数求极大化,更新模型参数,这一步一般采用拉格朗日法,求得参数
# 的解析解。
# 1. 概率潜在语义分析模型是含有隐变量的模型,目标函数对数似然函数的优化无法用解析方法求解,故用EM算法。
# 2. E步:计算Q函数,Q函数为完全数据的对数似然函数对不完全数据的条件分布的期望
# 3. M步:极大化Q函数,一般是通过约束最优化求解Q函数,采用拉格朗日法求解参数的解析解
# 4. 算法执行过程:
# 1. 输入:单词集合W={w1,w2,...,w_M},文本集合D={d1,d2,...,d_N},话题集合Z={z1,z2,...,z_K},共现数据{n(wi,dj)},其中
# i=1,2,...,M ; j=1,2,...,N
# 2. 输出:话题z_k 生成 单词w_i 的条件概率分布P(w_i|z_k) 和 文本d_j 生成话题z_k 的条件概率分布P(z_k|d_j)
# 3. 迭代执行以下E步,M步,直到收敛为止
# E步:简化后的Q函数:
# P(z_k | w_i,d_j) = P(w_i|z_k)P(z_k|d_j) / {∑_{k=1,2,..,K} P(w_i|z_k)P(z_k|d_j)}
# M步:
# P(w_i|z_k) =
# ∑_{j=1,..,N}n(w_i,d_j)P(z_k|w_i,d_j) / {∑_{m=1,..M}∑_{j=1,..,N}n(w_m,d_j)P(z_k|w_m,d_j)}
#
# P(z_k|d_j) =
# ∑_{i=1,...,M}n(w_i,d_j)P(z_k|w_i,d_j) / n(d_j)
# n(d_j) = ∑_{i=1,...,M}n(w_i,d_j) ,表示文本d_j中的单词个数
# n(w_i,d_j)表示单词w_i在文本d_j中出现的次数
# 6. 模型参数:如果直接定义单词与文本的共现概率P(w,d),模型参数的个数是O(M*N),其中M是单词数,N是文本数。概率潜在语义分析的
# 生成模型和共现模型的参数个数是O(M*K + N*K) ,其中K是话题数。现实中,K<<M,所以概率潜在语义分析通过话题对数据
# 进行了更简洁地表示,减少了学习过程中过拟合的可能性。
#======================================================================================================================
import numpy as np
import jieba as jb
from collections import defaultdict
import pandas as pd
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(message)s')
class PLSA(object):
'''概率潜在语义分析(概率潜在语义索引):生成模型的 EM算法实现'''
def __init__(self,filePath=None):
self.matrix_frequent,self.token2id,self.id2token,self.doc2id,\
self.id2doc,self.wordDict,self.num_Documents = self._loadData(filePath)
def _loadData(self,filePath):
'''读取文档集数据,返回 两个 单词-文档矩阵X ,其中每个元素分别是是 词频 和 TF-IDF
filePath :文档集路径,其中,每一行代表一篇文档'''
wordDict = [] # 单词词典
Documents = [] # [[{word:num},{word:num},...],...] ,存储每篇文档中每个词的词频
with open(filePath,'r',encoding='utf-8') as fi:
for doc in fi:
words = [word for word in jb.cut(doc.strip()) if '\u4e00' <= word <= '\u9fff'] # 剔除非中文字符
# 统计每篇文档中每个词的词频
docContent = defaultdict(int)
for word in words:
docContent[word] += 1
Documents.append(docContent)
wordDict.extend(words) # 单词加入词包
# 对词包去重
wordDict = list(set(wordDict))
# 对单词和文档进行编码
token2id = {token:id for id,token in enumerate(wordDict)}
id2token = {id:token for id,token in enumerate(wordDict)}
doc2id = {'Doc: '+ str(id):id for id in range(len(Documents))}
id2doc = {id:'Doc: '+ str(id) for id in range(len(Documents))}
# 单词向量空间模型:单词-文档矩阵T(单词-文本共现数据T)
matrix_frequent = np.zeros((len(wordDict),len(Documents)))
for i,word in enumerate(wordDict):
for j in range(len(Documents)):
matrix_frequent[i][j] = Documents[j][word] * 1.0 # 元素为词频
matrix_frequent = pd.DataFrame(matrix_frequent,columns=['Doc: '+ str(id) for id in range(len(Documents))],
index=wordDict)
return matrix_frequent,token2id,id2token,doc2id,id2doc,wordDict,len(Documents)
def fit(self,X,n_topics=5,max_iters=300,threshold=1e-5):
'''概率潜在语义分析
X 是单词-文本共现数据T,pd.DataFrame结构
n_topics 人为设定的主题的个数
max_iters 最大的迭代次数
threshold 前后两次参数的差值,小于该阈值,则停止迭代
输入:
共现数据{n(w_i,d_j)},即单词-文本共现数据T,每行代表一个单词,每列代表一个文本,每个元素存储的都是单词在相应文档中的词频
输出:
话题z_k 生成 单词w_i 的条件概率分布P(w_i|z_k) 和 文本d_j 生成话题z_k 的条件概率分布P(z_k|d_j) ,这是两个矩阵
'''
logger = logging.getLogger('Training')
# 初始化 话题z_k生成单词w_i的条件概率分布P(w_i|z_k),每一行代表一个主题,每一列代表一个单词,
# 每个元素表示相应的行主题生成对应列单词的概率,同一个主题生成所有单词的概率之和为1,即每一行元素的和为1
topic_generate_word_matrix = np.ones((n_topics,len(self.wordDict))) / float(len(self.wordDict))
# 转为pd.DataFrame
topic_generate_word_matrix = pd.DataFrame(topic_generate_word_matrix,index=['Topic:' + str(i) for i in range(n_topics)],
columns=self.wordDict)
# 初始化 文本d_j 生成话题z_k 的条件概率分布P(z_k|d_j),每一行代表一个文档,每一列代表一个主题,
# 每个元素表示相应的行文档生成对应列单词的概率,同一个文档生成所有主题的概率之和为1,即每一行元素的和为1
doc_generate_topic_matrix = np.ones((self.num_Documents,n_topics)) / float(n_topics)
# 转为pd.DataFrame
doc_generate_topic_matrix = pd.DataFrame(doc_generate_topic_matrix,index=['Doc: ' + str(id) for id in range(self.num_Documents)],
columns=['Topic:' + str(i) for i in range(n_topics)])
# Q函数 P(z_k|w_i,d_j)
word_and_doc_generate_topic_matrix = np.zeros((len(self.wordDict) * self.num_Documents, n_topics))
# 转为pd.DataFrame
index = ['(' + word + ',' + 'Doc: '+ str(id) + ')' for word in self.wordDict for id in range(self.num_Documents)]
word_and_doc_generate_topic_matrix = pd.DataFrame(word_and_doc_generate_topic_matrix,
index=index,
columns=['Topic:' + str(i) for i in range(n_topics)])
# 主题
Topics = ['Topic:' + str(i) for i in range(n_topics)]
for step in range(1,max_iters+1):
# 拷贝一份参数矩阵,用于比较前后更新之间参数变化的大小
doc_generate_topic_matrix_copy = np.copy(doc_generate_topic_matrix.values)
topic_generate_word_matrix_copy = np.copy(topic_generate_word_matrix.values)
# E步:求期望,即Q函数 P(z_k|w_i,d_j)
# 先遍历每个主题
for topic in Topics:
# 存储 同一主题下的概率之和,即分母
sum_value = 0
# 遍历每个单词
for word in self.wordDict:
# 遍历每个文档
for doc in doc_generate_topic_matrix.index:
temp = topic_generate_word_matrix.loc[topic][word] * doc_generate_topic_matrix.loc[doc][topic]
sum_value += temp
word_and_doc_generate_topic_matrix.loc['(' + word + ',' + doc + ')'][topic] = temp
# 该列除以分母
if sum_value != 0:
word_and_doc_generate_topic_matrix[topic] /= float(sum_value)
# M步:求Q函数的极大,更新参数
# 先遍历每个主题
for topic in Topics:
# 记录P(w_i|z_k)的分母的值
sum_value1 = 0
# 遍历每个单词
for word in self.wordDict:
# 记录P(w_i|z_k)的分子的值
sum_value2 = 0
# 遍历每个文档
for doc in doc_generate_topic_matrix.index:
temp = X.loc[word][doc] * word_and_doc_generate_topic_matrix.loc['(' + word + ',' + doc + ')'][topic]
sum_value2 += temp
# 更新P(w_i|z_k)参数
topic_generate_word_matrix.loc[topic][word] = sum_value2
sum_value1 += sum_value2
# 更新P(w_i|z_k)参数
if sum_value1 != 0:
topic_generate_word_matrix.loc[topic] /= float(sum_value1)
# 更新P(z_k|d_j)
# 遍历每个文档
for doc in doc_generate_topic_matrix.index:
# 记录P(z_k|d_j)分母的值
sum_value = 0
# 遍历每个单词
for word in self.wordDict:
temp = X.loc[word][doc] * word_and_doc_generate_topic_matrix.loc['(' + word + ',' + doc + ')'][topic]
sum_value += temp
# 更新参数
doc_generate_topic_matrix.loc[doc][topic] = sum_value / float(np.sum(X[doc]))
# 比较前后两次迭代参数差值之和是否达到指定阈值,即参数是否稳定
# 差值计算
difference1 = np.sum(np.sum(np.abs(topic_generate_word_matrix.values - topic_generate_word_matrix_copy),axis=0))
difference2 = np.sum(np.sum(np.abs(doc_generate_topic_matrix.values - doc_generate_topic_matrix_copy),axis=0))
difference = difference1 + difference2
logger.info('epochs:{}\tdifferences:{}'.format(step, difference))
if difference < threshold:
logger.info('Training Finished!')
break
return topic_generate_word_matrix,doc_generate_topic_matrix
if __name__ == '__main__':
model = PLSA(filePath='../data/documents.txt')
W,H = model.fit(model.matrix_frequent,n_topics=5)
print(W)
print(H)
| [
"894127201@qq.com"
] | 894127201@qq.com |
e2b8e7c470a6c659c0a81869ec8129e631a50ab7 | 760c7b015b30603bc8041f2bca602d63a9549680 | /Website-WebApp/NowUI/NowUI/settings.py | aafec1789f25c13365512c7fabab508874b79542 | [] | no_license | Rac-Ro007/Decentralized-Fleet-Management-System | 555e79cc049851bce0ef4577a1e6d6524257bf09 | c89c9af0fbd309a70836746193bb141e3e50c644 | refs/heads/master | 2022-11-11T09:23:35.027000 | 2020-06-30T06:20:16 | 2020-06-30T06:20:16 | 276,010,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,149 | py | """
Django settings for NowUI project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+*x+*yzcp=9c=ircl3@nx_@fkqw2$8jztqyynpa+*y6txu4m&9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'dash',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'NowUI.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'NowUI.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
| [
"ronak.vadhaiya@somaiya.edu"
] | ronak.vadhaiya@somaiya.edu |
e6aae4664af1c395e025ebab20e82e126d8262a2 | c675eaf3dc367e901e68805cce36f69781187b04 | /Stack/Protostar-exploits/Nebula/Level 17/level17.py | fbfa908b08a8913ef54be8b52a6bd1127a130adf | [] | no_license | nu11secur1ty/Linux_hardening_and_security | ce97b32bf26dd681079a53cd659f123fb276bd1a | bf00a6ef37aee358be28e60d0af2511d7ee028e2 | refs/heads/master | 2023-03-06T14:26:08.416964 | 2023-02-22T12:05:51 | 2023-02-22T12:05:51 | 97,936,635 | 14 | 5 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/python
import os
import pickle
import time
import socket
import signal
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
def server(skt):
line = skt.recv(1024)
obj = pickle.loads(line)
for i in obj:
clnt.send("why did you send me " + i + "?\n")
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
skt.bind(('0.0.0.0', 10007))
skt.listen(10)
while True:
clnt, addr = skt.accept()
if(os.fork() == 0):
clnt.send("Accepted connection from %s:%d" % (addr[0], addr[1]))
server(clnt)
exit(1)
| [
"venvaropt@gmail.com"
] | venvaropt@gmail.com |
0e312a2df090eea62b82c07fd7402e9aa4d0c1cd | 5d093e51efa7a5e87feb54785192bf34837cb4f8 | /noise_fft.py | 5462a75f4d1edfc7b616a04a9af60944f10c25c5 | [] | no_license | shigasy/fft-example | 8f2ce9265b674011e9cbb4748037dffc02e4e345 | 4149e1063df59a39119fbcb9fcae51b0e06a2b12 | refs/heads/master | 2022-11-08T16:13:36.689727 | 2020-06-16T06:50:41 | 2020-06-16T06:50:41 | 272,333,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,958 | py | import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0) # 乱数seed固定
N = 128
dt = 0.01 # サンプリング周期(sec) 100Hz
freq = 4 # 周波数 Hz 1秒あたりに繰り返される回数のこと 周期は1波長の数 周波数は1秒あたりに繰り返される回数のこと? 4Hz
amp = 1 # 振幅
t = np.arange(0, N * dt, dt)
f = amp * np.sin(2 * np.pi * freq * t) + np.random.randn(N) * 0.3 # 信号
plt.xlabel('time(sec)', fontsize=14)
plt.ylabel('signal', fontsize=14)
plt.plot(t, f)
plt.show()
F = np.fft.fft(f)
F_abs = np.abs(F)
F_abs_amp = F_abs / N * 2 # 交流成分はデータ数で割って2倍
F_abs_amp[0] = F_abs_amp[0] / 2 # 直流成分(今回は扱わないけど)は2倍不要
# 周波数軸 linspace(開始,終了,分割数) 等差数列。サンプリング周波数にX軸を揃える
fq = np.linspace(0, 1.0/dt, N)
print(fq)
print(F_abs_amp)
# グラフ表示(FFT解析結果)
plt.xlabel('freqency(Hz)', fontsize=14)
plt.ylabel('amplitude', fontsize=14)
# Xの要素とYの要素でグラフ化
plt.plot(fq, F_abs_amp)
plt.show()
# そのまま普通にIFFTで逆変換した場合
F_ifft = np.fft.ifft(F) # IFFT
F_ifft_real = F_ifft.real # 実数部
plt.plot(t, F_ifft_real, c="g") # グラフ
plt.show()
F2 = np.copy(F) # FFT結果コピー
# --------------
# 周波数でフィルタリング処理
fc = 10 # カットオフ(周波数)
F2[(fq > fc)] = 0 # カットオフを超える周波数のデータをゼロにする(ノイズ除去)
# フィルタリング処理したFFT結果の確認
# FFTの複素数結果を絶対値に変換
F2_abs = np.abs(F2)
# 振幅をもとの信号に揃える
F2_abs_amp = F2_abs / N * 2 # 交流成分はデータ数で割って2倍
F2_abs_amp[0] = F2_abs_amp[0] / 2 # 直流成分(今回は扱わないけど)は2倍不要
# グラフ表示(FFT解析結果)
plt.xlabel('freqency(Hz)', fontsize=14)
plt.ylabel('amplitude', fontsize=14)
plt.plot(fq, F2_abs_amp, c='r')
plt.show()
# 周波数でフィルタリング(ノイズ除去)-> IFFT
F2_ifft = np.fft.ifft(F2) # IFFT
F2_ifft_real = F2_ifft.real * 2 # 実数部の取得、振幅を元スケールに戻す
# グラフ表示:オリジナルとフィルタリング(ノイズ除去)
plt.plot(t, f, label='original')
plt.plot(t, F2_ifft_real, c="r", linewidth=4, alpha=0.7, label='filtered')
plt.legend(loc='best')
plt.xlabel('time(sec)', fontsize=14)
plt.ylabel('singnal', fontsize=14)
plt.show()
# --------------
# グラフ再表示(FFT結果・フィルタリングなし)
plt.xlabel('freqency(Hz)', fontsize=14)
plt.ylabel('amplitude', fontsize=14)
plt.hlines(y=[0.2], xmin=0, xmax=100, colors='r', linestyles='dashed')
plt.plot(fq, F_abs_amp)
plt.show()
F3 = np.copy(F) # FFT結果コピー
# 振幅強度でフィルタリング処理
F3 = np.copy(F) # FFT結果コピー
ac = 0.2 # 振幅強度の閾値
F3[(F_abs_amp < ac)] = 0 # 振幅が閾値未満はゼロにする(ノイズ除去)
# 振幅でフィルタリング処理した結果の確認
# FFTの複素数結果を絶対値に変換
F3_abs = np.abs(F3)
# 振幅をもとの信号に揃える
F3_abs_amp = F3_abs / N * 2 # 交流成分はデータ数で割って2倍
F3_abs_amp[0] = F3_abs_amp[0] / 2 # 直流成分(今回は扱わないけど)は2倍不要
# グラフ表示(FFT解析結果)
plt.xlabel('freqency(Hz)', fontsize=14)
plt.ylabel('amplitude', fontsize=14)
plt.plot(fq, F3_abs_amp, c='orange')
plt.show()
# 振幅強度でフィルタリング(ノイズ除去)-> IFFT
F3_ifft = np.fft.ifft(F3) # IFFT
F3_ifft_real = F3_ifft.real # 実数部の取得
# グラフ(オリジナルとフィルタリングを比較)
plt.plot(t, f, label='original')
plt.plot(t, F3_ifft_real, c="orange", linewidth=4, alpha=0.7, label='filtered')
plt.legend(loc='best')
plt.xlabel('time(sec)', fontsize=14)
plt.ylabel('singnal', fontsize=14)
plt.show()
| [
"kxx555u.u@gmail.com"
] | kxx555u.u@gmail.com |
e3f696f1b168e75b90ad00e7e7a6c642b8e5a410 | 552c39141dab7cbc0c34245000291a46cdb41495 | /lte_enb/src/acceptance/s1ap/cm_xta_pycmd.py | 00cae7aa8528453c4f59778e97c32505320da2dc | [] | no_license | cmjeong/rashmi_oai_epc | a0d6c19d12b292e4da5d34b409c63e3dec28bd20 | 6ec1784eb786ab6faa4f7c4f1c76cc23438c5b90 | refs/heads/master | 2021-04-06T01:48:10.060300 | 2017-08-10T02:04:34 | 2017-08-10T02:04:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56,720 | py | #
#/********************************************************************20**
#
# Name: SIP
#
# Type: Python File
#
# Desc:
#
# File: cm_xta_pycmd.py
#
# Sid: cm_xta_pycmd.py@@/main/26 - Tue Apr 10 12:26:14 2012
#
# Prg:
#
#*********************************************************************21*/
# Mapping from TAPA internal command identifiers to the human-readable
# descriptions printed when a test script executes the command.
# NOTE: the original literal listed 'tapa.pdcp.directive' twice (once inline
# on the same line as the PDCP global directive and once on its own line)
# with an identical value; the redundant duplicate key has been removed —
# the resulting mapping is byte-for-byte the same.
intCmdDict = {
    'tapa.timeout'              : 'Expect : No message',
    'tapa.delay'                : 'Delay',
    #cm_xta_pycmd_py_001.main_6 loop changes starts
    'tapa.repeat.start'         : 'Repeat Start',
    'tapa.repeat.end'           : 'Repeat End',
    'tapa.repeat.start.lch.cfg' : 'Repeat Start LCH Cfg',
    #loop changes ends
    'tapa.sip.delay'            : 'SIP Delay',
    'tapa.sip.global.directive' : 'SIP Global Directive : Set',
    'tapa.global.directive'     : 'Global Directive : Set',
    'tapa.directive'            : 'Directive : Set',
    'tapa.memShow'              : 'SSI Memory Info : Info show',
    'tapa.memLeak'              : 'Memory leak info : Memory leak',
    'tapa.comment'              : 'Comment',
    'tapa.mr.directive'         : 'Message router Directive',
    'tapa.sip.directive'        : 'SIP Directive : Set',
    'tapa.iuup.directive'       : 'IUUP Directive : Set',
    'tapa.expect'               : 'Expect',
    'tapa.expseq'               : 'Expect a sequence of',
    'tapa.expopt'               : 'Expect Optional',
    'tapa.expset'               : 'Exp Set',
    'tapa.send'                 : 'Send',
    'tapa.sz.sct.delay'         : 'Delay if Sct interface is loosely coupled',
    'tapa.pdcp.delay'           : 'PDCP Delay',
    'tapa.pdcp.global.directive': 'PDCP Global Directive : Set',
    'tapa.pdcp.directive'       : 'PDCP Directive : Set',
    'tapa.sot.cfgRegDom'        : 'SOT Registrar domain configuration',
    'tapa.sot.cfgRegAuth'       : 'SOT Registrar third party data base configuration',
    'tapa.sot.uncldPrim'        : 'SOT SDK uncalled primitive invocation',
    'tapa.tfu.directive'        : 'Sending Directive for TFU interface',
    'tapa.dcfi.stub'            : 'Executing DCFI Stub'
}
# cm_xta_pycmd_py_001.main_3: Updated simple commands for FTHA
simpleCmdDict = {
'tapa.rlu.datind' : 'Sending Data Indication to upper layer from RLC',
'tapa.rru.ccchdatind' : 'Expecting data indication in the upper layer',
'tapa.rru.ccchdatrsp' : 'sending data response to RRC',
'tapa.rru.dldcchdatreq' : 'DL DCCH Message',
'tapa.rlu.bndreq' : 'Sending Bind request to RLU layer from RLC',
'tapa.rlu.bndcfm' : 'Sending Bind cfm to RLU layer from RLC',
'tapa.rlu.unbndreq' : 'Sending UnBind request to RLU layer from RLC',
'tapa.cmk.bndreq' : 'Cmk Bind Request',
'tapa.cmk.bndcfm' : 'Cmk bind confirm',
'tapa.cmk.ubndreq' : 'Cmk unbind request',
'tapa.rlu.datreq' : 'RLU Data Request',
'tapa.crl.bndreq' : 'CRL Bind Request',
'tapa.crl.bndcfm' : 'CRL bind confirm',
'tapa.crl.ubndreq' : 'CRL unbind request',
'tapa.crl.cfgreq' : 'CRL Configuration Request',
'tapa.crl.cfgcfm' : 'CRL Configuration Confirm',
'tapa.crl.ueidchngreq' : 'CRL UE Id Change request',
'tapa.crl.ueidchngcfm' : 'CRL UE Id Change confirm',
'tapa.crl.countcreq' : 'Sending Count-C request',
'tapa.crl.countccfm' : 'Sending Count-C confirm',
'tapa.crl.suspendreq' : 'Sending Suspend Req',
'tapa.crl.suspendcfm' : 'Sending Suspend Cfm',
'tapa.crl.resumereq' : 'Sending Resume Request',
'tapa.crl.staind' : 'Sending Status Indication ',
'tapa.ctc.bndreq' : 'CTC Bind Request',
'tapa.ctc.bndcfm' : 'CTC bind confirm',
'tapa.ctc.unbndreq' : 'CTC unbind request',
'tapa.ctc.cfgreq' : 'CTC Configuration Request',
'tapa.ctc.cfgcfm' : 'CTC Configuration Confirm',
'tapa.ctc.relreq' : 'CTC Release Request',
'tapa.ctc.relocreq' : 'CTC Relocation Request',
'tapa.ctc.reloccfm' : 'CTC Relocation Request',
'tapa.ctc.ctxtreq' : 'CTC ctxt request',
'tapa.ctc.ctxtcfm' : 'CTC Ctxt cfm',
'tapa.cmk.cfgcfm' : 'CMK configuration Confirm',
'tapa.cmk.cfgreq' : 'CMK Configuration Request',
'tapa.cmk.countcreq' : 'CMK Count-C request',
'tapa.cmk.countccfm' : 'CMK Count-C confirm',
'tapa.rlu.datind' : 'RLU Data Indication',
'tapa.cmk.mesind' : 'CMK Measurement Indication',
'tapa.cmk.mesreq' : 'CMK Measurement Request',
'tapa.rru.bndreq' : 'RRU Bind Request',
'tapa.rru.bndcfm' : 'RRU Bind Confirm',
'tapa.rru.unbindreq' : 'RRU UnBind Request',
'tapa.rru.datreq' : 'RRU Data Request',
'tapa.rru.datind' : 'RRU Data Indication',
'tapa.rru.datrsp' : 'RRU Data Response',
'tapa.rru.datcfm' : 'RRU Data Confirm',
'tapa.rru.mbmsdatreq' : 'RRU MBMS Data request',
'tapa.rru.mcchtxreq' : 'RRU MCCH Tx request',
'tapa.rru.encdatreq' : 'RRU Encode Data Request',
'tapa.rru.encdatcfm' : 'RRU Encode Data Confirm',
'tapa.rru.decdatreq' : 'RRU Decode Data Request',
'tapa.rru.decdatcfm' : 'RRU Decode Data Confirm',
'tapa.rru.errind' : 'RRU Error Indication',
'tapa.rru.uecfgreq' : 'RRU UE Configuration Request',
'tapa.rru.uecfgcfm' : 'RRU UE Configuration Confirm',
'tapa.rru.bmccfgreq' : 'RRU BMC Configuration Request',
'tapa.rru.bmccfgcfm' : 'RRU BMC Configuration Confirm',
'tapa.rru.measreq' : 'RRU Measurement Request',
'tapa.rru.measrprtind' : 'RRU measurement Report Indication',
'tapa.rru.statsureq' : 'RRU Status Request',
'tapa.rru.statuscfm' : 'RRU Status Confirm',
'tapa.rru.statusind' : 'RRU Status Indication',
'tapa.rru.ueidchngreq' : 'RRU UE ID change Request',
'tapa.rru.cncluereq' : 'RRU Cancel UE Request',
'tapa.rru.predefcfgreq' : 'RRU Predefined Configuration Request',
'tapa.rru.sccpchcfgreq' : 'RRU SCCPCH Configuration Request',
'tapa.rru.sccpchcfgcfm' : 'RRU SCCPCH Configuration Confirm',
'tapa.rru.deletetransreq' : 'RRU Delete Transaction Request',
'tapa.rru.countcind' : 'RRU Count-C Indication',
'tapa.rru.countcreq' : 'RRU Count-C Request',
'tapa.rru.relocprepreq' : 'RRU Reloc Prep req',
'tapa.rru.relocprepcfm' : 'RRU Reloc Prep cfm',
'tapa.rru.reloccmtreq' : 'RRU Reloc commit req',
'tapa.rru.reloccmtcfm' : 'RRU Reloc commit cfm',
'tapa.lrr.cfg.gen' : 'General configuration request',
#cm_xta_pycmd_py_001.main_20: Updated for PSF-S1AP 1.1 Release
'tapa.lyt.cfg.gen' : 'General configuration request',
'tapa.lyt.cfgcfm' : 'S1AP configuration confirm',
'tapa.lyt.cntrl.shutdown' : 'shutdown control request for PSF-S1AP',
'tapa.lyt.cntrl.usta' : 'Unsolicited status Control Request for PSF-S1AP',
'tapa.lyt.cntrl.trc' : 'Trace Control Request for PSF-S1AP',
'tapa.lyt.cntrl.debug' : 'Debug Control Request for PSF-S1AP',
'tapa.lyt.cntrlcfm' : 'Control Confirmation for PSF-S1AP',
'tapa.lyt.sta.gen' : 'General Status request for PSF-S1AP',
'tapa.lyt.stacfm' : 'Status Confirmation for PSF-S1AP',
'tapa.lyt.sts.gen' : 'General Statistics Request for PSF-S1AP',
'tapa.lyt.stscfm' : 'Statistics Confirmation for PSF-S1AP',
'tapa.lyt.sts.rset' : 'Resource Set Statistics Request for PSF-S1AP',
'tapa.lyt.staind' : 'Status indication for PSF-S1AP',
'tapa.lyt.trcind' : 'Trace indication for PSF-S1AP',
'tapa.pit.sendtosby' : 'PIT command to Send all updates to StandBy',
'tapa.pit.dropall' : 'PIT command to Drop all updates',
'tapa.pit.update.sby' : 'PIT Inbound Data request',
'tapa.pit.confirm.actv' : 'PIT Inbound Data Confirm',
'tapa.pit.update.actv' : 'PIT Outbound Data Request',
'tapa.pit.confirm.sby' : 'PIT Outbound Data Confirm',
'tapa.pit.sndupdmsg' : 'PIT send update message',
'tapa.pit.exmupdatemsg' : 'PIT Command to Exam Update Message',
'tapa.pit.dropupdmsg' : 'PIT Command to Drop Update Message',
'tapa.pit.confirm.exmupdatemsg' : 'PIT Exam Update Message Confirm',
'tapa.lrr.cfg.lsap' : 'Lower Layer Configuration request',
'tapa.lrr.cfg.usap' : 'Upper Layer Configuration request',
'tapa.lrr.cfg.prot' : 'Protocol Configuration request',
'tapa.lrr.cfg.cfm' : 'Configuration Confirmation',
'tapa.lrr.cntrl.lsap' : 'Lower SAP Config Req',
'tapa.lrr.cntrl.usap' : 'Upper SAP Config Req',
'tapa.lrr.cntrl.lsaptrc' : 'Lower SAP trace',
'tapa.lrr.cntrl.pnodetrc' : 'Peer Node Trace',
'tapa.lrr.cntrl.debug' : 'Debug Control',
'tapa.lrr.cntrl.shutdown' : 'Shutdown',
'tapa.lrr.cntrl.start' : 'Start',
'tapa.lrr.cntrl.usta' : 'Unsolicited Status',
'tapa.lrr.cntrlcfm' : 'Control Confirmation',
'tapa.lrr.sts.gen' : 'General Statistics',
'tapa.lrr.sts.peer' : 'UE Statistics',
'tapa.lrr.sts.cell' : 'CELL Statistics',
'tapa.lrr.stscfm' : 'Statistics Confirm',
'tapa.lrr.sta.sys' : 'Sys Id status',
'tapa.lrr.sta.usap' : 'USAP status',
'tapa.lrr.sta.gen' : 'General status',
'tapa.lrr.sta.lsap' : 'LSAP status',
'tapa.lrr.stacfm' : 'Status confirmation',
'tapa.lrr.staind' : 'Status indication',
'tapa.lrr.trcind' : 'Trace indication',
'tapa.lrr.mibreq' : 'MIB Request',
'tapa.lrr.mibcfm' : 'MIB Confirmation',
'tapa.lrr.audtreq' : 'Audit Request',
'tapa.lrr.audtcfm' : 'Audit Confirmation',
'tapa.re' : 'Validate Regular Expression',
'tapa.sct.termcfm' : 'SCT Termination Confirm',
'tapa.sot.ackind' : 'SOT ACK Indication',
'tapa.sot.relind' : 'SOT Release Indication',
'tapa.sct.endpopencfm' : 'SCT Endpoint Open Confirm',
'CANCEL_R' : 'CANCEL RESPONSE',
'tapa.zot.confirm.actv' : 'ZOT Inbound Data Confirm ',
'UNKNOWN' : 'UNKNOWN',
'tapa.lhi.sts.tsap' : 'TSAP Statistics for TUCL',
'REGISTER' : 'REGISTER',
'tapa.lzo.stacfm' : 'Status Confirmation for PSF-SIP',
'tapa.lso.sts.tsap' : 'Tsap Statistics Request for SIP',
'tapa.sct.bndreq' : 'SCT Bind Request',
'tapa.sct.termind' : 'SCT Termination Indication',
'tapa.sot.cimcfm' : 'SOT CIM Confirm',
'INVITE_R_P' : 'INVITE PROV RESPONSE',
'tapa.lzo.cntrl.trc' : 'Trace Control Request for PSF-SIP',
'tapa.lhi.cntrl.shutdown' : 'Shutdown Control Request for TUCL',
'tapa.sct.datind' : 'SCT Data Indication',
'tapa.sot.any' : 'SOT ANY API',
'tapa.sot.cancelind' : 'SOT Cancel Indication',
'REFER_R' : 'REFER RESPONSE',
'tapa.sht.sync.cntrlreq' : 'Control request to Synchronize',
'tapa.lzo.staind' : 'Status indication for PSF-SIP',
'tapa.lso.sta.endp' : 'Endpoint Status Request for SIP',
'tapa.lso.sta.assoc' : 'Association Status Request for SIP',
'tapa.sot.conreq' : 'SOT Connection Request',
'tapa.sot.modreq' : 'SOT Modify Request',
'tapa.sot.cimind' : 'SOT CIM Indication',
'tapa.lzo.cntrl.shutdown' : 'Shutdown Control Request for PSF-SIP',
'tapa.lso.sta.entity' : 'Entity Status Request for SIP',
'tapa.lsb.sta.assoc' : 'SCTP Association Status Request',
'tapa.lso.sta.tpt' : 'Transport Server Status Request for SIP',
'tapa.sht.dispeer.cntrlreq' : 'Control request to Disable Peer SAP',
'tapa.lsb.cfgcfm' : 'SCTP Config Confirm',
'tapa.lso.sts.ssap' : 'Ssap Statistics Request for SIP',
'INVITE' : 'INVITE',
'MODIFY_R' : 'MODIFY RESPONSE',
'SUBSCRIBE' : 'SUBSCRIBE',
'tapa.invalid' : 'Invalid Command',
'tapa.lsb.cfg.gen' : 'SCTP Gen Config Request',
'tapa.lsb.sta.gen' : 'SCTP Gen Status Request',
'tapa.lzo.sts.gen' : 'General Statistics Request for PSF-SIP',
'tapa.lsb.cntrl.sctsap' : 'SCTP SSAP Control Request',
'tapa.lso.cfg.tpt' : 'Transport Server Configuration Request for SIP',
'tapa.sct.bndcfm' : 'SCT Bind Confirm',
'tapa.sot.relrsp' : 'SOT Release Response',
'COMET' : 'COMET',
'tapa.lsb.sts.sctsap' : 'SCTP SSAP Statistics Request',
'tapa.sot.auditreq' : 'SOT Audit Request',
'ACK' : 'ACK',
'SUBSCRIBE_R_P' : 'SUBSCRIBE PROV RESPONSE',
'SIPANY' : 'SIPANY',
'tapa.lsb.stscfm' : 'SCTP Statistics Confirm',
'tapa.lso.cfg.entity' : 'Entity Configuration Request for SIP',
'tapa.lsb.cntrl.tsap' : 'SCTP TSAP Control Request',
'tapa.lhi.cfg.gen' : 'General Configuration Request for TUCL',
'tapa.sot.anyrsp' : 'SOT ANY RSP',
'BYE_R' : 'BYE RESPONSE',
'tapa.lsb.cntrlcfm' : 'SCTP Control Confirm',
'tapa.sot.concfm' : 'SOT Connection Confirm',
'tapa.sot.modcfm' : 'SOT Modify Confirm',
'tapa.lso.cntrl.shutdown' : 'ShutDown Control Request for SIP',
'tapa.sct.stareq' : 'SCT Sctp Status Request',
'SIPANYREQ' : 'SIPANYREQ',
'tapa.lhi.cntrl.trc' : 'Trace Control Request for TUCL',
'tapa.lso.cntrl.gssap' : 'Group Ssap Control Request for SIP',
'tapa.sot.camreq' : 'SOT CAM Request',
'tapa.lzo.sts.rset' : 'Resource Set Statistics Request for PSF-SIP',
'tapa.lso.cfg.addendp' : 'Add Endpoint Configuration Request for SIP',
'tapa.sot.cimrsp' : 'SOT CIM Response',
'tapa.lso.cntrl.tpt' : 'TPT Control Request for SIP',
'tapa.sct.assocreq' : 'SCT Association Request',
'tapa.sct.setprireq' : 'SCT Set Primary Address Request',
'tapa.sot.conind' : 'SOT Connection Indication',
'tapa.sot.modind' : 'SOT Modify Indication',
'tapa.lsb.cntrl.shutdown' : 'SCTP Shutdown Control Request',
'tapa.sot.bndreq' : 'SOT Bind Request',
'tapa.sot.transreq' : 'SOT Trans Request',
'tapa.lso.sta.sys' : 'System Status Request for SIP',
'tapa.sot.auditcfm' : 'SOT Audit Confirm',
'tapa.sct.hbeatreq' : 'SCT Heart Beat Request',
'tapa.hit.discreq' : 'HIT Disconnect Request',
'tapa.sht.cntrlcfm' : 'Control confirm from layer to System Agent',
'tapa.lsb.cfg.tsap' : 'SCTP TSAP Config Request',
'tapa.lso.cntrl.usta' : 'Unsloicited Status Control Request for SIP',
'tapa.lzo.cntrlcfm' : 'Control Confirmation for PSF-SIP',
'tapa.lso.cntrl.cache' : 'Cache Control Request for SIP',
'tapa.cst.decompressreq' : 'CST De-Compress Request',
'SUBSCRIBE_R' : 'SUBSCRIBE RESPONSE',
'UNKNOWN_R' : 'UNKNOWN RESPONSE',
'tapa.lhi.cntrl.tsap' : 'TSAP Control Request for TUCL',
'MESSAGE_R' : 'MESSAGE RESPONSE',
'tapa.hit.servopenreq' : 'HIT Server Open Request',
'tapa.sct.stacfm' : 'SCT Sctp Status Confirm',
'tapa.hit.udatreq' : 'HIT Udp Data Request',
'tapa.sot.camcfm' : 'SOT CAM Confirm',
'tapa.lzo.sta.gen' : 'General Status request for PSF-SIP',
'tapa.lso.sta.cache' : 'Cache Status Request for SIP',
'SIPERROR' : 'SIPERROR',
'tapa.sct.assoccfm' : 'SCT Association Confirm',
'tapa.sct.setpricfm' : 'SCT Set Primary Address Confirm',
'tapa.lzo.cfgcfm' : 'Configuration Confirmation for PSF-SIP',
'tapa.sot.bndcfm' : 'SOT Bind Confirm',
'tapa.sot.transcfm' : 'SOT Trans Confirm',
'tapa.sct.staind' : 'SCT Sctp Status Indication',
'UPDATE' : 'UPDATE',
'tapa.lzo.cfg.gen' : 'General Configuration Request for PSF-SIP',
'tapa.lzo.stscfm' : 'Statistics Confirmation for PSF-SIP',
'tapa.sct.hbeatcfm' : 'SCT Heart Beat Confirm',
'tapa.hit.disccfm' : 'HIT Disconnection Confirm',
'tapa.sot.camind' : 'SOT CAM Indication',
'tapa.lhi.trcind' : 'Trace Indication for TUCL',
'tapa.sot.conrsp' : 'SOT Connection Response',
'tapa.sot.modrsp' : 'SOT Modify Response',
'tapa.zot.dropall' : 'ZOT command to Drop all updates ',
'tapa.sct.assocind' : 'SCT Association Indication',
'tapa.sct.assocind1' : 'SCT Association Indication',
'tapa.sht.abort.cntrlreq' : 'Control request to Abort',
'tapa.cst.decompresscfm' : 'CST De-Compress Confirm',
'tapa.sot.transind' : 'SOT Trans Indication',
'tapa.lhi.stacfm' : 'Status Confirmation for TUCL',
'tapa.hit.discind' : 'HIT Disconnection Indication',
'tapa.lso.sts.gen' : 'General Statistics Request for SIP',
'tapa.sot.proxyreq' : 'SOT Proxy Request',
'INFO' : 'INFO',
'CIMINFO' : 'CIMINFO',
'MESSAGE' : 'MESSAGE',
'tapa.lhi.staind' : 'Unsolicited Indication for TUCL ',
'tapa.lso.trcind' : 'Trace Indication for SIP',
'tapa.lso.acntreq' : 'Accounting Request for SIP',
'tapa.hit.ubndreq' : 'HIT Unbind Request',
'tapa.sot.cnstreq' : 'SOT Connection Status Request',
'tapa.sot.ubndreq' : 'SOT Unbind Request',
'tapa.hit.udatind' : 'HIT Udp Data Indication',
'BYE' : 'BYE',
'tapa.lsb.sta.tsap' : 'SCTP TSAP Status Request',
'tapa.hit.conreq' : 'HIT Connection Request',
'tapa.lhi.cntrlcfm' : 'Control Confirmation for TUCL',
'tapa.lso.stacfm' : 'Status Confirmation for SIP',
'tapa.sot.errind' : 'SOT Error Indication',
'tapa.sdk.errind' : 'SDK Error Indication',
'INVITE_R' : 'INVITE RESPONSE',
'tapa.lsb.cntrl.debug' : 'SCTP Debug Control Request',
'tapa.lso.cntrlcfm' : 'Control Confirmation for SIP',
'SIPANYRSP' : 'SIPANYRSP',
'tapa.lzo.shcntrlcfm' : 'Control confirm from layer to System Agent',
'tapa.hit.datreq' : 'HIT Tcp Data Request',
'tapa.sot.camrsp' : 'SOT CAM Response',
'SIPREL_R' : 'SIPREL_R',
'tapa.lsb.cntrl.trc' : 'SCTP Trace Control Request',
'tapa.lsb.sta.sctsap' : 'SCTP SSAP Status Request',
'COMET_R' : 'COMET RESPONSE',
'PRACK' : 'PRACK',
'tapa.sct.assocrsp' : 'SCT Association Response',
'tapa.lhi.cfg.tsap' : 'Tsap Configuration Request for TUCL',
'tapa.lso.staind' : 'Status Indication for SIP',
'tapa.hit.flcind' : 'HIT Flow Control Indication',
'tapa.sot.transrsp' : 'SOT Trans Response',
'tapa.lso.cfg.tsap' : 'TSAP Configuration Request for SIP',
'tapa.lso.cntrl.debug' : 'Debug Control Request for SIP',
'tapa.sot.proxycfm' : 'SOT Proxy Confirm',
'SIPREL' : 'SIPREL',
'tapa.lso.acntcfm' : 'Accounting Confirmation for SIP',
'NOTIFY_R' : 'NOTIFY RESPONSE',
'OPTIONS_R' : 'OPTIONS RESPONSE',
'MODIFY_R_P' : 'MODIFY PROV RESPONSE',
'tapa.lsb.cfg.sctsap' : 'SCTP SSAP Config Request',
'tapa.lso.cntrl.tsap' : 'Tsap Control Request for SIP',
'tapa.hit.concfm' : 'HIT Connection Confirm',
'tapa.sot.proxyind' : 'SOT Proxy Indication',
'tapa.sht.standby.cntrlreq' : 'Control request to Go Standby',
'tapa.zot.sendtosby' : 'ZOT command to Send all updates to StandBy ',
'tapa.zot.update.sby' : 'ZOT Inbound Data request ',
'tapa.lso.cntrl.acnt' : 'Account Control Request for SIP',
'tapa.lso.sts.entity' : 'Entity Statistics Request for SIP',
'tapa.lso.acntind' : 'Accounting Indication for SIP',
'tapa.sot.cnstind' : 'SOT Connection Status Indication',
'OPTIONS' : 'OPTIONS',
'tapa.lso.cfg.ssap' : 'SSAP Configuration Request for SIP',
'tapa.lsb.sta.sys' : 'SCTP System Status Request',
'tapa.lso.cntrl.trc' : 'Trace Control Request for SIP',
'tapa.lso.sta.gen' : 'General Status Request for SIP',
'tapa.hit.conind' : 'HIT Connection Indication',
'UPDATE_R' : 'UPDATE RESPONSE',
'tapa.sht.active.cntrlreq' : 'Control request to Go Active ',
'tapa.sht.shutdown.cntrlreq' : 'Control request to ShutDown',
#cm_xta_pycmd_py_001.main_20: Updated for PSF-S1AP 1.1 Release
'tapa.sht.usta.cntrlreq' : 'Control request to enable USTA',
'tapa.shtlsz.cntrlreq':'SHT Control request',
'tapa.lsb.sts.tsap' : 'SCTP TSAP Statistics Request',
'tapa.sct.endpclosereq' : 'SCT Endpoint Close Request',
'tapa.hit.bndreq' : 'HIT Bind Request',
'tapa.lhi.sta.sys' : 'System Status Request for TUCL',
'tapa.hit.datind' : 'HIT Tcp Data Indication',
'tapa.lso.cfg.gen' : 'General Configuration Request for SIP',
'tapa.lso.cntrl.ssap' : 'Ssap Control Request for SIP',
'tapa.lsb.trcind' : 'SCTP Trace Indication',
'INFO_R' : 'INFO RESPONSE',
'CIMINFO_R' : 'CIMINFO RESPONSE',
'tapa.lsb.stacfm' : 'SCTP Status Confirm',
'tapa.cst.compressreq' : 'CST Compress Request',
'CANCEL' : 'CANCEL',
'tapa.lhi.cfgcfm' : 'Configuration Confirmation for TUCL',
'tapa.sot.ackreq' : 'SOT ACK Request',
'tapa.sot.relreq' : 'SOT Release Request',
'tapa.sot.proxyrsp' : 'SOT Proxy Response',
'tapa.lzo.cntrl.usta' : 'Unsolicited status Control Request for PSF-SIP',
'tapa.lzo.cntrl.debug' : 'Debug Control Request for PSF-SIP',
'tapa.lsb.sta.dta' : 'SCTP Destination transport address Status Request',
'tapa.lhi.stscfm' : 'Statistics Confirmation for TUCL',
'tapa.lhi.sta.tsap' : 'TSAP Status Request for TUCL',
'tapa.lsb.cntrl.usta' : 'SCTP UnSolicited Control Request',
'tapa.lso.cfg.endp' : 'Endpoint Configuration Request for SIP',
'tapa.sct.endpclosecfm' : 'SCT Endpoint Close Confirm',
'tapa.hit.bndcfm' : 'HIT Bind Confirm',
'tapa.lsb.staind' : 'SCTP Status Indication',
'tapa.lso.sta.tsap' : 'Tsap Status Request for SIP',
'tapa.sot.refreshind' : 'SOT Refresh Indication',
'MODIFY' : 'MODIFY',
'tapa.lhi.cntrl.debug' : 'Debug Control Request for TUCL',
'tapa.lso.cntrl.entity' : 'Entity Control Request for SIP',
'tapa.sct.termreq' : 'SCT Termination Request',
'tapa.hit.conrsp' : 'HIT Connection Response',
'tapa.sot.anyreq' : 'SOT ANY REQ',
'tapa.sct.endpopenreq' : 'SCT Endpoint Open Request',
'tapa.sct.datreq' : 'SCT Sctp Data Request',
'SIPAUDIT' : 'SIPAUDIT',
'tapa.lso.cntrl.endp' : 'Endpoint Control Request for SIP',
'tapa.lso.cntrl.assoc' : 'Association Control Request for SIP',
'tapa.sot.cancelreq' : 'SOT Cancel Request',
'REGISTER_R' : 'REGISTER RESPONSE',
'tapa.lso.cfg.addtpt' : 'Add Transport Server Configuration Request for SIP',
'tapa.lhi.cfg.ctxt' : 'Context Configuration Request for TUCL',
'tapa.lso.cfgcfm' : 'Configuration Confirmation for SIP',
'tapa.sct.flcind' : 'SCT Sctp Flowcontrol Indication',
'REFER' : 'REFER',
'tapa.lsb.sts.gen' : 'SCTP Gen Statistics Request',
'tapa.sot.cimreq' : 'SOT CIM Request',
'NOTIFY' : 'NOTIFY',
'SIPREFRESH' : 'SIPREFRESH',
'tapa.zot.confirm.sby' : 'ZOT Outbound Data Confirm ',
'tapa.lso.stscfm' : 'Statistics Confirmation for SIP',
'tapa.cst.compresscfm' : 'CST Compress Confirm',
'tapa.cst.open' : 'CST Compress Open', #cm_xta_pycmd_py_001.main_
'tapa.cst.close' : 'CST Compress Close',
'PRACK_R' : 'PRACK RESPONSE',
'tapa.zot.update.actv' : 'ZOT Outbound Data Request ',
'tapa.lhi.sts.gen' : 'General Statistics for TUCL',
'tapa.lso.sta.ssap' : 'Ssap Status Request for SIP',
'tapa.sot.relcfm' : 'SOT Release Confirm',
'tapa.lhi.cntrl.usta' : 'Unsolicited Status Control Request for TUCL',
'tapa.sht.warmstart.cntrlreq' : 'Control request to Perform Warm Start',
'tapa.lzo.trcind' : 'Trace indication for PSF-SIP',
'PUBLISH' : 'PUBLISH',
'PUBLISH_R' : 'PUBLISH RESPONSE',
'tapa.aqu.bndreq' : 'AQU Bind Request',
'tapa.aqu.bndcfm' : 'AQU Bind Confirm',
'tapa.aqu.ubndreq' : 'AQU Unbind Request',
'tapa.aqu.dmmsgreq' : 'AQU Diameter Message Request',
'tapa.aqu.dmmsgind' : 'AQU Diameter Message Indication',
'tapa.aqu.dmmsgrsp' : 'AQU Diameter Message Response',
'tapa.aqu.dmmsgcfm' : 'AQU Diameter Message Confirm',
'tapa.aqu.errind' : 'AQU Error Indication',
'tapa.aqu.rmvtxreq' : 'AQU Remove Transaction Request',
'tapa.aqu.rmvtxcfm' : 'AQU Remove Transaction Confirm',
'tapa.aqu.peerdownind' : 'AQU Peer Down Indication',
'tapa.aqu.peerupind' : 'AQU Peer Up Indication',
'tapa.aqu.flcind' : 'AQU Flow Control Indication',
'tapa.hit.bndreq' : 'HIT Bind Request',
'tapa.hit.ubndreq': 'HIT Unbind Request',
'tapa.hit.servopenreq' : 'HIT Server Open Request',
'tapa.hit.conreq' : 'HIT Connection Request',
'tapa.hit.conrsp' : 'HIT Connection Response',
'tapa.hit.datreq' : 'HIT Tcp Data Request',
'tapa.hit.udatreq' : 'HIT Udp Data Request',
'tapa.hit.discreq' : 'HIT Disconnect Request',
'tapa.hit.conind' : 'HIT Connection Indication',
'tapa.hit.concfm' : 'HIT Connection Confirm',
'tapa.hit.bndcfm' : 'HIT Bind Confirm',
'tapa.hit.datind' : 'HIT Tcp Data Indication',
'tapa.hit.udatind' : 'HIT Udp Data Indication',
'tapa.hit.discind' : 'HIT Disconnection Indication',
'tapa.hit.disccfm' : 'HIT Disconnection Confirm',
'tapa.hit.flcind' : 'HIT Flow Control Indication',
'tapa.sct.bndreq' : 'SCT Bind Request',
'tapa.sct.endpopenreq' : 'SCT Endpoint Open Request',
'tapa.sct.endpclosereq' : 'SCT Endpoint Close Request',
'tapa.sct.assocreq' : 'SCT Association Request',
'tapa.sct.assocrsp' : 'SCT Association Response',
'tapa.sct.termreq' : 'SCT Termination Request',
'tapa.sct.setprireq': 'SCT Set Primary Address Request',
'tapa.sct.hbeatreq' : 'SCT Heart Beat Request',
'tapa.sct.datreq' : 'SCT Sctp Data Request',
'tapa.sct.stareq' : 'SCT Sctp Status Request',
'tapa.sct.bndcfm' : 'SCT Bind Confirm',
'tapa.sct.endpopencfm' : 'SCT Endpoint Open Confirm',
'tapa.sct.endpclosecfm' : 'SCT Endpoint Close Confirm',
'tapa.sct.assocind' : 'SCT Association Indication',
'tapa.sct.assoccfm' : 'SCT Association Confirm',
'tapa.sct.termind' : 'SCT Termination Indication',
'tapa.sct.termcfm' : 'SCT Termination Confirm',
'tapa.sct.setpricfm':'SCT Set Primary Address Confirm',
'tapa.sct.hbeatcfm' : 'SCT Heart Beat Confirm',
'tapa.sct.datind' : 'SCT Data Indication',
'tapa.sct.stacfm' : 'SCT Sctp Status Confirm',
'tapa.sct.staind' : 'SCT Sctp Status Indication',
'tapa.sct.flcind' : 'SCT Flow Control Indication',
'tapa.lhi.sts.tsap' : 'TSAP Statistics for TUCL',
'tapa.lhi.cntrl.shutdown' : 'Shutdown Control Request for TUCL',
'tapa.lhi.cfg.gen' : 'General Configuration Request for TUCL',
'tapa.lhi.cntrl.trc' : 'Trace Control Request for TUCL',
'tapa.lhi.cntrl.tsap' : 'TSAP Control Request for TUCL',
'tapa.lhi.trcind' : 'Trace Indication for TUCL',
'tapa.lhi.stacfm' : 'Status Confirmation for TUCL',
'tapa.lhi.staind' : 'Unsolicited Indication for TUCL ',
'tapa.lhi.cntrlcfm' : 'Control Confirmation for TUCL',
'tapa.lhi.cfg.tsap' : 'Tsap Configuration Request for TUCL',
'tapa.lhi.sta.sys' : 'System Status Request for TUCL',
'tapa.lhi.cfgcfm' : 'Configuration Confirmation for TUCL',
'tapa.lhi.stscfm' : 'Statistics Confirmation for TUCL',
'tapa.lhi.sta.tsap' : 'TSAP Status Request for TUCL',
'tapa.lhi.cntrl.debug' : 'Debug Control Request for TUCL',
'tapa.lhi.cfg.ctxt' : 'Context Configuration Request for TUCL',
'tapa.lhi.cntrl.usta' : 'Unsolicited Status Control Request for TUCL',
'tapa.lhi.sts.gen' : 'General Statistics for TUCL',
'tapa.lsb.sta.assoc' : 'SCTP Association Status Request',
'tapa.lsb.cfgcfm' : 'SCTP Config Confirm',
'tapa.lsb.cfg.gen' : 'SCTP Gen Config Request',
'tapa.lsb.sta.gen' : 'SCTP Gen Status Request',
'tapa.lsb.cntrl.sctsap' : 'SCTP SSAP Control Request',
'tapa.lsb.sts.sctsap' : 'SCTP SSAP Statistics Request',
'tapa.lsb.stscfm' : 'SCTP Statistics Confirm',
'tapa.lsb.cntrl.tsap' : 'SCTP TSAP Control Request',
'tapa.lsb.cntrlcfm' : 'SCTP Control Confirm',
'tapa.lsb.cntrl.shutdown' : 'SCTP Shutdown Control Request',
'tapa.lsb.cfg.tsap' : 'SCTP TSAP Config Request',
'tapa.lsb.sta.tsap' : 'SCTP TSAP Status Request',
'tapa.lsb.cntrl.debug' : 'SCTP Debug Control Request',
'tapa.lsb.cntrl.trc' : 'SCTP Trace Control Request',
'tapa.lsb.sta.sctsap' : 'SCTP SSAP Status Request',
'tapa.lsb.cfg.sctsap' : 'SCTP SSAP Config Request',
'tapa.lsb.sta.sys' : 'SCTP System Status Request',
'tapa.lsb.sts.tsap' : 'SCTP TSAP Statistics Request',
'tapa.lsb.trcind' : 'SCTP Trace Indication',
'tapa.lsb.stacfm' : 'SCTP Status Confirm',
'tapa.lsb.sta.dta' : 'SCTP Destination transport address Status Request',
'tapa.lsb.cntrl.usta' : 'SCTP UnSolicited Control Request',
'tapa.lsb.staind' : 'SCTP Status Indication',
'tapa.lsb.sts.gen' : 'SCTP Gen Statistics Request',
'tapa.laq.cfg.gen' : 'General Configuration Request for Diameter',
'tapa.laq.cfg.lsap' : 'Lower SAP Configuration Request for Diameter',
'tapa.laq.cfg.usap' : 'Upper SAP Configuration Request for Diameter',
'tapa.laq.cfg.prot' : 'Protocol Configuration Request for Diameter',
'tapa.laq.cfg.peer' : 'Peer Configuration Request for Diameter',
'tapa.laq.cfg.realm' : 'Realm Configuration Request for Diameter',
'tapa.laq.cfg.dm': 'Application Specific DM Configuration Request for Diameter',
'tapa.laq.cfg.avp': 'Application Specific AVP Configuration Request for Diameter',
'tapa.laq.cfgcfm': 'Configuration Confirmation for Diameter',
'tapa.laq.cntrl.usta': 'Unsolicited Status Control Request for Diameter',
'tapa.laq.cntrl.trc' : 'Trace Control Request for Diameter',
'tapa.laq.cntrl.debug': 'Debug Control Request for Diameter',
'tapa.laq.cntrl.lsap': 'Tsap Control Request for Diameter',
'tapa.laq.cntrl.usap': 'Usap Control Request for Diameter',
'tapa.laq.cntrl.peer': 'Peer Control Request for Diameter',
'tapa.laq.cntrl.realm' : 'Realm Control Request for Diameter',
'tapa.laq.cntrl.shutdown': 'ShutDown Control Request for Diameter',
'tapa.laq.cntrl.start' : 'Start Control Request for Diameter',
'tapa.laq.cntrlcfm':'Control Confirmation for Diameter',
'tapa.laq.sts.gen': 'General Statistics Request for Diameter',
'tapa.laq.sts.peer': 'Peer Statistics Request for Diameter',
'tapa.laq.sts.lsap': 'LSap Statistics Request for Diameter',
'tapa.laq.sts.usap': 'USap Statistics Request for Diameter',
'tapa.laq.stscfm' : 'Statistics Confirmation for Diameter',
'tapa.laq.sta.sys' : 'System ID Status Request for Diameter',
'tapa.laq.sta.usap' : 'Upper Sap Status Request for Diameter',
'tapa.laq.sta.gen':'General Status Request for Diameter',
'tapa.laq.sta.lsap': 'Lower Sap Status Request for Diameter',
'tapa.laq.sta.peer' : 'Peer Status Request for Diameter',
'tapa.laq.stacfm' : 'Status Confirmation for Diameter',
'tapa.laq.staind': 'Status Indication for Diameter',
'tapa.laq.trcind': 'Trace Indication for Diameter',
'tapa.laq.probe.peer' :'Peer Probe Request for Diameter',
'tapa.laq.probe.realm': 'Realm Probe Request for Diameter',
'tapa.laq.probe.avp' : 'AVP Probe Request for Diameter',
'tapa.laq.probe.dm': 'DM Probe Request for Diameter',
'tapa.laq.probecfm': 'Probe Confirm for Diameter',
'tapa.ltc.cfg.gen' : 'General Configuration Request for pdcp',
'tapa.ltc.cfg.ctcsap' :'CTC SAP Configuration Request for pdcp',
'tapa.ltc.cfgcfm.ctcsap' :'CTC SAP Configuration Confirm for pdcp',
'tapa.ltc.cfg.tcusap' :'TCU SAP Configuration Request for pdcp',
'tapa.ltc.cfgcfm.tcusap' :'TCU SAP Configuration Confirm for pdcp',
'tapa.ltc.cfg.rlusap' :'RLU SAP Configuration Request for pdcp',
'tapa.ltc.cfgcfm.rlusap' :'RLU SAP Configuration Confirm for pdcp',
'tapa.ltc.cfgcfm' :'Configuration Confirmation for pdcp',
'tapa.ltc.cntrl.usta' :'Unsloicited Status Control Request for PDCP',
'tapa.ltc.cntrl.trc' :'Trace Control Request for PDCP',
'tapa.ltc.cntrl.rlusap' :'Control request for pdcp Lower Sap',
'tapa.ltc.cntrl.shutdown':'ShutDown Control Request for pdcp',
'tapa.ltc.cntrlcfm' : 'Control Confirmation for pdcp',
'tapa.ltc.sta.gen' : 'General Status Request for PDCP',
'tapa.ltc.sta.ctcsap' : 'Ctc Sap Status Request for PDCP',
'tapa.ltc.sta.tcusap' : 'Tcu Sap Status Request for PDCP',
'tapa.ltc.sta.rlusap' : 'Rlu Sap Status Request for PDCP',
'tapa.ltc.sta.sys' : 'System Status Request for pdcp',
'tapa.ltc.stacfm' : 'pdcp Status Confirm',
'tapa.ltc.staind' : 'Status Indication for PDCP' ,
'tapa.ltc.trcind' : 'Trace Indication for PDCP' ,
'tapa.ltc.sts.gen' : 'General Statistics Request for PDCP',
'tapa.ltc.sts.ctcsap' : 'Ctc Sap Statistics Request for PDCP',
'tapa.ltc.sts.tcusap' : 'Tcu Sap Statistics Request for PDCP',
'tapa.ltc.sts.rlusap' : 'Rlu Sap Statistics Request for PDCP',
'tapa.ltc.stscfm' : 'Statistics Confirmation for PDCP',
'tapa.ctc.snreq' : 'CTC sequence number request',
'tapa.ctc.ubndreq' : 'CTC UnBind request',
'tapa.ctc.bndcfm' : 'CTC Bind confirm',
'tapa.ctc.cfgreq' : 'CTC Config request',
'tapa.ctc.cfgcfm' : 'CTC Config confirm',
'tapa.ctc.bndreq' : 'CTC Bind request',
'tapa.ctc.ctxtreq' : 'CTC context request',
'tapa.ctc.ctxtcfm' : 'CTC context confirm',
'tapa.ctc.relreq' : 'CTC release request',
'tapa.ctc.relocreq' : 'CTC relocation request',
'tapa.ctc.reloccfm' : 'CTC relocation confirm',
'tapa.tcu.bndreq' : 'TCU Bind Request',
'tapa.tcu.ubndreq' : 'TCU UnBind request',
'tapa.tcu.bndcfm' : 'TCU Bind confirm',
'tapa.tcu.datreq' : 'TCU Data request',
'tapa.tcu.datind' : 'TCU Data indication',
'tapa.rlu.datcfm' : 'RLU Data Confirm',
'tapa.rlu.bndreq' : 'RLU Bind request',
'tapa.rlu.ubndreq' : 'RLU UnBind request',
'tapa.rlu.bndcfm' : 'RLU Bind confirm',
'tapa.rlu.datreq' : 'RLU Data request',
'tapa.rlu.datind' : 'RLU Data indication',
'tapa.rlu.mbmsdatreq' : 'RLU MBMS Data request',
'tapa.laq.probecfm': 'Probe Confirm for Diameter',
# Support for the FTHA Integration
'tapa.lsg.cfg.gen': 'General Configuration Request for System Manager',
'tapa.lsg.cfgcfm': 'Configuration Confirmation for System Manager',
'tapa.lsg.cntrl.enablenode' : 'Enable Node Control Request for System Manager',
'tapa.lsg.cfg.entity' : 'Entity Configuration Request for System Manager',
'tapa.lsg.cntrl.disablenode' : 'Disable Node Control Request for System Manager',
'tapa.lsg.cntrl.swapnode' : 'Swap Node Control Request for System Manager',
'tapa.lsg.cntrlcfm' : 'Control Confirmation for System Manager',
'tapa.lsg.cntrl.abort' : 'Abort Control Request for System Manager',
#cm_xta_pycmd_py_001.main_20: Updated for PSF-S1AP 1.1 Release
'tapa.lsg.cntrl.debug' : 'Debug Control Request for System Manager',
'tapa.lmr.cntrlcfm' : 'Control Confirmation for PSF-SIP',
'tapa.lmr.cfg.gen' : 'General Configuration Request for PSF-SIP',
'tapa.lmr.cfgcfm' : 'Configuration Confirmation for PSF-SIP',
'tapa.lmr.cntrl.shutdown' : 'Shutdown Control Request for PSF-SIP',
'tapa.lmr.cntrlcfm' : 'Control Confirmation for PSF-SIP',
'tapa.lsh.cfg.gen' : 'SCTP Gen Config Request',
'tapa.lsh.cfgcfm' : 'SCTP Config Confirm',
'tapa.lsh.stscfm' : 'SCTP Statistics Confirm',
'tapa.lsh.cntrlcfm' : 'SCTP Control Confirm',
'tapa.lsh.cntrl.trc' : 'SCTP Trace Control Request',
'tapa.lsh.sts.gen' : 'SCTP Gen Statistics Request',
# Adding simple commands for RPT
'tapa.lhr.cfg.gen' : 'General Configuration Request for RTP',
'tapa.lhr.cfg.ssap' : 'Ssap Configuration Request for RTP',
'tapa.lhr.cfg.tsap' : 'Tsap Configuration Request for RTP',
'tapa.lhr.cfgcfm' : 'Configuration Confirmation for RTP',
'tapa.lhr.cntrl.usta' : 'Unsolicited Status Control Request for RTP',
'tapa.lhr.cntrl.trc' : 'Trace Control Request for RTP',
'tapa.lhr.cntrl.tsap' : 'TSAP Control Request for RTP',
'tapa.lhr.cntrl.ssap' : 'SSAP Control Request for RTP',
'tapa.lhr.cntrl.debug' : 'Debug Control Request for RTP',
'tapa.lhr.cntrl.shutdown' : 'Shutdown Control Request for RTP',
'tapa.lhr.cntrlcfm' : 'Control Confirmation for RTP',
'tapa.lhr.sts.sess' : 'Session Statistics for RTP',
'tapa.lhr.sts.tsap' : 'TSAP Statistics for RTP',
'tapa.lhr.sts.ssap' : 'SSAP Statistics for RTP',
'tapa.lhr.stscfm' : 'Statistics Confirmation for RTP',
'tapa.lhr.sta.sys' : 'System Status Request for RTP',
'tapa.lhr.sta.tsap' : 'TSAP Status Request for RTP',
'tapa.lhr.sta.ssap' : 'SSAP Status Request for RTP',
'tapa.lhr.stacfm' : 'Status Confirmation for RTP',
'tapa.lhr.staind' : 'Unsolicited Indication for RTP',
'tapa.lhr.trcind' : 'Trace Indication for RTP',
'tapa.hrt.bnd.req' : 'Bind Request to RTP',
'tapa.hrt.bnd.cfm' : 'Handels bind confirmation from RTP',
'tapa.hrt.ubnd.req' : 'Unbind Request to RTP',
'tapa.hrt.sess.req' : 'Session Creation Request to RTP',
'tapa.hrt.sess.cfm' : 'Handels Session Confirmation from RTP',
'tapa.hrt.rel.req' : 'Release session request to RTP',
'tapa.hrt.rel.cfm' : 'Session Release confirmation form RTP',
#Added for Dummy RTP
'tapa.dhrt.hdlbnd.req' : 'Handel Bind Request to Dummy RTP',
'tapa.daal.bnd.req' : 'Handel Bind Request to Dummy AAL2',
'tapa.aal.dat.req' : 'Handel Data Request to Dummy AAL2',
'tapa.aal.dat.ind' : 'Handel Data Indication to Dummy AAL2',
'tapa.daal.bnd.cfm' : 'Handel Bind Comfirm to Dummy AAL2',
'tapa.daal.ubnd.req' : 'Handel UNBind Request to Dummy AAL2',
'tapa.dhrt.hdlbnd.cfm' : 'Handel bind confirmation for Dummy RTP',
'tapa.dhrt.hdlubnd.req' : 'Handel Unbind Request for Dummy RTP',
'tapa.dhrt.hdlsess.req' : 'Handel Session Creation Request for Dummy RTP',
'tapa.dhrt.hdlsess.ind' : 'Handel Session indication for Dummy RTP',
'tapa.dhrt.hdlsess.rsp' : 'Handels Session Response for Dummy RTP',
'tapa.dhrt.hdlsess.cfm' : 'Handels Session Confirmation for RTP',
'tapa.dhrt.hdlrel.req' : 'Handel session release request for Dummy RTP ',
'tapa.dhrt.hdlrel.ind' : 'Handel Session release indication for Dummy RTP',
'tapa.dhrt.hdlrel.rsp' : 'Handel Session release response for Dummy RTP',
'tapa.dhrt.hdlrel.cfm' : 'Handel Session release confirmation for Dummy RTP',
'tapa.dhrt.hdludat.req' : 'Unit Data Request for Dummy RTP',
'tapa.dhrt.hdludat.ind' : 'Unit Data Indication for Dummy RTP',
#Added Simple commands for GTP
'tapa.lgt.cfg.gen' : 'General Configuration for GTP',
'tapa.lgt.cfg.extgen' : 'Extended General Configuration for GTP',
'tapa.lgt.cfg.ggusap' : 'GGU SAP Configuration for GTP',
'tapa.lgt.cfg.tsap' : 'TSAP Configuration for GTP',
'tapa.lgt.cfg.server' : 'Server Configuration for GTP',
'tapa.lgt.cfgcfm' : 'Configuration Confirmation for GTP',
'tapa.lgt.cntrl.usta' : 'Control Request for GTP',
'tapa.lgt.cntrl.trc' : 'Trace Control Request for GTP',
'tapa.lgt.cntrl.tsap' : 'TSAP Control Request for GTP',
'tapa.lgt.cntrl.ggusap' : 'GGU SAP Control Request for GTP',
'tapa.lgt.cntrl.server' : 'Server Control Request for GTP',
'tapa.lgt.cntrl.debug' : 'Debug Control Request for GTP',
'tapa.lgt.cntrl.shutdown' : 'ShutDown Control Request for GTP',
'tapa.lgt.cntrlcfm' : 'Control Confirmation from GTP',
'tapa.lgt.sts.gen' : 'General Statistics Request for GTP',
'tapa.lgt.sts.ggusap' : 'GGU Statistics Request for GTP',
'tapa.lgt.stscfm' : 'Statistics Confirmation for GTP',
'tapa.lgt.sta.sys' : 'System Status Request for GTP',
'tapa.lgt.sta.tsap' : 'TSAP Status Request for GTP',
'tapa.lgt.sta.ggusap' : 'GGU Status Request for GTP',
'tapa.lgt.sta.gen' : 'General Status Request for GTP',
'tapa.lgt.sta.con' : 'Connection Status Request for GTP',
'tapa.lgt.stacfm' : 'Status Confirmation for GTP',
'tapa.lgt.staind' : 'Status Indication for GTP',
'tapa.lgt.trcind' : 'Trace Indication for GTP',
'tapa.ggu.bnd.req' : 'Bind Request for GTP',
'tapa.ggu.bnd.cfm' : 'Bind Confirmation for GTP',
'tapa.ggu.ubnd.req' : 'Unbind request for GTP',
'tapa.ggu.lcltnlmgmt.req' : 'Local Tunnel Management Request for GTP',
'tapa.ggu.lcltnlmgmt.cfm' : 'Local Tunnel Management Confirmation for GTP',
'tapa.ggu.sta.ind' : 'Status Indication for GTP',
'tapa.ggu.lcldel.req' : 'Local Tunnel deletion request for GTP',
'tapa.ggu.lcldel.cfm' : 'Local Tunnel deletion confirmation for GTP',
#Added for Dummy RTP
'tapa.dggu.hdlbnd.req' : 'Handle Bind Request to dummy GTP',
'tapa.dggu.hdlbnd.cfm' : 'Handle Bind Confirmation from dummy GTP',
'tapa.dggu.hdlubnd.req' : 'Handle Ubind Request from dummy GTP',
'tapa.dggu.hdllcltnlmgmt.req' : 'Handle Local Tunnel Management Request to dummy GTP',
'tapa.dggu.hdllcltnlmgmt.cfm' : 'Handle Local Tunnel Management Confirmation from dummy GTP',
'tapa.dggu.hdllcldel.req' : 'Handle Local Tunnel Management Request to dummy GTP',
'tapa.dggu.hdllcldel.cfm' : 'Handle Local Tunnel Management Cfm from dummy GTP',
'tapa.dggu.hdludat.req' : 'Handle Data Request to dummy GTP',
'tapa.dggu.hdludat.ind' : 'Handle Data Indication from dummy GTP',
'tapa.dggu.hdlflc.ind' : 'Handle Flow Control Indication from dummy GTP',
#Added Simple commands for IUUP
'tapa.llx.cfg.gen' : 'General Configuration Request for LX',
'tapa.llx.cfg.usap' : 'Usap Configuration Request for LX',
'tapa.llx.cfg.tsap' : 'Tsap Configuration Request for LX',
'tapa.llx.cfgcfm' : 'Configuration Confirmation for LX',
'tapa.llx.cntrl.usta' : 'Unsolicited Status Control Request for LX',
'tapa.llx.cntrl.trc' : 'Trace Control Request for LX',
'tapa.llx.cntrl.tsap' : 'TSAP Control Request for LX',
'tapa.llx.cntrl.usap' : 'USAP Control Request for LX',
'tapa.llx.cntrl.debug' : 'Debug Control Request for LX',
'tapa.llx.cntrl.shutdown' : 'Shutdown Control Request for LX',
'tapa.llx.cntrlcfm' : 'Control Confirmation for LX',
'tapa.llx.sts.gen' : 'General Statistics for LX',
'tapa.llx.sts.inst' : 'Inst Statistics for LX',
'tapa.llx.sts.tsap' : 'TSAP Statistics for LX',
'tapa.llx.sts.usap' : 'USAP Statistics for LX',
'tapa.llx.stscfm' : 'Statistics Confirmation for LX',
'tapa.llx.sta.sys' : 'System Status Request for LX',
'tapa.llx.sta.gen' : 'General Status Request for LX',
'tapa.llx.sta.tsap' : 'TSAP Status Request for LX',
'tapa.llx.sta.usap' : 'USAP Status Request for LX',
'tapa.llx.sta.inst' : 'Inst Status Request for LX',
'tapa.llx.stacfm' : 'Status Confirmation for LX',
'tapa.llx.staind' : 'Unsolicited Indication for LX',
'tapa.llx.trcind' : 'Trace Indication for LX',
'tapa.lxt.bndreq' : 'LXT Bind Request',
'tapa.lxt.bndcfm' : 'LXT Bind Confirm',
'tapa.lxt.ubndreq' : 'LXT Unbind Request',
'tapa.lxt.inst.cfg.req' : 'LXT Instance Configuration Request',
'tapa.lxt.inst.cfg.cfm' : 'LXT Instance Configuration confirmation',
'tapa.lxt.udatreq' : 'LXT Unit Data Request',
'tapa.lxt.udatind' : 'LXT Unit Data Indication',
'tapa.lxt.datreq' : 'LXT Data Request',
'tapa.lxt.datind' : 'LXT Data Indication',
'tapa.lxt.stareq' : 'LXT Status Request',
'tapa.lxt.staind' : 'LXT Status Indication',
'tapa.lxt.starsp' : 'LXT Status respons',
'tapa.lxt.stacfm' : 'LXT Status Confirmation',
'tapa.lxt.errind' : 'LXT Error Indication',
'tapa.lsh.sts.gen' : 'SCTP Gen Statistics Request',
'tapa.lsz.cfg.gen' : 'General Configuration Request for S1AP',
'tapa.lsz.test.invalid' : 'To test the invalid cases',
'tapa.lsz.cfg.lsap' : 'Sct Sap Configuration Request for S1AP',
'tapa.lsz.cfg.usap' : 'Szt Sap Configuration Request for S1AP',
'tapa.lsz.cfg.prot' : 'Protocol Configuration Request for S1AP',
'tapa.lsz.cfg.peer' : 'Peer Configuration Request for S1AP',
'tapa.lsz.cfgcfm' : 'Configuration Confirmation for S1AP',
'tapa.lsz.sts.req' : 'Statistics Request for S1AP',
'tapa.lsz.stscfm' : 'Statistics Confirmation for S1AP',
'tapa.lsz.sta.req' : 'Status Request for S1AP',
'tapa.lsz.stacfm' : 'Status Confirmation for S1AP',
'tapa.lsz.staInd' : 'Status Indication for S1AP',
'tapa.lsz.trcInd' : 'Trace Indication for S1AP',
'tapa.lsz.cntrl.uSap' : 'Szt Sap Control Request for S1AP',
'tapa.lsz.cntrl.lSap' : 'Sct Sap Control Request for S1AP',
'tapa.lsz.cntrl.lSapTrc' : 'Sct Sap Trace Control Request for S1AP',
'tapa.lsz.cntrl.pNodeTrc' : 'Peer Node Trace Control Request for S1AP',
'tapa.lsz.cntrl.debug' : 'Debug Control Request for S1AP',
'tapa.lsz.cntrl.peer' : 'Control Request for S1AP',
'tapa.lsz.cntrl.usta' : 'Unsloicited Status Control Request for S1AP',
'tapa.lsz.cntrlcfm' : 'Control Confirm from S1AP',
'tapa.lsz.cntrl.shutdown' :'ShutDown Control Request for S1AP',
'tapa.szt.bndreq' : 'SZT Bind Request',
'tapa.szt.bndcfm' : 'SZT Bind Confirm',
'tapa.szt.ubndreq' : 'SZT Unbind Request',
'tapa.szt.audreq' : 'SZT Audit Request',
'tapa.szt.audcfm' : 'SZT Audit Confirm',
'tapa.szt.flcind' : 'SZT Flow Control Indication',
'tapa.szt.staind' : 'SZT Status Indication',
'tapa.szt.errind' : 'SZT Error Indication',
'tapa.szt.udatreq' : 'Connection-less Data Request',
'tapa.szt.udatind' : 'Connection-less Data Indication',
'tapa.szt.conreq' : 'Associated Logical Connection Creation Request',
'tapa.szt.conind' : 'Associated Logical Connection Creation Indication',
'tapa.szt.conrsp' : 'Associated Logical Connection Creation Response',
'tapa.szt.concfm' : 'Associated Logical Connection Creation Confirmation',
'tapa.szt.relreq' : 'Associated Logical Connection Release Request',
'tapa.szt.relind' : 'Associated Logical Connection Release Indicaiton',
'tapa.szt.BldUnsucOutComeMsg' : 'Associated Logical Connection BldUnsucOutComeMsg',
'tapa.szt.relrsp' : 'Associated Logical Connection Release Reply',
'tapa.szt.relcfm' : 'Associated Logical Connection Release Confirmation',
'tapa.szt.datreq' : 'Associated Signalling Data Request',
'tapa.szt.datind' : 'Associated Signalling Data Indication',
'tapa.szt.encreq' : 'Encode S1AP Message Request',
'tapa.szt.enccfm' : 'Encode S1AP Message Confirmation',
'tapa.szt.decreq' : 'Decode S1AP Message Request',
'tapa.szt.deccfm' : 'Decode S1AP Message Confirmation',
#cm_xta_pycmd_py_001.main_24 Added for S1 Abort request support by UI
'tapa.szt.aborts1req' : 'Abort S1 Setup Request',
'tapa.szt.aborts1cfm' : 'Abort S1 Setup Confirmation',
'tapa.szt.clearMmeCb' : 'Clear MME Cb',
'tapa.szt.assocind' : 'S1AP Peer Assoc Indication',
#cm_xta_pycmd_py_001.main_20: Updated for PSF-S1AP 1.1 Release
'tapa.szt.assocrsp' : 'S1AP Peer Assoc Response',
'tapa.oam.cntrvalreq' : 'OAM L3 Counter Req',
'tapa.oam.cntrvalrsp' : 'OAM L3 Counter Value Response',
'tapa.oam.cntrclrreq' : 'OAM L3 Counter Clear Req',
'tapa.oam.cntrclrrsp' : 'OAM L3 Counter Clear Rsponse',
'tapa.czt.bmpind' : 'X2AP bmp indication',
'tapa.czt.gpreq' : 'X2AP General Procedure Request',
'tapa.czt.gprsp' : 'X2AP General Procedure Response',
'tapa.czt.gpind' : 'X2AP General Procedure indication',
'tapa.czt.bmpreq' : 'X2AP bmp request',
'tapa.czt.bmprsp' : 'X2AP bmp response',
'tapa.czt.bmpcfm' : 'X2AP bmp confirm',
'tapa.czt.relreq' : 'X2AP Local Release Request',
'tapa.czt.staind' : 'X2AP Status Indication',
'tapa.czt.errind' : 'X2AP Error Indication',
'tapa.lyt.shcntrlcfm' : 'S1AP-PSF LM SHT Control CFM',
'tapa.sht.lytcntrl.req' : 'S1AP Sht Control request',
'tapa.sht.lytcntrlcfm' : 'S1AP Sht Control cfm',
'tapa.sht.lszcntrl.req' : 'S1AP SHT Control Request',
'tapa.sht.lszcntrlcfm' : 'S1AP SHT Control Request',
#TotaleNodeB Application Acceptance
#Radisys1_S
'tapa.lwr.cztcfg' : 'Czt ReCfg Request',
#Radisys1_E
'tapa.lwr.cfgreq' : 'TotaleNodeB Cfg Request',
'tapa.lwr.recfgreq' : 'TotaleNodeB ReCfg Request',
'tapa.lwr.dyncfgreq' : 'TotaleNodeB Dyn Cfg Request',
'tapa.lwr.multidyncfgreq' : 'Multiple TotaleNodeB Dyn Cfg Request',
'tapa.lwr.cfgcfm' : 'TotaleNodeB Cfg Confirmation',
'tapa.lwr.dyncfgcfm' : 'TotaleNodeB Dyn Cfg Confirmation',
'tapa.lwr.s1init.cntrl.req' : 'TotaleNodeB S1 Init Cntrl Req',
'tapa.lwr.x2init.cntrl.req' : 'TotaleNodeB x2 Init Cntrl Req',
'tapa.lwr.cellup.cntrl.req' : 'TotaleNodeB Cell Up Cntrl Req',
'tapa.lwr.cntrlcfm' : 'TotaleNodeB cntrl Confirmation',
'tapa.nhu.cellcfgreq' : 'TotaleNodeB Cell Cfg Req to NHU',
'tapa.nhu.cellcfgcfm' : 'TotaleNodeB Cell Cfg Cfm from RRC to App',
'tapa.rgr.cfgreq' : 'TotaleNodeB SCHD Cfg Req to RGR',
'tapa.rgr.cfgreq.cfg.enb' : 'TotaleNodeB SCHD Enodeb Cfg Req to RGR',
'tapa.rgr.cfgreq.cfg.ue' : 'TotaleNodeB SCHD Ue Cfg Req to RGR',
'tapa.rgr.cfgCfm' : 'TotaleNodeB RGR cfg Cfm to App',
'tapa.ctf.cfgreq' : 'TotaleNodeB CTF cfg Req to PHY',
'tapa.ctf.cfgcfm' : 'TotaleNodeB CTF cfg CFM to App',
'tapa.ctf.eNBStopInd' : 'TotaleNodeB CTF eNB Stop Indication to App',
'tapa.rgr.sicfgreq' : 'TotaleNodeB RGR cfg Req to MAC SCHD',
'tapa.rgr.sicfgcfm' : 'TotaleNodeB RGR SI Cfm to App from SCHD',
'tapa.rgr.warningsicfgreq' : 'TotaleNodeB RGR Warning SI cfg Req to MAC SCHD',
'tapa.rgr.warningsicfgcfm' : 'TotaleNodeB RGR Warning SI Cfm to App from SCHD',
'tapa.rgr.warningsistopreq' : 'TotaleNodeB RGR Warning SI STOP Req to MAC SCHD',
'tapa.rgr.ttireq' : 'TotaleNodeB RGR TTI IND to app from MAC',
'tapa.rgr.uesta.ind' : 'TotaleNodeB RGR UE STA IND to app from MAC',
'tapa.rgr.sib7.val' : 'TotaleNodeB RGR SIB7 Vaidation from APP to MAC',
'tapa.rgr.sib8.val' : 'TotaleNodeB RGR SIB8 Vaidation from APP to MAC',
'tapa.rgr.sib3.val' : 'TotaleNodeB RGR SIB3 Vaidation from APP to MAC',
'tapa.nhu.datind' : 'TotaleNodeB Nhu dat indication to App from NHU',
'tapa.nhu.datreq' : 'TotaleNodeB Nhu dat request to NHU from App',
'tapa.nhu.datrsp' : 'TotaleNodeB Nhu dat response from App to NHU',
'tapa.nhu.sdustareq' : 'TotaleNodeB Nhu SDU Status Request',
'tapa.nhu.sdustacfm' : 'TotaleNodeB Nhu SDU Status Confirm',
'tapa.nhu.datresumereq' : 'TotaleNodeB Nhu Data Resume Request', #udaka
'tapa.nhu.datresumecfm' : 'TotaleNodeB Nhu Date Resume Confirm',
'tapa.nhu.cfgcfm' : 'TotaleNodeB Nhu Cfg Cfm to App from NHU',
'tapa.nhu.cfgreq' : 'TotaleNodeB Nhu Cfg requst to NHU from App',
'tapa.nhu.encodereq' : 'TotaleNodeB Nhu encode request to NHU from App',
'tapa.nhu.decodereq' : 'TotaleNodeB Nhu decode request to NHU from App',
'tapa.nhu.decodecfm' : 'TotaleNodeB Nhu decode Cfm to App from NHU',
'tapa.nhu.encodecfm' : 'TotaleNodeB Nhu encode Cfm to App from NHU',
'tapa.nhu.pdcpdatafwdreq':'PDCP Data Forward Request',
'tapa.nhu.mcelldatrsp' : 'TotaleNodeB Nhu dat response from App to NHU for Multi cell',
'tapa.nhu.staticreset': 'To reset the static varaible in NHU Dat Request',
'tapa.nhu.datcfm' : 'TotaleNodeB Nhu dat Cfm to App from NHU',
'tapa.nhu.cncluereq' : 'TotaleNodeB Nhu Cancel UE Request to NHU from APP',
'tapa.lwr.shutdownreq' : 'TotaleNodeB Lwr ShutDown Request to APP from LM',
'tapa.infoshow' : 'show memory',
'tapa.lwr.fullreset.req' : 'Full reset',
#EGTP
'tapa.egt.egutnlmgmtreq' : 'TotaleNodeB EGT tunnel Management request from App to EGT',
'tapa.egt.egutnlmgmtcfm' : 'TotaleNodeB EGT tunnel Management Cfm from EGT to App',
'tapa.egt.egudatreq' : 'EGTP-U data request',
'tapa.egt.egudatind' : 'EGT-U data indication',
#PDCP
'tapa.pju.datind' : 'PDCP',
'tapa.pju.datfwdind' : 'PJU Data Forward Indication',
'tapa.pju.datreq' : 'PJU Data Request',
'tapa.pju.datfwdreq' : 'PJU Data Forward Request',
'tapa.lrm.cpuloadind' : 'CPU load Indication',
'tapa.lrm.cellUpInd' : 'LRM Cell Up indication',
'tapa.rmu.ueadmitrsp' : 'UE Admit Response',
'tapa.rmu.fsmstaind' : 'FSM Status Indication',
'tapa.rmu.uerelind' : 'UE Release Indication',
'tapa.lrm.genCfgCfm' : 'Generic Config confirm',
'tapa.invalid' : 'Invalid Command',
'tapa.lrm.cellcfgreq' : 'Cell config Request',
'tapa.lrm.sapCfgReq' : 'SAP Config Request',
'tapa.lrm.lsapCfgReq' : 'Lower SAP Config Request',
'tapa.rmu.initconextsetupreq' : 'UE ICS Request',
'tapa.rmu.uerelreq' : 'UE Release Request',
'tapa.lrm.sapCfgCfm' : 'SAP Config Confirm',
'tapa.rmu.crntirecfgreq' : 'UE Crnti Reconfiguration Request',
'tapa.rmu.uerecfgrsp' : 'UE Reconfiguration Response',
'tapa.rmu.uerecfgreq' : 'UE Reconfiguration Request',
'tapa.re' : 'Validate Regular Expression',
'tapa.rmu.mmeovldstart' : 'MME Overload Start Indication',
'tapa.rmu.mmeovldstop' : 'MME Overload Stop Indication',
'tapa.lrm.enbCfgReq' : 'eNB Config Request',
'tapa.rmu.ueadmitreq' : 'UE Admit Request',
'tapa.rmu.uerelrsp' : 'UE Release Response',
'tapa.rmu.erabrecfgreq' : 'UE ERAB Reconfiguration Request',
'tapa.rmu.uecaprecfgreq' : 'UE Capacity Reconfiguration Request',
'tapa.rmu.scellrecfgreq' : 'UE Scell Addtion Reconfig Request',
'tapa.lrm.enbCfgCfm' : 'eNB Config Confirm',
'tapa.lrm.cellcfgCfm' : 'Cell Config Confirm',
'tapa.lrm.genCfgReq' : 'Generic Config Request',
'tapa.rmu.cellrecfgind' : 'Cell Reconfig Indication',
'tapa.rmu.fsmstatusind' : 'FSM Status Indication',
'tapa.rmu.uehoreq' : 'UE HO Request',
'tapa.rmu.uehorsp' : 'UE HO Response',
'tapa.rgm.prbind' : 'PRB Usage Indication',
'tapa.lrm.enbCtrlReq' : 'eNB Control Request',
'tapa.lrm.enbCtrlCfm' : 'eNB Control Confirm',
'tapa.rgm.prbCfgReq' : 'PRB report Cfg Request',
'tapa.rgm.bndReq' : 'LRM Bind Request',
'tapa.rgm.bndCfm' : 'LRM Bind Confirm',
'tapa.uemeas.eutraA1Evnt' : ' To trigger A1 Evnt ' ,
'tapa.uemeas.eutraA2Evnt' : ' To trigger A2 Evnt ',
'tapa.uemeas.eutraHoA3Evnt' : ' To trigger A3 Evnt for Handover ',
'tapa.uemeas.eutraAnrA3Evnt' : 'To trigger A3 Evnt for ANR ',
'tapa.uemeas.eutraA4Evnt' : ' To trigger A4 Evnt ',
'tapa.uemeas.eutraHoA5Evnt' : ' To trigger A5 Evnt for Handover ',
'tapa.uemeas.eutraAnrA5Evnt' : ' To trigger A5 Evnt for ANR ',
'tapa.uemeas.utraB1Evnt' : ' To trigger B1 Evnt for UTRA ',
'tapa.uemeas.cdmaB1Evnt' : ' To trigger B1 Evnt for CDMA ',
'tapa.uemeas.utraHoB2Evnt' : ' To trigger B2 Evnt for UTRA Handover ',
'tapa.uemeas.utraAnrB2Evnt' : ' To trigger B2 Evnt for UTRA ANR ',
'tapa.uemeas.cdmaHoB2Evnt' : ' To trigger B2 Evnt for CDMA Handover ',
'tapa.uemeas.cdmaAnrB2Evnt' : ' To trigger B2 Evnt for CDMA ANR ',
'tapa.uemeas.eutraRptStrngCells' : ' To trigger Report Strongest Cells for EUTRA ',
'tapa.uemeas.eutraRptCGI' : ' To trigger Report CGI for EUTRA ',
'tapa.uemeas.utraRptStrngCells' : ' To trigger Report Strongest Cells for UTRA ',
'tapa.uemeas.utraRptCGI' : ' To trigger Report CGI for UTRA ',
'tapa.uemeas.cdmaRptStrngCells' : ' To trigger Report Strongest Cells for CDMA ',
'tapa.uemeas.cdmaRptCGI' : ' To trigger Report CGI for CDMA ',
'tapa.uemeas.perf' : ' To perform Measurement ',
#SON
'tapa.nlu.newNghReq' : ' New Neighbor Request to SON ',
'tapa.nlu.pciModIndCfm' : ' PCI Modify Cfm Ind to SON ',
'tapa.uemeas.perf':'Cdma Report CGI',
'tapa.uemeas.chkBlkCLst' : 'Checks the EUTRA Black listed cells',
'tapa.uemeas.chkWhtCLst':'Checks the EUTRA White listed cells',
'tapa.uemeas.chkUtraCLst':'Checks the UTRA Listed cells',
'tapa.uemeas.chkNoIntrRatMeasObj':'Checks that UTRA/CDMA measurement object are not configured',
'tapa.uemeas.rptCfgIntrRat':'Checks Inter RAT Report Configuration parameters',
'tapa.lwr.scell.cntrl.req':'Scell Addition/deletion/Act/Deact commnad',
}
#Code added to parse product specific py files
# Plug-in loader: any file in the current working directory whose name ends
# in "ac_pycmd.py" is treated as a product-specific command module.  Each
# such module is imported and its intCmdDict / simpleCmdDict tables are
# merged into the global tables defined earlier in this file, so product
# commands extend (and can override, since update() wins) the base set.
import glob
# Only matches files in the CWD (no recursion); order is as glob returns it.
pycmdList = glob.glob("*ac_pycmd.py")
for elm in pycmdList :
    # elm[:-3] strips the trailing ".py" to form the importable module name.
    mo = __import__(elm[:-3])
    # NOTE(review): assumes every matched module defines both dictionaries;
    # a module missing either attribute raises AttributeError at load time.
    intCmdDict.update(mo.intCmdDict)
    simpleCmdDict.update(mo.simpleCmdDict)
#/********************************************************************30**
#
# End of file: cm_xta_pycmd.py@@/main/26 - Tue Apr 10 12:26:14 2012
#
#*********************************************************************31*/
#
#
#/********************************************************************40**
#
# Notes:
#
#*********************************************************************41*/
#
#/********************************************************************50**
#
#*********************************************************************51*/
#
#
#/********************************************************************60**
#
# Revision history:
#
#*********************************************************************61*/
#
#/********************************************************************90**
#
# ver pat init description
#------------ -------- ---- ----------------------------------------------
# cm_xta_pycmd_py_001.main_ rr
#/main/3 --- sy 1. Update of sid field
#/main/4 cm_xta_pycmd_py_001.main_3 sk 1. Updated simple commands for FTHA
#/main/5 --- rd 1. Correcting the SID line
#/main/6 --- rb 1. Updated for PDCP Software release 1.1
#/main/7 --- mr 1. RRC commands Addition. RRC Rel 2.0
#/main/8 --- aj 1. Added the support for FTHA Integration
#/main/9 --- rb 1. Updated for RRC Release 2.1
#/main/10 --- mn 1. Loop changes
#/main/11 --- rer 1. Added UEidChngCfm
#/main/12 --- rb 1. Added RRC commands.
#/main/13 --- nk 1. Updated for IuUP release
#/main/14 --- sy 1. Updated for SIP sdk release 1.1.
#/main/16 --- pc 1. Updated for IuUP release
#/main/18 --- pc 1. Updated for EGTP release 1.1
#/main/19 --- st 1. Updated for LTE MAC release 1.1
#/main/20 --- gm 1. rr013.202 patch for MAC Count-C Support
#/main/21 cm_xta_pycmd_py_001.main_20 pka 1. Updated for PSF-S1AP 1.1 Release
#/main/25 cm_xta_pycmd_py_001.main_24 va 1. Updated for S1 Abort request support by UI
#*********************************************************************91*/
| [
"sriprasads@gmail.com"
] | sriprasads@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.