content stringlengths 5 1.05M |
|---|
from pessoa import Pessoa
from base_dados import Base
class Adm(Pessoa):
    """Administrator user: can list, register and remove users in the Base."""

    def __init__(self, codigo, nome, idade):
        super().__init__(nome, idade)
        self.nome = nome
        self.idade = idade
        self.codigo = codigo

    def getDados(self):
        """Print every registered admin, person and company from the database."""
        self.dados = Base()
        self.visu = self.dados.getDados()
        print('Administradores')
        self._print_group('adm', ('Nome', 'Codigo', 'tempo como ADM'))
        print('Usuário Comum')
        self._print_group('fisica', ('Nome', 'CPF', 'Idade'))
        print('Empresas')
        self._print_group('juridica', ('Nome', 'CNPJ', 'Idade'))

    def _print_group(self, group, labels):
        # Helper: print fields 1..3 of every record in the given group,
        # replacing the three near-identical loops the original repeated.
        for k in self.visu[group].keys():
            print('-------------------------------')
            print(f'{k}')
            for offset, label in enumerate(labels, start=1):
                print(f'{label}: {self.dados.dados[group][k][offset]}')
            print('-------------------------------')

    # NOTE: method name 'cadastar' (sic) kept for caller compatibility.
    def cadastar(self, usuario, lista, base):
        """Register *usuario* with data *lista* in *base*."""
        base.gravar(usuario, lista)
        return 'Cadastro Concluído'

    def remover(self, usuario, base):
        """Remove *usuario* from whichever group of *base* contains it."""
        if usuario in base.dados['adm']:
            del base.dados['adm'][usuario]
        elif usuario in base.dados['fisica']:
            del base.dados['fisica'][usuario]
        elif usuario in base.dados['juridica']:
            del base.dados['juridica'][usuario]
        else:
            # BUG FIX: message typo 'Romoção' -> 'Remoção'.
            return 'Usuário não encontrado. Remoção não efetuada'
        return 'Remoção efetuada com sucesso'
|
# -*- coding: utf-8 -*-
"""
@author: alexyang
@contact: alex.yang0326@gmail.com
@file: rel_rnn.py
@time: 2018/4/6 10:53
@desc: learn a relation matching score function v(r, q) that measures the similarity between the question & the relation
"""
import os
import logging
import random
import numpy as np
import tensorflow as tf
class RelRank(object):
    """Relation ranking model: learns a matching score v(r, q).

    A question is encoded with a stacked bidirectional GRU, projected into
    the relation-embedding space, and compared with relation embeddings by
    cosine similarity. Training minimizes a margin-based triplet loss over
    (question, positive relation, negative relation) triples.
    """

    def __init__(self, config, sess, word_embeddings):
        """Store config, create placeholders and embedding variables."""
        self.n_words = config.n_words
        self.n_relations = config.n_relations
        self.embedding_size = config.embedding_size
        self.n_layer = config.n_layer
        self.hidden_size = config.hidden_size
        self.margin = config.margin
        self.batch_size = config.batch_size
        self.max_sequence_len = config.max_sequence_len
        self.n_epoch = config.n_epoch
        self.early_stopping_step = config.early_stopping_step
        self.embedding_trainable = config.embedding_trainable  # whether to fine tune word embeddings
        self.model_path = config.model_path
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        self.model_name = config.model_name
        self.sess = sess

        # Graph nodes filled in by build_model().
        self.loss = None
        self.pos_similarity = None
        self.neg_similarity = None
        self.train_step = None
        # Early-stopping bookkeeping; best_loss starts at a large sentinel.
        self.stopping_step = 0
        self.best_loss = 128000.0
        self.best_epoch = 0

        # define placeholder for input
        self.question_ids = tf.placeholder(tf.int32, [None, self.max_sequence_len], name='question_ids')
        self.sequence_len = tf.placeholder(tf.int32, name='sequence_len')
        self.pos_rel_ids = tf.placeholder(tf.int32, name='pos_rel_ids')  # positive relations
        self.neg_rel_ids = tf.placeholder(tf.int32, name='neg_rel_ids')  # negative relations
        self.gru_keep_prob = tf.placeholder(tf.float32, name='gru_keep_prob')

        # embedding layer: pre-trained word vectors, randomly-initialized
        # relation vectors.
        self.word_embeddings = tf.Variable(initial_value=word_embeddings, name='word_embeddings', dtype=tf.float32,
                                           trainable=self.embedding_trainable)
        self.relation_embeddings = tf.Variable(tf.random_uniform([self.n_relations, self.embedding_size], -0.1, 0.1),
                                               name='relation_embeddings')
        self.pos_rel_embed = tf.nn.embedding_lookup(self.relation_embeddings, self.pos_rel_ids)
        self.neg_rel_embed = tf.nn.embedding_lookup(self.relation_embeddings, self.neg_rel_ids)
        self.question_word_embed = tf.nn.embedding_lookup(self.word_embeddings, self.question_ids)

        if not os.path.exists('log'):
            os.makedirs('log')
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(filename)s[line: %(lineno)d] %(levelname)s %(message)s',
                            datefmt='%a, %b %d %H:%M:%S', filename='./log/train.log', filemode='a')
        logging.info(config)

    def single_gru_cell(self):
        """One GRU cell wrapped with dropout controlled by gru_keep_prob."""
        # BUG FIX: the original never passed a keep probability, so the
        # gru_keep_prob placeholder (fed 0.5 in training, 1.0 in eval) was
        # silently ignored and no dropout was ever applied.
        return tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.GRUCell(self.hidden_size),
                                             output_keep_prob=self.gru_keep_prob)

    def multi_gru_cell(self):
        """Stack n_layer dropout-wrapped GRU cells."""
        stack_gru = [self.single_gru_cell() for _ in range(self.n_layer)]
        return tf.nn.rnn_cell.MultiRNNCell(stack_gru)

    def build_model(self):
        """Build the BiGRU encoder, similarity ops and triplet loss."""
        # bigru layer
        cell_fw = self.multi_gru_cell()
        cell_bw = self.multi_gru_cell()
        # NOTE(review): this unpacking takes the second layer's final state
        # on each direction, i.e. it assumes n_layer == 2 — confirm config.
        _, ((_, fw_state), (_, bw_state)) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw,
                                                                            cell_bw=cell_bw,
                                                                            inputs=self.question_word_embed,
                                                                            sequence_length=self.sequence_len,
                                                                            dtype=tf.float32)
        question_gru = tf.concat([fw_state, bw_state], axis=-1)  # [batch_size, 2*hidden_size]

        # linear layer to project final hidden state of BiGRU to the same vector as entity embedding
        w_linear = tf.Variable(tf.truncated_normal([2*self.hidden_size, self.embedding_size], stddev=0.01),
                               name='w_linear')
        b_linear = tf.Variable(tf.constant(0.1, shape=[self.embedding_size]), name='b_linear')
        question_embed = tf.matmul(question_gru, w_linear) + b_linear

        # similarity layer
        self.pos_similarity = self.cosine_sim(question_embed, self.pos_rel_embed, name='pos_similarity')
        self.neg_similarity = self.cosine_sim(question_embed, self.neg_rel_embed, name='neg_similarity')

        # triplet loss, averaged over the whole batch (an alternative that
        # averages only over violating triplets was considered and dropped).
        self.loss = tf.reduce_mean(tf.nn.relu(self.margin - self.pos_similarity + self.neg_similarity))
        self.train_step = tf.train.AdamOptimizer().minimize(self.loss)

    def train(self, train_data, valid_data, valid_data_metric):
        """Run SGD epochs with per-epoch checkpointing and early stopping.

        train_data/valid_data are (question_ids, sequence_len, pos_rel_ids,
        neg_rel_ids) numpy-array tuples; valid_data_metric feeds
        compute_accuracy.
        """
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.local_variables_initializer())
        saver = tf.train.Saver(max_to_keep=self.n_epoch)

        question_ids, sequence_len, pos_rel_ids, neg_rel_ids = train_data
        # ceil(len / batch_size)
        loops = int((len(question_ids) + self.batch_size - 1) / self.batch_size)
        for epoch in range(self.n_epoch):
            avg_loss = 0.0
            # shuffle data matters!
            shuffle_index = list(range(question_ids.shape[0]))
            random.shuffle(shuffle_index)
            for i in range(loops):
                # BUG FIX: the original sliced shuffle_index[i: i+batch_size],
                # producing heavily overlapping batches and never visiting
                # more than the first loops+batch_size examples per epoch.
                batch_index = shuffle_index[i * self.batch_size: (i + 1) * self.batch_size]
                feed_dict = {self.question_ids: question_ids[batch_index],
                             self.sequence_len: sequence_len[batch_index],
                             self.pos_rel_ids: pos_rel_ids[batch_index],
                             self.neg_rel_ids: neg_rel_ids[batch_index],
                             self.gru_keep_prob: 0.5}
                _, loss = self.sess.run([self.train_step, self.loss], feed_dict=feed_dict)
                if loss == 0.0:
                    # NOTE: epoch is cut short when a batch has zero loss;
                    # avg_loss below is still divided by the full loop count.
                    break
                avg_loss += loss
            avg_loss /= loops
            logging.info('%s %d train_loss: %f' % (self.model_name, epoch, avg_loss))
            print('%s %d train_loss: %f' % (self.model_name, epoch, avg_loss))
            saver.save(sess=self.sess, save_path=os.path.join(self.model_path, self.model_name), global_step=epoch)

            valid_loss = self.valid(valid_data)
            self.compute_accuracy(valid_data_metric, 'valid_acc')

            if valid_loss < self.best_loss:
                self.best_loss = valid_loss
                self.best_epoch = epoch
                self.stopping_step = 0
            else:
                self.stopping_step += 1
            if self.stopping_step >= self.early_stopping_step:
                print('%s early stopping is trigger at epoch: %d' % (self.model_name, epoch))
                logging.info('%s early stopping is trigger at epoch: %d' % (self.model_name, epoch))
                break

        print('%s best epoch: %d, best loss: %f' % (self.model_name, self.best_epoch, self.best_loss))
        logging.info('%s best epoch: %d, best loss: %f' % (self.model_name, self.best_epoch, self.best_loss))
        self.only_save_best_epoch(self.model_path, self.model_name, self.best_epoch)

    def valid(self, valid_data):
        """Compute and log the triplet loss on (a slice of) validation data."""
        question_ids, sequence_len, pos_rel_ids, neg_rel_ids = valid_data
        # restrict the amount of data to be fed to avoid OOM error
        feed_dict = {self.question_ids: question_ids[:25600],
                     self.sequence_len: sequence_len[:25600],
                     self.pos_rel_ids: pos_rel_ids[:25600],
                     self.neg_rel_ids: neg_rel_ids[:25600],
                     self.gru_keep_prob: 1.0}
        loss = self.sess.run(self.loss, feed_dict=feed_dict)
        logging.info('%s valid_loss: %f' % (self.model_name, loss))
        print('%s valid_loss: %f' % (self.model_name, loss))
        return loss

    def compute_accuracy(self, data_metric, data_type):
        """Compute top-k hits accuracy over at most 1000 questions."""
        q_word_ids, q_seq_len, q_pos_rel_ids, cand_rel_ids = data_metric
        top1 = top3 = top5 = top10 = 0
        data_size = min(len(q_word_ids), 1000)
        for i in range(data_size):
            score = {}
            # compute score for each candidate relation; the question is
            # tiled so every candidate is scored in one session run.
            # GENERALIZED: width follows max_sequence_len (was hard-coded 60,
            # which only worked when max_sequence_len == 60).
            q_word_ids_npy = np.zeros([1, self.max_sequence_len])
            q_word_ids_npy[0, :len(q_word_ids[i])] = q_word_ids[i]
            mul_q_word_ids_npy = np.tile(q_word_ids_npy, (len(cand_rel_ids[i]), 1))
            mul_q_seq_len = np.tile(q_seq_len[i:i + 1], len(cand_rel_ids[i]))
            feed_dict = {self.question_ids: mul_q_word_ids_npy,
                         self.sequence_len: mul_q_seq_len,
                         self.pos_rel_ids: cand_rel_ids[i],
                         self.gru_keep_prob: 1.0
                         }
            similarity = self.sess.run(self.pos_similarity, feed_dict=feed_dict)
            for j in range(len(cand_rel_ids[i])):
                score[cand_rel_ids[i][j]] = similarity[j]
            # rank by similarity score, best first
            sorted_score = sorted(score.items(), key=lambda x: x[1], reverse=True)
            sorted_rel = [rel for rel, _ in sorted_score]
            if q_pos_rel_ids[i] in sorted_rel[:1]:
                top1 += 1
            if q_pos_rel_ids[i] in sorted_rel[:3]:
                top3 += 1
            if q_pos_rel_ids[i] in sorted_rel[:5]:
                top5 += 1
            if q_pos_rel_ids[i] in sorted_rel[:10]:
                top10 += 1
        print('%s %s: hits@1: %f hits@3: %f hits@5: %f hits@10: %f' %
              (self.model_name, data_type, top1/data_size, top3/data_size,
               top5/data_size, top10/data_size))
        logging.info('%s %s: hits@1: %f hits@3: %f hits@5: %f hits@10: %f' %
                     (self.model_name, data_type, top1/data_size, top3/data_size,
                      top5/data_size, top10/data_size))

    @staticmethod
    def cosine_sim(a, b, name='cosine_sim'):
        """Row-wise cosine similarity between a and b."""
        a_norm = tf.nn.l2_normalize(a, axis=-1)
        b_norm = tf.nn.l2_normalize(b, axis=-1)
        return tf.reduce_sum(tf.multiply(a_norm, b_norm), axis=-1, name=name)

    @staticmethod
    def only_save_best_epoch(model_path, model_name, best_epoch):
        """Delete every checkpoint file except the best epoch's three files."""
        data_suffix = '.data-00000-of-00001'
        data_name = model_name + '-' + str(best_epoch) + data_suffix
        index_suffix = '.index'
        index_name = model_name + '-' + str(best_epoch) + index_suffix
        meta_suffix = '.meta'
        meta_name = model_name + '-' + str(best_epoch) + meta_suffix
        for file in os.listdir(model_path):
            if file.startswith(model_name):
                if file == data_name or file == index_name or file == meta_name:
                    continue  # keep the best epoch's checkpoint
                elif file.endswith(data_suffix) or file.endswith(index_suffix) or file.endswith(meta_suffix):
                    os.remove(os.path.join(model_path, file))
|
"""
vtelem - Test the websocket daemon's correctness.
"""
# built-in
import asyncio
import time
# third-party
import websockets
# module under test
from vtelem.daemon.websocket import WebsocketDaemon
from vtelem.mtu import get_free_tcp_port
async def consumer(websocket, message, _) -> None:
    """Echo the incoming message straight back over the socket."""
    reply = message
    await websocket.send(reply)
def test_websocket_daemon_boot():
    """Verify the daemon's loop can be brought up and torn down repeatedly."""
    daemon = WebsocketDaemon("test", consumer)
    attempts = 0
    while attempts < 5:
        with daemon.booted():
            time.sleep(1)
        attempts += 1
def test_websocket_daemon_basic():
    """Test basic client-server echoes with a few starts and stops."""
    port = get_free_tcp_port()
    daemon = WebsocketDaemon("test", consumer, ("0.0.0.0", port))

    async def ping_test():
        """Send an arbitrary message and expect the same one back."""
        uri = f"ws://localhost:{port}"
        async with websockets.connect(
            uri, close_timeout=1
        ) as websocket:
            payload = "hello!"
            await websocket.send(payload)
            assert await websocket.recv() == payload

    for _ in range(5):
        with daemon.booted():
            time.sleep(0.1)
            # connect a client and run one echo round-trip
            asyncio.get_event_loop().run_until_complete(ping_test())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-02-25 20:10:12
# @Author : ZubinGou (zebgou@gmail.com)
# @Link : https://github.com/ZubinGou
# @Version : $Id$
class Scene(object):
    """Base class for game scenes; subclasses override enter()."""

    def enter(self):
        # Stub — not yet implemented.
        pass
class Engine(object):
    """Game engine skeleton; methods are not yet implemented."""

    def __init__(self, Scene_map):
        # Stub — presumably stores the Map to traverse; not yet implemented.
        pass

    def play(self):
        # Stub — not yet implemented.
        pass
class Death(Scene):
    """'Death' scene stub."""

    def enter(self):
        # Stub — not yet implemented.
        pass
class CentralCorridor(Scene):
    """'Central corridor' scene stub."""

    def enter(self):
        # Stub — not yet implemented.
        pass
class LaserWeaponArmory(Scene):
    """'Laser weapon armory' scene stub."""

    def enter(self):
        # Stub — not yet implemented.
        pass
class TheBridge(Scene):
    """'The bridge' scene stub."""

    def enter(self):
        # Stub — not yet implemented.
        pass
class EscapePod(Scene):
    """'Escape pod' scene stub."""

    def enter(self):
        # Stub — not yet implemented.
        pass
class Map(object):
    """Scene-map skeleton; all methods are unimplemented stubs."""

    def __init__(self, start_scene):
        # Stub — start_scene is the name of the opening scene.
        pass

    def next_scene(self, scene_name):
        # Stub — not yet implemented.
        pass

    def opening_scene(self):
        # Stub — not yet implemented.
        pass
# Script entry: build the map at its starting scene and run the engine.
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
|
# Print the two greeting lines in order.
for greeting in ("Hello Python World!", "Hello Python Again"):
    print(greeting)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_gateway_to_network_allocations_table
Revision ID: fdfb668d19e1
Revises: 221a83cfd85b
Create Date: 2016-04-19 10:07:16.224806
"""
# revision identifiers, used by Alembic to order this migration in the chain.
revision = 'fdfb668d19e1'
down_revision = '221a83cfd85b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a nullable 64-char 'gateway' column to each affected table."""
    for table_name in ('network_allocations', 'share_networks'):
        op.add_column(
            table_name,
            sa.Column('gateway', sa.String(64), nullable=True))
def downgrade():
    """Drop the 'gateway' columns added by upgrade()."""
    for table_name in ('network_allocations', 'share_networks'):
        op.drop_column(table_name, 'gateway')
|
# coding: utf-8
# # 20 Newsgroups text classification with pre-trained word embeddings
#
# In this notebook, we'll use pre-trained [GloVe word
# embeddings](http://nlp.stanford.edu/projects/glove/) for text
# classification using TensorFlow 2.0 / Keras. This notebook is
# largely based on the blog post [Using pre-trained word embeddings in
# a Keras model]
# (https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html)
# by François Chollet.
#
# **Note that using a GPU with this notebook is highly recommended.**
#
# First, the needed imports.
import tensorflow as tf
from tensorflow.keras.preprocessing import sequence, text
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D, LSTM
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import TensorBoard
from zipfile import ZipFile
import os, datetime
import sys
import numpy as np
print('Using Tensorflow version:', tf.__version__,
'Keras version:', tf.keras.__version__,
'backend:', tf.keras.backend.backend(), flush=True)
# ## GloVe word embeddings
#
# Let's begin by loading a datafile containing pre-trained word
# embeddings. The datafile contains 100-dimensional embeddings for
# 400,000 English words.
# Data location: $DATADIR if set, else the default scratch path.
if 'DATADIR' in os.environ:
    DATADIR = os.environ['DATADIR']
else:
    DATADIR = "/scratch/project_2003747/data/"

GLOVE_DIR = os.path.join(DATADIR, "glove.6B")

print('Indexing word vectors.')

# Map each vocabulary word to its 100-dim GloVe vector.
embeddings_index = {}
with open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'), encoding='utf-8') as f:
    n_skipped = 0
    for line in f:
        try:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
        # BUG FIX: reading never raises UnicodeEncodeError, so the original
        # handler was dead and malformed lines crashed the loop. Bad float
        # fields raise ValueError; bad bytes raise UnicodeDecodeError.
        except (ValueError, UnicodeDecodeError):
            n_skipped += 1

print('Found {} word vectors, skipped {}.'.format(len(embeddings_index), n_skipped))
# ## 20 Newsgroups data set
#
# Next we'll load the [20 Newsgroups]
# (http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html)
# data set.
#
# The dataset contains 20000 messages collected from 20 different
# Usenet newsgroups (1000 messages from each group):
#
# alt.atheism | soc.religion.christian | comp.windows.x | sci.crypt
# talk.politics.guns | comp.sys.ibm.pc.hardware | rec.autos | sci.electronics
# talk.politics.mideast | comp.graphics | rec.motorcycles | sci.space
# talk.politics.misc | comp.os.ms-windows.misc | rec.sport.baseball | sci.med
# talk.religion.misc | comp.sys.mac.hardware | rec.sport.hockey | misc.forsale
TEXT_DATA_ZIP = os.path.join(DATADIR, "20_newsgroup.zip")
zf = ZipFile(TEXT_DATA_ZIP, 'r')
print('Processing text dataset from', TEXT_DATA_ZIP, flush=True)

texts = []  # list of text samples
labels_index = {}  # dictionary mapping label name to numeric id
labels = []  # list of label ids

# Archive layout assumed: 20_newsgroup/<group>/<numeric message id>.
for fullname in sorted(zf.namelist()):
    parts = fullname.split('/')
    dirname = parts[1]
    fname = parts[2] if len(parts) > 2 else None
    zinfo = zf.getinfo(fullname)
    if zinfo.is_dir() and len(dirname) > 0:
        # New newsgroup directory: assign it the next numeric label id.
        label_id = len(labels_index)
        labels_index[dirname] = label_id
        print(dirname, label_id)
    elif fname is not None and fname.isdigit():
        # NOTE(review): label_id is whatever the most recent directory entry
        # set — relies on the sorted listing putting each directory before
        # its member files; confirm for this archive.
        with zf.open(fullname) as f:
            t = f.read().decode('latin-1')
        i = t.find('\n\n')  # skip header
        if 0 < i:
            t = t[i:]
        texts.append(t)
        labels.append(label_id)

print('Found %s texts.' % len(texts))
# ### Vectorization
#
# Vectorize the text samples into a 2D integer tensor.
MAX_NUM_WORDS = 10000        # keep only this many most-frequent words
MAX_SEQUENCE_LENGTH = 1000   # pad/truncate every message to this many tokens

# Turn each text into a sequence of word indices.
tokenizer = text.Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)

word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))

data = sequence.pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)

# One-hot encode the label ids.
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# ### TF Datasets
#
# Let's now define our TF Datasets
# (https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/data/Dataset#class_dataset)
# for training, validation, and test data.
VALIDATION_SET, TEST_SET = 1000, 4000
BATCH_SIZE = 128

dataset = tf.data.Dataset.from_tensor_slices((data, labels))
# BUG FIX: shuffle() reshuffles on every iteration by default, so the
# skip()/take() splits below would draw different, overlapping samples on
# each epoch (train/validation/test leakage). Shuffle once so the three
# splits stay fixed and disjoint.
dataset = dataset.shuffle(20000, reshuffle_each_iteration=False)

train_dataset = dataset.skip(VALIDATION_SET+TEST_SET)
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=True)
validation_dataset = dataset.skip(TEST_SET).take(VALIDATION_SET)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=True)
test_dataset = dataset.take(TEST_SET)
test_dataset = test_dataset.batch(BATCH_SIZE, drop_remainder=False)
# ### Pretrained embedding matrix
#
# As the last step in data preparation, we construct the GloVe
# embedding matrix:
print('Preparing embedding matrix.')

num_words = min(MAX_NUM_WORDS, len(word_index) + 1)
embedding_dim = 100
# Row i holds the GloVe vector for word index i;
# words not found in embedding index will be all-zeros.
embedding_matrix = np.zeros((num_words, embedding_dim))
for word, i in word_index.items():
    if i >= MAX_NUM_WORDS:
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector

print('Shape of embedding matrix:', embedding_matrix.shape)
# ## 1-D CNN
#
# ### Initialization
print('Build model...')

# 1-D CNN over the frozen GloVe embedding of each message.
model = Sequential()
model.add(Embedding(num_words,
                    embedding_dim,
                    weights=[embedding_matrix],
                    input_length=MAX_SEQUENCE_LENGTH,
                    trainable=False))  # keep the pre-trained vectors fixed
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(128, 5, activation='relu'))
model.add(GlobalMaxPooling1D())
model.add(Dense(128, activation='relu'))
model.add(Dense(20, activation='softmax'))  # one output per newsgroup

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
print(model.summary())
# ### Learning
# TensorBoard logs go under ./logs/20ng-cnn-<timestamp>.
logdir = os.path.join(os.getcwd(), "logs",
                      "20ng-cnn-"+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
print('TensorBoard log directory:', logdir)
# ROBUSTNESS: exist_ok avoids a crash if the directory already exists
# (e.g. two runs started within the same second).
os.makedirs(logdir, exist_ok=True)
callbacks = [TensorBoard(log_dir=logdir)]

epochs = 10

history = model.fit(train_dataset, epochs=epochs,
                    validation_data=validation_dataset,
                    verbose=2, callbacks=callbacks)
# ### Inference
#
# We evaluate the model using the test set. If accuracy on the test
# set is notably worse than with the training set, the model has
# likely overfitted to the training samples.
# Evaluate on the held-out test set; metrics_names[1] is the accuracy
# metric declared in model.compile().
test_scores = model.evaluate(test_dataset, verbose=2)
print("Test set %s: %.2f%%" % (model.metrics_names[1], test_scores[1]*100))
|
# students = ['小明','小红','小刚']
# students.append('3')
# del students[2]
# print(students)
# students = ['小明','小红','小刚']
#
# scores = {'小明':95,'小红':90,'小刚':90}
#
# print(len(students))
#
# print(len(scores))
# scores = {'小明':95,'小红':90,'小刚':90}
#
# print(scores['小明'])
# scores = {'小明':95,'小红':90,'小刚':90}
#
# scores['小刚']=92
#
# scores['小美']=85
#
# print(scores)
# students = {
# # '第一组':['小明','小红','小刚','小美'],
# # '第二组':['小强','小兰','小伟','小芳']
# # }
# # print(students['第一组'][2])
# # #取出'第一组'对应列表偏移量为3的元素,即'小美'
# #
# # # 最外层是中括号,所以是列表嵌套字典,先判断字典是列表的第几个元素,再找出要取出的值相对应的键
# # scores = [
# # {'小明':95,'小红':90,'小刚':100,'小美':85},
# # {'小强':99,'小兰':89,'小伟':93,'小芳':88}
# # ]
# # print(scores[0]['小刚'])
# dict = {'日本':'东京','英国':'伦敦','法国':'巴黎'}
#
# for i in dict:
#
# print(i)
# man = '' # 注:这个''代表空字符串
# while man != '有':
# man = input('有没有愿意为小龙女死的男人?没有的话就不能出古墓。')
#
# print('小龙女可以出古墓门下山啦~')
# psw = ''
# while psw != '816':
# psw = input('请输入密码: ')
# print('密码正确,已开门')
# stu = ['XM','XG','XH']
# for i in range(3):
# stu1 = stu[0]
# stu.append(stu1)
# del stu[0]
# print(stu)
#
# print(3<5)
#
# print(3>5)
#
# print('长安'=='长安')
#
# print('长安'!='金陵')
# for i in range(5):
#
# print('明日复明日')
#
# if i==3: # 当i等于3的时候触发
#
# continue # 回到循环开头
#
# print('这句话在i等于3的时候打印不出来')
# a = 24
# while True:
# b = int(input('请输入一个数字,我会告诉你是大了还是小了: '))
# if b == 24:
# print('嗯,答对了,恭喜\n')
# break
# if b < 24:
# print('这个小了点~~\n')
# continue
# if b > 24:
# print('这个大了点呢~~\n')
# continue
#
#
# def tree(Height):
#
# print('Merry Christmas!')
#
# for i in range(Height):
#
# print((Height-i)*2*' '+'o'+ i*'~x~o')
#
# print(((Height-i)*2-1)*' '+(i*2+1)*'/'+'|'+(i*2+1)*'\\')
#
# tree(4)
#
# tree(8)
# def face(name):
# return name + '的脸蛋'
# def body(name):
# return name + '的身材'
# def main(like_face,like_body):
# return '梦中情人的样子: '+face(like_face)+ '和' +body(like_body)
# print(main('AA','BB'))
# def bigger(a,b):
# if a > b:
# return a
# elif a == b:
# return '一样'
# else:
# return b
# print(bigger(8888,8888))
# 查看注释,运行代码。
# import random
#
# import time
#
# # 将抽奖程序封装成函数
#
# def choujiang(q,w,e): # 定义一个抽奖函数,带有3个参数,也就是3位候选人
#
# luckylist = [q,w,e] # 定义一个中奖名单的列表
#
# a = random.choice(luckylist) # 在中奖名单里面随机选择
#
# print('开奖倒计时',3)
#
# time.sleep(1)
#
# print('开奖倒计时',2)
#
# time.sleep(1)
#
# print('开奖倒计时',1)
#
# time.sleep(1)
#
# image = '''
#
# /\_)o<
#
# | \\
#
# | O . O|
#
# \_____/
#
# '''
#
# print(image)
#
# print('恭喜'+a+'中奖!')
#
# choujiang('虚竹','萧峰','段誉') # 调用函数
# def cards():
#
# color = ['红心', '方块', '梅花','黑桃'] # 将花色放在一个列表中待用
#
# num = list(range(2, 11))
#
# num.extend('JQKA') # 通过两行代码,生成一个 2-A 的数字列表。
#
# return [(x, y) for x in color for y in num ] # 用列表生成式完成扑克牌的生成。
#
# print(cards())
import random
import time

# Overall win counters, kept across every round played.
player_win = 0
enemy_win = 0

while True:
    # One pass = a three-round match.
    for i in range(1, 4):
        # Both fighters roll fresh random HP and attack each round.
        player_life = random.randint(50, 100)
        player_attack = random.randint(50, 100)
        enemy_life = random.randint(50, 100)
        enemy_attack = random.randint(50, 100)
        print('这是第{}局'.format(i))
        time.sleep(1)
        print('【玩家】\n血量: {}\n攻击: {}'.format(player_life, player_attack))
        time.sleep(1)
        print('--------------------------------')
        time.sleep(1)
        print('【敌人】\n血量: {}\n攻击: {}'.format(enemy_life, enemy_attack))
        time.sleep(1)
        print('--------------------------------')
        # Trade simultaneous blows until at least one side is at 0 or below.
        while (player_life >= 0) and (enemy_life >= 0):
            player_life = player_life - enemy_attack
            enemy_life = enemy_life - player_attack
            print('敌人发起了攻击,玩家目前血量为 {}\n'.format(player_life))
            time.sleep(0.5)
            print('玩家发起了攻击,敌人的目前血量为 {}\n'.format(enemy_life))
            time.sleep(0.5)
            print('--------------------------------')
            time.sleep(1)
        # Decide the round: the survivor wins; both down is a draw.
        if (player_life > 0) and (enemy_life <= 0):
            print('玩家获胜\n')
            player_win = player_win + 1
            time.sleep(1)
        elif (player_life <= 0) and (enemy_life > 0):
            print('敌人赢了\n')
            enemy_win = enemy_win + 1
            time.sleep(1)
        else:
            print('不好,同归于尽了\n')
            time.sleep(1)
    # NOTE(review): the original indentation was lost; this placement asks
    # after each three-round match whether to play another — confirm intent.
    again = input('是否需要再来一局呢? 请输入 (Y/N) ')
    if again == 'Y':
        continue
    else:
        break

# Final summary once the player declines another match.
print('现在三局已经结束,公布最终结果: \n')
time.sleep(2)
print('最终结果就是。。。。。。。。。\n')
time.sleep(1.5)
print('玩家一共胜利了{}次\n'.format(player_win))
print('敌人一共胜利了{}次\n'.format(enemy_win))
if player_win > enemy_win:
    print('玩家赢了')
elif player_win < enemy_win:
    print('敌人赢了')
else:
    print('平局!')
|
"""
Provides utilities for the sura_rename package
"""
import os.path
def prepend_path(file_list, path):
    """
    Returns a lazy iterable whose elements are the given names joined
    under *path*.
    """
    join = os.path.join
    return (join(path, name) for name in file_list)
def get_ext_files(directory, ext):
    """
    Return a lazy iterable of the entries in *directory* whose extension
    equals *ext* (case-insensitive), each prefixed with the directory path.

    *ext* must include the leading dot (e.g. '.mp3'); assumes ext is ASCII.
    NOTE(review): os.listdir returns subdirectories too — a directory named
    'foo.mp3' would be included; confirm callers only expect files.
    """
    wanted = ext.lower()  # hoisted: compare once per entry, lower once total
    # IDIOM FIX: prepend_path already yields lazily, so the original's extra
    # generator wrapper around it was redundant.
    allfiles = prepend_path(os.listdir(directory), directory)
    return (f for f in allfiles if os.path.splitext(f)[1].lower() == wanted)
def get_mp3_files(directory):
    """
    Returns an iterable of all mp3 files in *directory*, with the directory
    path prepended to each name.
    """
    mp3_extension = '.mp3'
    return get_ext_files(directory, mp3_extension)
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# reading both the data sets
trainDf = pd.read_csv("dataset/train.csv")
testDf = pd.read_csv("dataset/test.csv")

# checking lengths; remember where the train/test boundary sits
trainingSetIndex = len(trainDf)
print(len(trainDf), len(testDf))

# data type check
print(trainDf.dtypes)

# null check
# OP: Item_Weight, Outlet_Size
print(trainDf.isnull().sum())

# combining both so train and test get identical preprocessing
# BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
# pd.concat is the supported equivalent (same row order, same indices).
combination = pd.concat([trainDf, testDf])
print(len(combination))

# dropping identifier columns
combination = combination.drop(["Item_Identifier", "Outlet_Identifier"], axis=1)

# replacing the null in the ItemWeight
# numeric_only=True: median is undefined for the remaining object columns
# and modern pandas raises on them otherwise.
combination = combination.fillna(combination.median(numeric_only=True))

# replacing nominal values: every spelling of low-fat -> 0, regular -> 1
# (the original's three chained .replace calls merged into one mapping).
combination["Item_Fat_Content"] = combination["Item_Fat_Content"].replace(
    {"LF": 0, "Low Fat": 0, "low fat": 0, "reg": 1, "Regular": 1})
print(combination["Item_Fat_Content"].unique())

perishable = ["Breads", "Breakfast", "Dairy", "Fruits and Vegetables", "Meat", "Seafood"]
non_perishable = ["Baking Goods", "Canned", "Frozen Foods", "Hard Drinks", "Health and Hygiene", "Household",
                  "Soft Drinks", "Snack Foods", "Starchy Foods", "Others"]
combination["Item_Type"] = combination["Item_Type"].replace(to_replace=perishable, value="perishable")
combination["Item_Type"] = combination["Item_Type"].replace(to_replace=non_perishable, value="non_perishable")
combination["Item_Type"] = combination["Item_Type"].replace({"perishable": 0, "non_perishable": 1})
print(combination["Item_Type"].unique())

# missing Outlet_Size gets its own category (3)
combination["Outlet_Size"] = combination["Outlet_Size"].replace({"Small": 0,
                                                                 "High": 1,
                                                                 "Medium": 2,
                                                                 np.nan: 3})
print(combination["Outlet_Size"].unique())

combination["Outlet_Location_Type"] = combination["Outlet_Location_Type"].replace({"Tier 3": 0,
                                                                                   "Tier 2": 1,
                                                                                   "Tier 1": 2})
print(combination["Outlet_Location_Type"].unique())

combination["Outlet_Type"] = combination["Outlet_Type"].replace({"Grocery Store": 0,
                                                                 "Supermarket Type1": 1,
                                                                 "Supermarket Type2": 2,
                                                                 "Supermarket Type3": 3})
print(combination["Outlet_Type"].unique())

# splitting again the cleaned data sets (positional slicing, so the
# original train/test boundary is preserved)
trainDfClean = combination[:trainingSetIndex]
testDfClean = combination[trainingSetIndex:]

# saving the sets
trainDfClean.to_csv("./dataset/train_processed.csv", index=False)
testDfClean.to_csv("./dataset/test_processed.csv", index=False)

# plotting the hist
trainDfClean.hist()
testDfClean.hist()
plt.show()
|
import numpy as np
class Env:
    """Bernoulli bandit environment: arm `action` pays 1 with prob p[action]."""

    def __init__(self, n_action, p):
        self.n_action = n_action
        self.p = p

    def sample(self, action, n_sample):
        """Draw n_sample 0/1 rewards (uint8) for the chosen arm."""
        draws = np.random.uniform(0, 1, n_sample)
        return (draws < self.p[action]).astype(np.uint8)
def G1():
    """Factory: returns a generator of uniform arm probabilities in [0.02, 0.05)."""
    def _op(n_action):
        return np.random.uniform(0.02, 0.05, n_action)
    return _op
def G2():
    """Factory: arm probabilities uniform in [0.02, 0.02 + idx*0.01)."""
    def _op(idx, n_action):
        upper = 0.02 + idx * 0.01
        return np.random.uniform(0.02, upper, n_action)
    return _op
def G3():
    """Factory: every arm has the same fixed probability 0.05."""
    def _op(n_action):
        # IDIOM: np.full is clearer than np.zeros(n) + 0.05 and allocates once.
        return np.full(n_action, 0.05)
    return _op
from functools import partial
import six
from ..utils.is_base_type import is_base_type
from ..utils.props import props
from .field import Field
from .objecttype import ObjectType, ObjectTypeMeta
class MutationMeta(ObjectTypeMeta):
    """Metaclass that equips each Mutation subclass with a ready-made Field."""

    def __new__(cls, name, bases, attrs):
        # Also ensure initialization is only performed for subclasses of
        # Mutation
        if not is_base_type(bases, MutationMeta):
            return type.__new__(cls, name, bases, attrs)

        # The inner `Input` class (if any) declares the mutation's arguments;
        # pop it before the type is built so it is not kept as an attribute.
        input_class = attrs.pop('Input', None)

        cls = ObjectTypeMeta.__new__(cls, name, bases, attrs)
        field_args = props(input_class) if input_class else {}
        resolver = getattr(cls, 'mutate', None)
        assert resolver, 'All mutations must define a mutate method in it'
        # Field factory pre-bound to this mutation's args and resolver.
        cls.Field = partial(Field, cls, args=field_args, resolver=resolver)
        return cls
class Mutation(six.with_metaclass(MutationMeta, ObjectType)):
    """Base mutation type; subclasses must define `mutate` (and may define `Input`)."""
    pass
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Preprocess
"""
import pathlib
def wmt14_en_fr_preprocess(input_file, output_file):
    """Preprocess the source file and writes to the output_file.

    Reads the paired SGML reference files
    <input_file>/newstest2014-fren-ref{.en.sgm,.fr.sgm}, extracts the text
    of each '<seg id' line, and appends tab-separated "en\\tfr" and
    "fr\\ten" lines to <output_file>/wmt14.en_fr.txt / .fr_en.txt.
    NOTE(review): assumes the two files have segment lines in lockstep.
    """
    input_file = input_file + "/newstest2014-fren-ref"
    output_file = output_file + "/wmt14"
    language = ['.en.sgm', '.fr.sgm']
    count = 0
    # en-fr
    with open(input_file + language[0], "r", encoding='utf-8') as english, \
            open(input_file + language[1], "r", encoding='utf-8') as french, \
            open(output_file + '.en_fr.txt', "a", encoding='utf-8') as enfr_f, \
            open(output_file + '.fr_en.txt', "a", encoding='utf-8') as fren_f:
        line_id = 0
        for en, fr in zip(english, french):
            line_id += 1
            # IDIOM FIX: startswith() instead of slicing en[:7].
            if en.startswith('<seg id'):
                print("=" * 20, "\n", line_id, "\n", "=" * 20)
                # Text sits between the first '>' and '</seg>'.
                en_start = en.find('>', 0)
                en_end = en.find('</seg>', 0)
                print(en[en_start + 1:en_end])
                en_ = en[en_start + 1:en_end]
                fr_start = fr.find('>', 0)
                fr_end = fr.find('</seg>', 0)
                print(fr[fr_start + 1:fr_end])
                fr_ = fr[fr_start + 1:fr_end]
                en_fr_str = en_ + "\t" + fr_ + "\n"
                enfr_f.write(en_fr_str)
                fr_en_str = fr_ + "\t" + en_ + "\n"
                fren_f.write(fr_en_str)
                count += 1
    print('write {} file finished!\n total count = {}'.format(output_file + '.en_fr.txt', count))
    print('write {} file finished!\n total count = {}'.format(output_file + '.fr_en.txt', count))
# Script entry: ensure the output directory exists, then run the conversion.
pathlib.Path('output').mkdir(exist_ok=True)
wmt14_en_fr_preprocess("test-full", 'output')
|
"""
This module defines the SkipoleProject, SkiCall, PageData, SectionData classes
SkipoleProject being the core of the project which loads
the project from JSON files and responds to incoming calls by calling the user functions
SkiCall is the class of the skicall object which is created for each incoming
call and is passed as an argument to the user functions
"""
import copy, os, cgi, collections, html, pprint, json, shutil, uuid, sys, traceback, re, pathlib, mimetypes
from base64 import urlsafe_b64decode
from collections.abc import MutableMapping
from http import cookies
# a search for anything none-alphanumeric and not an underscore
_AN = re.compile('[^\w]')
# a search for anything none-alphanumeric, not an underscore, not -
# to allow any character in base64.urlsafe_b64encode/base64.urlsafe_b64decode
# note the = sign is not allowed, though in b64encode it is used for padding
# This is because padding is removed on sending, and added here on receiving
_AN64 = re.compile('[^\w\-]')
from . import skiboot, read_json
from .excepts import ValidateError, ServerError, FailPage, ErrorMessage, GoTo, PageError, ServeFile
from .. import textblocks
# ServerError raised in this module use codes 9000 to 9100
# These three are 'default' functions, used if the
# user functions are not given
def _start_call(called_ident, skicall):
"Default start_call function"
return called_ident
def _submit_data(skicall):
"Default submit_data function"
return
def _end_call(page_ident, page_type, skicall):
"Default end_call function"
return
class SkipoleProject(object):
"""The SkipoleProject - an instance being a callable WSGI application"""
def __init__(self, project, projectfiles, proj_data={}, start_call=None, submit_data=None, end_call=None, url="/"):
"""Loads the project from JSON files and records the user functions"""
if _AN.search(project):
raise ServerError(message="Error: Invalid project name, alphanumeric only")
if '_' in project:
raise ServerError(message="Error: Invalid project name, alphanumeric only (no underscore).")
self._proj_ident = project
self.projectfiles = projectfiles
self.proj_data = proj_data
if start_call is None:
self.start_call = _start_call
else:
self.start_call = start_call
if submit_data is None:
self.submit_data = _submit_data
else:
self.submit_data = submit_data
if end_call is None:
self.end_call = _end_call
else:
self.end_call = end_call
# initially, assume this is the root projet, and sub projects can be added
self.rootproject = True
# A check cookies function can be set in this project if it is added as a sub-project
# initially it is None
self.check_cookies = None
# initial values, will be set from the json file
self.brief = "Project %s" % project
self.version = "0.0.0"
# The url of the root folder
url=url.strip("/").lower()
if url:
self.url = "/" + url + "/"
else:
self.url = "/"
# The root Folder
self.root = None
# dictionary of special pages, key = label: value = page ident
self.special_pages = {}
# dictionary of sections, key = name: value = section
self.sections = {}
# an ordered dictionary of {proj_ident: url,...}, ordered by length of url
self._subproject_paths = collections.OrderedDict()
# self.subprojects is a dictionary of sub projects {proj_ident: Project instance,.....}
self.subprojects = {}
# dictionary of idents: to folder or page, apart from root
# note keys are full Ident instances, values are folder or page instances
self.identitems = {}
# Create an instance of the AccessTextBlocks class for this project
self.textblocks = textblocks.AccessTextBlocks(self._proj_ident, projectfiles, skiboot.default_language())
# maintain a cach dictionary of paths against idents {path:ident}
self._paths = {}
# load project from json files
self.load_from_json()
# and add this project to the project register
skiboot.add_to_project_register(self)
def set_default_language(self, language):
"Sets the project default language"
self.textblocks.default_language = language
def get_default_language(self):
"Returns the project default language"
return self.textblocks.default_language
default_language = property(get_default_language, set_default_language)
# The __call__ method makes the instance callable, as required by a wsgi application
# This method first gets cookies from method self.get_cookies()
# it then gets status, headers and data from method self.respond()
# self.respond() parses the requested path, and calls self.proj_respond() if the page
# requested is part of this project, or it calls subproj.proj_respond() if the page
# requested is part of a sub project.
# It returns a page not found if the path does not match any page.
# It checks for a ServerError, and returns the server error page if one is raised.
# self.proj_respond(), parses caller ident and called ident and calls
# self.proj_start_call() which creates the skicall object and calls the users start_call function
# self.proj_respond() checks the ident returned from self.proj_start_call(), it then calls
    # self.read_form_data() to read any form data submitted in the raw data, and finally calls
# self.status_headers_data() or subproj.status_headers_data() if the page returned from start_call
# is a page of a subproject. It checks for a ValidateError, and returns the validate error page if one is raised.
# self.status_headers_data() calls responders, and finally calls end_call, returning the wanted status, headers and data
def __call__(self, environ, start_response):
"Defines this projects callable as the wsgi application"
# get received cookies, and lang which is a tuple of (preferred language, default language)
lang, received_cookies = self.get_cookies(environ)
status, headers, data = self.respond(environ, lang, received_cookies)
start_response(status, headers)
return data
def get_cookies(self, environ):
"""Gets cookies from environ. and places them in 'received_cookies' dictionary.
Checks presence of language in received cookie, or environ and creates a 'lang' tuple
consisting of (preferred language, default language)
returns lang, received_cookies"""
try:
cookiemorsals = cookies.SimpleCookie(environ["HTTP_COOKIE"])
except Exception:
cookiemorsals = None
if cookiemorsals:
received_cookies = {item:m.value for item,m in cookiemorsals.items()}
else:
received_cookies = {}
if 'language' in received_cookies:
language = received_cookies["language"]
else:
if "HTTP_ACCEPT_LANGUAGE" in environ:
language_list = environ["HTTP_ACCEPT_LANGUAGE"].split(',')
language = language_list[0]
else:
language = self.default_language
lang = (language, self.default_language)
return lang, received_cookies
    def respond(self, environ, lang, received_cookies):
        """After cookies obtained, this is called by __call__, and returns status, headers, data
           Finds the path called, and passes the path to self.proj_respond()
           or to subproj.proj_respond() if the path indicates the call is to a sub project.
           Detects if a ServerError occurs, and if so returns status, headers, data
           for the system server_error page."""
        # The tuple s_h_data is the tuple to return, start with it as None
        s_h_data = None
        try:
            if 'PATH_INFO' in environ:
                path = environ['PATH_INFO']
            else:
                raise ServerError(message="Invalid path")
            # the path must start with this root project url
            if (path.find(self.url) != 0) and (path + "/" != self.url):
                # path does not start with the root, so send URL NOT FOUND
                return self._url_not_found(environ, path, lang)
            # This is the root project, check if the call is for a page in any sub project
            for proj, projurl in self._subproject_paths.items():
                if (path.find(projurl) == 0) or (path + "/" == projurl):
                    # this url is within a sub project
                    subproj = self.subprojects[proj]
                    if subproj.check_cookies is None:
                        # there is no check_cookies function, so no divertedcall. Call proj_respond of the sub project
                        s_h_data = subproj.proj_respond(environ, projurl, path, lang, received_cookies)
                        break
                    # the subproj has a check_cookies function, call it. Call proj_respond of the sub project
                    divertedcall = subproj.check_cookies(received_cookies, self.proj_data)
                    if divertedcall is None:
                        # check_cookies returns None, so no diversion
                        s_h_data = subproj.proj_respond(environ, projurl, path, lang, received_cookies)
                        break
                    # a divertedcall has been returned, it can be integer/tuple/label. Convert to ident
                    divertedcall = skiboot.find_ident(divertedcall, proj_ident=self._proj_ident)
                    if divertedcall is None:
                        # no ident found, so this is a url_not_found, leave s_h_data as None
                        break
                    # a divertedcall ident is given, but it could be to a page in this root project or any sub project
                    if divertedcall[0] == self._proj_ident:
                        # the diversion is to an ident of this root project
                        s_h_data = self.proj_respond(environ, self.url, path, lang, received_cookies, divertedcall)
                        break
                    elif divertedcall[0] in self.subprojects:
                        # the diversion is to an ident of a sub project, identify the sub project
                        subproj = self.subprojects[divertedcall[0]]
                        s_h_data = subproj.proj_respond(environ, self._subproject_paths[divertedcall[0]], path, lang, received_cookies, divertedcall)
                        break
                    else:
                        # should never occur, but if it does, leave s_h_data as None
                        break
            else:
                # for..else: no sub project url matched (the loop did not break),
                # so the call is for a page in this root project
                s_h_data = self.proj_respond(environ, self.url, path, lang, received_cookies)
            if s_h_data is None:
                # No page to return has been found,
                return self._url_not_found(environ, path, lang)
            if s_h_data[2] is None:
                # No page data has been given
                return self._url_not_found(environ, path, lang)
        except ServerError as e:
            # if debug is enabled, expand the exception message to include the exception trace
            if skiboot.get_debug():
                e.message += "\n"
                exc_type, exc_value, exc_traceback = sys.exc_info()
                str_list = traceback.format_exception(exc_type, exc_value, exc_traceback)
                for item in str_list:
                    e.message += item
            # ServerError has occurred, return the server error page
            page = self._system_page("server_error")
            if (not page) or (page.page_type != "TemplatePage"):
                # return the default server error page
                return self.default_server_error_page(e.message, e.code)
            # import any sections
            page.import_sections()
            # show message passed by the exception
            page.show_error([e.errormessage])
            # if ServerError code, set it into the widget
            if e.code:
                if e.section:
                    page_data = {(e.section, e.widget, 'code'):str(e.code)}
                elif e.widget:
                    page_data = {(e.widget, 'code'):str(e.code)}
                elif page.default_error_widget.s:
                    page_data = {(page.default_error_widget.s, page.default_error_widget.w, 'code'):str(e.code)}
                elif page.default_error_widget.w:
                    page_data = {(page.default_error_widget.w, 'code'):str(e.code)}
                else:
                    page_data = None
                if page_data:
                    page.set_values(page_data)
            # update head and body parts
            page.update(environ, {}, lang, e.ident_list)
            status, headers = page.get_status()
            data = page.data()
            if not data:
                return self.default_server_error_page(e.message, e.code)
            # return page data
            s_h_data = e.status, headers, data
        return s_h_data
    def proj_respond(self, environ, projurl, path, lang, received_cookies, divertedcall=None):
        """Gets any received form data, and parses the ident field if present to find the caller page and ident_data
           Calls start call, and depending on the returned page, calls the project status_headers_data method"""
        caller_page = None
        ident_data = None
        rawformdata = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
        # if ident present in the rawformdata it should consist of project_pagenumber_identdata
        # and so the caller page can be found from the project_pagenumber
        # identdata may not exist
        try:
            # get the caller_page, from the ident field of submitted data
            # which could be None if no ident received
            if rawformdata and ('ident' in rawformdata):
                if not hasattr(rawformdata['ident'], 'value'):
                    raise ValidateError(message="Form data not accepted, caller page ident not recognised")
                else:
                    # rawformdata has 'ident' with attribute 'value'
                    # get the caller page ident, and the ident_data received from the 'ident' field
                    # which will be project_pagenumber_b64encodeddata
                    # Note: caller_page could belong to another project, so get it using ident.item() method
                    # which will query the right project
                    # project name, number and b64 encoded data should all be ascii characters passing this test
                    if _AN64.search(rawformdata['ident'].value):
                        raise ValidateError(message="Form data not accepted, caller page ident not recognised")
                    ident_parts = rawformdata['ident'].value.split('_', 2)
                    ident_items = len(ident_parts)
                    try:
                        if ident_items == 2:
                            caller_page = skiboot.Ident(ident_parts[0], int(ident_parts[1])).item()
                        elif ident_items == 3:
                            caller_page = skiboot.Ident(ident_parts[0], int(ident_parts[1])).item()
                            b64binarydata = ident_parts[2].encode('ascii') # get the submitted data and convert to binary
                            # add padding
                            b64binarydata = b64binarydata + b"=" * (4-len(b64binarydata)%4)
                            ident_data = urlsafe_b64decode(b64binarydata).decode('utf-8') # b64 decode, and convert to string
                    except Exception:
                        # any failure parsing the ident means the caller page is unknown
                        caller_page = None
                    if caller_page is None:
                        raise ValidateError(message="Form data not accepted, (received ident is not valid)")
                    if caller_page.page_type != 'TemplatePage':
                        raise ValidateError(message="Form data not accepted, (caller page ident is not a template page)")
        except ValidateError as e:
            # return the system validate_error page if available, otherwise a default page
            page = self._system_page("validate_error")
            if (not page) or (page.page_type != "TemplatePage"):
                return self.default_validate_error_page(e.message)
            # import any sections
            page.import_sections()
            # show message passed by the exception
            page.show_error([e.errormessage])
            # update head and body parts
            page.update(environ, {}, lang, e.ident_list)
            status, headers = page.get_status()
            data = page.data()
            if not data:
                return self.default_validate_error_page(e.message)
            # return page data
            return e.status, headers, data
        # so caller_page could be either given, or could be None
        # get the called ident, could be None
        if divertedcall is None:
            ident = self.page_ident_from_path(projurl, path)
        else:
            ident = divertedcall
        if caller_page:
            caller_page_ident = caller_page.ident
        else:
            caller_page_ident = None
        # now call the proj_start_call function which creates a skicall object and
        # calls the users start_call function, which could return a different page ident, or None
        try:
            pident, skicall = self.proj_start_call(environ,
                                                   path,
                                                   ident,
                                                   caller_page_ident,
                                                   received_cookies,
                                                   ident_data,
                                                   lang)
        except ServeFile as e:
            server_file = e.server_file
            if server_file is None:
                # URL NOT FOUND
                return
            # server_file is a path to a file on the server, so serve that file by returning status, headers, data
            try:
                data = _read_server_file(environ, server_file)
            except Exception as e:
                raise ServerError(message=f"Failed to read file {server_file}") from e
            return e.status, e.headers, data
        # pident is the ident of the diverted page or a label or url string
        if pident is None:
            return
        # get the page from pident
        if isinstance(pident, str):
            # either a label, or url
            if '/' in pident:
                # get redirector page
                return self._redirect_to_url(pident, environ, skicall.call_data, skicall.page_data, skicall.lang)
            else:
                # no '/' in pident so must be a label
                pident = skiboot.find_ident_or_url(pident, self._proj_ident)
                if not pident:
                    raise ServerError(message="Returned page ident from start_call not recognised")
                if isinstance(pident, str):
                    # must be a url, get redirector page
                    return self._redirect_to_url(pident, environ, skicall.call_data, skicall.page_data, skicall.lang)
        # so pident must be an ident
        if not isinstance(pident, skiboot.Ident):
            raise ServerError(message="Invalid ident returned from start_call")
        # pident is the ident returned from start_call, may be in a different project
        page = pident.item()
        if page is None:
            raise ServerError(message="Invalid ident returned from start_call")
        if page.page_type == 'Folder':
            page = page.default_page
            if not page:
                raise ServerError(message="Invalid ident returned from start_call")
        # read any submitted data from rawformdata, and place in form_data
        try:
            form_data = {}
            if rawformdata and (caller_page is not None) and (page.page_type == "RespondPage"):
                form_data = self.read_form_data(rawformdata, caller_page)
            # so form_data only available if
            # rawformdata has been submitted
            # and caller_page is known, so widgets can be extracted
            # and the destination is a RespondPage
            # otherwise form_data is empty (though rawformdata is retained)
            # dependent on whether the requested page is in this project or a sub project,
            # call status_headers_data() to find the final page to return to the client
            # ident_list retains list of idents called during a call to ensure no circulating calls
            ident_list = []
            # initially no errors, e_list is a list of errors to be shown
            e_list = []
            if page.ident.proj != self._proj_ident:
                # page returned from start_call is in another project
                subproj = self.subprojects.get(page.ident.proj)
                return subproj.status_headers_data(skicall, environ, received_cookies, rawformdata, caller_page, page, ident_list, e_list, form_data)
            # call status_headers_data to return status, headers and data to the top script
            return self.status_headers_data(skicall, environ, received_cookies, rawformdata, caller_page, page, ident_list, e_list, form_data)
        except ValidateError as e:
            page = self._system_page("validate_error")
            if (not page) or (page.page_type != "TemplatePage"):
                return self.default_validate_error_page(e.message)
            # import any sections
            page.import_sections()
            # show message passed by the exception
            page.show_error([e.errormessage])
            # update head and body parts
            page.update(environ, skicall.call_data, skicall.lang, e.ident_list)
            status, headers = page.get_status()
            data = page.data()
            if not data:
                return self.default_validate_error_page(e.message)
            # return page data
            return e.status, headers, page.data()
def proj_start_call(self, environ, path, ident, caller_ident, received_cookies, ident_data, lang):
"""Creates a skicall object and calls the users start_call function
ident is the ident of the page being called, could be None if not recognised
Returns new called_ident, and the skicall object"""
if not caller_ident:
tuple_caller_ident = ()
else:
tuple_caller_ident = caller_ident.to_tuple()
if ident is None:
called_ident = None
else:
called_ident = ident.to_tuple()
try:
# create the SkiCall object
skicall = SkiCall(environ = environ,
path = path,
project = self._proj_ident,
rootproject = self.rootproject,
caller_ident = tuple_caller_ident,
received_cookies = received_cookies,
ident_data = ident_data,
lang = lang,
proj_data = self.proj_data)
# the skicall object is changed in place, with call_data and page_data
# being set by the users own start_call function
new_called_ident = self.start_call(called_ident, skicall)
# convert returned tuple to an Ident object
if isinstance(new_called_ident, int):
new_called_ident = (self._proj_ident, new_called_ident)
if isinstance(new_called_ident, tuple):
new_called_ident = skiboot.make_ident(new_called_ident, self._proj_ident)
# could be a label or URL
except ServerError as e:
raise e
except ServeFile as e:
raise e
except Exception as e:
raise ServerError(message = "Invalid exception in start_call function.") from e
return new_called_ident, skicall
    def read_form_data(self, rawformdata, caller_page):
        """Reads raw form data from the environ and returns a dictionary with keys as skiboot.WidgField objects and values as
           the form values. Where input fields have indexed names, the skiboot.WidgField object still
           has i set to empty string, but the value is given as a dictionary with indexes as keys

           Raises ValidateError if a field does not map to a widget of the caller page."""
        # rawformdata is the data obtained from environ
        # form_data is a dictionary of data returned, without the caller ident
        # and after a set of checks
        if not rawformdata:
            return {}
        if not caller_page:
            return {}
        form_data = {}
        for field in rawformdata.keys():
            # get fields and values from the rawformdata and store them in form_data
            # with keys as skiboot.WidgField objects, and values as field values
            # in the case of indexed fields, the values are dictionaries
            if field == 'ident':
                # the 'ident' field holds the caller page ident, not widget data - skip it
                continue
            if ':' not in field:
                # All widgfields have a : in them to separate widget name from field name
                raise ValidateError(message="Form data not accepted, (invalid field %s)" % (field,))
            widgfield = skiboot.make_widgfield(field)
            # get fields and values from the rawformdata and store them in form_data
            widget = caller_page.copy_widget_from_name(widgfield.s, widgfield.w)
            if widget is None:
                raise ValidateError(message="Form data not accepted, (unexpected field %s)" % (field,))
            if isinstance(rawformdata[field], list):
                # fieldvalue is a list of items
                fieldvalue = [ item.value.strip() for item in rawformdata[field] ]
            else:
                fieldvalue = rawformdata[field].value.strip()
            if widget.is_senddict(widgfield.f):
                # field sends a dictionary, must have an index appended to the name
                # this part removes the index from the field name, and creates a form value of a dictionary with the index as keys
                fieldindex = widgfield.i
                if not fieldindex:
                    raise ValidateError(message="Form data not accepted, (invalid dictionary field %s)" % (field,))
                widgfieldnoindex = widgfield._replace(i='')
                if widgfieldnoindex in form_data:
                    form_data[widgfieldnoindex][fieldindex] = fieldvalue
                else:
                    form_data[widgfieldnoindex] = {fieldindex:fieldvalue}
            else:
                if widgfield.i:
                    raise ValidateError(message="Form data not accepted, (unexpected dictionary field %s)" % (field,))
                form_data[widgfield] = fieldvalue
        return form_data
    def status_headers_data(self, skicall, environ, received_cookies, rawformdata, caller_page, page, ident_list, e_list, form_data):
        """calls responders until it can return status, headers, page.data()"""
        try:
            # follow the chain of RespondPages until a displayable page is reached
            while page.page_type == 'RespondPage':
                ident = page.ident
                if page.responder is None:
                    raise ServerError(message="Respond page %s does not have any responder set" % (page.url,))
                try:
                    page = page.call_responder(skicall, form_data, caller_page, ident_list, rawformdata)
                    if isinstance(page, str):
                        # must be a url
                        skicall.call_data.clear()
                        skicall.page_data.clear()
                        # get redirector page
                        return self._redirect_to_url(page, environ, skicall.call_data, skicall.page_data, skicall.lang)
                except ServeFile as e:
                    server_file = e.server_file
                    if server_file is None:
                        # URL NOT FOUND
                        return
                    # server_file is a path to a file on the server, so serve that file by returning status, headers, data
                    try:
                        data = _read_server_file(environ, server_file)
                    except Exception as e:
                        raise ServerError(message=f"Failed to read file {server_file}") from e
                    return e.status, e.headers, data
                except PageError as ex:
                    # a jump to a page has occurred, with a list of errors
                    page = ex.page
                    if isinstance(page, str):
                        # must be a url
                        skicall.call_data.clear()
                        skicall.page_data.clear()
                        # get redirector page
                        return self._redirect_to_url(page, environ, skicall.call_data, skicall.page_data, skicall.lang)
                    if page.ident == ident:
                        raise ServerError(message="Invalid Failure page: can cause circulating call")
                    if page.ident in ident_list:
                        raise ServerError(message="Invalid Failure page: can cause circulating call")
                    # show the list of errors on the page
                    e_list = ex.e_list
                except GoTo as ex:
                    if ex.clear_submitted:
                        form_data.clear()
                    if ex.clear_page_data:
                        skicall.page_data.clear()
                    if ex.clear_errors:
                        ex.e_list = []
                    target = skiboot.find_ident_or_url(ex.target, ex.proj_ident)
                    # target is either an Ident, a URL or None
                    if not target:
                        raise ServerError(message="GoTo exception target not recognised")
                    if isinstance(target, skiboot.Ident):
                        if target == ident:
                            raise ServerError(message="GoTo exception page ident %s invalid, can cause circulating call" % (target,))
                        if target in ident_list:
                            raise ServerError(message="GoTo exception page ident %s invalid, can cause circulating call" % (target,))
                        page = target.item()
                        if not page:
                            raise ServerError(message="GoTo exception page ident %s not recognised" % (target,))
                        if page.page_type == 'Folder':
                            raise ServerError(message="GoTo exception page ident %s is a Folder, must be a page." % (target,))
                    else:
                        # target is a URL
                        skicall.call_data.clear()
                        return self._redirect_to_url(target, environ, skicall.call_data, skicall.page_data, skicall.lang)
                    # A divert to a fail page may lead to a GoTo exception which can therefore
                    # have an e_list
                    # show the list of errors on the page
                    e_list = ex.e_list
                # it is possible that a jump to a page in another project has been made
                if page.ident.proj != self._proj_ident:
                    subproj = skiboot.getproject(proj_ident=page.ident.proj)
                    return subproj.status_headers_data(skicall, environ, received_cookies, rawformdata, caller_page, page, ident_list, e_list, form_data)
        except (ServerError, ValidateError) as e:
            # record the responder chain on the exception so error pages can report it
            e.ident_list = ident_list
            raise e
        # the page to be returned to the client is now 'page'
        # and 'e_list' is a list of errors to be shown on it
        # call the user function end_call
        try:
            skicall.project = self._proj_ident
            skicall.proj_data = self.proj_data
            skicall.rootproject = self.rootproject
            try:
                session_string = self.end_call(page.ident.to_tuple(), page.page_type, skicall)
                if session_string:
                    # set cookie in target_page
                    page.session_cookie = "Set-Cookie", "%s=%s; Path=%s" % (skicall.project, session_string, skiboot.root_project().url)
            except FailPage as e:
                page.show_error([e.errormessage])
            finally:
                if skicall._lang_cookie:
                    page.language_cookie = skicall._lang_cookie
        except ServeFile as e:
            server_file = e.server_file
            if server_file is None:
                # URL NOT FOUND
                return
            # server_file is a path to a file on the server, so serve that file by returning status, headers, data
            try:
                data = _read_server_file(environ, server_file)
            except Exception as e:
                raise ServerError(message=f"Failed to read file {server_file}") from e
            return e.status, e.headers, data
        except GoTo as e:
            raise ServerError("Invalid GoTo exception in end_call") from e
        except Exception as e:
            raise ServerError("Invalid exception in end_call function.") from e
        # import any sections
        page.import_sections(skicall.page_data)
        if e_list:
            # show the list of errors on the page
            page.show_error(e_list)
        try:
            # now set the widget fields
            if skicall.page_data:
                page.set_values(skicall.page_data)
            page.update(environ, skicall.call_data, skicall.lang, ident_list)
        except ServerError as e:
            raise e
        except Exception as e:
            raise ServerError(message = "Exception setting page values.") from e
        status, headers = page.get_status()
        return status, headers, page.data()
def clear_cache(self):
"clear the cache of paths"
self._paths = {}
def load_from_json(self):
"Loads project with data saved in project.json file"
projectdict = read_json.create_project(self._proj_ident, self.projectfiles)
self.default_language = projectdict['default_language']
self.brief = projectdict['brief']
self.version = projectdict['version']
self.special_pages = projectdict['specialpages']
self.sections = projectdict['sections']
self.root = projectdict['siteroot']
itemlist = projectdict['itemlist']
self.identitems = {}
if itemlist:
for item in itemlist:
self.identitems[item.ident] = item
@property
def max_ident_num(self):
"Returns the maximum identnumber currently in use"
maxnum = 0
for ident in self.identitems:
if ident.num > maxnum:
maxnum = ident.num
return maxnum
def list_section_names(self):
"Returns a list of section names, alphabetacily ordered"
if not self.sections:
return []
s = [ name for name in self.sections ]
s.sort()
return s
def section(self, section_name, makecopy=True):
"Returns a section, or a deep copy of a section, or None if the section name is not found"
if section_name not in self.sections:
return
section = self.sections[section_name]
if not makecopy:
return section
if section is None:
return None
return copy.deepcopy(section)
def add_section(self, name, section):
"Adds a section to the project, returns section.change uuid"
# and save the section
section.widgets = {}
section.section_places = {} # currently unused
embedded = (name, '', None)
section.set_idents(name, section.widgets, section.section_places, embedded)
# set validators in section
section.load_validator_scriptlinks()
# set the section change number
section.change = uuid.uuid4().hex
self.sections[name] = section
return section.change
def delete_section(self, name):
"Deletes a section"
if name in self.sections:
del self.sections[name]
@property
def ident_numbers(self):
"return a list of ident numbers"
num_list = [ ident.num for ident in self.identitems ]
# insert the root
num_list.insert(0,0)
num_list.sort()
return num_list
def __getitem__(self, ident):
"""given an Ident, or a string version of ident, return page or folder.
If folder or respond page return the item, any other page, return a deep copy
of the item. If item not found, return None"""
ident = skiboot.Ident.to_ident(ident, proj_ident=self._proj_ident)
if ident is None:
return
if ident.proj != self._proj_ident:
return
if ident.num == 0:
return self.root
if ident not in self.identitems:
return
item = self.identitems[ident]
if item is None:
return
if item.page_type == 'Folder':
return item
if item.page_type == 'RespondPage':
return item
return copy.deepcopy(item)
    def add_item(self, parent_ident, item, ident=None):
        """Adds a new page or folder to the project, returns the item ident

           parent_ident - the Ident of the folder the item is to be placed in
           item - the page or folder to add
           ident - the Ident to give the item; if None the next free number is used
           Raises ServerError on an invalid ident, duplicate name or invalid parent."""
        # check ident
        if ident is None:
            ident = skiboot.Ident(self._proj_ident, self.max_ident_num+1)
        else:
            ident = skiboot.make_ident(ident, self._proj_ident)
            if ident is None:
                raise ServerError(message="Sorry. Invalid ident")
        if ident.num == 0:
            # cannot add the root folder
            raise ServerError(message="Sorry. Unable to add a new root")
        if ident.proj != self._proj_ident:
            # must be in this project
            raise ServerError(message="Sorry. Invalid ident")
        if ident in self.identitems:
            # ident must not exist
            raise ServerError(message="Sorry. The given ident already exists")
        # check parent folder
        if parent_ident.proj != self._proj_ident:
            raise ServerError(message="Invalid parent ident")
        parent = self.get_item(parent_ident)
        if parent is None:
            raise ServerError(message="Parent folder not found: Error in add_item method of Project class.")
        if parent.page_type != 'Folder':
            raise ServerError(message="Parent not a folder")
        if item.name in parent.pages:
            raise ServerError(message="Sorry, a page with that name already exists in the parent folder")
        if item.name in parent.folders:
            raise ServerError(message="Sorry, a folder with that name already exists in the parent folder")
        # set the item ident
        item.ident = ident
        # set this item name into the parent
        if item.page_type == 'Folder':
            # a folder placed inside a restricted folder is itself restricted
            if parent.restricted:
                item.set_restricted()
            parent.folders[item.name] = item.ident
        # if the page is a template or svg page, then set its idents
        # and store its widgets in the page's widgets dictionary and sectionplaceholders
        # in the page's sections directory
        elif (item.page_type == 'TemplatePage') or (item.page_type == 'SVG'):
            item.set_idents()
            parent.pages[item.name] = item.ident
            # now set validator modules in page
            if item.page_type == 'TemplatePage':
                item.load_validator_scriptlinks()
        else:
            parent.pages[item.name] = item.ident
        # set the parent change value
        parent.change = uuid.uuid4().hex
        item.parentfolder = parent
        # and finally, add the item
        self.identitems[item.ident] = item
        self.clear_cache()
        return item.ident
def delete_item(self, itemident):
"""Deletes the page or folder with the given ident from the database."""
if itemident.num == 0:
# cannot delete the root folder
raise ServerError(message="Cannot delete the root folder")
if itemident.proj != self._proj_ident:
# Must belong to this project
raise ServerError(message="Cannot delete this item (does not belong to this project)")
if itemident not in self.identitems:
raise ServerError(message="Item not found")
# get the item
item = self.identitems[itemident]
# get the items parent folder
parentfolder = item.parentfolder
if item.name in parentfolder.pages:
del parentfolder.pages[item.name]
if item.name in parentfolder.folders:
del parentfolder.folders[item.name]
parentfolder.change = uuid.uuid4().hex
# del the item
del self.identitems[itemident]
self.clear_cache()
    def delete_folder_recursively(self, itemident):
        """Deletes the folder and contents with the given ident from the database.
           returns parentfolder number and change when done, raises ServerError on failure"""
        if itemident.num == 0:
            # cannot delete the root folder
            raise ServerError(message="Cannot delete the root folder")
        if itemident.proj != self._proj_ident:
            # Must belong to this project
            raise ServerError(message="Cannot delete this item (does not belong to this project)")
        if itemident not in self.identitems:
            raise ServerError(message="Item not found")
        # get the item
        item = self.identitems[itemident]
        # get the items parent folder
        parentfolder = item.parentfolder
        if item.name in parentfolder.pages:
            # the ident refers to a page, not a folder
            raise ServerError(message="The item is not a Folder")
        if item.name not in parentfolder.folders:
            raise ServerError(message="The item to delete has not been found")
        # recursive delete this item and all contents
        self._do_delete_folder_recursively(itemident)
        # create change uuid for parentfolder
        change = uuid.uuid4().hex
        num = parentfolder.ident.num
        parentfolder.change = change
        self.clear_cache()
        return num, change
    def _do_delete_folder_recursively(self, folderident):
        "Used by delete_folder_recursively to recursively delete a folder and all its contents"
        # get the folder
        folder = self.identitems[folderident]
        page_idents = folder.page_idents()
        folder_idents = folder.folder_idents()
        # get the parent folder
        parentfolder = folder.parentfolder
        # delete all subfolders (depth-first recursion)
        for folder_ident in folder_idents:
            self._do_delete_folder_recursively(folder_ident)
        # delete all pages in the folder
        for page_ident in page_idents:
            page = self.identitems[page_ident]
            del folder.pages[page.name]
            del self.identitems[page_ident]
        # and finally delete itself
        del parentfolder.folders[folder.name]
        # del this item from identitems
        del self.identitems[folderident]
def save_item(self, item, new_parent_ident=None):
    """Save an altered page or folder (not a new one).

    If new_parent_ident is not None the item has moved to a different folder.
    Returns the item's new change uuid.
    """
    # dispatch on the item type; folders and pages have separate save paths
    saver = self.save_folder if item.page_type == 'Folder' else self.save_page
    return saver(item, new_parent_ident)
def save_page(self, item, new_parent_ident=None):
    """Saves the page - used to save an altered page, not to add a new one.

    If new_parent_ident is not None, indicates the page has moved to a different folder.
    Returns the new page.change uuid.  Raises ServerError on any failure.
    """
    if item.page_type == 'Folder':
        raise ServerError(message="Invalid item, not a page.")
    item_ident = item.ident
    if item_ident is None:
        raise ServerError(message="Unable to save page - no ident set")
    if self._proj_ident != item_ident.proj:
        raise ServerError(message="Unable to save page - invalid ident")
    if item_ident not in self.identitems:
        raise ServerError(message="This page ident does not exist")
    # the currently stored version of the page supplies its old location and name
    old_parent = self.identitems[item_ident].parentfolder
    old_name = self.identitems[item_ident].name
    if new_parent_ident is not None:
        # So its a parent folder change
        if new_parent_ident.num == 0:
            # new parent is root
            new_parent = self.root
        else:
            new_parent = self.identitems[new_parent_ident]
        if new_parent == old_parent:
            # "moving" into the current parent is not a move at all
            new_parent_ident = None
    if (item.page_type == 'TemplatePage') or (item.page_type == 'SVG'):
        item.set_idents()
    # now set validator modules in page
    if item.page_type == 'TemplatePage':
        item.load_validator_scriptlinks()
    # a fresh change uuid marks the page as altered
    item.change = uuid.uuid4().hex
    if (old_name == item.name) and (new_parent_ident is None):
        # no folder change
        self.identitems[item_ident] = item
        self.clear_cache()
        return item.change
    if new_parent_ident is None:
        # so just a name change: check for clashes, then re-register under the new name
        if item.name in old_parent.pages:
            raise ServerError(message="Sorry, a page with that name already exists")
        if item.name in old_parent.folders:
            raise ServerError(message="Sorry, a folder with that name already exists")
        if old_name in old_parent.pages:
            del old_parent.pages[old_name]
        old_parent.pages[item.name] = item_ident
        old_parent.change = uuid.uuid4().hex
        self.identitems[item_ident] = item
        self.clear_cache()
        return item.change
    # change of folder: check clashes in the new parent, then move the registration
    if item.name in new_parent.pages:
        raise ServerError(message="Sorry, a page with that name already exists")
    if item.name in new_parent.folders:
        raise ServerError(message="Sorry, a folder with that name already exists")
    if old_name in old_parent.pages:
        del old_parent.pages[old_name]
    old_parent.change = uuid.uuid4().hex
    new_parent.pages[item.name] = item_ident
    new_parent.change = uuid.uuid4().hex
    item.parentfolder = new_parent
    self.identitems[item_ident] = item
    self.clear_cache()
    return item.change
def save_folder(self, item, new_parent_ident=None):
    """Saves the folder - used to save an altered folder, not to add a new one.

    If new_parent_ident is not None, indicates the folder has moved to a different parent folder.
    Returns the new folder.change uuid.  Raises ServerError on any failure.
    """
    if item.page_type != 'Folder':
        raise ServerError(message="Invalid item, not a folder.")
    item_ident = item.ident
    if item_ident is None:
        raise ServerError(message="Unable to save folder - no ident set")
    if self._proj_ident != item_ident.proj:
        raise ServerError(message="Unable to save folder - invalid ident")
    if item_ident.num == 0:
        # the root folder is stored on self.root, not in identitems, and cannot move
        if new_parent_ident:
            raise ServerError(message="Root folder cannot have new parent")
        item.change = uuid.uuid4().hex
        self.root = item
        self.clear_cache()
        return item.change
    if item_ident not in self.identitems:
        raise ServerError(message="This folder ident does not exist")
    # the currently stored version of the folder supplies its old location and name
    old_parent = self.identitems[item_ident].parentfolder
    old_name = self.identitems[item_ident].name
    if new_parent_ident is not None:
        # So its a parent folder change
        if new_parent_ident.num == 0:
            # new parent is root
            new_parent = self.root
        else:
            new_parent = self.identitems[new_parent_ident]
        if new_parent == old_parent:
            # "moving" into the current parent is not a move at all
            new_parent_ident = None
    # a fresh change uuid marks the folder as altered
    item.change = uuid.uuid4().hex
    if (old_name == item.name) and (new_parent_ident is None):
        # no parent folder change
        self.identitems[item_ident] = item
        self.clear_cache()
        return item.change
    if new_parent_ident is None:
        # so just a name change: check for clashes, then re-register under the new name
        if item.name in old_parent.pages:
            raise ServerError(message="Sorry, a page with that name already exists")
        if item.name in old_parent.folders:
            raise ServerError(message="Sorry, a folder with that name already exists")
        if old_name in old_parent.folders:
            del old_parent.folders[old_name]
        old_parent.folders[item.name] = item_ident
        old_parent.change = uuid.uuid4().hex
        self.identitems[item_ident] = item
        self.clear_cache()
        return item.change
    # change of folder
    # A folder cannot be moved into a sub folder of itself
    folder_list = new_parent.parent_list()
    # folder list is a list of (name, identnumber) starting at root
    folder_ident_numbers = [ identnumber for name,identnumber in folder_list ]
    if item.ident.num in folder_ident_numbers:
        # item is a parent of new_parent
        raise ServerError(message="Sorry, a folder cannot be moved into a subfolder of itself.")
    if item.name in new_parent.pages:
        raise ServerError(message="Sorry, a page with that name already exists")
    if item.name in new_parent.folders:
        raise ServerError(message="Sorry, a folder with that name already exists")
    if old_name in old_parent.folders:
        del old_parent.folders[old_name]
    old_parent.change = uuid.uuid4().hex
    new_parent.folders[item.name] = item_ident
    new_parent.change = uuid.uuid4().hex
    item.parentfolder = new_parent
    self.identitems[item_ident] = item
    self.clear_cache()
    return item.change
def get_item(self, ident):
    """Given an ident (Ident object or integer), return a folder or page
    from the database; return None if not found.

    Note: the item is returned without copying and without sections imported.
    """
    if isinstance(ident, int):
        # a bare integer refers to this project
        ident = skiboot.Ident(self._proj_ident, ident)
    elif ident.proj != self._proj_ident:
        # idents of other projects cannot be looked up here
        return None
    # ident number 0 is always the root folder; anything else is in identitems
    return self.root if ident.num == 0 else self.identitems.get(ident)
def __iter__(self):
    "Iterates over the item idents; the root folder is not included"
    yield from self.identitems
def __contains__(self, item):
    "Checks if this project contains the given folder, page or ident"
    # accept either an object carrying an ident, or something convertible to one
    ident = item.ident if hasattr(item, 'ident') else skiboot.Ident.to_ident(item, self._proj_ident)
    if ident in self.identitems:
        return True
    # the root folder (num 0) is part of the project but not held in identitems
    return (ident.proj == self._proj_ident) and (ident.num == 0)
def __len__(self):
    "Number of items held; the root folder is not counted"
    return len(self.identitems)
def __bool__(self):
    "A project is always truthy, even when it holds no items"
    return True
def set_special_page(self, label, target):
    """Sets a special page under the given label.

    target may be a url string (contains '/'), a 'subproject,label' string,
    or anything skiboot.make_ident can turn into an Ident.
    Raises ServerError if label or target is missing or not recognised.
    """
    if not label:
        raise ServerError(message="Sorry, a special page label must be given")
    if not target:
        raise ServerError(message="Sorry, a label target must be given")
    # bug fix: item could previously be left unbound (NameError) when the
    # comma-separated target did not have exactly two parts
    item = None
    if isinstance(target, str) and ( '/' in target ):
        # item is a url
        item = target
    elif isinstance(target, str) and ( ',' in target ) and not (target.split(',')[1].isdigit()):
        if len(target.split(',')) == 2:
            # item points to a subproject label
            item = target
        # otherwise item stays None and is rejected below
    else:
        item = skiboot.make_ident(target, self._proj_ident)
    if not item:
        raise ServerError(message="Sorry, the page target is not recognised")
    self.special_pages[label] = item
def delete_special_page(self, label):
    "Removes the special page with this label, if present"
    # pop with a default is a no-op when the label is absent
    self.special_pages.pop(label, None)
def _system_page(self, label):
    """Returns the system page with the given label; returns None if not found"""
    # the label must be one of the recognised system page labels
    if label not in skiboot.sys_list():
        return
    if label not in self.special_pages:
        return
    ident = skiboot.find_ident(self.special_pages[label], proj_ident=self._proj_ident)
    return ident.item() if ident is not None else None
def labels(self):
    "Returns the special pages dictionary as a dictionary of labels mapped to url strings or ident tuples"
    # strings (urls) pass through; Ident values are converted to tuples
    return {key: (val if isinstance(val, str) else val.to_tuple())
            for key, val in self.special_pages.items()}
def label_value(self, key):
    """Returns the url string or ident tuple associated with the label.

    Returns None if the label is unknown.
    """
    val = self.special_pages.get(key)
    if val is None:
        return
    # removed dead local `labels_dict = {}` which was never used
    if isinstance(val, str):
        return val
    return val.to_tuple()
def _redirect_to_url(self, url, environ, call_data, page_data, lang):
    """Return status, headers, page.data() of the redirector page, with fields set to url.

    If no usable redirector template page is configured, a minimal inline
    HTML page carrying a link to the url is returned instead.
    """
    if '/' not in url:
        raise ServerError(message="Invalid target url, must contain at least one /")
    page = self._system_page('redirector')
    if (not page) or (page.page_type != "TemplatePage"):
        # no redirector template available - fall back to a hand-built HTML snippet
        page_text = "<!DOCTYPE HTML>\n<html>\n<p>Page Redirect request. Please try: <a href=%s>%s</a></p>\n</html>" % (url, url)
        return '200 OK', [('content-type', 'text/html')], [page_text.encode('ascii', 'xmlcharrefreplace')]
    # create an ErrorMessage with the url as the message
    # (the redirector template displays its error message as the target link)
    err = ErrorMessage(message=html.escape(url))
    # import any sections
    page.import_sections()
    page.show_error(error_messages=[err])
    # update head and body parts
    if page_data:
        page.set_values(page_data)
    page.update(environ, call_data, lang)
    status, headers = page.get_status()
    return status, headers, page.data()
def _url_not_found(self, environ, path, lang):
    """Used to return the url not found page.

    Uses the configured 'url_not_found' system page when available,
    otherwise (or if that page yields no data) returns a minimal inline
    404 HTML page.
    """
    page = self._system_page("url_not_found")
    # fallback body used when no usable system page is configured
    page_content = "<!DOCTYPE HTML>\n<html>\nERROR:UNKNOWN URL\n</html>".encode('ascii', 'xmlcharrefreplace')
    if not page:
        return '404 Not Found', [('content-type', 'text/html')], [page_content]
    if page.page_type == "TemplatePage":
        # create an ErrorMessage with the path as the message
        err = ErrorMessage(message=html.escape(path))
        # import any sections
        page.import_sections()
        page.show_error(error_messages=[err])
    if (page.page_type == "TemplatePage") or (page.page_type == "FilePage"):
        # update head and body parts
        page.update(environ, {}, lang)
        status, headers = page.get_status()
        page_data = page.data()
        if page_data:
            return '404 Not Found', headers, page_data
    # if any other type of page, or no page content, then return this
    return '404 Not Found', [('content-type', 'text/html')], [page_content]
def default_server_error_page(self, message='', code=0):
    "Given a ServerError exception, return a default status,headers,data"
    # build the full HTML body in one of two shapes, with or without a message block
    if message:
        body = "<!DOCTYPE HTML>\n<html>\n<p>SERVER ERROR</p>\n<p>Error code : %s</p>\n<pre>%s</pre>\n</html>" % (code, html.escape(message))
    else:
        body = "<!DOCTYPE HTML>\n<html>\n<p>SERVER ERROR</p>\n<p>Error code : %s</p>\n</html>" % (code,)
    return '500 Internal Server Error', [('content-type', 'text/html')], [body.encode('ascii', 'xmlcharrefreplace')]
def default_validate_error_page(self, message):
    "Given a ValidateError exception, return a default status,headers,data"
    # assemble the body incrementally; the message paragraph is optional
    body = "<!DOCTYPE HTML>\n<html>\n<p>VALIDATION ERROR</p>\n"
    if message:
        body += "<p>%s</p>\n" % (html.escape(message),)
    body += "</html>"
    return '400 Bad Request', [('content-type', 'text/html')], [body.encode('ascii', 'xmlcharrefreplace')]
def page_ident_from_path(self, projurl, path):
    """Tests if ident exists in the cache, return it, if not, call self.root.page_ident_from_path
    and cache the result, then return the ident. If no ident found, or if ident within a restricted folder, return None."""
    # fast path: previously resolved paths are cached
    if path in self._paths:
        return self._paths[path]
    ident = None
    strip_path = path.strip("/")
    if not strip_path:
        # the path is just the root
        pathlist = []
    else:
        pathlist = strip_path.split("/")
    strip_projurl = projurl.strip("/")
    # The projurl must be removed from the pathlist before the call to self.root.page_ident_from_path()
    if (not strip_projurl):
        # no projurl to remove
        ident = self.root.page_ident_from_path(self.identitems, pathlist)
    else:
        # strip_projurl may be something like "lib", remove the projurl from the pathlist
        projurl_list = strip_projurl.split("/")
        for item in projurl_list:
            if not pathlist:
                # invalid call, the pathlist must start with the projurl
                return
            if item == pathlist[0]:
                pathlist.pop(0)
            else:
                # invalid call, the pathlist must start with the projurl
                return
        ident = self.root.page_ident_from_path(self.identitems, pathlist)
    if ident is not None:
        # cache the successful resolution for future calls
        self._paths[path] = ident
    return ident
@property
def proj_ident(self):
    "The project ident string of this project"
    return self._proj_ident
@property
def subproject_paths(self):
    "property getter to return an ordered dictionary of sub project {ident:path,...}"
    if not self.rootproject:
        # sub projects do not themselves contain further sub projects
        return collections.OrderedDict()
    # return a copy so callers cannot mutate the internal mapping
    return self._subproject_paths.copy()
def list_of_subproject_idents(self):
    "Returns a list of subproject idents"
    # list() over the mapping collects its keys; replaces the redundant
    # identity comprehension [i for i in ...]
    return list(self.subproject_paths)
def add_project(self, proj, url=None, check_cookies=None):
    """Add a project to self, returns the url.

    proj is the sub project application.
    This adds a reference to the project to the subproject_paths, returns the sub project path.
    check_cookies, if given, must be callable as check_cookies(received_cookies, proj_data).
    Raises ValidateError on an invalid project or url.
    """
    if not self.rootproject:
        raise ValidateError(message="Cannot add to a sub project")
    if check_cookies is not None:
        if not callable(check_cookies):
            raise ValidateError(message="If given check_cookies must be callable as check_cookies(received_cookies, proj_data)")
    proj_id = proj.proj_ident
    # get a copy of the {proj_id:url} subproject_paths dictionary, and this projects url
    sub_paths = self._subproject_paths.copy()
    if (proj_id == self._proj_ident) or (proj_id in sub_paths):
        # bug fix: message previously read "Project already exits"
        raise ValidateError(message="Project already exists")
    this_url = self.url
    if url is None:
        # no url given - derive one from the sub project, or its ident
        url = proj.url.strip("/").lower()
        if not url:
            url = proj_id.lower()
    else:
        url=url.strip("/").lower()
        if not url:
            raise ValidateError(message="Invalid URL passed to add_project, it must be a path longer than the root application path")
    url = "/" + url + "/"
    # Ensure url starts with this project url
    if not url.startswith(this_url):
        raise ValidateError(message="Invalid URL passed to add_project, it must be a path longer than the root application path")
    # add this ident and url to subproject_paths
    sub_paths[proj_id] = url
    # save new subproject_paths dictionary, longest (deepest) paths first so
    # path matching finds the most specific sub project
    self._subproject_paths = collections.OrderedDict(sorted(sub_paths.items(), key=lambda t: len(t[1].strip("/").split("/")), reverse=True))
    # add the subproject to this project
    proj.rootproject = False
    proj.url = url
    proj._subproject_paths = collections.OrderedDict()
    proj.subprojects = {}
    self.subprojects[proj_id] = proj
    # set check_cookies function into the sub project
    proj.check_cookies = check_cookies
    # clear non root projects from the project register
    skiboot.del_from_project_register()
    return url
@property
def root_ident(self):
    'provides a root_ident attribute: the Ident of this project with number 0'
    return skiboot.Ident(self._proj_ident, 0)
def next_ident(self):
    "Returns next Ident available by incrementing the maximum existing ident number"
    return skiboot.Ident(self._proj_ident, self.max_ident_num+1)
class SkiCall(object):
    """SkiCall is the class of the skicall object which is created for each incoming
    call and is passed as an argument to the user functions"""

    def __init__(self, environ, path, project, rootproject, caller_ident, received_cookies, ident_data, lang, proj_data):
        self.environ = environ
        self.path = path
        self.project = project
        self.rootproject = rootproject
        self.caller_ident = caller_ident
        self.received_cookies = received_cookies
        self.ident_data = ident_data
        self._lang = lang
        self._lang_cookie = None
        self.proj_data = proj_data
        self._projectfiles = skiboot.projectfiles(project)
        self.ident_list = []
        self.submit_list = []
        self.submit_dict = {'error_dict':{}}
        self.call_data = {}
        self.page_data = {}

    def update(self, itemdata):
        """Updates page_data from a PageData, SectionData or Dictionary.

        Raises ServerError for any other argument type.
        """
        if isinstance(itemdata, PageData):
            self.page_data.update(itemdata._page_data)
        elif isinstance(itemdata, SectionData):
            # create a PageData object and update it with the section
            pd = PageData()
            pd.update(itemdata)
            self.page_data.update(pd._page_data)
        elif isinstance(itemdata, dict):
            # create a PageData object from the dictionary
            # bug fix: previously referenced the undefined name 'itemdict'
            pd = PageData.from_dict(itemdata)
            # and update from that
            self.page_data.update(pd._page_data)
        else:
            raise ServerError(message="Error: invalid item used to update skicall")
        # backwards compatable stuff: migrate old-style keys to the new names
        if ('content_length' not in self.page_data) and ('content-length' in self.page_data):
            val = self.page_data['content-length']
            del self.page_data['content-length']
            self.page_data['content_length'] = val
        if ('cssimport' not in self.page_data) and ('@import' in self.page_data):
            val = self.page_data['@import']
            del self.page_data['@import']
            self.page_data['cssimport'] = val

    def get_pagedata(self):
        """Returns a PageData object of the current data in skicall, note this is a copy,
        if changed it will not change the data in skicall, unless skicall.update is called with
        the new PageData object """
        # NOTE(review): from_dict expects the string keys produced by to_dict,
        # while page_data holds tuple keys; tuple keys fall through to the
        # attribute branch unchanged - confirm this round-trip is intended
        return PageData.from_dict(self.page_data.copy())

    def clear_page_data(self):
        "Clears all page data from this call"
        self.page_data = {}

    def clear_pagedata(self):
        "Clears all page data from this call (alias of clear_page_data)"
        self.page_data = {}

    @property
    def projectfiles(self):
        "Returns the projectfiles string"
        return self._projectfiles

    @property
    def lang(self):
        "Returns the lang tuple"
        return self._lang

    def get_language(self):
        "Returns the language string"
        return self._lang[0]

    def set_language(self, language):
        "Sets the language string and creates a language cookie with a persistance of 30 days"
        if language:
            self._lang = (language, self._lang[1])
            # bug fix: previously used the undefined name 'language_string'
            self._lang_cookie = "Set-Cookie", "language=%s; Path=%s; Max-Age=2592000" % (language, skiboot.root_project().url)

    language = property(get_language, set_language)

    @property
    def accesstextblocks(self):
        "Returns the project instance of the AccessTextBlocks class"
        this_project = skiboot.getproject(proj_ident=self.project)
        return this_project.textblocks

    def textblock(self, textref, project=None):
        """This method returns the textblock text, given a textblock reference string,
        If project is not given assumes this project, if given, project must exist as either the root,
        or a sub project of the root.
        If no textblock is found, returns None."""
        if project is None:
            project = self.project
        proj = skiboot.getproject(project)
        if proj is None:
            return
        return proj.textblocks.get_text(textref, self.lang)

    def label_value(self, label, project=None):
        """Given a label, returns the associated ident or URL
        If project is not given assumes this project, if given, project must exist as either the root,
        or a sub project of the root.
        If no label is found, returns None."""
        if project is None:
            project = self.project
        proj = skiboot.getproject(project)
        if proj is None:
            return
        return proj.label_value(label)

    def projectpaths(self):
        """Returns a dictionary of project name : project path
        This method returns a dictionary of project names as keys with the project paths as values."""
        return skiboot.projectpaths()

    def makepath(self, *foldernames):
        "Returns a url path string starting with the projects path, with the given foldernames joined"
        projectpath = self.projectpaths()[self.project]
        if not foldernames:
            return projectpath
        folderpath = "/".join(foldernames)
        # avoid a double slash when the project path already ends with one
        if projectpath.endswith("/"):
            fullpath = projectpath + folderpath
        else:
            fullpath = projectpath + "/" + folderpath
        return fullpath

    def map_url_to_server(self, urlfolder, serverfolder):
        """Generally called in the start_call function. Maps a url folder such as
        "special/css" to a server folder such as "/home/user/thisproject/css"
        If a call is made to, say "/projectpath/special/css/myfile.css" this function will return a
        pathlib.Path object to the file "/home/user/thisproject/css/myfile.css" if such a file
        exists. If not then None is returned.
        If the given urlfolder starts with "/" then it is an absolute path and projectpath is not prepended.
        If start_call uses this to raise a ServeFile exception, then the framework will serve the file.
        An example of usage is:

        def start_call(called_ident, skicall):
            servedfile = skicall.map_url_to_server("special/css", "/home/user/thisproject/css")
            if servedfile:
                raise ServeFile(servedfile)
            return called_ident
        """
        server_folder = pathlib.Path(serverfolder).expanduser().resolve()
        if not server_folder.is_dir():
            return
        if not urlfolder.startswith("/"):
            # relative path, pre-pend the project url
            projectpath = self.projectpaths()[self.project]
            if projectpath.endswith("/"):
                urlfolder = projectpath + urlfolder
            else:
                urlfolder = projectpath + "/" + urlfolder
        if self.path.startswith(urlfolder):
            url_folder = pathlib.Path(urlfolder)
            # the path requested
            path = pathlib.Path(self.path)
            # path must have more elements than url_folder, as it should have at least the filename
            if len(path.parts) <= len(url_folder.parts):
                return
            # splice the extra path elements onto the server folder
            servedfileparts = server_folder.parts + path.parts[len(url_folder.parts):]
            servedfile = pathlib.Path(*servedfileparts)
            if servedfile.is_file():
                return servedfile
        return
def _readfile(filepath, size):
    "Return a generator yielding successive chunks of the file, each at most size bytes"
    with filepath.open("rb") as fileobj:
        while True:
            chunk = fileobj.read(size)
            if not chunk:
                # end of file reached
                break
            yield chunk
def _read_server_file(environ, filepath, size=32768):
    "Returns an iterator over the file contents, using wsgi.file_wrapper when the server provides one"
    wrapper = environ.get('wsgi.file_wrapper')
    if wrapper is None:
        # no server-supplied wrapper; fall back to the generator
        return _readfile(filepath, size)
    return wrapper(filepath.open("rb"), size)
# An instance of this PageData is set into a skicall object to provide data for page widgets
class PageData(MutableMapping):
    """A mapping of (widgetname, fieldname) widgfield keys to values, plus page
    attributes and section data, set into a skicall object to provide data for
    page widgets.  Internally _page_data holds three key shapes: plain strings
    for page attributes, 2-tuples for widgfields or (sectionalias, attribute),
    and 3-tuples for (sectionalias, widgetname, fieldname)."""

    # page attribute names which may be read/set as attributes of this object
    page_variables = skiboot.PAGE_VARIABLES

    @classmethod
    def from_dict(cls, pagedict):
        "Returns an instance of this class given a dictionary as produced by the to_dict method"
        pd = cls()
        for key,val in pagedict.items():
            if "/" in key:
                # sectionalias/attribute
                sectionalias,att = key.split("/")
                pd.sections.add(sectionalias)
                pd._page_data[sectionalias, att] = val
            elif ":" in key:
                section_widg,fld = key.split(":")
                if "-" in section_widg:
                    # sectionalias-widget:field
                    sectionalias, widg = section_widg.split("-")
                    pd.sections.add(sectionalias)
                    pd._page_data[sectionalias, widg, fld] = val
                else:
                    # widget:field
                    pd._page_data[section_widg, fld] = val
            else:
                # a single string without / or : must be a page attribute
                pd._page_data[key] = val
        return pd

    def __init__(self):
        "_page_data will become the skicall.page_data when this object is set into skicall"
        self._page_data = {}
        # aliases of sections whose data is held in _page_data
        self.sections = set()

    def clear(self):
        # empty both the data mapping and the known section aliases
        self._page_data.clear()
        self.sections.clear()

    def to_dict(self):
        """Returns a dictionary containing the data held in this object, with keys as strings
        possibly useful for storage or caching if this data is to be re-used"""
        # introduce field delimiters / : -
        pagedict = {}
        for key, val in self._page_data.items():
            if isinstance(key,str):
                # keys are strings - page attributes, leave as strings
                pagedict[key] = val
            elif isinstance(key, tuple):
                if len(key) == 2:
                    if key[0] in self.sections:
                        # keys are (sectionalias, attribute) set as "sectionalias/attribute"
                        pagedict[key[0]+'/'+key[1]] = val
                    else:
                        # or keys are (widgetname, fieldname) set as "widgetname:fieldname"
                        pagedict[key[0]+':'+key[1]] = val
                elif len(key) == 3:
                    # keys will be of the form sectionalias-widgetname:fieldname
                    pagedict[key[0]+'-'+key[1]+':'+key[2]] = val
        return pagedict

    def get_value(self, sectionalias, widgetname, fieldname):
        """Returns the value set in this field, if item not found, return None.
        Set sectionalias to None if this widget is not in a section"""
        if sectionalias is None:
            key = (widgetname, fieldname)
            if not self._valid_widgfield(key):
                return
            if key in self._page_data:
                return self._page_data[key]
            else:
                return
        if sectionalias not in self.sections:
            # it does not exist
            return None
        key = (sectionalias, widgetname, fieldname)
        if key in self._page_data:
            return self._page_data[key]

    def get_section(self, sectionalias):
        "Retrieve a section, if it has not been added to the page, return None"
        if sectionalias not in self.sections:
            # it does not exist
            return None
        # rebuild a SectionData object from the entries belonging to this alias
        s = SectionData(sectionalias)
        for key, val in self._page_data.items():
            if not isinstance(key, tuple):
                continue
            if key[0] == sectionalias:
                if len(key) == 2:
                    # (sectionalias, attribute)
                    s._section_data[key[1]] = val
                else:
                    # (sectionalias, widgetname, fieldname)
                    s._section_data[key[1], key[2]] = val
        return s

    def delete_section(self, sectionalias):
        "Deletes a section and all of its data"
        if sectionalias not in self.sections:
            return
        self.sections.remove(sectionalias)
        # rebuild _page_data without any entry belonging to this alias
        newdict = {}
        for key, val in self._page_data.items():
            if isinstance(key, tuple) and (len(key) >= 2) and (key[0] == sectionalias):
                continue
            newdict[key] = val
        self._page_data = newdict

    def update(self, item):
        """Update with a PageData, SectionData or a dictionary.

        Raises KeyError if a section alias would clash with an existing
        widget name, or if item is of an unsupported type."""
        if isinstance(item, SectionData):
            # update from SectionData
            sectionalias = item.sectionalias
            if sectionalias not in self.sections:
                # test no widget clash
                for key in self:
                    # iterates through every widget, with key being (widg,fld)
                    if sectionalias == key[0]:
                        # sectionalias clashes with a widget
                        raise KeyError
            self._add_section(item)
        elif isinstance(item, PageData):
            # update this PageData with another PageData object
            # test the sections in the item object do not clash with widgets in this object
            for key in self:
                # iterates through every widget in self, with key being (widg,fld)
                if key[0] in item.sections:
                    # widget in self has the same name as a section in item
                    raise KeyError
            self._page_data.update(item._page_data)
            self.sections.update(item.sections)
        elif isinstance(item, dict):
            # create a PageData object from the dictionary, and update with that
            pd = PageData.from_dict(item)
            for key in self:
                # iterates through every widget in self, with key being (widg,fld)
                if key[0] in pd.sections:
                    # widget in self has the same name as a section in pd
                    raise KeyError
            self._page_data.update(pd._page_data)
            self.sections.update(pd.sections)
        else:
            raise KeyError

    def _add_section(self, section):
        "Add section data under the section's alias"
        sectionalias = section.sectionalias
        self.sections.add(sectionalias)
        for at, val in section._section_data.items():
            if isinstance(at, str):
                # A section attribute
                if val is None:
                    continue
                self._page_data[sectionalias, at] = val
        # add items from section
        for key,val in section.items():
            self._page_data[sectionalias, key[0], key[1]] = val

    def __getattr__(self, name):
        """Get a page attribute from the _page_data dictionary.

        Returns None when the attribute is valid but has not been set."""
        if name not in self.page_variables:
            raise AttributeError
        if name in self._page_data:
            return self._page_data[name]

    def __setattr__(self, name, value):
        "Sets a page attribute; setting it to None removes it"
        if name in self.page_variables:
            if value is None:
                if name in self._page_data:
                    del self._page_data[name]
            else:
                self._page_data[name] = value
            return
        # for all other values
        super().__setattr__(name, value)

    def _valid_widgfield(self, key):
        # a widgfield key is a 2-tuple whose first element is not a section alias
        if not isinstance(key, tuple):
            return False
        if len(key) != 2:
            # All widgfields have a two element tuple as key
            return False
        if key[0] in self.sections:
            # this key name is used as a section alias
            return False
        return True

    def __setitem__(self, key, value):
        # setting a widgfield to None removes it
        if self._valid_widgfield(key):
            if (value is None) and (key in self._page_data):
                del self._page_data[key]
            else:
                self._page_data[key] = value
        else:
            raise KeyError

    def __delitem__(self, key):
        if self._valid_widgfield(key):
            if key in self._page_data:
                del self._page_data[key]
        else:
            raise KeyError

    def __getitem__(self, key):
        if self._valid_widgfield(key):
            return self._page_data[key]
        else:
            raise KeyError

    def __iter__(self):
        # iterate over widgfield keys only; attributes and section keys are skipped
        page_data = self._page_data
        for key in page_data.keys():
            if self._valid_widgfield(key):
                yield key

    def __len__(self):
        "Returns the number of widgfields associated with the page"
        page_data = self._page_data
        length = 0
        for key in page_data.keys():
            if self._valid_widgfield(key):
                length += 1
        return length
# instances of this SectionData class are used with the update method of a PageData object to provide data for sections
class SectionData(MutableMapping):
    """A mapping of (widgetname, fieldname) widgfield keys to values for a
    single section, together with section attributes, used with the update
    method of a PageData object."""

    # section attribute names which may be read/set as attributes of this object
    section_variables = skiboot.SECTION_VARIABLES

    @classmethod
    def from_dict(cls, sectiondict, sectionalias):
        """Returns an instance of this class given a dictionary as produced by the to_dict method.

        Any alias embedded in the dictionary keys is discarded and replaced by
        sectionalias.  Raises KeyError on any key that is not a section
        attribute or an alias-qualified widgfield."""
        sd = cls(sectionalias)
        newdict = {}
        for key,val in sectiondict.items():
            if not isinstance(key, str):
                raise KeyError
            if "/" in key:
                alias, att = key.split("/")
                # discard alias, as it is to be replaced by sectionalias
                if att not in cls.section_variables:
                    raise KeyError
                newdict[att] = val
            elif ":" in key:
                section_widg, fld = key.split(":")
                if "-" in section_widg:
                    alias,widg = section_widg.split("-")
                    newdict[widg,fld] = val
                else:
                    raise KeyError
            else:
                # not an attribute or widget
                raise KeyError
        # assign newdict to the new class
        sd._section_data = newdict
        return sd

    def __init__(self, sectionalias):
        """sectionalias is the name of this section as set in the page"""
        self._section_data = {}
        self._sectionalias = sectionalias

    def clear(self):
        self._section_data = {}

    def to_dict(self):
        """Returns a dictionary containing the data held in this object, with keys as strings
        possibly useful for storage or caching if this data is to be re-used"""
        # introduce field delimiter :
        sectiondict = {}
        for key, val in self._section_data.items():
            if isinstance(key,str):
                # keys are strings - section attributes, introduce sectionalias/attribute
                if val is None:
                    continue
                sectiondict[self._sectionalias + "/" + key] = val
            elif isinstance(key, tuple):
                if len(key) == 2:
                    # keys are (widgetname, fieldname) set as "sectionalias-widgetname:fieldname"
                    sectiondict[self._sectionalias + "-" + key[0]+':'+key[1]] = val
        return sectiondict

    def copy(self, newalias):
        "Return a copy of this section with a new sectionalias"
        s = self.__class__(newalias)
        s._section_data = self._section_data.copy()
        return s

    def multiply(self, number):
        """Sets the multiplier to number and returns the given number of SectionData objects
        each with sectionalias of sectionalias_0, sectionalias_1,.. etc"""
        if number <= 1:
            # nothing to multiply; self.multiplier is left unchanged in this case
            return []
        sectionlist = []
        for n in range(number):
            newalias = self.sectionalias+ "_" + str(n)
            newsection = self.copy(newalias)
            # the copies themselves are not further multiplied
            newsection.multiplier = 0
            sectionlist.append(newsection)
        self.multiplier = number
        return sectionlist

    def __getattr__(self, name):
        """Get a section attribute from the _section_data dictionary.

        NOTE(review): raises KeyError (not AttributeError, and unlike
        PageData.__getattr__ which returns None) when a valid attribute has
        not been set - confirm callers rely on this before changing."""
        if name == "sectionalias":
            return self._sectionalias
        if name not in self.section_variables:
            raise AttributeError
        return self._section_data[name]

    def __setattr__(self, name, value):
        "Sets a section attribute"
        if name == '_section_data':
            # this is required to insert values into self._section_data
            super().__setattr__(name, value)
            return
        if name == '_sectionalias':
            # this is required to insert the section name into self._sectionalias
            super().__setattr__(name, value)
            return
        if name not in self.section_variables:
            raise AttributeError
        self._section_data[name] = value

    def _valid_widgfield(self, key):
        # a widgfield key is any 2-tuple
        if not isinstance(key, tuple):
            return False
        if len(key) != 2:
            # All widgfields have a two element tuple as key
            return False
        return True

    def __setitem__(self, key, value):
        if self._valid_widgfield(key):
            self._section_data[key] = value
        else:
            raise KeyError

    def __delitem__(self, key):
        if self._valid_widgfield(key):
            del self._section_data[key]
        else:
            raise KeyError

    def __getitem__(self, key):
        if self._valid_widgfield(key):
            return self._section_data[key]
        else:
            raise KeyError

    def __iter__(self):
        # iterate over widgfield keys only; attribute keys are skipped
        for key in self._section_data.keys():
            if self._valid_widgfield(key):
                yield key

    def __len__(self):
        "Returns the number of widgfields associated with the section"
        length = 0
        for key in self._section_data.keys():
            if self._valid_widgfield(key):
                length += 1
        return length
|
import numpy as np
def apply_offset(stream, offset=None, coffset=None):
    """Apply a calibration offset to stream.

    offset: sequence of (measured, true) pairs; a polynomial of degree
    len(offset)-1 is fitted through them and evaluated over stream.  A single
    pair is applied as a constant shift instead.
    coffset: constant added after any offset correction.
    Returns the corrected stream; the input array is not modified.
    """
    # `is not None` replaces the fragile `!= None` comparisons (elementwise
    # with arrays, and non-idiomatic); the empty else: pass branch is dropped
    if offset is not None:
        offsetarray = np.array(offset)
        coeff = np.polyfit(
            offsetarray[:, 0], offsetarray[:, 1], deg=len(offsetarray)-1)
        # Make sure that constant shift (only one tuple provided) is handled as offset
        if len(coeff) == 1:
            shift = offsetarray[0, 1] - offsetarray[0, 0]
            stream = stream+shift
        else:
            stream = np.polyval(coeff, stream)
    if coffset is not None:
        return stream+coffset
    return stream
def take_derivative1d(x, y, deg):
    """Return the deg-th (1 or 2) numerical derivative of y with respect to x.

    Raises TypeError for any other degree.
    """
    if deg not in (1, 2):
        raise TypeError("No other derivatives implemented.")
    result = np.gradient(y, x)
    if deg == 2:
        # second derivative: differentiate the first derivative again
        result = np.gradient(result, x)
    return result
def take_derivative2d(z, x, y, deg):
    """Return the deg-th (1 or 2) numerical gradient of z with spacings x and y.

    Raises TypeError for any other degree.
    """
    if deg not in (1, 2):
        raise TypeError("No other derivatives implemented.")
    grad = np.gradient(z, x, y)
    if deg == 2:
        # NOTE(review): grad is a list of per-axis arrays here; feeding it back
        # into np.gradient with two spacings assumes np.gradient accepts that
        # shape - confirm the deg=2 path is exercised/correct
        grad = np.gradient(grad, x, y)
    return grad
|
def token(senten, word_tokenize):
    """Tokenize each sentence in senten using the supplied word_tokenize callable.

    Returns a list of token lists, one per sentence.
    """
    # comprehension replaces the manual append loop; output is identical
    return [word_tokenize(sentence) for sentence in senten]
def topic_model(reviews_lemmatized, gensim, np, MovieGroupProcess, int_val):
    """Fit a GSDMM (MovieGroupProcess) topic model over tokenized documents.

    :param reviews_lemmatized: list of token lists, one per document.
    :param gensim: the gensim module (injected).
    :param np: the numpy module (injected).
    :param MovieGroupProcess: the GSDMM class (injected).
    :param int_val: integer reused as cluster count K, n_iters, the dictionary
        no_below threshold, and the number of top clusters returned.
        NOTE(review): one knob drives four unrelated parameters -- confirm intended.
    :return: (top cluster indices, fitted gsdmm model, formatted top-word strings)
    """
    # fixed seed for reproducible clustering
    np.random.seed(0)
    # initialize GSDMM
    gsdmm = MovieGroupProcess(K=int_val, alpha=0.1, beta=0.3, n_iters=int_val)
    # create dictionary of all words in all documents
    dictionary = gensim.corpora.Dictionary(reviews_lemmatized)
    # filter extreme cases out of dictionary
    dictionary.filter_extremes(no_below=int_val, no_above=0.5, keep_n=100000)
    # create variable containing length of dictionary/vocab
    n_terms = len(dictionary)
    # fit GSDMM model
    model = gsdmm.fit(reviews_lemmatized, n_terms)
    doc_count = np.array(gsdmm.cluster_doc_count)
    # topics sorted by the number of document they are allocated to
    top_index = doc_count.argsort()[-int_val:][::-1]
    # show the top 20 words in term frequency for each cluster
    ans = []
    ans = top_words(gsdmm, gsdmm.cluster_word_distribution, top_index, 15)
    return top_index, gsdmm, ans
def top_words(gsdmm, cluster_word_distribution, top_cluster, values):
    """Format the *values* most frequent words for each cluster index in *top_cluster*."""
    lines = []
    for cluster in top_cluster:
        # NOTE: reads gsdmm.cluster_word_distribution, not the second argument.
        ranked = sorted(gsdmm.cluster_word_distribution[cluster].items(),
                        key=lambda item: item[1], reverse=True)[:values]
        lines.append("\nCluster %s : %s" % (cluster, ranked))
    return lines
def create_topics_dataframe(pd, data_text, mgp, threshold, topic_dict, lemma_text):
    """Build a DataFrame mapping each text to its best GSDMM topic, or 'Other'."""
    result = pd.DataFrame(columns=['Text', 'Topic', 'Lemma-text'])
    for idx, text in enumerate(data_text):
        result.at[idx, 'Text'] = text
        result.at[idx, 'Lemma-text'] = lemma_text[idx]
        best = mgp.choose_best_label(lemma_text[idx])
        # Low-confidence assignments fall into the catch-all 'Other' bucket.
        result.at[idx, 'Topic'] = topic_dict[best[0]] if best[1] >= threshold else 'Other'
    return result
def processing(data, gensim, word_tokenize, np, MovieGroupProcess, pd, WordCloud, int_val, list_stop):
    """End-to-end pipeline: clean raw comments, run GSDMM topic modeling, build word clouds.

    :param data: DataFrame whose first column holds the raw comments.
    :param gensim/word_tokenize/np/MovieGroupProcess/pd/WordCloud: injected dependencies.
    :param int_val: shared integer knob (cluster count, iterations, word-cloud count, no_below).
    :param list_stop: extra stopwords merged with stopwords.txt (read from the CWD).
    :return: (list of word clouds, formatted top-word strings, labelled DataFrame)
    """
    df = data.iloc[:, 0]
    first = df.values.tolist()
    df_first = pd.DataFrame(first, columns =['comment'])
    # remove characters and turn to lower case
    df1 = df.str.lower().str.replace('[^\w\s]',' ')
    # change text abbreviations to original word (Malay/English chat shorthand)
    df1 = df1.str.replace(r'\balhamdulillah\b', 'alhamdulilah')
    df1 = df1.str.replace(r'\bdgn\b', 'dengan')
    df1 = df1.str.replace(r'\bgemen\b', 'kerajaan')
    df1 = df1.str.replace(r'\bk\b', 'okay')
    df1 = df1.str.replace(r'\bmora\b', 'moratorium')
    df1 = df1.str.replace(r'\bni\b', 'ini')
    df1 = df1.str.replace(r'\borg\b', 'orang')
    df1 = df1.str.replace(r'\bsapa\b', 'siapa')
    df1 = df1.str.replace(r'\btak\b', 'tidak')
    df1 = df1.str.replace(r'\btu\b', 'itu')
    df1 = df1.str.replace(r'\btq\b', 'thank you')
    df1 = df1.str.replace(r'\bty\b', 'thank you')
    df1 = df1.str.replace(r'\bx\b', 'tidak')
    # remove unwanted word
    df1 = df1.str.replace('\n', '')
    df1 = df1.str.replace(r'\bla\b', '')
    df1 = df1.str.replace(r'\bje\b', '')
    # remove stopword (stopwords.txt must exist in the working directory)
    stop_words = set(open('stopwords.txt').read().splitlines())
    stop_words.update(list_stop)
    df2 = df1.apply(lambda x: ' '.join([word for word in x.split() if word not in (stop_words)]))
    # dataframe change to list
    list_dat = df2.values.tolist()
    # tokenize word
    reviews_lemmatized = token(list_dat, word_tokenize)
    # GSDMM for the topic modeling
    ans = []
    top_index, gsdmm, ans = topic_model(reviews_lemmatized, gensim, np, MovieGroupProcess, pd, WordCloud, int_val) if False else topic_model(reviews_lemmatized, gensim, np, MovieGroupProcess, int_val)
    # give name to the cluster
    list_topic = list(top_index)
    topic_dict = {}
    topic_names = []
    for i in range(len(list_topic)):
        topic_names.append("Cluster " + str(list_topic[i]))
    for i, topic_num in enumerate(top_index):
        topic_dict[topic_num]=topic_names[i]
    # create dataframe with topic
    result = create_topics_dataframe(pd, data_text=df1, mgp=gsdmm, threshold=0.3, topic_dict=topic_dict, lemma_text=reviews_lemmatized)
    result['Lemma_text'] = result['Lemma-text'].apply(lambda row: ' '.join(row))
    result = result.drop('Lemma-text', axis=1)
    # create dataframe with label
    final_df = pd.concat([df_first, result['Topic']], axis=1)
    final_df["clean"] = list_dat
    # create word clouds
    wc = []
    for i in range(int_val):
        wc.append(create_WordCloud(WordCloud, result['Lemma_text'].loc[result.Topic == topic_names[i]], title=("Most used words in "+topic_names[i])))
    return wc, ans, final_df
def create_WordCloud(WordCloud, data, title=None):
    """Build a word cloud from the space-joined values of *data*.

    ``title`` is accepted for API compatibility but not used here.
    """
    text = " ".join(data.values)
    cloud = WordCloud(width=400, height=400,
                      collocations=False,
                      background_color='white',
                      min_font_size=14)
    return cloud.generate(text)
|
"""
View this repository on github: https://github.com/Jothin-kumar/Geometry-app
MIT License
Copyright (c) 2021 B.Jothin kumar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Author: Jothin kumar (https://jothin.tech)
Github repository of this project: https://github.com/Jothin-kumar/Geometry-app
"""
import sys
from ._intersecting_lines import refresh_intersecting_lines
from ._parallel_lines import refresh_parallel_lines
# Import global_variables module
sys.path.append('../../')
import global_variables
class Line:
    """A line segment between two distinct named points.

    Drawing is delegated to caller-supplied ``create_line_command`` /
    ``delete_command`` callbacks so the class stays toolkit-agnostic.
    ``self.displayed`` tracks whether a canvas item currently exists.
    """

    def __init__(self, point1: global_variables.get_value('Point'), point2: global_variables.get_value('Point'),
                 create_line_command, delete_command, show: bool = True):
        self.point1 = point1
        self.point2 = point2
        if self.point1 == self.point2:
            raise ValueError('A line must have two different points.')
        # Reject duplicates regardless of the order the points were given in.
        for line_ in global_variables.get_value('lines'):
            if (line_.point1 == point1 and line_.point2 == point2) or (
                    line_.point2 == point1 and line_.point1 == point2):
                raise ValueError('Line already exists.')
        self.points = [point1, point2]
        self.name = point1.name + point2.name
        self.create_line_command = create_line_command
        if show:
            self.line = create_line_command(point1.x, point1.y, point2.x, point2.y)
        self.displayed = show
        self.delete_command = delete_command

    def hide(self):
        """Remove the drawn line from the canvas."""
        self.delete_command(self.line)
        self.displayed = False

    def show(self):
        """Draw the line if it is not already displayed."""
        if not self.displayed:
            self.line = self.create_line_command(self.point1.x, self.point1.y, self.point2.x, self.point2.y)
            # BUGFIX: was False, which let repeated show()/refresh() calls
            # create duplicate canvas items for the same line.
            self.displayed = True

    def refresh(self):
        """Redraw the line (e.g. after one of its points moved)."""
        self.hide()
        self.show()

    def highlight(self, unhighlighted_others=False):
        """Redraw this line in red; optionally unhighlight all other shapes first."""
        if not unhighlighted_others:
            for angle_ in global_variables.get_value('angles'):
                angle_.unhighlight()
            for line_ in global_variables.get_value('lines'):
                line_.unhighlight()
        self.hide()
        self.line = self.create_line_command(self.point1.x, self.point1.y, self.point2.x, self.point2.y, fill='red')
        self.displayed = True

    def unhighlight(self):
        """Redraw the line in its normal (non-highlighted) style."""
        self.hide()
        # Points are set to None by delete_line(); skip redrawing a deleted line.
        if self.point1 and self.point2:
            self.show()
def line(point1: global_variables.get_value('Point'), point2: global_variables.get_value('Point'), create_line_command,
         delete_command, show: bool = True):
    """Create a Line, register it in the global line list, refresh angles, and return it."""
    line_ = Line(point1, point2, create_line_command, delete_command, show)
    global_variables.get_value('lines').append(line_)
    # a new line may create new angles/intersections with existing lines
    global_variables.get_value('refresh_angles')()
    return line_
def delete_line(line_: Line):
    """Remove *line_* from the canvas and the global registry, then refresh angles."""
    # Clear the endpoints first so unhighlight()/show() won't redraw a dead line.
    line_.point1 = None
    line_.point2 = None
    line_.hide()
    del global_variables.get_value('lines')[global_variables.get_value('lines').index(line_)]
    # BUGFIX: refresh_angles lives in the registry (see line() above);
    # `global_variables.refresh_angles()` would raise AttributeError.
    global_variables.get_value('refresh_angles')()
def refresh_line(line_: Line):
    """Redraw *line_*; thin module-level wrapper around Line.refresh()."""
    line_.refresh()
def get_line_by_name(name: str):
    """Return the registered line whose name matches *name*, or None (implicitly) if absent."""
    for line_ in global_variables.get_value('lines'):
        if line_.name == name:
            return line_
|
from __future__ import absolute_import, print_function
import logging
from collections import defaultdict
from sage.all import (
gcd,
vector,
matrix,
QQ,
Polyhedron,
cartesian_product,
Permutation,
Permutations,
)
from . import dual_root_primitive
from .disk_cache import disk_cache
__all__ = [
"rect_tableaux",
"cubicle",
"cubicle_tableaux",
"is_dominant",
"is_extremal_edge",
"is_extremal_edge_ieq",
"extremal_edges",
"perms_of_length",
"length_tuples",
"is_shuffle",
"is_antishuffle",
"shuffles",
"antishuffles",
"perm_action",
"StabilizerGroup",
]
logger = logging.getLogger(__name__)
@disk_cache
def rect_tableaux(a, b):
    r"""Return all rectangular standard Young tableaux of shape :math:`a \times b`.

    :param a: number of rows
    :param b: number of columns
    :rtype: list of :class:`sage.StandardTableau`
    """
    from sage.all import StandardTableaux
    # NOTE(review): each tableau is converted to a plain list of rows, so the
    # elements returned are lists of lists, not StandardTableau objects.
    return list(map(list, StandardTableaux([b] * a)))
def cubicle(T):
    r"""Return Sage :class:`sage.Polyhedron` representing the cubicle that corresponds to the given rectangular tableaux T (if any).

    This is the maximal-dimensional polytope defined by

    .. math::
        \{ (a,b) : a_i + b_j \leq a_k + b_l \Leftrightarrow T_{i,j} \geq T_{k,l} \}

    together with the equations :math:`\sum_i a_i = \sum_j b_j = 0`.

    :param T: a rectangular standard Young tableaux
    :type T: :class:`sage.StandardTableau`
    :rtype: :class:`sage.Polyhedron` or None, if the tableaux is not additive (i.e., does not correspond to a cubicle).
    """
    # assert rectangular shape
    a, b = len(T), len(T[0])
    assert all(len(row) == b for row in T), "T should be rectangular"
    # set up constraints for a+b-dimensional polyhedron
    # - traces should be zero
    eqns = [
        [0] + [1] * a + [0] * b,
        [0] + [0] * a + [1] * b,
    ]
    # - ordering constraints imposed by Weyl chamber (a and b weakly decreasing)
    ieqs = [[0] + [0] * i + [1, -1] + [0] * (a - 2 - i) + [0] * b for i in range(a - 1)]
    ieqs += [
        [0] + [0] * a + [0] * j + [1, -1] + [0] * (b - 2 - j) for j in range(b - 1)
    ]
    # - ordering constraints imposed by cubicle
    for i in range(a):
        for j in range(b):
            for k in range(a):
                for l in range(b):
                    if (i, j) != (k, l):
                        # larger tableau entry at (i,j) forces a_i + b_j <= a_k + b_l
                        if T[i][j] >= T[k][l]:
                            H = [0] * (a + b)
                            H[i] -= 1
                            H[a + j] -= 1
                            H[k] += 1
                            H[a + l] += 1
                            ieqs.append([0] + H)
    # build sage polyhedron
    P = Polyhedron(ieqs=ieqs, eqns=eqns)
    # compute codimension
    codim = a + b - P.dim()
    assert (
        codim >= 2
    ), "Expect codimension to be at least two (because of the two trace constraints)."
    # only maximal-dimensional (codim exactly 2) polytopes are genuine cubicles
    return P if codim == 2 else None
@disk_cache
def cubicle_tableaux(a, b):
    r"""Return list of tableaux corresponding to cubicles for :math:`a \times b`.

    :param a: number of rows
    :param b: number of columns
    :rtype: list of :class:`sage.StandardTableau`
    """
    # keep only the additive tableaux, i.e. those for which cubicle() is not None
    return [T for T in rect_tableaux(a, b) if cubicle(T)]
def is_dominant(v):
    r"""Determine if vector :math:`v` is dominant, i.e., :math:`v_0 \geq v_1 \geq \dots`.

    :param v: vector to test.
    :rtype: bool.
    """
    seq = list(v)
    # dominant == weakly decreasing: every adjacent pair is ordered
    return all(a >= b for a, b in zip(seq, seq[1:]))
def is_extremal_edge(
    dims, V, assert_dominant=True, assert_primitive=True, assert_traceless=True
):
    r"""Determine whether given vector is an extremal edge. That is, verify whether :math:`V=(H_1,\dots,H_n)` is

    - dominant
    - primitive
    - traceless
    - admissible for the system :math:`C(d_1,\dots,d_n)` of restricted roots for :math:`SU(d_1) \times \dots \times SU(d_n) \to SU(\prod_i d_i)`

    :param dims: dimensions :math:`d_1,\dots,d_n` of local unitary group.
    :param V: vector of length :math:`\sum_i d_i` to test.
    :param assert_dominant: verify that vector is dominant.
    :param assert_primitive: verify that vector is primitive.
    :param assert_traceless: verify that vector is traceless.
    """
    assert len(V) == sum(dims)
    # extract components and ensure that they are sorted and traceless
    vs = [list(V[sum(dims[:i]) : sum(dims[: i + 1])]) for i in range(len(dims))]
    if assert_dominant:
        assert all(is_dominant(v) for v in vs), "Expected dominant."
    else:
        # not asserting: sort each component into dominant order instead
        vs = [sorted(v, reverse=True) for v in vs]
    if assert_traceless:
        assert all(sum(v) == 0 for v in vs), "Expect trace to be zero."
    # check that V is primitive
    if assert_primitive:
        # primitivity in the dual root lattice <=> gcd of successive differences is 1
        diffs = [x - y for v in vs for (x, y) in zip(v, v[1:])]
        c = gcd(diffs)
        assert c == 1, (
            "Expect vector that is primitive in dual of root lattice, but gcd is %s" % c
        )
    # build dictionary storing all indices with same sum of components
    indices_by_sum = defaultdict(list)
    for indices in cartesian_product(list(map(range, dims))):
        s = sum(v[i] for (v, i) in zip(vs, indices))
        indices_by_sum[s].append(indices)
    # build corresponding equations
    # - traces equal to zero
    eqns = []
    for i in range(len(dims)):
        before = sum(dims[:i])
        after = sum(dims[i + 1 :])
        eqn = vector([0] * before + [1] * dims[i] + [0] * after)
        eqns.append(eqn)
    # - equality constraints (a_i + b_j + ... == a_k + b_l + ...)
    for indices in indices_by_sum.values():
        for idx1, idx2 in zip(indices, indices[1:]):
            eqn = vector(QQ, sum(dims))
            for i in range(len(dims)):
                offset1 = sum(dims[:i]) + idx1[i]
                offset2 = sum(dims[:i]) + idx2[i]
                eqn[offset1] += 1
                eqn[offset2] -= 1
            eqns.append(eqn)
    # compute dimension of kernel: V is an extremal edge iff it spans the kernel
    codim = sum(dims) - matrix(eqns).rank()
    assert (
        codim >= 1
    ), "Kernel should have dimension at least one (since the given vector is contained in it by construction)."
    return codim == 1
def is_extremal_edge_ieq(
    dims, ieq, assert_dominant=True, assert_primitive=True, assert_traceless=True
):
    r"""Check whether given inequality corresponds to an extremal edge (see :func:`is_extremal_edge`).

    :param dims: dimensions :math:`d_1,\dots,d_n` of local unitary group.
    :param ieq: inequality :math:`(H,c)` to test. We require that :math:`c=0`.
    :param assert_dominant: verify that vector is dominant.
    :param assert_primitive: verify that vector is primitive.
    :param assert_traceless: verify that vector is traceless.
    """
    H, c = ieq
    assert len(H) == sum(dims)
    assert c == 0
    # extract parts
    hs = [list(H[sum(dims[:i]) : sum(dims[: i + 1])]) for i in range(len(dims))]
    # check that last component consists of negative partial sums
    h_last_expected = map(lambda xs: -sum(xs), cartesian_product(hs[:-1]))
    h_last = H[sum(dims[:-1]) :]
    # NOTE(review): the trailing comment suggests this check may once have
    # returned False instead of asserting -- confirm asserting is intended.
    assert sorted(h_last) == sorted(h_last_expected)  # return False
    # check if the first components form an extremal edge
    V = H[: sum(dims[:-1])]
    return is_extremal_edge(
        dims[:-1],
        V,
        assert_dominant=assert_dominant,
        assert_primitive=assert_primitive,
        assert_traceless=assert_traceless,
    )
@disk_cache
def _extremal_edges_bipartite(a, b, include_perms=True):
    r"""Returns extremal edges for :math:`a \times b`, i.e., the finite list of all vectors :math:`(H_A, H_B)` that are

    - dominant
    - primitive
    - admissible for the system :math:`C(a, b)` of restricted roots for :math:`SU(a) \times SU(b) \to SU(ab)`
    """
    # collect extremal edges: the rays of all cubicles, deduplicated via a set
    edges = set()
    Ts = rect_tableaux(a, b)
    for i, T in enumerate(Ts):
        if i % 100 == 0:
            logger.debug("computing extremal edges (%s/%s)", i + 1, len(Ts))
        P = cubicle(T)
        if P:
            edges |= {tuple(ray.vector()) for ray in P.rays()}
    # exclude permutations of the parties?
    if a == b and not include_perms:
        # canonicalize (H_A, H_B) by sorting the two halves, so swapped parties coincide
        def sort(H):
            H_A, H_B = H[:a], H[a:]
            H_A, H_B = sorted((H_A, H_B))
            return H_A + H_B
        edges = {sort(H) for H in edges}
    # make edges primitive in dual root lattice
    root_system = [["A", a - 1], ["A", b - 1]]
    return list(map(lambda e: dual_root_primitive(root_system, e), edges))
@disk_cache
def _extremal_edges_generic(dims, include_perms=True):
    """Generic implementation that works for any dimension tuple."""
    from sage.all import Subsets, Polyhedron, vector
    # NOTE: An alternative implementation could use solid standard Young tableaux a la http://www.math.rutgers.edu/~zeilberg/mamarim/mamarimhtml/ssyt.html
    # basis vector
    def e(i, d):
        v = [0] * d
        v[i] = 1
        return v
    # weight for given indices
    def omega(indices):
        return vector(sum(map(e, indices, dims), []))
    # compute all restricted roots (up to sign)
    basis = map(tuple, cartesian_product(list(map(range, dims))))
    restricted_roots = []
    for i, j in Subsets(basis, 2):
        alpha = tuple(omega(i) - omega(j))
        restricted_roots.append(alpha)
    # compute inequalities for Weyl chamber (each component weakly decreasing)
    ieqs = []
    for i in range(len(dims)):
        for j in range(dims[i] - 1):
            v = [0] * dims[i]
            v[j] = 1
            v[j + 1] = -1
            ieqs.append([0] + [0] * sum(dims[:i]) + v + [0] * sum(dims[i + 1 :]))
    # choose all possible subsets of equations
    num_eqns = sum(d - 1 for d in dims) - 1
    subsets = Subsets(restricted_roots, num_eqns)
    logger.debug(
        "%s restricted roots => %s subsets each containing %s",
        len(restricted_roots),
        subsets.cardinality(),
        num_eqns,
    )
    rays = set()
    for roots in subsets:
        # trace equations
        eqns = []
        for i in range(len(dims)):
            eqns.append(
                [-1] + [0] * sum(dims[:i]) + [1] * dims[i] + [0] * sum(dims[i + 1 :])
            )
        # add orthogonality constraints
        for root in roots:
            eqns.append((0,) + root)
        # if the space of solutions is one-dimensional, we have found an extremal edge
        P = Polyhedron(ieqs=ieqs, eqns=eqns)
        if P.dim() == 1:
            assert len(P.rays()) == 1
            rays.add(tuple(P.rays()[0]))
    logger.debug("%s distinct extreme rays found", len(rays))
    # remove permutations?
    if not include_perms:
        # keep one representative per orbit under the party-permutation stabilizer
        stab = StabilizerGroup(dims)
        ray_nfs = set()
        for ray in rays:
            H = [ray[sum(dims[:i]) : sum(dims[: i + 1])] for i in range(len(dims))]
            ray_nf = sum(stab.normal_form(H), ())
            ray_nfs.add(ray_nf)
        rays = ray_nfs
    # post-process edges
    # make edges primitive in dual root lattice
    root_system = [["A", d - 1] for d in dims]
    return list(map(lambda r: dual_root_primitive(root_system, r), rays))
def extremal_edges(dims, include_perms=True, algorithm=None):
    r"""Returns extremal edges for ``dims`` (see :func:`is_extremal_edge`).

    :param dims: dimensions :math:`d_1,\dots,d_n` of local unitary group.
    :param algorithm: ``None``, ``'bipartite'``, or ``'generic'``.
    :rtype: list of :class:`sage.vector`
    """
    # default: use the optimized bipartite implementation when possible
    if algorithm is None:
        algorithm = "bipartite" if len(dims) == 2 else "generic"
    if algorithm == "bipartite":
        assert len(dims) == 2
        return _extremal_edges_bipartite(dims[0], dims[1], include_perms=include_perms)
    if algorithm == "generic":
        return _extremal_edges_generic(dims, include_perms=include_perms)
    raise Exception('Unknown algorithm "%s"' % algorithm)
def perms_of_length(n, length):
    """Return all permutations in :math:`S_n` of the given length (i.e., with the specified number of inversion).

    This uses the algorithm in `<http://webhome.cs.uvic.ca/~ruskey/Publications/Inversion/InversionCAT.pdf>`_.

    :param n: specifies the permutation group :math:`S_n`.
    :param length: number of inversions.
    :rtype: list of :class:`sage.Permutation`
    """
    result = []
    def gen(S, l, suffix=[]):
        # S: remaining symbols (ascending); l: inversions still to place
        if l == 0:
            result.append(Permutation(S + suffix))
            return
        n = len(S)
        # bin = max inversions realizable by the remaining n-1 symbols
        bin = (n - 1) * (n - 2) / 2
        for i in range(n):
            # placing S[i] last among S contributes n-(i+1) inversions;
            # recurse only if the remainder l' stays within [0, bin]
            if n - (i + 1) <= l <= bin + n - (i + 1):
                x = S[i]
                gen(S[0:i] + S[i + 1 :], l - n + (i + 1), [x] + suffix)
    gen(S=list(range(1, n + 1)), l=length)
    return result
def length_tuples(dims, total):
    r"""Return integer tuples :math:`(\ell_1, ..., \ell_n)` such that

    - each component :math:`\ell_i` is in :math:`\{0,\dots,{d_i \choose 2}\}`
    - their sum is equal to ``total``

    :param dims: dimensions :math:`d_1,\dots,d_n`.
    :param total: total length
    :rtype: generator of tuples of integers
    """
    # compute maximal lengths ({d choose 2} = max inversions in S_d)
    max_lengths = [d * (d - 1) // 2 for d in dims]
    # enumerate the first n-1 components; the last one is forced by the total
    ranges = [range(0, min(m + 1, total + 1)) for m in max_lengths[:-1]]
    for most in cartesian_product(ranges):
        last = total - sum(most)
        if last >= 0 and last <= max_lengths[-1]:
            yield tuple(most) + (last,)
def is_shuffle(pi, v):
    r"""Check if the permutation ``pi`` is a shuffle with respect to the dominant element ``v``, i.e.,

    .. math::
        v_i = v_{i+1} \Rightarrow \pi_i < \pi_{i+1}.

    :param pi: the permutation :math:`\pi`.
    :param v: the dominant vector :math:`v`.
    :rtype: bool
    """
    assert is_dominant(v)
    for n in range(len(v) - 1):
        # only tied entries of v constrain the relative order in pi
        if v[n] == v[n + 1] and not pi[n] < pi[n + 1]:
            return False
    return True
def is_antishuffle(pi, v):
    r"""Check if the permutation ``pi`` is an antishuffle with respect to the dominant element ``v``, i.e.,

    .. math::
        v_i = v_{i+1} \Rightarrow \pi_i > \pi_{i+1}.

    :param pi: the permutation :math:`\pi`.
    :param v: the dominant vector :math:`v`.
    :rtype: bool
    """
    assert is_dominant(v)
    for n in range(len(v) - 1):
        # only tied entries of v constrain the relative order in pi
        if v[n] == v[n + 1] and not pi[n] > pi[n + 1]:
            return False
    return True
def shuffles(v, length):
    r"""Return all permutations in :math:`S_{\lvert v \rvert}` that are shuffles with respect to the dominant element ``v`` (see :func:`is_shuffle`) and have the desired length.

    :param v: the dominant vector :math:`v`.
    :param length: the desired length.
    :rtype: :class:`sage.Permutation`
    """
    result = []
    assert is_dominant(v)
    def gen(S, l, suffix=[]):
        # S: remaining symbols; l: inversions still to place (same scheme as perms_of_length)
        if not S and l == 0:
            result.append(Permutation(S + suffix))
            return
        n = len(S)
        # max inversions realizable by the remaining n-1 symbols
        bin = (n - 1) * (n - 2) / 2
        for i in range(n):
            if n - (i + 1) <= l <= bin + n - (i + 1):
                x = S[i]
                # filter non-shuffles
                # NOTE(review): n == len(S) here, so v[n-1]/v[n] index the boundary
                # between the chosen prefix and the suffix being built -- subtle.
                if suffix and v[n - 1] == v[n]:
                    # if pi(n) > pi(n + 1)
                    if x > suffix[0]:
                        continue
                gen(S[0:i] + S[i + 1 :], l - n + (i + 1), [x] + suffix)
    gen(S=list(range(1, len(v) + 1)), l=length)
    return result
def antishuffles(v, antilength):
    r"""Return all permutations in :math:`S_{\lvert v \rvert}` that are antishuffles with respect to the dominant element ``v`` (see :func:`is_antishuffle`) and have the desired antilength.

    :param v: the dominant vector :math:`v`.
    :param antilength: the desired antilength.
    :rtype: :class:`sage.Permutation`
    """
    d = len(v)
    # An antishuffle is the order-reversal (pi -> d+1-pi) of a shuffle of the same length.
    return [
        Permutation([d + 1 - pi[i] for i in range(d)])
        for pi in shuffles(v, length=antilength)
    ]
def perm_action(pi, v):
    r"""Left action of a permutation :math:`\pi \in S_n` on :math:`v \in \mathbb R^n`:

    .. math::
        (\pi \cdot v)_i = v_{\pi^{-1}(i)}

    i.e. :math:`(\pi \cdot v)_{\pi(i)} = v_i`.

    :param pi: the permutation :math:`\pi` (one-based, as a sequence).
    :param v: the vector :math:`v`.
    :rtype: tuple
    """
    # convert the one-based permutation to zero-based indices
    zero_based = [x - 1 for x in pi]
    assert len(zero_based) == len(v) and sorted(zero_based) == list(range(len(v)))
    permuted = [None] * len(zero_based)
    for src, dst in enumerate(zero_based):
        # component src of v moves to position pi(src)
        permuted[dst] = v[src]
    return tuple(permuted)
class StabilizerGroup(object):
    r"""Stabilizer group :math:`S_v \subseteq S_{\lvert v \rvert}` of a vector :math:`v`.

    :param v: the vector.
    """
    def __init__(self, v):
        #: The vector defining the stabilizer group.
        self.v = v
        #: The blocks of indices of components that are equal.
        self.blocks = defaultdict(list)
        for k, d in enumerate(v):
            self.blocks[d].append(k)
        # freeze into a plain list of index blocks
        self.blocks = list(self.blocks.values())
    def normal_form(self, hs):
        """Returns the unique normal form of a vector ``hs`` in its orbit under the stabilizer group.

        :param hs: a vector of the same length as :attr:`v`.
        :rtype: tuple
        """
        assert len(hs) == len(self.v)
        hs_nf = [None] * len(self.v)
        for indices in self.blocks:
            # sort the components within each block of equal v-entries
            # hs[indices] = sorted(hs[indices])
            group = sorted([hs[k] for k in indices])
            for j, k in enumerate(indices):
                hs_nf[k] = group[j]
        return tuple(hs_nf)
    def orbit(self, hs_iterable):
        """Returns the orbit of a vectors ``hs_iterable`` under the stabilizer group.

        :param hs_iterable: a collection of vectors
        :rtype: set of tuples
        """
        # generate all permutations of indices (one Permutations object per block)
        blocks_perms = cartesian_product(list(map(Permutations, self.blocks)))
        # apply all permutations
        hs_perm = [None] * len(self.v)
        orbit = set()
        for blocks_perm in blocks_perms:
            for hs in hs_iterable:
                # permute according to (idx1 -> idx2) in each block
                for (idx1, idx2) in zip(self.blocks, blocks_perm):
                    for (i, j) in zip(idx1, idx2):
                        hs_perm[j] = hs[i]
                orbit.add(tuple(hs_perm))
        return orbit
|
import discord
from discord.ext import commands
from sys import argv
class Links:
    """
    Commands for easily linking to projects.
    """
    def __init__(self, bot):
        # keep a reference to the bot for use in command callbacks
        self.bot = bot
        print('Addon "{}" loaded'.format(self.__class__.__name__))
    @commands.command()
    async def builds(self):
        """test"""
        # NOTE(review): written against pre-1.0 discord.py (bot.say, command
        # without a Context parameter) -- confirm the pinned library version.
        await self.bot.say("http://github.com/thomleg50/Atmosphere/releases")
def setup(bot):
    """discord.py extension entry point: register the Links cog on the bot."""
    bot.add_cog(Links(bot))
|
# Split an e-mail address typed by the user into username and domain.
email = input("Please enter your email id: ").strip()
at_pos = email.index("@")  # raises ValueError if no '@' is present, as before
user_name = email[:at_pos]
domain_name = email[at_pos + 1:]
output = f"Your username is {user_name} and your domain name is {domain_name}"
print(output)
|
# prefix our test function names with test_
def sum_nums(x, y):
    """Return the sum of x and y."""
    return x + y
def test_sum_nums():
    """Verify that sum_nums adds two integers."""
    assert sum_nums(2,3) == 5
|
#!/usr/bin/env python
# Copyright 2013,2016 The Font Bakery Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import unittest
import os
from fontTools.ttLib import TTFont
script = __import__("fontbakery-nametable-from-filename")
class NameTableFromTTFName(unittest.TestCase):
    def _font_renaming(self, f_path):
        """The test fonts have been generated from Glyphsapp and conform
        to the googlefonts nametable spec. The test should pass if the new
        nametable matches the test font's name table."""
        fonts_paths = [os.path.join(f_path, f) for f in os.listdir(f_path)
                       if '.ttf' in f]
        for font_path in fonts_paths:
            font = TTFont(font_path)
            old_nametable = font['name']
            new_nametable = script.nametable_from_filename(font_path)
            for field in script.REQUIRED_FIELDS:
                if old_nametable.getName(*field):
                    enc = old_nametable.getName(*field).getEncoding()
                    # NOTE(review): str(...).decode(enc) only works on Python 2,
                    # where str is bytes -- confirm this suite targets Python 2.
                    self.assertEqual(
                        str(old_nametable.getName(*field)).decode(enc),
                        str(new_nametable.getName(*field)).decode(enc),
                    )
    def test_nunito_renaming(self):
        """Nunito Chosen because it has another family Nunito Heavy and a lot
        of weights"""
        f_path = os.path.join('data', 'test', 'nunito')
        self._font_renaming(f_path)
    def test_cabin_renaming(self):
        """Cabin chosen because it has a seperate Condensed family"""
        f_path = os.path.join('data', 'test', 'cabin')
        self._font_renaming(f_path)
    def test_glyphsapp_family_sans_export(self):
        """The ultimate test. Can this naming tool repoduce Google Font's
        Naming schema.
        Source repo here: https://github.com/davelab6/glyphs-export"""
        f_path = os.path.join('data', 'test', 'familysans')
        self._font_renaming(f_path)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
""" use unittesting module as testing framework"""
import unittest
import json
from app.api.v1.models.auth import userModel
from app.api.v1.models.category import categoryModel
from app.api.v1.models.product import productModel
from app.api.v1.models.sales import salesModel
from .dummy_data import users, category, products, sales
from ... import create_app
class BaseTest(unittest.TestCase):
    """
    Base Class for all tests to inherit
    """
    def setUp(self):
        """Create the test app, log in an attendant and an admin, and seed fixtures."""
        self.app = create_app(config_name='testing')
        self.client = self.app.test_client()
        # register + log in a regular attendant (users[0]); ignore duplicate-register errors
        self.client.post('api/v1/register', data=json.dumps(users[0]),
                         content_type='application/json')
        user = self.client.post('api/v1/login', data=json.dumps(users[0]),
                                content_type='application/json')
        attendant_token = user.get_json().get('access_token')
        # register + log in an admin (users[9]); ignore duplicate-register errors
        self.client.post('api/v1/register', data=json.dumps(users[9]),
                         content_type='application/json')
        user = self.client.post('api/v1/login', data=json.dumps(users[9]),
                                content_type='application/json')
        admin_token = user.get_json().get('access_token')
        self.admin_headers = {
            'Authorization': 'Bearer {}'.format(admin_token),
            'Content-Type': 'application/json'}
        self.attendant_headers = {
            'Authorization': 'Bearer {}'.format(attendant_token),
            'Content-Type': 'application/json'
        }
        # create a test category
        categoryModel.add_category(category[0])
        # create a test prodct
        productModel.add_product(products[0])
        # create a test sale
        salesModel.add_sales(sales[0])
    def tearDown(self):
        """Drop all seeded tables so each test starts from a clean slate."""
        with self.app.app_context():
            categoryModel.drop()
            productModel.drop()
            salesModel.drop()
            userModel.drop()
|
import string
from pyawsstarter import LambdaBaseEnv
from .random_prefix_service import RandomPrefixService
from examplecommon import WordService
class WordsNotFoundException(Exception):
    """Raised when the word service returns no words for a prefix."""
    pass
class WordStep(LambdaBaseEnv):
    """Lambda step that looks up words starting with a (possibly random) prefix."""

    _letters = string.ascii_lowercase
    _number_letters = len(string.ascii_lowercase)

    def __init__(self):
        # Declare and validate the required environment parameters up front.
        super().__init__(
            {
                'MIN_CHARS': int,
                'MAX_CHARS': int,
                'WORD_SERVICE_BASE': str
            }
        )
        self._random_prefix_service = RandomPrefixService(
            self.get_parameter('MIN_CHARS'),
            self.get_parameter('MAX_CHARS')
        )
        self._word_service = WordService(self.get_parameter('WORD_SERVICE_BASE'))

    def handle(self, event, context) -> dict:
        """Resolve a prefix, fetch matching words, and merge them into the event.

        :raises WordsNotFoundException: when the word service returns no words.
        """
        # uses prefix if given, else create a random one
        prefix = event.get('prefix') or self._random_prefix_service.random_prefix()
        self._logger.info('Searching for word prefix', prefix=prefix)
        words = self._word_service.starts_with(prefix)
        if not words:
            # BUGFIX: the format placeholder was missing, so the prefix never
            # appeared in the raised error message.
            raise WordsNotFoundException('No words found for prefix: {}'.format(prefix))
        event.update(
            {
                'startswith': {
                    'prefix': prefix,
                    'words': words
                }
            }
        )
        return event
|
"""
@author: Chandan Sharma
@GitHub: https://github.com/devchandansh/
"""
"""
================================================================
A Robot moves in a Plane starting from the origin point (0,0).
The robot can move toward UP, DOWN, LEFT, RIGHT.
The trace of Robot movement is as given following:
Assumed the fixed movements:
UP 5
DOWN 3
LEFT 3
RIGHT 2
The numbers after directions are steps.
This program computes the distance of the robot's current position from the origin after a sequence of movements.
================================================================
"""
import math
class Robot:
    """Robot moving on a 2-D plane starting at the origin, recording every position."""

    def __init__(self):
        """ Constructor Function """
        self.start_point = None
        self.start_point_x = 0
        self.start_point_y = 0
        # fixed default step sizes for each direction
        self.steps_up = 5
        self.steps_down = 3
        self.steps_left = 3
        self.steps_right = 2
        self.coordinate_points = []
        # set the start Point
        self.start_point = (self.start_point_x, self.start_point_y)
        self.coordinate_points.append(self.start_point)

    def getStartPoint(self):
        """ Get the Starting Point of Robot """
        print("Start Point:", self.start_point)
        return self.start_point

    def getCoordinates(self):
        """ Return every coordinate visited so far, including the start point. """
        return self.coordinate_points

    def moveUp(self, steps=None):
        """ To Move Up (defaults to self.steps_up) """
        if steps is None:
            steps = self.steps_up
        self.start_point_y += steps
        self.coordinate_points.append((self.start_point_x, self.start_point_y))
        print("Move Up:", steps)

    def moveDown(self, steps=None):
        """ To Move Down (defaults to self.steps_down) """
        if steps is None:
            steps = self.steps_down
        self.start_point_y -= steps
        self.coordinate_points.append((self.start_point_x, self.start_point_y))
        print("Move Down:", steps)

    def moveLeft(self, steps=None):
        """ To Move Left (defaults to self.steps_left) """
        if steps is None:
            steps = self.steps_left
        self.start_point_x -= steps
        self.coordinate_points.append((self.start_point_x, self.start_point_y))
        print("Move Left:", steps)

    def moveRight(self, steps=None):
        """ To Move Right (defaults to self.steps_right) """
        if steps is None:
            steps = self.steps_right
        self.start_point_x += steps
        self.coordinate_points.append((self.start_point_x, self.start_point_y))
        print("Move Right:", steps)

    def get_position(self):
        """ Get the Current Position """
        return (self.start_point_x, self.start_point_y)

    def get_distance(self):
        """ Straight-line distance from the start point to the current position. """
        # Starting Point
        start_x, start_y = self.start_point
        # Ending Point (last recorded coordinate)
        end_x, end_y = self.coordinate_points[-1]
        # math.hypot is the idiomatic Euclidean norm (same value as sqrt(dx^2+dy^2))
        distance = math.hypot(end_x - start_x, end_y - start_y)
        print("---------------------------")
        print("Distance Calculation:")
        print("Starting Point: ", self.start_point)
        print("Coordinates Points are: ", self.coordinate_points)
        print("distance:", distance)
        print("---------------------------")
        return distance
# Execute the Program:
# Demonstration run using the fixed default step sizes (5 up, 3 down, 3 left, 2 right).
objRobot = Robot()
objRobot.moveUp()
objRobot.moveDown()
objRobot.moveLeft()
objRobot.moveRight()
distance = objRobot.get_distance()
print(distance)
"""
===========================================
# Output:
===========================================
Move Up: 5
Move Down: 3
Move Left: 3
Move Right: 2
---------------------------
Distance Calculation:
Starting Point: (0, 0)
Coordinates Points are: [(0, 0), (0, 5), (0, 2), (-3, 2), (-1, 2)]
distance: 2.23606797749979
---------------------------
2.23606797749979
""" |
# -*- coding: utf-8 -*-
from website.citations.providers import CitationsProvider
from addons.zotero.serializer import ZoteroSerializer
class ZoteroCitationsProvider(CitationsProvider):
    """Citations provider wired to the Zotero addon."""

    serializer = ZoteroSerializer
    provider_name = 'zotero'

    def _folder_to_dict(self, data):
        """Flatten a Zotero collection payload into the provider's folder dict."""
        payload = data['data']
        key = payload.get('key')
        # Zotero's collection key doubles as both the list id and the generic id.
        return {
            'name': payload.get('name'),
            'list_id': key,
            'parent_id': payload.get('parentCollection'),
            'id': key,
        }
|
# C2SMART Lab, NYU
# NCHRP 03-137
# @file HB_Ext_Online.py
# @author Fan Zuo
# @author Di Yang
# @date 2020-10-18
import pandas as pd
import numpy as np
from shapely.geometry import Polygon
import math
import time
import multiprocessing as mp
def main(dataset, hb_thr):
    """Extract hard-braking events from a TCA trajectory file.

    Keyword arguments:
    >>> dataset: path to the trajectory CSV file generated by TCA.
    >>> hb_thr: the hard braking threshold (fpss), float.

    Writes the filtered events to "HB_Online_<dataset stem>.csv".
    """
    # Read only the columns needed for hard-braking detection.
    df = pd.read_csv(dataset, usecols=['Vehicle_ID', 'transtime', 'X', 'Y', 'Speed', 'Heading', 'Avg_Acceleration'])
    df = df.sort_values(by=['transtime', 'Vehicle_ID'])
    print("Before drop duplicates, data size is:", len(df))
    df = df.drop_duplicates(subset=['X', 'Y', 'Speed', 'Heading', 'transtime'], keep="first")
    print("After drop duplicates, data size is:", len(df))
    df.index = pd.RangeIndex(start=0, stop=len(df), step=1)
    df['HB'] = df['Avg_Acceleration']
    # Flag hard braking: deceleration beyond the threshold; values <= -200
    # are treated as invalid sensor readings and excluded.
    df['HB'] = df['HB'].apply(lambda x: 1 if (x < -hb_thr and x > -200) else np.nan)
    df = df.dropna(subset=['HB'])
    df = df.drop(columns=['Speed', 'Heading'])
    df = df.sort_values(by=['Vehicle_ID', 'transtime'])
    df.index = pd.RangeIndex(start=0, stop=len(df), step=1)
    # BUG FIX: derive the output name from the `dataset` parameter instead of the
    # module-level `traj_file` global, so main() works when called as a function.
    df.to_csv("HB_Online_" + dataset[:-4] + ".csv")
if __name__ == "__main__":
    # Interactive entry point: prompt for the trajectory file and threshold.
    program_st = time.time()
    print("******************* Start Program *******************")
    print("Start time %s" % (time.strftime('%X', time.localtime(program_st))))
    traj_file = input("Please input the name of the trajectory file(*.csv):")
    # BUG FIX: the original applied "{:.5f}".format() to the raw input string,
    # which raises ValueError (format code 'f' is invalid for str). Convert to
    # float first, then round to 5 decimals and take the absolute value.
    hb_threshold = abs(round(float(input("Please input the hard braking threshold(fpss, float): ")), 5))
    main(traj_file, hb_threshold)
    ed_time = time.time()
    print("End time %s (%f)" % (time.strftime('%X', time.localtime(ed_time)), (ed_time - program_st)))
    print("******************* End Program *******************")
import os
import pickle
data_dir = os.path.dirname(os.path.realpath(__file__))
def get_file(name):
    """Return the absolute path of *name* inside the data directory, or None if absent."""
    candidate = os.path.join(data_dir, name)
    # Only hand back paths that actually exist on disk.
    return candidate if os.path.exists(candidate) else None
def load_predict_model_from_pkl(filename):
    '''Return the `predict` bound method of the pickled model, or None if the file is missing.

    The pickled models are assumed to have a predict method.
    '''
    path = get_file(filename)
    if path is None:
        return None
    with open(path, 'rb') as fh:
        model = pickle.load(fh)
    return model.predict
def load_transform_model_from_pkl(filename):
    '''Return the `transform` bound method of the pickled model, or None if the file is missing.

    The pickled models are assumed to have a transform method.
    '''
    path = get_file(filename)
    if path is None:
        return None
    with open(path, 'rb') as fh:
        model = pickle.load(fh)
    return model.transform
|
from urllib.parse import urlparse
from flask import session
from app.models import User
from app.models import TwoFactor
def is_two_factor_enabled() -> bool:
    """Return True when the session's user has a TwoFactor row in the database."""
    record = TwoFactor.query.filter_by(
        user_idx=session['user']['idx']
    ).first()
    # A row existing at all means 2FA is enabled for this account.
    return record is not None
def is_two_factor_passed() -> bool:
    """Return True when 2FA is either disabled for the user or already completed."""
    state = session.get("two_factor", None)
    # No 2FA bookkeeping in the session at all -> not passed.
    if state is None:
        return False
    # 2FA disabled for this account -> treated as passed.
    if not is_two_factor_enabled():
        return True
    # 2FA enabled: only the recorded verification result counts.
    return state['passed']
def is_login(no_two_factor: bool = False) -> bool:
    """Return True when the session maps to a real user.

    Unless *no_two_factor* is set, the two-factor check must also have passed.
    """
    # No user entry cached in the session -> not logged in.
    if session.get("user", None) is None:
        return False
    # Re-fetch the user from the database so stale sessions are detected.
    user = User.query.filter_by(
        idx=session['user']['idx']
    ).first()
    if user is None:
        # The account vanished: wipe the entire session and reject.
        for key in list(session.keys()):
            del session[key]
        return False
    # Refresh the cached profile data from the database row.
    session['user'] = {
        "idx": user.idx,
        "email": user.email,
        "nickname": user.nickname,
    }
    # Either skip the 2FA requirement entirely, or require it to have passed.
    return True if no_two_factor else is_two_factor_passed()
def url_verifier(url: str, fallback: str = "") -> str:
    """Return *url* if its scheme is http(s); otherwise return *fallback*.

    Guards redirect targets against e.g. javascript:/data: style schemes.
    """
    parsed = urlparse(url=url)
    # Anything outside plain web schemes is rejected.
    if parsed.scheme not in ("http", "https"):
        return fallback
    return parsed.geturl()
|
from functools import partial
from typing import Callable, Dict, Tuple
import jax
import jax.numpy as jnp
@partial(jax.jit, static_argnums=(0))
def marginal_likelihood(
    prior_params: Tuple[Callable, Callable],
    params: Dict,
    Xtrain: jnp.ndarray,
    Ytrain: jnp.ndarray,
) -> float:
    """Negative log marginal likelihood of a GP with Gaussian observation noise.

    prior_params: (mean_fn, kernel_fn) where kernel_fn(params, X1, X2) returns
    the Gram matrix; held static for jit so the callables are traced once.
    """
    mean_fn, kernel_fn = prior_params
    # GP prior moments. NOTE: the mean function is evaluated on the targets,
    # mirroring the original implementation.
    prior_mean = mean_fn(Ytrain)
    gram = kernel_fn(params, Xtrain, Xtrain)
    # Observation noise (plus a small jitter for numerical stability) on the diagonal.
    noisy_gram = gram + (params["likelihood_noise"] + 1e-6) * jnp.eye(gram.shape[0])
    # Log density of the targets under N(prior_mean, noisy_gram).
    log_density = jax.scipy.stats.multivariate_normal.logpdf(
        x=Ytrain.T, mean=prior_mean, cov=noisy_gram
    )
    return -log_density.sum()
|
import _init_paths
import argparse
import yaml
import os.path as osp
from attrdict import AttrDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets.mini_imageNet import MiniImageNet
from utils.samplers import CategoriesSampler
from model.convnet import Convnet
from utils.utils import pprint, set_gpu, ensure_path, Averager, Timer, count_acc, euclidean_metric, copyModel, setup_seed
def getDataloader():
    """Build the episodic train/val DataLoaders from the module-level `cfg`."""
    def _episodic_loader(split, n_batch, way):
        # One "batch" is an episode: `way` classes x (shot + query) samples.
        ds = MiniImageNet(cfg.datapath, split)
        sampler = CategoriesSampler(ds.label, n_batch,
                                    way, cfg.train.shot + cfg.train.query)
        return DataLoader(dataset=ds, batch_sampler=sampler,
                          num_workers=16, pin_memory=True)

    train_loader = _episodic_loader('train', 100, cfg.train.train_way)
    val_loader = _episodic_loader('val', 400, cfg.train.test_way)
    return train_loader, val_loader
def initConfig():
    """Load the YAML config given by --config, seed the RNGs and select GPUs.

    Returns the config as an AttrDict.
    """
    with open(args.config, 'r') as f:
        # BUG FIX / security: yaml.load() without an explicit Loader is rejected
        # by PyYAML >= 6 and can instantiate arbitrary Python objects on older
        # versions; safe_load is sufficient for a plain config file.
        cfg = AttrDict(yaml.safe_load(f))
    if not args.resume:
        ensure_path(cfg.save_path)
    # A seed given on the command line wins over the one in the config file.
    if args.seed is not None:
        setup_seed(args.seed)
    else:
        setup_seed(cfg.seed)
    set_gpu(cfg.train.gpu)
    return cfg
def initTrain(isResume=False):
    """Create the model, optimizer, scheduler and training log.

    When *isResume* is True, all of them are restored from the last checkpoint.
    Returns (model, optimizer, lr_scheduler, start_epoch, trlog).
    """
    model = nn.DataParallel(Convnet()).cuda()
    start_epoch = 1
    trlog = {
        'args': vars(args),
        'cfg': vars(cfg),
        'train_loss': [],
        'val_loss': [],
        'train_acc': [],
        'val_acc': [],
        'max_acc': 0.0,
    }
    checkpoint = None
    if isResume:
        checkpoint = torch.load(osp.join(cfg.save_path, "epoch-last.pth"))
        # BUG FIX: restore the model weights BEFORE building the optimizer, so
        # the optimizer tracks the restored model's parameters. The original
        # created Adam over a model that was then replaced, leaving the
        # optimizer updating tensors that no longer belonged to `model`.
        model = copyModel(checkpoint["model"], nn.DataParallel(Convnet())).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    if checkpoint is not None:
        optimizer.load_state_dict(checkpoint["optimizer"])
        lr_scheduler.load_state_dict(checkpoint["lr_schedule"])
        start_epoch = checkpoint["epoch"] + 1
        trlog = torch.load(osp.join(cfg.save_path, 'trlog'))
    return model, optimizer, lr_scheduler, start_epoch, trlog
def train_epoch(cfg, model, optimizer, epoch, train_loader):
    """Run one episodic (prototypical-network style) training epoch.

    Returns (mean loss, mean accuracy) over the epoch's episodes.
    """
    model.train()
    tl = Averager()  # running mean of the episode losses
    ta = Averager()  # running mean of the episode accuracies
    for i, batch in enumerate(train_loader, 1):
        data, _ = [_.cuda() for _ in batch]
        # The first shot*way samples of an episode are the support set,
        # the remainder are the query set.
        p = cfg.train.shot * cfg.train.train_way
        data_shot, data_query = data[:p], data[p:]
        # Class prototypes: mean embedding of each class's support shots.
        proto = model(data_shot)
        proto = proto.reshape(cfg.train.shot, cfg.train.train_way, -1).mean(dim=0)
        # Query labels: class index repeated once per query sample.
        label = torch.arange(cfg.train.train_way).repeat(cfg.train.query)
        label = label.type(torch.cuda.LongTensor)
        # Classify each query by its (negative squared) distance to the prototypes.
        logits = euclidean_metric(model(data_query), proto)
        loss = F.cross_entropy(logits, label)
        acc = count_acc(logits, label)
        print('epoch {}, train {}/{}, loss={:.4f} acc={:.4f}'
        .format(epoch, i, len(train_loader), loss.item(), acc))
        tl.add(loss.item())
        ta.add(acc)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Drop tensor references promptly so GPU memory is released each episode.
        p = None; proto = None; logits = None; loss = None
    tl = tl.item()
    ta = ta.item()
    return tl, ta
def eval(cfg, model, val_loader):
    """Evaluate the model on the episodic validation loader.

    Returns (mean loss, mean accuracy). NOTE: the name shadows the builtin
    eval(); it is kept unchanged because train() calls it by this name.
    """
    model.eval()
    vl = Averager()
    va = Averager()
    # IMPROVEMENT: evaluation needs no gradients; no_grad() skips autograd
    # bookkeeping and reduces GPU memory use. Returned values are unchanged.
    with torch.no_grad():
        for i, batch in enumerate(val_loader, 1):
            data, _ = [_.cuda() for _ in batch]
            # Support set first, query set after (see train_epoch).
            p = cfg.train.shot * cfg.train.test_way
            data_shot, data_query = data[:p], data[p:]
            proto = model(data_shot)
            proto = proto.reshape(cfg.train.shot, cfg.train.test_way, -1).mean(dim=0)
            label = torch.arange(cfg.train.test_way).repeat(cfg.train.query)
            label = label.type(torch.cuda.LongTensor)
            logits = euclidean_metric(model(data_query), proto)
            loss = F.cross_entropy(logits, label)
            acc = count_acc(logits, label)
            vl.add(loss.item())
            va.add(acc)
            # Drop tensor references promptly to release GPU memory.
            p = None; proto = None; logits = None; loss = None
    vl = vl.item()
    va = va.item()
    return vl, va
def train(cfg, model, optimizer, lr_scheduler, train_loader, val_loader, start_epoch=1):
    """Main train/validate loop with per-epoch checkpointing.

    NOTE(review): reads the module-level `trlog` dict rather than receiving it
    as a parameter - confirm initTrain() always runs first to initialize it.
    """
    def save_model(name):
        # Bundle everything needed to resume: weights, optimizer, scheduler, epoch.
        checkpoint = {
            "model": model.module.state_dict(),
            'optimizer': optimizer.state_dict(),
            "epoch": epoch,
            'lr_schedule': lr_scheduler.state_dict()
        }
        torch.save(checkpoint, osp.join(cfg.save_path, name + '.pth'))
        # torch.save(model.module.state_dict(), osp.join(cfg.save_path, name + '.pth'))
    timer = Timer()
    for epoch in range(start_epoch, cfg.train.max_epoch + 1):
        # torch.cuda.empty_cache()
        # NOTE(review): the scheduler is stepped before training each epoch;
        # recent torch versions expect step() after optimizer steps - confirm
        # this ordering matches the intended LR schedule.
        lr_scheduler.step()
        tl, ta = train_epoch(cfg, model, optimizer, epoch, train_loader)
        vl, va = eval(cfg, model, val_loader)
        print('epoch {}, val, loss={:.4f} acc={:.4f}'.format(epoch, vl, va))
        # Keep a separate checkpoint of the best validation accuracy so far.
        if va > trlog['max_acc']:
            trlog['max_acc'] = va
            save_model('max-acc')
        trlog['train_loss'].append(tl)
        trlog['train_acc'].append(ta)
        trlog['val_loss'].append(vl)
        trlog['val_acc'].append(va)
        torch.save(trlog, osp.join(cfg.save_path, 'trlog'))
        # 'epoch-last' always holds the most recent state for resuming.
        save_model('epoch-last')
        if epoch % cfg.train.save_epoch == 0:
            save_model('epoch-{}'.format(epoch))
        print('ETA:{}/{}'.format(timer.measure(), timer.measure((epoch-start_epoch + 1) / (cfg.train.max_epoch-start_epoch))))
if __name__ == '__main__':
    # Parse CLI options, load config, build/restore training state, then train.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default="exps/exp-v1/config.yaml")
    # NOTE(review): --seed arrives as a string (no type=int); confirm
    # setup_seed accepts it.
    parser.add_argument('--seed', default=None)
    parser.add_argument('--resume', action="store_true", default=False)
    args = parser.parse_args()
    pprint(vars(args))
    cfg = initConfig()
    model, optimizer, lr_scheduler, start_epoch, trlog = initTrain(args.resume)
    train_loader, val_loader = getDataloader()
    train(cfg, model, optimizer, lr_scheduler, train_loader, val_loader, start_epoch)
|
n=int(input()) #2. Snake: board size (n x n)
s=[]       # current snake head position as [row, col]
b=[]       # positions of the two burrow cells 'B' (teleport pair)
matrix=[]  # the game board, one list of chars per row
for j in range(n):
    row=list(input())
    matrix.append(row)
    if 'S' in row:
        s.append([j,row.index('S')])
    if 'B' in row:
        b.append([j,row.index('B')])
# Row/col deltas for each command read from input.
moves={'up':(-1,0),'down':(1,0),'left':(0,-1),'right':(0,1)}
food=0
while True:
    move=input()
    next_row=s[0][0]+moves[move][0]
    next_col = s[0][1] + moves[move][1]
    # Clear the cell the head is leaving.
    matrix[s[0][0]][s[0][1]]='.'
    if 0<=next_row<n and 0<=next_col<n:
        if matrix[next_row][next_col]=='*':
            # Ate food; win at 10 pieces.
            food+=1
            matrix[next_row][next_col]='S'
            s=[[next_row,next_col]]
            if food>=10:
                print("You won! You fed the snake.")
                break
        elif matrix[next_row][next_col]=='B':
            # Entered a burrow: come out at the other one.
            # NOTE(review): this compares cell VALUES ('B' == 'B', always true),
            # not positions, so the snake always exits at b[1] regardless of
            # which burrow it entered - confirm against the problem statement.
            if matrix[next_row][next_col]==matrix[b[0][0]][b[0][1]]:
                matrix[b[1][0]][b[1][1]]='S'
                s=[[b[1][0],b[1][1]]]
            else:
                matrix[b[0][0]][b[0][1]] = 'S'
                s = [[b[0][0], b[0][1]]]
                matrix[next_row][next_col] = '.'
        else:
            # Plain move onto an empty cell.
            matrix[next_row][next_col] = 'S'
            s=[[next_row,next_col]]
    else:
        # Head left the board: game over.
        print('Game over!')
        break
print(f'Food eaten: {food}')
print('\n'.join(''.join(i) for i in matrix))
# Public submodules re-exported by this package.
__all__ = ["interpreter", "analysis"]
import json
def shlex(command=None, shell=None):
    """Build an argv list that runs *command* under *shell* (default /bin/sh).

    NOTE: shadows the stdlib `shlex` module name; kept for caller compatibility.
    """
    argv = [shell or '/bin/sh']
    if command:
        argv += ['-c', command]
    return argv
|
import matplotlib.pyplot as plt
import time
from genetic.background_function import background_function
from vis.ScatterVisualizer import ScatterVisualizer
class Particle2DVis(ScatterVisualizer):
    """Scatter-plot animation of 2-D particles over a background fitness landscape."""

    def __init__(
        self,
        n: float,
        num_runs: int,
        interactive: bool = True,
        x_limit: float = 0.0,
        y_limit: float = 0.0,
        offset: float = 0.0,
        colour_bar: bool = True,
        func_name: str = "",
    ):
        # Configure the base scatter visualizer (axis limits, interactivity).
        super().__init__(
            interactive=interactive,
            xlim=x_limit,
            ylim=y_limit,
            offset=offset,
            log_scale=False,
        )
        self.set_limits(int(n))
        self.num_runs = num_runs
        self.colour_bar = colour_bar
        # Guard so the colorbar is only added once, on the first animate() call.
        self.colour_bar_set = False
        self.eval_steps = None
        # Sampled background landscape; presumably a 2-D array since .shape is
        # indexed below - confirm in genetic.background_function.
        self.bg_function = background_function(func_name, n)

    def animate(self, solution):
        """Redraw the background and move the scatter points to *solution*.

        solution: sequence of (x, y) particle positions.
        """
        # Centre the background image on the origin via the extent.
        plt.imshow(
            self.bg_function,
            extent=[
                -self.bg_function.shape[1] / 2.0,
                self.bg_function.shape[1] / 2.0,
                -self.bg_function.shape[0] / 2.0,
                self.bg_function.shape[0] / 2.0,
            ],
            cmap="viridis",
        )
        # First frame only: add the colorbar and set the marker sizes.
        if self.colour_bar and not self.colour_bar_set:
            plt.colorbar()
            point_size = 2.5
            self.my_plot.set_sizes([point_size] * len(solution))
            self.colour_bar_set = True
        self.my_plot.set_offsets(solution)
        self.fig.canvas.draw()
|
import iniparse
from ram.osutils import TrySubmit
# hack to force iniparse to return empty string for non-present keys.
setattr(iniparse.config, 'Undefined', lambda name, namespace: '')
from . import SyncedDict
class _IniConfig(SyncedDict):
    """SyncedDict backed by a single section of an INI file (via iniparse)."""

    def __init__(self, dirname, section, readonly, delblank=False, createns=False):
        # NOTE(review): the parameter is named `dirname` but is used as a file
        # path throughout - confirm and consider renaming at the call sites.
        self.filename = dirname
        self.section = section
        self.createns = createns  # create the section on read if it is missing
        try:
            self.ini_conf = iniparse.INIConfig(open(self.filename))
        except IOError:
            # Missing/unreadable file: start from an empty in-memory config.
            self.ini_conf = iniparse.INIConfig()
        super(_IniConfig, self).__init__(readonly=readonly, delblank=delblank)

    def __read__(self):
        # Expose the configured section as the dict's backing namespace.
        if not self.section in self.ini_conf._sections:
            if self.createns:
                self.ini_conf._new_namespace(self.section)
            else:
                raise IOError("Cannot find section `%s` in config file." % self.section)
        return self.ini_conf[self.section]

    def __sync__(self):
        # Serialize the whole INI document; TrySubmit returns falsy on failure.
        if not TrySubmit(self.filename, [str(self.ini_conf) + '\n']):
            raise IOError("Failed to update `%s`." % self.filename)
from . import configopen
cfgopen = configopen(_IniConfig, mergepath=False)
|
import json
import olefile
import pandas as pd
import time
import os
from .logstuff import get_logger
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}
logger = get_logger('cache_json')
def write_json_cache(cache, cache_filename):
    '''
    writes object to human readable JSON cache, forces to STR when necessary to seamlessly handle date objects
    :param cache: object to cache
    :param cache_filename: filename to use
    :return: nothing
    '''
    # FIX: context manager guarantees the file is closed even if json.dump
    # raises (the original leaked the handle on error).
    with open(cache_filename, 'w') as fp:
        # default=str stringifies non-JSON types (dates etc.) instead of raising.
        json.dump(cache, fp, indent=2, default=str)
    logger.debug(f"wrote to {cache_filename}")
def read_json_cache(cache_filename, max_cache_age=600):
    '''
    reads from specified filename, returns empty dictionary if the file is too old
    :param cache_filename: json filename
    :param max_cache_age: maximum age in seconds
    :return: object (usually a dictionary) read from cache, empty dictionary if file is not found or too old
    '''
    try:
        if os.path.isfile(cache_filename):
            # Age in seconds since the file was last written.
            age = time.time() - os.stat(cache_filename).st_mtime
        else:
            logger.debug(f"could not read a {cache_filename}")
            age = 999999999999999  # sentinel: force a cache miss
        if age < max_cache_age:
            # FIX: context manager closes the handle even if parsing raises.
            with open(cache_filename, 'r') as fp:
                cache = json.loads(fp.read())
            # BUG FIX: `age` is measured in seconds, not minutes.
            logger.debug(f"read {round(age,1)} sec old {cache_filename}")
        else:
            logger.debug(f"{round(age,1)} sec old {cache_filename} needs to be recreated")
            cache = {}
    except Exception as e:
        # Best-effort cache: any IO/parse error degrades to an empty cache.
        # (Dropped the duplicate print(e); the logger already records it.)
        logger.error(e)
        cache = {}
    return cache
#
# def xlsfile_to_df(local_filename):
# ole = olefile.OleFileIO(local_filename)
# df = pd.read_excel(ole.openstream('Workbook'), engine='xlrd')
# return df
#
#
# def safe_rm_file(local_filename):
# try:
# os.remove(local_filename)
# except OSError as e:
# print(f'failed to remove {local_filename} {e}')
|
from qtpy.QtCore import QObject, QLocale
from pymodaq.daq_utils.gui_utils.dock import DockArea
from pymodaq.daq_utils.managers.action_manager import ActionManager
from pymodaq.daq_utils.managers.parameter_manager import ParameterManager
from pyqtgraph.dockarea import DockArea
from qtpy import QtCore, QtWidgets
class CustomApp(QObject, ActionManager, ParameterManager):
    """
    Implements the MixIns ActionManager and ParameterManager methods and attributes, you have to subclass it and make
    concrete implementation of a given number of methods:
    * setup_actions: mandatory, see ActionManager
    * value_changed: non mandatory, see ParameterManager
    * child_added: non mandatory, see ParameterManager
    * param_deleted: non mandatory, see ParameterManager
    * setup_docks: mandatory
    * setup_menu: non mandatory
    * connect_things: mandatory
    """
    # custom signal that will be fired sometimes. Could be connected to an external object method or an internal method
    log_signal = QtCore.Signal(str)
    # list of dicts enabling the settings tree on the user interface
    params = []

    def __init__(self, dockarea: DockArea, dashboard=None):
        # Each base is initialized explicitly; QObject must come first.
        QObject.__init__(self)
        ActionManager.__init__(self)
        ParameterManager.__init__(self)
        # Force a dot decimal separator regardless of the system locale.
        QLocale.setDefault(QLocale(QLocale.English, QLocale.UnitedStates))
        if not isinstance(dockarea, DockArea):
            raise Exception('no valid parent container, expected a DockArea')
        self.dockarea = dockarea
        self.mainwindow = dockarea.parent()
        self.dashboard = dashboard
        self.docks = dict([])
        self.statusbar = None
        self._toolbar = QtWidgets.QToolBar()
        # Toolbar/statusbar only exist when the dock area lives in a main window.
        if self.mainwindow is not None:
            self.mainwindow.addToolBar(self._toolbar)
            self.statusbar = self.mainwindow.statusBar()
        self.set_toolbar(self._toolbar)

    def setup_ui(self):
        # Template method: fixed setup order for subclasses.
        self.setup_docks()
        self.setup_actions()  # see ActionManager MixIn class
        self.setup_menu()
        self.connect_things()

    def setup_docks(self):
        """
        Mandatory method to be subclassed to setup the docks layout
        for instance:
        self.docks['ADock'] = gutils.Dock('ADock name')
        self.dockarea.addDock(self.docks['ADock'])
        self.docks['AnotherDock'] = gutils.Dock('AnotherDock name')
        self.dockarea.addDock(self.docks['AnotherDock'], 'bottom', self.docks['ADock'])
        See Also
        ########
        pyqtgraph.dockarea.Dock
        """
        raise NotImplementedError

    def setup_menu(self):
        """
        Non mandatory method to be subclassed in order to create a menubar
        create menu for actions contained into the self._actions, for instance:
        For instance:
        file_menu = self._menubar.addMenu('File')
        self.affect_to('load', file_menu)
        self.affect_to('save', file_menu)
        file_menu.addSeparator()
        self.affect_to('quit', file_menu)
        See Also
        --------
        pymodaq.daq_utils.managers.action_manager.ActionManager
        """
        pass

    def connect_things(self):
        # Mandatory hook: wire actions/signals to slots in the subclass.
        raise NotImplementedError

    @property
    def modules_manager(self):
        # Implicitly returns None when no dashboard is attached.
        if self.dashboard is not None:
            return self.dashboard.modules_manager
|
from functools import reduce
# a dynamic programming solution
def calculate_largest_square_filled_first_approach(matrix):
    """
    A method that calculates the largest square filled with only 1's of a given binary matrix (not space-optimized).
    Problem description: https://practice.geeksforgeeks.org/problems/largest-square-formed-in-a-matrix/0
    time complexity: O(n*m)
    space complexity: O(n*m)
    Parameters
    ----------
    matrix : int[[]]
        a 2-dimensional list
    Returns
    -------
    int
        size of the largest square
    """
    rows = len(matrix)
    assert rows >= 1
    cols = len(matrix[0])
    assert cols >= 1
    # dp[i][j] = side of the largest all-ones square whose bottom-right corner is (i, j)
    dp = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        for j in range(cols):
            if matrix[i][j] != 0:
                if i == 0 or j == 0:
                    dp[i][j] = 1
                else:
                    dp[i][j] = 1 + min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1])
    # The answer is the largest square side recorded anywhere in the table.
    return max(max(row) for row in dp)
def calculate_largest_square_filled_space_optimized1(matrix):
    """
    A method that calculates the largest square filled with only 1's of a given binary matrix (space-optimized 1/2).
    Problem description: https://practice.geeksforgeeks.org/problems/largest-square-formed-in-a-matrix/0
    time complexity: O(n*m)
    space complexity: O(m)
    Parameters
    ----------
    matrix : int[[]]
        a 2-dimensional list
    Returns
    -------
    int
        size of the largest square
    """
    rows = len(matrix)
    assert rows >= 1
    cols = len(matrix[0])
    assert cols >= 1
    # Only two DP rows are kept: the previous row and the one being filled.
    prev = [0] * cols
    best = 0
    for i in range(rows):
        curr = [0] * cols
        for j in range(cols):
            if matrix[i][j] != 0:
                if i == 0 or j == 0:
                    curr[j] = 1
                else:
                    curr[j] = 1 + min(prev[j], curr[j - 1], prev[j - 1])
                best = max(best, curr[j])
        prev = curr
    return best
def _rolling_square_scan(cell, outer, inner):
    """Largest-square DP over a virtual `outer x inner` grid given by cell(i, j).

    Keeps only two rolling rows of length `inner`, so extra space is O(inner).
    """
    prev = [0] * inner
    best = 0
    for i in range(outer):
        curr = [0] * inner
        for j in range(inner):
            if cell(i, j) != 0:
                if i == 0 or j == 0:
                    curr[j] = 1
                else:
                    curr[j] = 1 + min(prev[j], curr[j - 1], prev[j - 1])
                best = max(best, curr[j])
        prev = curr
    return best


def calculate_largest_square_filled_space_optimized2(matrix):
    """
    A method that calculates the largest square filled with only 1's of a given binary matrix (space-optimized 2/2).
    Problem description: https://practice.geeksforgeeks.org/problems/largest-square-formed-in-a-matrix/0
    time complexity: O(n*m)
    space complexity: O(min(n,m))
    Parameters
    ----------
    matrix : int[[]]
        a 2-dimensional list
    Returns
    -------
    int
        size of the largest square
    """
    n = len(matrix)
    assert n >= 1
    m = len(matrix[0])
    assert m >= 1
    # IMPROVEMENT: the original duplicated the whole DP for the row-major and
    # column-major cases; both branches are now one helper parameterized by an
    # index accessor. Scanning along the longer dimension keeps the rolling
    # rows at length min(n, m) without materializing a transposed matrix.
    if n < m:
        return _rolling_square_scan(lambda i, j: matrix[j][i], m, n)
    return _rolling_square_scan(lambda i, j: matrix[i][j], n, m)
def calculate_largest_square_filled(matrix):
    """
    A method that calculates the largest square filled with only 1's of a given binary matrix.
    Problem description: https://practice.geeksforgeeks.org/problems/largest-square-formed-in-a-matrix/0
    time complexity: O(n*m)
    space complexity: O(min(n,m))
    Parameters
    ----------
    matrix : int[[]]
        a 2-dimensional list
    Returns
    -------
    int
        size of the largest square
    """
    # Public entry point: delegate to the most space-efficient implementation.
    return calculate_largest_square_filled_space_optimized2(matrix)
|
from __future__ import absolute_import
import os
import numpy as np
import pygame
import weakref
import carla
from carla import ColorConverter as cc
# Root directory for saved sensor output; override with the CARLA_OUT env var.
CARLA_OUT_PATH = os.environ.get("CARLA_OUT", os.path.expanduser("~/carla_out"))
if CARLA_OUT_PATH and not os.path.exists(CARLA_OUT_PATH):
    os.makedirs(CARLA_OUT_PATH)
class CameraManager(object):
    """This class from carla, manual_control.py

    Owns one active sensor at a time attached to a parent actor, renders the
    latest frame to a pygame surface and optionally records frames to disk or
    buffers them in memory.
    """

    def __init__(self, parent_actor, hud):
        self.image = None  # latest raw sensor frame; need image to encode obs.
        self.image_list = []  # frames buffered for saving later (memory-record mode).
        self.sensor = None
        self._surface = None
        self._parent = parent_actor
        self._hud = hud
        self._recording = False
        self._memory_record = False
        # TODO: Make the camera positioning configurable. Toggling is already
        # supported through toggle_camera
        self._camera_transforms = [
            carla.Transform(carla.Location(x=1.6, z=1.7)),
            carla.Transform(
                carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15))
        ]
        # 0 is dashcam view; 1 is tethered view
        self._transform_index = 0
        # Each entry: [blueprint id, color converter, display name, (blueprint appended below)]
        self._sensors = [
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB'],
            ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)'],
            ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)'],
            [
                'sensor.camera.depth', cc.LogarithmicDepth,
                'Camera Depth (Logarithmic Gray Scale)'
            ],
            [
                'sensor.camera.semantic_segmentation', cc.Raw,
                'Camera Semantic Segmentation (Raw)'
            ],
            [
                'sensor.camera.semantic_segmentation', cc.CityScapesPalette,
                'Camera Semantic Segmentation (CityScapes Palette)'
            ], ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)']
        ]
        world = self._parent.get_world()
        bp_library = world.get_blueprint_library()
        for item in self._sensors:
            bp = bp_library.find(item[0])
            if item[0].startswith('sensor.camera'):
                # Match the camera resolution to the HUD dimensions.
                bp.set_attribute('image_size_x', str(hud.dim[0]))
                bp.set_attribute('image_size_y', str(hud.dim[1]))
            item.append(bp)
        self._index = None
        self.callback_count = 0

    def __del__(self):
        # Best-effort cleanup of the spawned sensor actor.
        if self.sensor is not None:
            self.sensor.destroy()

    def set_recording_option(self, option):
        """Set class vars to select recording method.
        Option 1: save image to disk while the program runs.(Default)
        Option 2: save to memory first. Save to disk when program finishes.
        Args:
            option (int): record method.
        Returns:
            N/A.
        """
        # TODO: The options should be more verbose. Strings instead of ints
        if option == 1:
            self._recording = True
        elif option == 2:
            self._memory_record = True

    def toggle_camera(self):
        """Cycle between the dashcam and tethered camera transforms."""
        self._transform_index = (self._transform_index + 1) % len(
            self._camera_transforms)
        self.sensor.set_transform(
            self._camera_transforms[self._transform_index])

    # TODO: Remove the hardcoded int index and make it sensor_type
    def set_sensor(self, index, notify=True):
        """Activate sensor number *index* (wraps around), respawning if needed."""
        index = index % len(self._sensors)
        # TODO: Remove the hardcoded 0 ad use camera_type
        # TODO: Use same keys as used in self._sensors
        # Respawn only when the blueprint id actually changes.
        needs_respawn = True if self._index is None \
            else self._sensors[index][0] != self._sensors[self._index][0]
        if needs_respawn:
            if self.sensor is not None:
                self.sensor.destroy()
                self._surface = None
            self.sensor = self._parent.get_world().spawn_actor(
                self._sensors[index][-1],
                self._camera_transforms[self._transform_index],
                attach_to=self._parent)
            # We need to pass the lambda a weak reference to self to avoid
            # circular reference.
            weak_self = weakref.ref(self)
            self.sensor.listen(
                lambda image: CameraManager._parse_image(weak_self, image))
        if notify:
            self._hud.notification(self._sensors[index][2])
        self._index = index

    def next_sensor(self):
        """Switch to the next sensor in the list."""
        self.set_sensor(self._index + 1)

    def toggle_recording(self):
        """Toggle per-frame disk recording and show the new state on the HUD."""
        self._recording = not self._recording
        self._hud.notification(
            'Recording %s' % ('On' if self._recording else 'Off'))

    def render(self, display):
        """Blit the most recent sensor surface onto *display* (if any)."""
        if self._surface is not None:
            display.blit(self._surface, (0, 0))

    @staticmethod
    def _parse_image(weak_self, image):
        """Sensor callback: convert the frame to a pygame surface and record it."""
        self = weak_self()
        # BUG FIX: bail out BEFORE touching attributes. The original assigned
        # self.image/self.callback_count first, which raises AttributeError
        # once the manager has been garbage-collected and the weakref is dead.
        if not self:
            return
        self.image = image
        self.callback_count += 1
        if self._sensors[self._index][0].startswith('sensor.lidar'):
            # Project the lidar point cloud onto a top-down 2-D image.
            points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))
            points = np.reshape(points, (int(points.shape[0] / 3), 3))
            lidar_data = np.array(points[:, :2])
            lidar_data *= min(self._hud.dim) / 100.0
            lidar_data += (0.5 * self._hud.dim[0], 0.5 * self._hud.dim[1])
            lidar_data = np.fabs(lidar_data)
            lidar_data = lidar_data.astype(np.int32)
            lidar_data = np.reshape(lidar_data, (-1, 2))
            lidar_img_size = (self._hud.dim[0], self._hud.dim[1], 3)
            lidar_img = np.zeros(lidar_img_size)
            lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
            self._surface = pygame.surfarray.make_surface(lidar_img)
        else:
            # Camera frame: apply the sensor's color converter, drop alpha,
            # convert BGRA->RGB and build the surface.
            image.convert(self._sensors[self._index][1])
            array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (image.height, image.width, 4))
            array = array[:, :, :3]
            array = array[:, :, ::-1]
            self._surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        if self._recording:
            image_dir = os.path.join(
                CARLA_OUT_PATH, 'images/{}/%04d.png'.format(self._parent.id) %
                image.frame_number)
            image.save_to_disk(image_dir)  # , env.cc
            # image.save_to_disk('_out/%08d' % image.frame_number)
        elif self._memory_record:
            self.image_list.append(image)
        else:
            pass
|
"""URLs for Django api application
Contains api endpoints for requests. Using router for creating automatic
detail URLs.
"""
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from . import views as api_views
__author__ = 'Petr Hendrych'
__email__ = 'xhendr03@fit.vutbr.cz'
class OptionalSlashRouter(DefaultRouter):
    """DefaultRouter variant whose URLs match with or without a trailing slash."""

    def __init__(self):
        super().__init__()
        # '/?' makes the trailing slash optional in every generated URL regex.
        self.trailing_slash = '/?'
# Register the viewsets; the router generates list/detail routes automatically.
router = OptionalSlashRouter()
router.register(r'files', api_views.FileViewSet)
router.register(r'tracks', api_views.TrackViewSet)
urlpatterns = [
    path('', include(router.urls)),
    # Browsable-API login/logout views.
    path('api-auth/', include('rest_framework.urls')),
]
|
import os
import logging
from cyvcf2 import VCF
from loqusdb.exceptions import VcfError
from loqusdb.vcf_tools.variant import get_variant_id
logger = logging.getLogger(__name__)
VALID_ENDINGS = ['.vcf', '.gz']
def get_file_handle(file_path):
    """Return cyvcf2 VCF object
    Args:
        file_path(str)
    Returns:
        vcf_obj(cyvcf2.VCF)
    Raises:
        IOError: if the file is missing or not named *.vcf / *.gz
    """
    logger.debug("Check if file end is correct")
    if not os.path.exists(file_path):
        raise IOError("No such file:{0}".format(file_path))
    # Only accept the conventional VCF file extensions.
    if os.path.splitext(file_path)[-1] not in VALID_ENDINGS:
        raise IOError("Not a valid vcf file name: {}".format(file_path))
    return VCF(file_path)
def get_vcf(file_path):
    """Yield variants from a vcf file
    Args:
        file_path(str)
    Returns:
        vcf_obj(cyvcf2.VCF): An iterable with cyvcf2.Variant
    """
    # Thin wrapper kept for API compatibility; validation happens in get_file_handle.
    return get_file_handle(file_path)
def check_vcf(variants):
    """Check if there are any problems with the vcf file

    Verifies that the variants are coordinate sorted within each chromosome and
    that no variant id occurs twice at the same position.

    Args:
        variants(iterable(cyvcf2.Variant))
    Returns:
        nr_variants(int): total number of variants seen
    Raises:
        VcfError: on a duplicated variant or an unsorted file
    """
    logger.info("Check if vcf is on correct format...")
    nr_variants = 0
    previous_pos = None
    previous_chrom = None
    # Variant ids already seen at the current (chrom, pos); fixed the original's
    # misspelled `posititon_variants`.
    position_variants = set()
    for variant in variants:
        nr_variants += 1
        current_chrom = variant.CHROM
        current_pos = variant.POS
        variant_id = get_variant_id(variant)
        if previous_chrom is None or current_chrom != previous_chrom:
            # First variant, or a new chromosome: reset the position tracking.
            previous_chrom = current_chrom
            previous_pos = current_pos
            position_variants = {variant_id}
            continue
        if current_pos == previous_pos:
            # Same position as before: only distinct variant ids are allowed.
            if variant_id in position_variants:
                raise VcfError("Variant {0} occurs several times"
                               " in vcf".format(variant_id))
            position_variants.add(variant_id)
        else:
            # BUG FIX (message typo): was "Vcf if not sorted in a correct way".
            if not current_pos > previous_pos:
                raise VcfError("Vcf is not sorted in a correct way")
            previous_pos = current_pos
            position_variants = {variant_id}
    return nr_variants
|
from output.models.ms_data.attribute.att_d007_xsd.att_d007 import (
AttRef,
Char,
Doc,
No,
)
# Explicit public API re-exported from the generated att_d007 module.
__all__ = [
    "AttRef",
    "Char",
    "Doc",
    "No",
]
|
from client import exception, listener, logger
from client.config import config as c
from discord.ext import commands, tasks
import discord
class event_listener(commands.Cog):
    """Cog bundling the bot's global event handlers and the rotating status task."""
    # ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
    name = 'event_listener'
    STATUS = 0  # index into c.CLIENT_STATUS for the rotating presence text
    # ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬

    @tasks.loop(minutes=1)
    async def status_update(self):
        # Rotate through the configured status strings once per minute,
        # wrapping back to the first entry at the end of the list.
        try:
            status = discord.Game('{}'.format((c.CLIENT_STATUS[self.STATUS])))
            await self.client.change_presence(status=discord.Status.do_not_disturb, activity=status)
            self.STATUS += 1
            if self.STATUS == len(c.CLIENT_STATUS):
                self.STATUS = 0
        except Exception as error:
            await exception.error(error)
    # ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬

    def __init__(self, client):
        self.client = client
        self.status_update.start()

    @commands.Cog.listener()
    async def on_message(self, message):
        # BUG FIX: the parameter was named `payload` while the body referenced
        # an undefined `message`, raising NameError on every incoming message.
        try:
            await listener.message(message, self.client)
        except Exception as error:
            await exception.error(error)

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload):
        try:
            await listener.raw_reaction_add(payload, self.client, True)
        except Exception as error:
            await exception.error(error)

    @commands.Cog.listener()
    async def on_raw_reaction_remove(self, payload):
        try:
            await listener.raw_reaction_remove(payload, self.client, False)
        except Exception as error:
            await exception.error(error)

    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        try:
            await listener.guild_join(self.client, guild)
        except Exception as error:
            await exception.error(error)

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        # NOTE(review): CommandError instances are reported twice (formatted
        # with ctx, then raw) - confirm the duplicate report is intentional.
        if isinstance(error, discord.ext.commands.CommandError):
            await exception.error('{} - {}'.format(error, ctx))
        await exception.error(error)
def setup(client):
    """discord.py extension entry point: register the event_listener cog on the bot."""
    client.add_cog(event_listener(client))
|
#!/usr/bin/env python3
"""Project Euler - Problem 40 Module"""
def problem41(limit):
    """Champernowne's constant digit product (Project Euler 40, despite the name).

    Builds the digit string "0123456789101112..." (starting the counter at 0
    makes string index k equal digit position k) and returns the product of the
    digits at positions 1, 10, 100, ... up to *limit*.
    """
    digits = ''
    counter = 0
    while len(digits) <= limit:
        digits += str(counter)
        counter += 1
    product = 1
    position = 1
    while position <= limit:
        product *= int(digits[position])
        position *= 10
    return product
def run():
    """Default Run Method: product of the Champernowne digits d1..d1000000."""
    return problem41(10 ** 6)
if __name__ == '__main__':
    # Standalone execution: print the Euler result.
    print("Result: ", run())
|
import numpy as np
# return -1 if x < 0, 1 if x > 0, random -1 or 1 if x ==0
def sign(x):
    """Elementwise sign: -1 for negatives, +1 for positives, and a uniformly
    random choice of -1 or +1 wherever the input is exactly 0."""
    result = np.sign(x)
    zero_mask = result == 0
    # Replace the zeros in-place with random ±1 of the matching shape.
    result[zero_mask] = np.random.choice([-1, 1], result[zero_mask].shape)
    return result
if __name__ == "__main__":
x = np.random.choice([-1, 0, 1], [5, 5])
print(x)
print((sign(x)))
|
import xml.etree.ElementTree as ET
from CodegenDatatype import fromXMLCfgStr, toJavaDatatypeStr
from CodegenUtils import indent
class DataCodegen:
    """Emits Java field / getter / setter source text for the wrapped parent node."""

    def __init__(self, parent):
        self.parent = parent

    def getIdentifier(self):
        # The Java identifier is simply the parent node's name.
        return self.parent.name

    def getDeclaration(self):
        # e.g. "    private int foo;\n"
        java_type = toJavaDatatypeStr(self.parent.dataType)
        return f"{indent(1)}private {java_type} {self.getIdentifier()};\n"

    def getGetter(self):
        # Standard Java accessor wrapping the private field.
        java_type = toJavaDatatypeStr(self.parent.dataType)
        field = self.getIdentifier()
        parts = [
            f"{indent(1)}public {java_type} get{field}() {{\n",
            f"{indent(2)}return {field};\n",
            f"{indent(1)}}}\n",
        ]
        return "".join(parts)

    def getSetter(self):
        # Standard Java mutator assigning the private field.
        java_type = toJavaDatatypeStr(self.parent.dataType)
        field = self.getIdentifier()
        parts = [
            f"{indent(1)}public void set{field}( {java_type} val ) {{\n",
            f"{indent(2)}{field} = val;\n",
            f"{indent(1)}}}\n",
        ]
        return "".join(parts)
import os
import json
import numpy as np
import pandas as pd
from sklearn import metrics
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression, Ridge
# Default figure size (inches) for every plot produced below.
rcParams['figure.figsize'] = 12, 8
# Github: https://github.com/sujitmandal
# Pypi : https://pypi.org/user/sujitmandal/
# LinkedIn : https://www.linkedin.com/in/sujit-mandal-91215013a/
# Ensure the plot output directory exists. os.makedirs also creates the
# intermediate 'plot' directory (plain os.mkdir fails with FileNotFoundError
# when the parent is missing), and exist_ok makes the call idempotent.
os.makedirs('plot/RandomForest', exist_ok=True)
def RandomForest(train, test):
    """Train a RandomForestRegressor on ``train``, plot diagnostics and
    predict ``test``.

    :param train: DataFrame containing predictor columns, 'Item_Identifier',
        'Outlet_Identifier' and the target column 'Item_Outlet_Sales'
    :param test: DataFrame with the same predictors but no target
    :return: (mean_results, std_results) — dicts holding the mean and the
        standard deviation of the 20-fold cross-validation scores
    Side effects: prints progress, shows each plot for 5 seconds, and saves
    four PDFs under plot/RandomForest/.
    """
    # Build position -> Item_Identifier lookup for the training rows
    # (used as the x-axis of the diagnostic plots).
    train_identifier = []
    for i in train['Item_Identifier']:
        train_identifier.append(i)
    train_keys = []
    train_values = []
    for i in range(len(train_identifier)):
        train_keys.append(i + 1)
        train_values.append(train_identifier[i])
    train_item_identifier = {}
    train_item_identifier = dict(zip(train_keys, train_values))
    #print(item_identifier)
    # Same lookup for the test rows.
    test_identifier = []
    for i in test['Item_Identifier']:
        test_identifier.append(i)
    test_keys = []
    test_values = []
    for i in range(len(test_identifier)):
        test_keys.append(i + 1)
        test_values.append(test_identifier[i])
    test_item_identifier = {}
    test_item_identifier = dict(zip(test_keys, test_values))
    #print(item_identifier)
    # Predictors are every column except the target and the identifier columns.
    target = 'Item_Outlet_Sales'
    IDcol = ['Item_Identifier','Outlet_Identifier']
    predictors = [x for x in train.columns if x not in [target]+IDcol]
    #print(predictors)
    dataset = train[predictors]
    print(dataset.shape)
    # Simple positional split: first 5000 rows train, rest held out.
    train_dataset = dataset[:5000]
    test_dataset = dataset[5000:]
    print(train_dataset.shape)
    print(test_dataset.shape)
    target_data = train[target]
    print(target_data.shape)
    train_labels = target_data[:5000]
    test_labels = target_data[5000:]
    print(train_labels.shape)
    print(test_labels.shape)
    # Identifiers for the held-out rows (plot x-axis).
    a = train_item_identifier.keys()
    b = list(a)
    item_identifier = b[5000:]
    print(len(item_identifier))
    # Fit the forest and score it on the training split.
    random_forest = RandomForestRegressor(n_estimators=400,max_depth=6, min_samples_leaf=100,n_jobs=4)
    random_forest.fit(train_dataset, train_labels)
    predictions = random_forest.predict(test_dataset)
    score = random_forest.score(train_dataset, train_labels)
    print('Model Score : ', score)
    print(predictions)
    print(predictions.shape)
    #cross-validation (cv):
    cv_score = cross_val_score(random_forest, train_dataset, train_labels, cv=20)
    #print(cv_score)
    mean_results = {}
    std_results = {}
    lr_mean = np.mean(cv_score)
    lr_std = np.std(cv_score)
    mean_results['Random Forest'] = lr_mean
    std_results['Random Forest'] = lr_std
    print('Mean')
    print(mean_results)
    print('Standard Deviation')
    print(std_results)
    #Plot Actuall vs. Prediction
    plt.title('Actual vs. Predicted')
    plt.plot(item_identifier, test_labels)
    plt.plot(item_identifier, predictions)
    plt.xlabel('Item Identifier')
    plt.ylabel('Item Outlet Sales')
    plt.grid(True)
    plt.savefig('plot/RandomForest/plot1.pdf')
    plt.show(block=False)
    plt.pause(5)
    plt.close()
    #Plot True Values vs. Predictions
    a = plt.axes(aspect='equal')
    plt.scatter(test_labels, predictions)
    plt.xlabel('Actual Item Outlet Sales')
    plt.ylabel('Predicted Item Outlet Sales')
    lims = [-1000,10000]
    plt.xlim(lims)
    plt.ylim(lims)
    plt.grid(True)
    # Diagonal reference line: a perfect model would fall on it.
    plt.plot(lims, lims)
    plt.savefig('plot/RandomForest/plot2.pdf')
    plt.show(block=False)
    plt.pause(5)
    plt.close()
    #Plot Prediction Error vs.Count
    error = predictions - test_labels
    plt.hist(error, bins = 25)
    plt.xlabel("Prediction Error [Item Outlet Sales]")
    _ = plt.ylabel("Count")
    plt.savefig('plot/RandomForest/plot3.pdf')
    plt.show(block=False)
    plt.pause(5)
    plt.close()
    # Predict the truly unlabeled test set and plot the result.
    unknow_data = test[predictors]
    unknow_predictions = random_forest.predict(unknow_data)
    print(unknow_predictions)
    print(len(unknow_predictions))
    test_a = test_item_identifier.keys()
    test_b = list(test_a)
    un_item_identifier = test_b
    print(len(un_item_identifier))
    plt.plot(un_item_identifier, unknow_predictions)
    plt.xlabel('Item Identifier')
    plt.ylabel('Item Outlet Sales')
    plt.grid(True)
    plt.savefig('plot/RandomForest/plot4.pdf')
    plt.show(block=False)
    plt.pause(5)
    plt.close()
    return(mean_results, std_results)
from __future__ import absolute_import
import torch
from torch import nn
from torch.autograd import Variable
from ..evaluation_metrics import accuracy
class DeepLoss(nn.Module):
    """Triplet loss on the fused branch, optionally combined with softmax
    (cross-entropy) losses on the cnn / rnn branches once ``epoch`` reaches
    ``add_soft``."""

    def __init__(self, margin=0):
        super(DeepLoss, self).__init__()
        # Hinge-style ranking loss for the triplet term.
        self.triplet_criterion = nn.MarginRankingLoss(margin=margin)
        self.soft_criterion = nn.CrossEntropyLoss()

    def forward(self, inputs, targets, epoch, add_soft=0):
        # inputs is a 3-tuple of branch outputs: (cnn logits, rnn logits,
        # main embedding).
        cnn, rnn, main = inputs
        if epoch < add_soft:
            # Warm-up phase: triplet loss on the main embedding only.
            loss, prec = self.tri_loss(main, targets)
        else:
            loss_main, prec_main = self.tri_loss(main, targets)
            loss_cnn, prec_cnn = self.softmax(cnn, targets)
            loss_rnn, prec_rnn = self.softmax(rnn, targets)
            loss = loss_main + loss_cnn + loss_rnn
            prec = max(prec_main, prec_cnn, prec_rnn)
        return loss, prec

    def tri_loss(self, inputs, targets):
        # Batch-hard triplet loss over the pairwise Euclidean distance matrix.
        n = inputs.size(0)
        # ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed in-place below.
        dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        # NOTE(review): the positional addmm_(beta, alpha, ...) signature is
        # deprecated/removed in modern torch — confirm the pinned version.
        dist.addmm_(1, -2, inputs, inputs.t())
        dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
        # For each anchor, find the hardest positive and negative
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        for i in range(n):
            dist_ap.append(dist[i][mask[i]].max())
            dist_an.append(dist[i][mask[i] == 0].min())
        # NOTE(review): on torch >= 0.4 max()/min() return 0-dim tensors and
        # torch.cat would fail; torch.stack is the modern equivalent.
        dist_ap = torch.cat(dist_ap)
        dist_an = torch.cat(dist_an)
        # Compute ranking hinge loss
        y = dist_an.data.new()
        y.resize_as_(dist_an.data)
        # Label +1: the criterion should rank dist_an above dist_ap.
        y.fill_(1)
        y = Variable(y)
        loss = self.triplet_criterion(dist_an, dist_ap, y)
        # Fraction of anchors whose hardest negative is already farther than
        # their hardest positive.
        prec = (dist_an.data > dist_ap.data).sum() * 1. / y.size(0)
        return loss, prec

    def softmax(self, inputs, targets):
        # Cross-entropy loss plus top-1 accuracy for one auxiliary branch.
        loss = self.soft_criterion(inputs, targets)
        prec, = accuracy(inputs.data, targets.data)
        prec = prec[0]
        return loss, prec

    def normalize(self, inputs, p=2):
        # Divides x^p by the row-sum of x^p. NOTE(review): this is not a
        # unit-norm projection (no 1/p root) — confirm intended semantics.
        outputs = inputs.pow(p) / inputs.pow(p).sum(dim=1, keepdim=True).expand_as(inputs)
        return outputs

    def fusion(self, dist, targets):
        # Placeholder — not implemented.
        pass
|
#!/usr/bin/python3
import sys
import re
from pathlib import Path
import fileinput
""" This script is intended to fix the issue with flash strings on ESP866 described """
""" in issue #8 (https://github.com/frankjoshua/rosserial_arduino_lib/issues/8) """
""" It can also be used on the raw output of the rosserial_arduino library creation """
""" routines to add flash strings as appropriate. """
""" Written by Pierce Nichols (rocketgeek@gmail.com) 9/28/29 """
# Patterns matching the three rosserial constructs that need ESP8266 guards:
# getType(), getMD5() and PROGMEM string declarations.
gettype_pattern = re.compile(r'^\s*const char \* getType\(\)\{\s*return\s*(PSTR|)\s*\(\s*"([\w/_]*)"\s*\);\s*\};\s*$')
getmd5_pattern = re.compile(r'^\s*const char \* getMD5\(\)\{\s*return\s*(PSTR|)\s*\(\s*"([0-9a-f]*)"\s*\);\s*\};\s*$')
getprogmem_pattern = re.compile(r'^\s*static const char ([A-Z]*)\[\] PROGMEM = "([\w/_]*)"\s*;\s*$')
# Fragments used to rebuild the matched lines in both variants.
code_start = ' const char * '
code_end = '");};'
code_gettype = 'getType() { return '
code_md5 = 'getMD5() { return '
code_norm = ' ("'
code_pstr = ' PSTR("'
pm_start = ' static const char '
pm_progmem = '[] PROGMEM = "'
pm_norm = '[] = "'
pm_end = '";'


def process_header(path_to_header):
    """Rewrite one header in place, wrapping every matched construct in an
    ``#ifdef ESP8266`` guard that uses the plain (non-PSTR / non-PROGMEM)
    variant on ESP8266 and the flash-string variant elsewhere."""

    def emit_guarded(esp_line, other_line, guard_indent):
        # fileinput's inplace mode redirects print() into the file being edited.
        print(guard_indent + "#ifdef ESP8266")
        print(esp_line)
        print(guard_indent + "#else")
        print(other_line)
        print(guard_indent + "#endif")

    for raw in fileinput.input(path_to_header, inplace=True):
        line = raw.rstrip('\r\n')
        gt = gettype_pattern.search(line)
        md = getmd5_pattern.search(line)
        pm = getprogmem_pattern.search(line)
        if gt:
            emit_guarded(code_start + code_gettype + code_norm + gt.group(2) + code_end,
                         code_start + code_gettype + code_pstr + gt.group(2) + code_end,
                         " ")
        elif md:
            emit_guarded(code_start + code_md5 + code_norm + md.group(2) + code_end,
                         code_start + code_md5 + code_pstr + md.group(2) + code_end,
                         " ")
        elif pm:
            emit_guarded(pm_start + pm.group(1) + pm_norm + pm.group(2) + pm_end,
                         pm_start + pm.group(1) + pm_progmem + pm.group(2) + pm_end,
                         "")
        else:
            print(line)
if __name__ == '__main__':
    # Guarded so that importing this module (e.g. from tooling or tests) no
    # longer runs the directory walk — behavior as a CLI script is unchanged.
    rootpath = sys.argv[1]  # First argument is the root of the directory tree to fix
    p = Path(rootpath)  # Turn it into a path
    header_list = list(p.glob('**/*.h'))  # Grab a list of all the header files in this directory tree
    for header in header_list:
        process_header(header)
|
#!/usr/bin/env python
from pathlib import Path
from typing import Tuple
def get_path(dir_path: Path,
             dir_name: str = "movie",
             counting_format: str = "%03d",
             file_pattern: str = "") -> Tuple[Path, int, str, str]:
    """
    Looks up all directories (or, when ``file_pattern`` is given, files)
    whose names start with ``dir_name`` in ``dir_path`` and extracts the
    highest counter found.

    :param dir_path: Path where to look for old directories (movie data)
    :param dir_name: General name of the directories without the counter
    :param counting_format: printf-style format of the counter, e.g. "%03d"
    :param file_pattern: if non-empty, scan files containing this pattern
        instead of directories
    :return: (path built from the highest existing index, that index,
        dir_name, counting_format)

    NOTE(review): the original docstring claimed the returned path is
    "counted one up", but the code returns the highest existing index
    unchanged — behavior is kept as-is since callers may rely on it.
    """
    # "movie" and "%03d" strings are hardcoded
    # in mayavi movie_maker _update_subdir
    if file_pattern:
        existing = sorted([x for x in dir_path.glob(dir_name + "*") if x.is_file()])
        try:
            last_str_part = existing[-1].name.split(file_pattern)[0]
            try:
                last_index: int = int(last_str_part.split(dir_name)[1])
            except IndexError:
                # Bug fix: last_index was printed here before ever being
                # assigned, raising UnboundLocalError; default to 0 first.
                last_index = 0
                print(f"Old file not found. Setting last_index={last_index}.")
        except IndexError:
            last_index = 0
            print(f"No file found in directory {dir_name}* at path {dir_path}.")
    else:
        existing = sorted([x for x in dir_path.glob(dir_name + "*") if x.is_dir()])
        try:
            last_index = int(existing[-1].name.split(dir_name)[1])
        except IndexError:
            last_index = 0
            print(f"No old data found. Setting last_index={last_index}.")
    input_path = Path(dir_path, dir_name + counting_format % last_index)
    return input_path, last_index, dir_name, counting_format
|
#!/usr/bin/env python
from main import main
if __name__ == "__main__":
    # Standard entry-point guard: only launch when executed as a script,
    # not when this module is imported.
    main()
|
from django.http import HttpResponse
def dummy_view(request):
    """Trivial view used by the test suite; always returns 200 with a fixed body."""
    return HttpResponse('You ran a test.')
|
# Experiment configuration for the UnMask adversarial-robustness runs.
args_unmask = {
    'no_cuda': False,
    'cuda_device': '0',
    'seed': 5,
    'model': 'densenet121',  # vgg16, resnet50, resnet101, densenet121
    'dataset': 'unmask',
    'class_set': 'cs5-2',  # key into object_dict below
    # training params
    'val_ratio': 0.1,
    'batch_size': 32,
    'epochs': 30,
    'optimizer': 'sgd',  # can be sgd or Adam
    'lr': 0.001,
    'weight_decay': 0.0002,
    'momentum': 0.9,
    'nesterov': False,
    #######
    # adv defense params (epsilons in pixel units, presumably /255 — TODO confirm)
    'robust_training': False,
    'train_epsilon': 4.0,
    'eps_iter': 2.0,
    'nb_iter': 7,
    'test_epsilon': 8.0,
    'test_eps_iter': 2.0,
    'test_nb_iter': 20,
    #######
    # logging params
    'enable_logging': True,
    'enable_saving': True,
    'logging_comment': 'No adversarial training DenseNet121 (cs5-2) train epsilon=4',
    'logging_run': 1,
    #######
    'verbose': 2
}
# Class sets: maps a class_set id to the object categories it contains.
object_dict = {
    'cs3-1': ['car', 'person', 'train'],
    'cs3-2': ['person', 'dog', 'bird'],
    'cs5-1': ['dog', 'car', 'bottle', 'train', 'person'],
    'cs5-2': ['dog', 'car', 'bird', 'train', 'person'],
}
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Oct 4, 2017
@author: jrm
"""
import sh
import sys
def main():
    """Start the Android app in remote dev mode and schedule the gesture driver."""
    # Make sure instance is cleared
    from enaml.application import Application
    Application._instance = None
    from enamlnative.android.app import AndroidApplication
    app = AndroidApplication(
        debug=True,
        dev='remote',  # "10.0.2.2" # or 'server'
        load_view=load_view
    )
    # Give the app 5 seconds to come up before driving input events.
    app.timed_call(5000, run_gestures, app)
    app.start()
def run_gestures(app):
    """Drive swipes and taps via adb to stress the UI, then stop the app
    after two minutes.

    Bug fix: the tap offset used the outer loop variable ``i`` (t + i*200),
    which scheduled all four taps of an iteration at the same instant; the
    inner index ``j`` spreads them 200 ms apart as the inner loop implies.
    """
    for i in range(30):
        #: Swipe to next page
        t = i*2000
        app.timed_call(t,
                       sh.adb, *'shell input swipe 250 300 -800 300'.split(), _bg=True)
        #: Tap a few places
        for j in range(4):
            app.timed_call(t + j*200,
                           sh.adb, *'shell input tap 500 150'.split(), _bg=True)
    app.timed_call(120000, app.stop)
def load_view(app):
    """(Re)load the enaml ContentView into the running app."""
    import enaml
    #: For debug purposes only!
    app.widget.resetBridgeStats()
    app.widget.resetBridgeCache()
    with enaml.imports():
        import view
    # NOTE(review): bare ``reload`` is a Python 2 builtin; on Python 3 this
    # would need importlib.reload — confirm the target interpreter.
    if app.view:
        reload(view)
    app.view = view.ContentView()
    #: Time how long it takes
    app.show_view()
def test_remote_debug():
    """Launch the enaml-native CLI with remote debugging enabled (background)."""
    #sh.pip('install tornado --user'.split())
    enaml_native = sh.Command('enaml-native')
    enaml_native('start', '--remote-debugging', _bg=True)
#: Add app and project sources to the import path before initializing hooks.
sys.path.append('src/apps/')
sys.path.append('src/')
#: Init remote nativehooks implementation
from enamlnative.core import remotehooks
remotehooks.init()
# Runs unconditionally on import — this module is a script, not a library.
main()
|
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from accounts.models import Account
class AuthenticationTest(StaticLiveServerTestCase):
    """End-to-end (Selenium) tests for registration, login and logout.

    Fix: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12 — replaced throughout with ``assertEqual`` (the sibling
    test modules already use the canonical name).
    NOTE(review): the ``find_element_by_*`` helpers were removed in
    Selenium 4.3 — confirm the pinned selenium version before upgrading.
    """

    # Password shared by every fixture account created in these tests.
    DEFAULT_PASSWORD = "p4ssw0rd!"

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.selenium = WebDriver()

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super().tearDownClass()

    def login(self, username, password):
        """
        Goes to the login page and enters the given credentials.
        """
        self.selenium.get(self.live_server_url + "/login/")
        email_inputbox = self.selenium.find_element_by_id("email")
        email_inputbox.send_keys(username)
        password_inputbox = self.selenium.find_element_by_id("password")
        password_inputbox.send_keys(password)
        submit_button = self.selenium.find_element_by_css_selector("input[type=submit]")
        submit_button.click()

    def logout(self):
        """
        Executes the steps to log the user out of the system.
        """
        user_dropdown = self.selenium.find_element_by_id("user-dropdown")
        user_dropdown.click()
        logout_button = self.selenium.find_element_by_link_text("Logout")
        logout_button.click()

    def make_account(self, email, password=DEFAULT_PASSWORD, first_name="", last_name=""):
        """
        Creates a new Account instance with the given parameters
        """
        account = Account(email=email, first_name=first_name, last_name=last_name)
        account.set_password(password)
        account.save()
        return account

    def test_can_register_for_an_account(self):
        # Jane wanted to register for an Aeternae account, so she went to the accounts registration page and saw that
        # the page title and header does suggest that this is the registration page
        self.selenium.get(self.live_server_url + "/register/")
        self.assertIn("Register", self.selenium.title)
        header_title = self.selenium.find_element_by_tag_name("h1").text
        self.assertIn("Create Account", header_title.title())
        # She is prompted for her name, so she enter her first and last name
        first_name_inputbox = self.selenium.find_element_by_name("first_name")
        first_name_inputbox.send_keys("Jane")
        last_name_inputbox = self.selenium.find_element_by_name("last_name")
        last_name_inputbox.send_keys("Doe")
        # Next up is her email
        email_inputbox = self.selenium.find_element_by_name("email")
        email_inputbox.send_keys("jdoe@example.com")
        # Lastly is her password, inputted twice for confirmation
        password1_inputbox = self.selenium.find_element_by_name("password1")
        password1_inputbox.send_keys(self.DEFAULT_PASSWORD)
        password2_inputbox = self.selenium.find_element_by_name("password2")
        password2_inputbox.send_keys(self.DEFAULT_PASSWORD)
        # She then submitted the registration form
        submit_button = self.selenium.find_element_by_css_selector("input[type=submit]")
        submit_button.click()
        # The page redirects to the dashboard where she saw the page title and header saying so, with a nice and warm
        # message appearing to welcoming her and acknowledging that she had succesfully created an account.
        self.assertEqual(self.selenium.current_url, self.live_server_url + "/dashboard/")
        self.assertIn("Dashboard", self.selenium.title)
        header_title = self.selenium.find_element_by_tag_name("h1").text
        self.assertIn("Dashboard", header_title.title())
        body_text = self.selenium.find_element_by_tag_name("body").text
        self.assertIn("You have successfully registered for an account. Enjoy your stay!", body_text)
        # Satisfied, she went to bed.

    def test_can_login_with_an_account(self):
        # Alice had registered for an account yesterday and would like to log back in to the dashboard
        account = self.make_account(email="alice.green@example.com", first_name="Alice", last_name="Green")
        # She goes to the login page...
        self.selenium.get(self.live_server_url + "/login/")
        self.assertIn("Login", self.selenium.title)
        header_title = self.selenium.find_element_by_tag_name("h1").text
        self.assertIn("Login", header_title.title())
        # ...and fills out the form with her credentials
        email_inputbox = self.selenium.find_element_by_id("email")
        email_inputbox.send_keys(account.email)
        password_inputbox = self.selenium.find_element_by_id("password")
        password_inputbox.send_keys(self.DEFAULT_PASSWORD)
        # She then submits the login form
        submit_button = self.selenium.find_element_by_css_selector("input[type=submit]")
        submit_button.click()
        # The page redirects to the dashboard where she saw the page title and header saying so, with a nice and warm
        # message appearing to welcoming her and acknowledging that she had succesfully logged in.
        self.assertEqual(self.selenium.current_url, self.live_server_url + "/dashboard/")
        self.assertIn("Dashboard", self.selenium.title)
        header_title = self.selenium.find_element_by_tag_name("h1").text
        self.assertIn("Dashboard", header_title.title())
        body_text = self.selenium.find_element_by_tag_name("body").text
        self.assertIn("It's great to have you back, Alice!", body_text)
        # Satisfied, she went to bed.

    def test_can_logout_when_logged_in(self):
        # Bob logs in to his account and after a while, decided that he's done for the day .
        account = self.make_account(email="bob.smith@example.com", first_name="Bob", last_name="Smith")
        self.login(account.email, self.DEFAULT_PASSWORD)
        self.selenium.get(self.live_server_url + "/dashboard/")
        self.assertIn("Dashboard", self.selenium.title)
        header_title = self.selenium.find_element_by_tag_name("h1").text
        self.assertIn("Dashboard", header_title.title())
        # He clicks the user dropdown to show the logout button
        user_dropdown = self.selenium.find_element_by_id("user-dropdown")
        user_dropdown.click()
        # And then clicks the logout button itself
        logout_button = self.selenium.find_element_by_link_text("Logout")
        logout_button.click()
        # He is then redirected to the login page with a message stating that he has been indeed logged out of the system.
        self.assertEqual(self.selenium.current_url, self.live_server_url + "/login/")
        self.assertIn("Login", self.selenium.title)
        header_title = self.selenium.find_element_by_tag_name("h1").text
        self.assertIn("Login", header_title.title())
        body_text = self.selenium.find_element_by_tag_name("body").text
        self.assertIn("You have been logged out. See you soon!", body_text)
        # Satisfied, he went to bed.

    def test_cannot_access_the_dashboard_when_not_logged_in(self):
        # Shawn had a long day and would like to check up what's new in the Aeternae dashboard.
        account = self.make_account(email="shawn.smith@example.com", first_name="Shawn", last_name="Smith")
        # He remembered the URL and goes to the dashboard page.
        self.selenium.get(self.live_server_url + "/dashboard/")
        # Weirdly enough, he was redirected to the login page.
        self.assertEqual(self.selenium.current_url, self.live_server_url + "/login/?next=/dashboard/")
        # "Ooops!", he uttered, as he remembers that he forgot to login to his account.
        # He typed in his credentials and hit the login button.
        email_inputbox = self.selenium.find_element_by_id("email")
        email_inputbox.send_keys(account.email)
        password_inputbox = self.selenium.find_element_by_id("password")
        password_inputbox.send_keys(self.DEFAULT_PASSWORD)
        submit_button = self.selenium.find_element_by_css_selector("input[type=submit]")
        submit_button.click()
        # The page now redirects to the dashboard home, and he was greeted by the familiar friendly message.
        body_text = self.selenium.find_element_by_tag_name("body").text
        self.assertIn("It's great to have you back, Shawn!", body_text)
        # Satisfied, he went to bed.

    def test_logged_in_users_cannot_access_the_login_page(self):
        # Lorraine had been working inside the Aeternae dashboard for a while now.
        account = self.make_account(email="lorraine.jones@example.com", first_name="Lorraine", last_name="Jones")
        self.login(username=account.email, password=self.DEFAULT_PASSWORD)
        # After a few hours away from the computer, she went on and try to log in to her account.
        self.selenium.get(self.live_server_url + "/login/")
        # "Huh, that's weird", she thought as the page is redirected to the dashboard.
        self.assertEqual(self.selenium.current_url, self.live_server_url + "/dashboard/")
        # "Oh, right! I'm already logged in!", she realized. After a while, she finished her work and logged out.
        self.logout()
        # She's sent off by a message confirming she is indeed logged out of the system.
        body_text = self.selenium.find_element_by_tag_name("body").text
        self.assertIn("You have been logged out. See you soon!", body_text)
        # Satisfied, she went to bed.

    def test_logged_in_users_cannot_access_the_registration_page(self):
        # Marshall was checking something in the Aeternae dashboard, and had just went out for lunch.
        account = self.make_account(email="marshall.anthony@example.com", first_name="Marshall", last_name="Anthony")
        self.login(username=account.email, password=self.DEFAULT_PASSWORD)
        # Coming back to his desk, he thought of an idea that requires a new account.
        # He goes to the registration page to create one for himself.
        self.selenium.get(self.live_server_url + "/register/")
        # He's surprised that his browser redirects to the dashboard.
        self.assertEqual(self.selenium.current_url, self.live_server_url + "/dashboard/")
        # "Oh, right! I'm already logged in!", he realized. He postponed his idea for tomorrow.
        # After a while, he finished his work and logged out.
        self.logout()
        # He's sent off by a message confirming he is indeed logged out of the system.
        body_text = self.selenium.find_element_by_tag_name("body").text
        self.assertIn("You have been logged out. See you soon!", body_text)
        # Satisfied, she went to bed.

    def test_logged_in_users_cannot_access_the_landing_page(self):
        # Natalie was just done working on something in the Aeternae dashboard, so he took a quick coffee break.
        account = self.make_account(email="natalie.davis@example.com", first_name="Natalie", last_name="Davis")
        self.login(username=account.email, password=self.DEFAULT_PASSWORD)
        # Coming back to her station, she keys in the Aeternae root URL on her freshly booted browser
        self.selenium.get(self.live_server_url)
        # What do you know, her browser redirects to the dashboard.
        self.assertEqual(self.selenium.current_url, self.live_server_url + "/dashboard/")
        # It all makes sense because she's already logged in. She logged out at the end of the day.
        self.logout()
        # She's sent off by a message confirming she is indeed logged out of the system.
        body_text = self.selenium.find_element_by_tag_name("body").text
        self.assertIn("You have been logged out. See you soon!", body_text)
        # Satisfied, she went to bed.
|
#!/usr/bin/env python
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
bool_parse_test.py: Tests for bool_parse.py
"""
import unittest
from core import test_lib
from core.meta import syntax_asdl, Id, types_asdl
from osh import bool_parse # module under test
bool_expr_e = syntax_asdl.bool_expr_e
lex_mode_e = types_asdl.lex_mode_e
def _ReadWords(w_parser):
    """Drain w_parser in DBracket mode, returning every word before Eof_Real."""
    collected = []
    while True:
        word = w_parser.ReadWord(lex_mode_e.DBracket)
        if word.Type() == Id.Eof_Real:
            break
        collected.append(word)
    print('')
    print('words:', collected)
    return collected
def _MakeParser(code_str):
    """Build a BoolParser over code_str, advanced to its first token."""
    # NOTE: We need the extra ]] token
    arena = test_lib.MakeArena('<bool_parse_test.py>')
    w_parser = test_lib.InitWordParser(code_str + ' ]]', arena=arena)
    w_parser._Next(lex_mode_e.DBracket)  # for tests only
    p = bool_parse.BoolParser(w_parser)
    p._Next()
    return p
class BoolParserTest(unittest.TestCase):
    """Exercises BoolParser on factors, negation, && / || chains and parens."""

    def testParseFactor(self):
        # Bare word -> word test.
        p = _MakeParser('foo')
        print(p.ParseFactor())
        self.assertTrue(p._TestAtEnd())
        # Composite word (var sub + quoted part) is still a single factor.
        p = _MakeParser('$foo"bar"')
        print(p.ParseFactor())
        self.assertTrue(p._TestAtEnd())
        # Unary operator.
        p = _MakeParser('-z foo')
        print('-------------')
        node = p.ParseFactor()
        print(node)
        self.assertTrue(p._TestAtEnd())
        self.assertEqual(bool_expr_e.BoolUnary, node.tag)
        # Binary operator.
        p = _MakeParser('foo == bar')
        node = p.ParseFactor()
        print(node)
        self.assertTrue(p._TestAtEnd())
        self.assertEqual(bool_expr_e.BoolBinary, node.tag)

    def testParseNegatedFactor(self):
        # Without '!': plain word test.
        p = _MakeParser('foo')
        node = p.ParseNegatedFactor()
        print(node)
        self.assertTrue(p._TestAtEnd())
        self.assertEqual(bool_expr_e.WordTest, node.tag)
        # With '!': wrapped in LogicalNot.
        p = _MakeParser('! foo')
        node = p.ParseNegatedFactor()
        print(node)
        self.assertTrue(p._TestAtEnd())
        self.assertEqual(bool_expr_e.LogicalNot, node.tag)

    def testParseTerm(self):
        p = _MakeParser('foo && ! bar')
        node = p.ParseTerm()
        print(node)
        self.assertEqual(bool_expr_e.LogicalAnd, node.tag)
        # TODO: This is an entire expression I guess
        p = _MakeParser('foo && ! bar && baz')
        node = p.ParseTerm()
        print(node)
        self.assertEqual(bool_expr_e.LogicalAnd, node.tag)
        p = _MakeParser('-z foo && -z bar')
        node = p.ParseTerm()
        print(node)
        self.assertEqual(bool_expr_e.LogicalAnd, node.tag)

    def testParseExpr(self):
        p = _MakeParser('foo || ! bar')
        node = p.ParseExpr()
        print(node)
        self.assertEqual(bool_expr_e.LogicalOr, node.tag)
        p = _MakeParser('a == b')
        print(p.ParseExpr())

    def testParseFactorInParens(self):
        p = _MakeParser('( foo == bar )')
        node = p.ParseFactor()
        print(node)
        self.assertTrue(p._TestAtEnd())
        self.assertEqual(bool_expr_e.BoolBinary, node.tag)

    def testParseParenthesized(self):
        p = _MakeParser('zoo && ( foo == bar )')
        node = p.ParseExpr()
        print(node)
        self.assertEqual(bool_expr_e.LogicalAnd, node.tag)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import resources
import sql.sql_deployment as sd
from sql.sql_parameters import SqlParameters
from helper import ServiceType
# Scratch/demo driver — everything below is currently disabled except the
# service-type tag.
# NOTE(review): a credential is committed in the commented-out code below —
# it should be rotated and removed from version control.
s = 'VM'
#res = resources.get_all_tks_resource_groups()
#params = SqlParameters()
#params.administrator_password = "})&B7Tq33n1f"
#p = sd.get_parameters(params)
#t = sd.get_template()
#resources.create_deployment("89aa748b-0621-4ec3-865a-ab0cde103b13", t, p)
pass
from dolfin import Mesh, cells, edges
import os
# FIXME
# We don't know how to deal with tight bbox intersecting the surface and so
# are forced to have the bbox larger. Would be nice to handle this.
def mesh_around_1d(mesh, size=1, scale=10, padding=0.05):
    '''
    From a 1d in xd (X > 1) mesh (in XML format) produce a Xd mesh where
    the 1d structure is embedded. Mesh size close to structure should
    be size (given as multiple of hmin()), elsewhere scale * size. Padding
    controls size of the bounding box.

    Returns (path of generated .geo file, geometric dimension).

    Fix: the two ``xrange`` loops were Python-2-only; ``range`` iterates
    identically on both Python 2 and 3.
    '''
    # Split "<root>.xml[.gz]" into root and extension.
    dot = mesh.find('.')
    root, ext = mesh[:dot], mesh[dot:]
    assert ext == '.xml' or ext == '.xml.gz', ext
    mesh = Mesh(mesh)
    gdim = mesh.geometry().dim()
    # Only 1d meshes embedded in 2d/3d are supported.
    assert gdim > 1 and mesh.topology().dim() == 1
    x = mesh.coordinates()
    mesh.init(1, 0)
    # Compute fall back mesh size:
    assert size > 0
    size = mesh.hmin()*size
    # Don't allow zero padding - collision of lines with bdry segfaults
    # too ofter so we prevent it
    assert padding > 0
    # Finally scale better be positive
    assert scale > 0
    # gmsh points are always 3d; pad 2d coordinates with a zero z.
    point = (lambda xi: tuple(xi) + (0, ))\
        if gdim == 2 else (lambda xi: tuple(xi))
    geo = '.'.join([root, 'geo'])
    with open(geo, 'w') as outfile:
        # Setup
        outfile.write('SetFactory("OpenCASCADE");\n')
        outfile.write('size = %g;\n' % size)
        outfile.write('SIZE = %g;\n' % (size*scale))
        # Points
        fmt = 'Point(%d) = {%.16f, %.16f, %.16f, size};\n'
        for i, xi in enumerate(x, 1):
            outfile.write(fmt % ((i, ) + point(xi)))
        # Lines (gmsh entity numbering is 1-based, hence the +1)
        fmt = 'Line(%d) = {%d, %d};\n'
        for i, cell in enumerate(cells(mesh), 1):
            outfile.write(fmt % ((i, ) + tuple(cell.entities(0)+1)))
        # BBox
        xmin, xmax = x.min(0), x.max(0)
        padding = (xmax-xmin)*padding/2.
        xmin -= padding
        xmax += padding
        dx = xmax - xmin
        if gdim == 2 or dx[-1] < 1E-14:  # All points are on a plane
            rect = 'Rectangle(1) = {%g, %g, %g, %g, %g};\n' % (xmin[0],
                                                               xmin[1],
                                                               0 if gdim == 2 else xmin[2],
                                                               dx[0],
                                                               dx[1])
            outfile.write(rect)
            bbox = 'Surface'
        else:
            box = 'Box(1) = {%g, %g, %g, %g, %g, %g};\n' % (xmin[0],
                                                            xmin[1],
                                                            xmin[2],
                                                            dx[0],
                                                            dx[1],
                                                            dx[2])
            outfile.write(box)
            bbox = 'Volume'
        # Crack: embed each 1d line in the bounding surface/volume
        for line in range(1, mesh.num_cells()+1):
            outfile.write('Line{%d} In %s{1};\n' % (line, bbox))
        # Add Physical volume/surface
        outfile.write('Physical %s(1) = {1};\n' % bbox)
        # Add Physical surface/line
        lines = ', '.join(map(lambda v: '%d' % v, range(1, mesh.num_cells()+1)))
        outfile.write('Physical Line(1) = {%s};\n' % lines)
    return geo, gdim
|
import itertools
from cloud_info_provider.collectors import base
class ComputeCollector(base.BaseCollector):
    """Collects compute (IaaS) information from the configured providers."""

    def __init__(self, opts, providers):
        super(ComputeCollector, self).__init__(opts, providers)
        self.templates = ['compute']

    def fetch(self):
        """Assemble static site/endpoint info plus per-share images,
        templates, instances and quotas.

        Returns {} as soon as any share reports no endpoints.
        """
        info = {}
        # Retrieve global site information
        # XXX Validate if really project agnostic
        # XXX Here it uses the "default" project from the CLI parameters
        site_info = self._get_info_from_providers('get_site_info')
        # Get shares / projects and related images and templates
        shares = self._get_info_from_providers('get_compute_shares')
        for share in shares.values():
            kwargs = share.copy()
            endpoints = self._get_info_from_providers('get_compute_endpoints',
                                                      **kwargs)
            if not endpoints.get('endpoints'):
                return {}
            # Collect static information for endpoints
            static_compute_info = dict(endpoints, **site_info)
            static_compute_info.pop('endpoints')
            # Collect dynamic information
            images = self._get_info_from_providers('get_images',
                                                   **kwargs)
            templates = self._get_info_from_providers('get_templates',
                                                      **kwargs)
            instances = self._get_info_from_providers('get_instances',
                                                      **kwargs)
            quotas = self._get_info_from_providers('get_compute_quotas',
                                                   **kwargs)
            # Add same static information to endpoints, images and templates
            for d in itertools.chain(endpoints['endpoints'].values(),
                                     templates.values(),
                                     images.values()):
                d.update(static_compute_info)
            share['images'] = images
            share['templates'] = templates
            share['instances'] = instances
            share['endpoints'] = endpoints
            share['quotas'] = quotas
        # XXX Avoid creating a new list
        endpoints = {endpoint_id: endpoint for share_id, share in
                     shares.items() for endpoint_id,
                     endpoint in share['endpoints'].items()}
        # XXX Avoid redoing what was done in the previous shares loop
        static_compute_info = dict(endpoints, **site_info)
        # Bug fix: when ``shares`` is empty the flattened mapping above has
        # no 'endpoints' key and the previous bare pop raised KeyError;
        # popping with a default keeps that case working unchanged.
        static_compute_info.pop('endpoints', None)
        info.update({'static_compute_info': static_compute_info})
        info.update({'shares': shares})
        return info
|
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions
import pandas as pd
from config import config
class BaseWatsonError(Exception):
    """Root of the Watson client exception hierarchy."""
    pass
class WatsonConfigError(BaseWatsonError):
    """Raised when the Watson credentials are missing from the config."""
    pass
class WatsonApiError(BaseWatsonError):
    """Raised when a call to the Watson NLU API fails."""
    pass
class WatsonClient(object):
    """Thin wrapper around IBM Watson NLU returning pandas DataFrames."""

    def __init__(self, config):
        """Build the NLU client from config['IBM_WATSON_CREDENTIALS'].

        :raises WatsonConfigError: when the credentials key is missing
        :raises WatsonApiError: when the underlying client cannot be built
        """
        if not config or 'IBM_WATSON_CREDENTIALS' not in config:
            raise WatsonConfigError('No config data provided. Make sure that IBM_WATSON_CREDENTIALS is set in config.py')
        credentials = config['IBM_WATSON_CREDENTIALS']
        try:
            self.natural_language_understanding = NaturalLanguageUnderstandingV1 (
                version=credentials['version'],
                username= credentials['username'],
                password=credentials['password']
            )
        except Exception as e:
            raise WatsonApiError(str(e))

    def _analyze(self, html, features, key):
        # Shared implementation for watson_keywords / watson_entities (they
        # were copy-paste duplicates): run the analysis and convert the
        # selected response section into a DataFrame, empty when absent.
        try:
            response = self.natural_language_understanding.analyze(html=html, features=features)
            if key in response:
                return pd.DataFrame(response[key])
            return pd.DataFrame()
        except Exception as e:
            raise WatsonApiError(str(e))

    def watson_keywords(self, html, **data):
        """Return keyword analysis of ``html`` as a DataFrame."""
        return self._analyze(html, Features(keywords=KeywordsOptions()), "keywords")

    def watson_entities(self, html, **data):
        """Return entity analysis of ``html`` as a DataFrame."""
        return self._analyze(html, Features(entities=EntitiesOptions()), "entities")
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, regularizers, initializers
from .blocks import conv_block, mlp_block
class OrthogonalRegularizer(regularizers.Regularizer):
    """Reference: https://keras.io/examples/vision/pointnet/#build-a-model"""
    def __init__(self, num_features, l2reg=0.001):
        # Side length of the square transformation matrix being regularized.
        self.num_features = num_features
        # Weight of the orthogonality penalty.
        self.l2reg = l2reg
        self.identity = tf.eye(num_features)
    def __call__(self, x):
        # Cast the cached identity so it matches the activity's dtype
        # (e.g. under mixed precision).
        identity = tf.cast(self.identity, x.dtype)
        # Reinterpret the flat Dense output as a batch of square matrices.
        x = tf.reshape(x, (tf.shape(x)[0], self.num_features, self.num_features))
        # NOTE(review): tensordot over axes (2, 2) contracts across the whole
        # batch, producing cross-sample products too (shape B x F x B x F).
        # This mirrors the upstream Keras example, but confirm it is intended
        # rather than a per-sample x @ x^T.
        xxt = tf.tensordot(x, x, axes=(2, 2))
        xxt = tf.reshape(
            xxt, (tf.shape(x)[0] * tf.shape(x)[0], self.num_features, self.num_features)
        )
        # Penalize deviation of each product from the identity (orthogonality).
        return tf.reduce_sum(self.l2reg * tf.square(xxt - identity))
    def get_config(self):
        # Required so Keras can serialize/deserialize the regularizer.
        config = {"num_features": self.num_features, "l2reg": self.l2reg}
        return config
def transformation_net(inputs: tf.Tensor, num_features: int, name: str) -> tf.Tensor:
    """
    Build the T-Net that predicts a flattened (num_features x num_features) matrix.

    Reference: https://keras.io/examples/vision/pointnet/#build-a-model.
    The `filters` values come from the original paper: https://arxiv.org/pdf/1612.00593.pdf.
    """
    # Shared per-point convolutional encoder.
    features = conv_block(inputs, filters=64, name=f"{name}_1")
    features = conv_block(features, filters=128, name=f"{name}_2")
    features = conv_block(features, filters=1024, name=f"{name}_3")
    # Symmetric max-pool collapses the point dimension.
    pooled = layers.GlobalMaxPooling1D()(features)
    # Fully connected regression head.
    hidden = mlp_block(pooled, filters=512, name=f"{name}_1_1")
    hidden = mlp_block(hidden, filters=256, name=f"{name}_2_1")
    # Zero kernel + identity bias means the initial transform is the identity.
    final_dense = layers.Dense(
        num_features * num_features,
        kernel_initializer="zeros",
        bias_initializer=initializers.Constant(np.eye(num_features).flatten()),
        activity_regularizer=OrthogonalRegularizer(num_features),
        name=f"{name}_final",
    )
    return final_dense(hidden)
def transformation_block(inputs: tf.Tensor, num_features: int, name: str) -> tf.Tensor:
    """Apply a learned T-Net transform to `inputs` via batched matrix multiply."""
    matrix = transformation_net(inputs, num_features, name=name)
    matrix = layers.Reshape((num_features, num_features))(matrix)
    # Dot(axes=(2, 1)) multiplies each (points x features) sample by its matrix.
    return layers.Dot(axes=(2, 1), name=f"{name}_mm")([inputs, matrix])
|
from memories import generate_memory

# Presumably "1" selects only approved memories — confirm in the memories module.
approved_flag = "1"

memory_index, memory_image = generate_memory(approved_flag)
print("result is", memory_index, "image", memory_image)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Load the 8x8 handwritten-digit dataset and peek at its structure.
digits = load_digits()
print(dir(digits))
print(digits.target[0])

# Render one sample image in grayscale.
plt.gray()
plt.matshow(digits.images[10])

# Hold out 20% of the samples for evaluation.
features_train, features_test, labels_train, labels_test = train_test_split(
    digits.data, digits.target, test_size=0.2
)

# model = make_pipeline(StandardScaler(), LogisticRegression())
classifier = LogisticRegression()
classifier.fit(features_train, labels_train)

# Predict a single sample, then report held-out accuracy.
print(classifier.predict([digits.data[4]]))
print(classifier.score(features_test, labels_test))
import plotter_controller
import threading
import random
import time
import PIL.Image
import PIL.ImageDraw
import numpy as np
import sys
import step_file_data_provider
import math
class PlotterSimulator:
    """Spring-damper simulation of a pen plotter's effector.

    The commanded (logical) pen position advances in discrete steps, while
    the physical effector chases it like a damped spring independently in
    x and y. Optionally renders PNG frames of the trajectory and tracks
    force/velocity statistics.
    """

    def __init__(self, step_time, phys_time, image_output_dir=None, save_stats=True):
        """Args:
            step_time: simulated seconds per logical plotter step.
            phys_time: physics integration substep in simulated seconds.
            image_output_dir: directory for rendered frames; None disables rendering.
            save_stats: if True, record min/max force and velocity statistics.
        """
        # Rendering configuration: one frame per 1/60 s of simulated time.
        self.image_time_step = 1 / 60
        self.image_size = (600, 600)
        self.image_scale = 100              # pixels per simulation unit
        self.image_center_offset = (0, 0)
        self.image_output_dir = image_output_dir
        self.drawn_image = None             # persistent canvas of drawn points
        self.step_time = step_time
        self.phys_time = phys_time
        # some physics constants
        self.x_mass = 1
        self.y_mass = 1
        self.step_size = 0.001              # logical distance moved per step
        self.k_x = 600  # The spring stiffness
        self.k_y = 600  # The spring stiffness
        self.c_x = 20   # damping coefficient, x axis
        self.c_y = 20   # damping coefficient, y axis
        self.save_stats = save_stats
        self.stats = {}
        self.clearStats()
        # actual state of the object
        self.state = []                     # stack used by pushState/popState
        self.effector_pos = (0, 0)
        self.effector_velocity = (0, 0)
        self.logical_pos = (0, 0)
        self.current_time = 0
        self.current_phys_time = 0
        self.step_count = 0
        self.current_image_time = 0
        self.image_count = 0

    def getVelocity(self):
        """Return the effector's scalar speed (magnitude of the velocity vector)."""
        x_vel = self.effector_velocity[0]
        y_vel = self.effector_velocity[1]
        return math.sqrt((x_vel**2) + (y_vel**2))

    def clearStats(self):
        """Reset all recorded statistics to zero."""
        self.stats.clear()
        self.stats = {
            'max_force_x_abs': 0,
            'max_force_y_abs': 0,
            'max_force_x': 0,
            'max_force_y': 0,
            'min_force_x': 0,
            'min_force_y': 0,
            'max_velocity_x': 0,
            'max_velocity_y': 0,
            'min_velocity_x': 0,
            'min_velocity_y': 0
        }

    def getStats(self):
        """Return the statistics dict (live reference, not a copy)."""
        return self.stats

    def pushState(self):
        """Snapshot the full simulation state onto an internal stack."""
        this_state = {
            'effector_pos': self.effector_pos,
            'effector_velocity': self.effector_velocity,
            'logical_pos': self.logical_pos,
            'current_time': self.current_time,
            'current_phys_time': self.current_phys_time,
            'step_count': self.step_count,
            'current_image_time': self.current_image_time,
            'image_count': self.image_count
        }
        self.state.append(this_state)

    def popState(self):
        """Restore the most recently pushed simulation state.

        Raises:
            Exception: if no state has been pushed.
        """
        if len(self.state) <= 0:
            raise Exception("No state pop.")
        this_state = self.state.pop()
        self.effector_pos = this_state['effector_pos']
        self.effector_velocity = this_state['effector_velocity']
        self.logical_pos = this_state['logical_pos']
        self.current_time = this_state['current_time']
        self.current_phys_time = this_state['current_phys_time']
        self.step_count = this_state['step_count']
        self.current_image_time = this_state['current_image_time']
        self.image_count = this_state['image_count']

    def saveImage(self, draw=False):
        """Render any pending frames up to the current physics time.

        Does nothing when image_output_dir is unset. When `draw` is True the
        current effector position is also stamped onto the persistent canvas.
        """
        if self.drawn_image is None and self.image_output_dir:
            self.drawn_image = PIL.Image.new("RGBA", self.image_size)
        # Emit one frame per image_time_step until rendering catches up.
        while self.current_phys_time > self.current_image_time and self.image_output_dir:
            effector_draw_x = (self.effector_pos[0] * self.image_scale) + self.image_center_offset[0]
            effector_draw_y = (self.effector_pos[1] * self.image_scale) + self.image_center_offset[1]
            logical_draw_x = (self.logical_pos[0] * self.image_scale) + self.image_center_offset[0]
            logical_draw_y = (self.logical_pos[1] * self.image_scale) + self.image_center_offset[1]
            if draw:
                # Permanently mark the effector's position on the canvas.
                drawer = PIL.ImageDraw.Draw(self.drawn_image)
                drawer.point((effector_draw_x, effector_draw_y), fill=(255, 255, 255, 255))
                del drawer
            # Overlay transient markers (effector=red, logical=blue, link=green)
            # on a copy so the canvas keeps only the drawn trajectory.
            img = self.drawn_image.copy()
            drawer = PIL.ImageDraw.Draw(img)
            drawer.line([effector_draw_x, effector_draw_y, effector_draw_x, effector_draw_y], fill=(255, 0, 0, 255))
            drawer.line([logical_draw_x, logical_draw_y, logical_draw_x, logical_draw_y], fill=(0, 0, 255, 255))
            drawer.line([effector_draw_x, effector_draw_y, logical_draw_x, logical_draw_y], fill=(0, 255, 0, 255))
            drawer.text((0, 0), "%.5f" % (self.current_image_time), fill=(255, 255, 255, 255))
            del drawer
            slash = '' if self.image_output_dir.endswith('/') else '/'
            file_name = self.image_output_dir + slash + "%05d.png" % (self.image_count)
            print("Saving image " + str(file_name))
            img.save(file_name, "PNG")
            self.current_image_time += self.image_time_step
            self.image_count += 1

    def stepPhysics(self, draw=False):
        """Integrate the spring-damper dynamics up to current_time.

        Uses explicit Euler substeps of length phys_time. The x and y axes
        are two independent systems since the plotter actuators are an x-y
        gantry.
        """
        while self.current_phys_time < self.current_time:
            offset_x = self.logical_pos[0] - self.effector_pos[0]
            offset_y = self.logical_pos[1] - self.effector_pos[1]
            # Spring force toward the logical position minus viscous damping.
            force_x = (offset_x * self.k_x) - (self.c_x * self.effector_velocity[0])
            force_y = (offset_y * self.k_y) - (self.c_y * self.effector_velocity[1])
            acceleration_x = force_x / self.x_mass  # Don't include time as it's not a motion formula
            acceleration_y = force_y / self.y_mass  # Don't include time as it's not a motion formula
            velocity_x = self.effector_velocity[0] + (acceleration_x * self.phys_time)  # Include time as it's a motion formula
            velocity_y = self.effector_velocity[1] + (acceleration_y * self.phys_time)  # Include time as it's a motion formula
            movement_x = self.effector_pos[0] + (velocity_x * self.phys_time)
            movement_y = self.effector_pos[1] + (velocity_y * self.phys_time)
            self.effector_velocity = (velocity_x, velocity_y)
            self.effector_pos = (movement_x, movement_y)
            self.saveImage(draw)
            if self.save_stats:
                self.stats['max_force_x_abs'] = max(abs(force_x), self.stats['max_force_x_abs'])
                self.stats['max_force_y_abs'] = max(abs(force_y), self.stats['max_force_y_abs'])
                self.stats['max_force_x'] = max(force_x, self.stats['max_force_x'])
                self.stats['max_force_y'] = max(force_y, self.stats['max_force_y'])
                self.stats['min_force_x'] = min(force_x, self.stats['min_force_x'])
                self.stats['min_force_y'] = min(force_y, self.stats['min_force_y'])
                # FIX: the velocity statistics were declared in clearStats but
                # never updated anywhere; track them alongside the forces.
                self.stats['max_velocity_x'] = max(velocity_x, self.stats['max_velocity_x'])
                self.stats['max_velocity_y'] = max(velocity_y, self.stats['max_velocity_y'])
                self.stats['min_velocity_x'] = min(velocity_x, self.stats['min_velocity_x'])
                self.stats['min_velocity_y'] = min(velocity_y, self.stats['min_velocity_y'])
            self.current_phys_time += self.phys_time

    def step(self, step):
        """Apply one logical plotter step and advance the physics accordingly."""
        x_diff = 0
        y_diff = 0
        if step.x_step == plotter_controller.StepDirection.FORWARD:
            x_diff += 1
        elif step.x_step == plotter_controller.StepDirection.BACKWARDS:
            x_diff -= 1
        if step.y_step == plotter_controller.StepDirection.FORWARD:
            y_diff += 1
        elif step.y_step == plotter_controller.StepDirection.BACKWARDS:
            y_diff -= 1
        new_pos_x = self.logical_pos[0] + (x_diff * self.step_size)
        new_pos_y = self.logical_pos[1] + (y_diff * self.step_size)
        self.logical_pos = (new_pos_x, new_pos_y)
        self.current_time += self.step_time
        self.stepPhysics(draw=step.draw_value > 0)
        self.step_count += 1
class PlotterSimulatorController(plotter_controller.PlotterController):
    """Drives a PlotterSimulator from a StepDataProvider using two threads:
    a loader thread that fills a buffer and a stepper thread that consumes it
    in (approximately) real time. The two buffers are swapped when full.
    """
    def __init__(self, step_data, step_time=0.001, buffer_size=1024, image_output_dir=None):
        super(PlotterSimulatorController, self).__init__(step_data, step_time)
        self.effector_pos = (0,0)
        self.stepper_thread = None
        self.data_thread = None
        # Physics runs at 10 substeps per logical step.
        self.simulator = PlotterSimulator(step_time, step_time / 10, image_output_dir=image_output_dir)
        self.buffer_size = buffer_size
        # Double buffering: the data thread fills load_buffer while the
        # stepper drains consume_buffer; they are swapped when full.
        # NOTE(review): the swap is not guarded by a lock — it relies on the
        # single-producer/single-consumer pattern and CPython list atomicity;
        # confirm this is acceptable.
        self.load_buffer = []
        self.consume_buffer = []
        self.buffers = []
        self.has_data = True
    def wait(self):
        """Block until both worker threads have finished."""
        self.stepper_thread.join()
        self.data_thread.join()
    #Buffer size should be large enough to handle latencies in the system.
    def stepThreadFunc(self):
        """Consumer loop: feed buffered steps to the simulator, pacing to step_time."""
        while(self.has_data):
            #wait for data
            #print(self.has_data, self.consume_buffer, self.load_buffer)
            # Busy-wait (with yield) until the data thread swaps in a full buffer.
            while(self.has_data and len(self.consume_buffer) <= 0):
                time.sleep(0)
            #print(self.consume_buffer, self.load_buffer)
            start_time = time.time()
            step_index = 0
            while(len(self.consume_buffer) > 0):
                step = self.consume_buffer.pop(0)
                self.simulator.step(step)
                step_index += 1
                # Sleep just enough to keep real time aligned with sim time.
                current_time = time.time()
                next_step_time = start_time + ((step_index)*self.step_time)
                sleep_time = max(next_step_time - current_time, 0)
                time.sleep(sleep_time)
    def dataThreadFunc(self):
        """Producer loop: pull steps from the provider and hand off full buffers."""
        while(self.step_data.hasData()):
            step = self.step_data.getStep()
            self.load_buffer.append(step)
            if(len(self.load_buffer) >= self.buffer_size or not self.step_data.hasData()):
                #Wait for consume buffer to empty
                while(len(self.consume_buffer) > 0):
                    time.sleep(0)
                #And now swap the buffers
                temp_buffer = self.load_buffer
                self.load_buffer = self.consume_buffer
                self.consume_buffer = temp_buffer
                time.sleep(0)
        # Signals the stepper thread to exit once its buffer drains.
        self.has_data = False
    def start(self):
        """Launch the producer and consumer threads."""
        self.stepper_thread = threading.Thread(target=self.stepThreadFunc)
        self.data_thread = threading.Thread(target=self.dataThreadFunc)
        self.stepper_thread.start()
        self.data_thread.start()
class RandomDataProvider(plotter_controller.StepDataProvider):
    """Supplies a fixed number of random plotter steps, always with the pen down."""

    def __init__(self, number_of_data=1):
        # How many steps this provider can still emit.
        self.data_left = number_of_data

    def getStep(self):
        """Return one random step, or raise once the supply is exhausted."""
        if self.data_left <= 0:
            raise Exception("Program crashed as the data provider is out of data")
        directions = list(plotter_controller.StepDirection)
        x_dir = random.choice(directions)
        y_dir = random.choice(directions)
        self.data_left -= 1
        return plotter_controller.PlotterStep(x_dir, y_dir, draw_value=1)

    def hasData(self):
        """True while at least one step remains."""
        return self.data_left > 0
import argparse
def main():
    """Parse CLI options and run the plotter simulator.

    Falls back to 10000 random steps when no step file is supplied.
    """
    parser = argparse.ArgumentParser(description='Run a plotter simulator to show trajectory of a stepfile')
    # FIX: both help strings were copy-paste errors ("serial port" /
    # "list of files to send to the plotter") describing a different tool.
    parser.add_argument('--stepfile', type=str, help="Path to a step file to replay; random steps are used if omitted.")
    parser.add_argument('--image-output-dir', type=str, help="Directory in which to save rendered simulation frames.")
    args = parser.parse_args()
    data_provider = step_file_data_provider.StepFileDataProvider(args.stepfile) if args.stepfile else RandomDataProvider(number_of_data=10000)
    image_output_dir = args.image_output_dir
    controller = PlotterSimulatorController(data_provider, image_output_dir=image_output_dir)
    controller.start()
    controller.wait()
if __name__ == "__main__":
    main()
|
import time
import pickle
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from snake_env import SnakeEnv
from dqn_model import DQNAgent
# Code from https://gist.github.com/Jawdeyu/1d633c35238d13484deb2969ff40005d#file-dqn_run-py
def run(training_mode, pretrained, num_episodes=1000, exploration_max=1):
    """Train (or replay) a DQN agent on SnakeEnv.

    Args:
        training_mode (bool): if True, store transitions, learn after every
            action, and persist the agent and replay memory to disk at the end.
        pretrained (bool): if True, the agent loads previously saved weights;
            combined with training_mode, the prior reward history is restored.
        num_episodes (int): number of episodes to play.
        exploration_max (float): initial epsilon for epsilon-greedy exploration.
    """
    print("Running SnakeEnv training...")
    time.sleep(3)  # brief pause so the banner is visible before training output
    env = SnakeEnv(img_width=10, img_height=9)
    observation_space = env.observation_space.shape
    action_space = env.action_space.n
    agent = DQNAgent(
        state_space=observation_space,
        action_space=action_space,
        max_memory_size=30000,
        batch_size=32,
        gamma=0.90,
        lr=0.00025,
        dropout=0.2,
        exploration_max=exploration_max,
        exploration_min=0.02,
        exploration_decay=0.99,
        pretrained=pretrained,
    )
    # FIX: removed the no-op `num_episodes = num_episodes` self-assignment.
    env.reset()
    total_rewards = []
    # When resuming training, restore reward history so averages continue.
    if training_mode and pretrained:
        with open("total_rewards.pkl", "rb") as f:
            total_rewards = pickle.load(f)
    for ep_num in tqdm(range(num_episodes)):
        state = torch.Tensor([env.reset()])
        done = False
        total_reward = 0
        while not done:
            # TODO add flag in function for rendering
            # env.render()
            action = agent.act(state)
            state_next, reward, done, info = env.step(int(action[0]))
            total_reward += reward
            state_next = torch.Tensor([state_next])
            reward = torch.tensor([reward]).unsqueeze(0)
            if training_mode:
                # if using done as a bool causes issues, switch to int
                agent.remember(state, action, reward, state_next, done)
                # TODO probably do experience_replay after game is over instead of after every action
                # This is bc game does not pause between steps
                agent.experience_replay()
            state = state_next
        total_rewards.append(total_reward)
        if ep_num != 0 and ep_num % 100 == 0:
            print(
                "Episode {} score = {}, average score = {}".format(
                    ep_num + 1, total_rewards[-1], np.mean(total_rewards)
                )
            )
        # FIX: removed `num_episodes += 1` here — mutating the bound inside
        # the loop cannot affect the already-materialized range() and only
        # misled readers into thinking the loop was extended.
    # Save the trained memory so that we can continue from where we stop using 'pretrained' = True
    if training_mode:
        with open("ending_position.pkl", "wb") as f:
            pickle.dump(agent.ending_position, f)
        with open("num_in_queue.pkl", "wb") as f:
            pickle.dump(agent.num_in_queue, f)
        with open("total_rewards.pkl", "wb") as f:
            pickle.dump(total_rewards, f)
        torch.save(agent.dqn.state_dict(), "DQN.pt")
        torch.save(agent.STATE_PREV_MEM, "STATE_PREV_MEM.pt")
        torch.save(agent.ACTION_MEM, "ACTION_MEM.pt")
        torch.save(agent.REWARD_MEM, "REWARD_MEM.pt")
        torch.save(agent.STATE_NEXT_MEM, "STATE_NEXT_MEM.pt")
        torch.save(agent.DONE_MEM, "DONE_MEM.pt")
    env.close()
if __name__ == "__main__":
    run(num_episodes=1000, training_mode=True, pretrained=False)
|
import datetime
import random
import discord
from discord.ext import commands, tasks
from discord_components import DiscordComponents
from discord_slash import SlashCommand
import functions
import json
import os

# FIX: `token = your token` was a redacted placeholder and invalid Python
# syntax. Read the bot token from the environment instead of hard-coding it.
token = os.environ["DISCORD_TOKEN"]
# Request all gateway intents; the handlers below need member/message events.
i = discord.Intents().all()
bot = commands.Bot(command_prefix='..', intents=i)
slash = SlashCommand(bot, sync_commands=True)
guild_ids_list = []   # filled with guild ids in on_ready
invites = {}          # per-guild invite snapshots used to attribute joins
bot.remove_command('help')  # default help replaced by the cogs.help extension
initial_extensions = [
    'cogs.help',
]
bot.load_extension('cogs.help')
bot.load_extension("cogs.slashcog")
bot.load_extension("cogs.moderation")
bot.load_extension('cogs.fun')
bot.load_extension('cogs.filters')
bot.load_extension('cogs.messagefilters')
@bot.command()
async def add(ctx, a: int, b: int):
    """Reply with the sum of two integers."""
    # FIX: the previous docstring ("slove 1 argument + 2 argument") is shown
    # verbatim as the command's help text and was garbled.
    await ctx.send(a + b)
@bot.command()
@commands.has_permissions(administrator=True)
async def createjoins(ctx, *, category):
    """Create a 'Join to create' voice channel in the named category and
    store its ids in the guild's SQL configuration. Admin-only."""
    guild = ctx.message.guild
    cat = discord.utils.get(guild.categories, name=category)
    # FIX: previously a missing category fell through and `cat.id` raised
    # AttributeError on None; report it to the invoker instead.
    if cat is None:
        await ctx.send(embed=discord.Embed(title='Category not found!'))
        return
    ch = await guild.create_voice_channel('Join to create', category=cat)
    functions.updatesql(server=ctx.guild.id, joinvoice=ch.id, voicecat=cat.id)
    # FIX: corrected the user-facing typo 'Sucsessful!'.
    await ctx.send(embed=discord.Embed(title='Successful!'))
@bot.event
async def on_voice_state_update(member, before, after):
    """Implements 'join to create' voice channels: joining the configured hub
    channel spawns a personal channel; leaving a spawned channel empty deletes it."""
    if after and after.channel:
        try:
            data, cat = functions.getsqldata(server=member.guild.id, joinvoice=True)
            if after.channel.id == data:
                guild = bot.get_guild(member.guild.id)
                cat = discord.utils.get(guild.categories, id=cat)
                # FIX: corrected the user-visible typo "cahnnel".
                ch = await guild.create_voice_channel(f'{member.name}\'s channel', category=cat)
                await ch.set_permissions(member, manage_channels=True, manage_permissions=True)
                await member.move_to(ch)
                # FIX: use a context manager so the append handle is closed.
                with open('channelstodelete.txt', 'a') as f:
                    f.write(' ' + str(guild.id) + ' ' + str(ch.id) + '\n')
        except Exception:
            # Best-effort: guilds without join-to-create config land here.
            pass
    if before and before.channel:
        # FIX: the read handle was previously opened and never closed (leak
        # accumulating on every voice state change).
        with open('channelstodelete.txt') as f:
            tracked = f.read().split()
        if str(before.channel.id) in tracked and not before.channel.members:
            await before.channel.delete()
@bot.event
async def on_message(message):
    """Global message hook: dispatch commands, enforce the per-guild link
    filter, and auto-assign language roles on one hard-coded guild."""
    await bot.process_commands(message)
    guild = message.guild
    # FIX: DMs have no guild; the original dereferenced guild.id before its
    # None check, raising AttributeError on every direct message.
    if guild is None:
        return
    # Link filtering (per-guild opt-in stored in SQL).
    if functions.checker(guild=guild.id):
        if functions.getsqldata(server=guild.id, links=True):
            content = message.content
            if 'www' in content or 'http' in content:
                try:
                    if functions.checklink(content):
                        await message.delete()
                        print('deleted!')
                except Exception as e:
                    print(e)
    # Language-based role assignment, specific to one guild.
    if guild.id == 679870413140262944:
        try:
            lang = functions.checklang(message.content)
            # Cyrillic-script languages get "Russian", everything else "Foreigner".
            if lang in ('ru', 'mk', 'bg', 'uk'):
                role = discord.utils.get(guild.roles, name="Russian")
            else:
                role = discord.utils.get(guild.roles, name="Foreigner")
            user = guild.get_member(message.author.id)
            # Skip members who already have either language role.
            for member_role in user.roles:
                if member_role.id in (681944762076626985, 682169503429296149):
                    return
            await user.add_roles(role)
        except Exception:
            pass
@bot.event
async def on_member_join(member):
    """Log new members (with invite attribution) to the configured channel,
    apply anti-raid gating on one guild, and assign the configured join role."""
    datac = functions.getsqldata(server=member.guild.id, joinchannel=True)
    channel = discord.utils.get(member.guild.channels, id=datac)
    if functions.checker(guild=member.guild.id):
        invites_before_join = invites[member.guild.id]
        invites_after_join = await member.guild.invites()
        # FIX: `invite` was unbound (NameError) whenever no invite's use count
        # changed (e.g. vanity URLs); default to None and guard the fields.
        invite = None
        for i in invites_before_join:
            for j in invites_after_join:
                if i.uses == j.uses - 1 and i.code == j.code:
                    invite = j
                    break
        # FIX: removed a redundant second `await member.guild.invites()` call;
        # the snapshot fetched above is reused for the cache update below.
        date_format = "%a, %d %b %Y %I:%M %p"
        embed = discord.Embed(color=0xdfa3ff, description=member.mention)
        embed.set_author(name=str(member), icon_url=member.avatar_url)
        embed.set_thumbnail(url=member.avatar_url)
        members = sorted(member.guild.members, key=lambda m: m.joined_at)
        embed.add_field(name="Join position", value=str(members.index(member) + 1))
        embed.add_field(name="Registered", value=member.created_at.strftime(date_format))
        if invite is not None:
            embed.add_field(name="Invite Code", value=invite.code)
            embed.add_field(name="Inviter", value=invite.inviter)
        if len(member.roles) > 1:
            role_string = ' '.join([r.mention for r in member.roles][1:])
            embed.add_field(name="Roles [{}]".format(len(member.roles) - 1), value=role_string, inline=False)
        perm_string = ', '.join([str(p[0]).replace("_", " ").title() for p in member.guild_permissions if p[1]])
        embed.add_field(name="Guild permissions", value=perm_string, inline=False)
        embed.set_footer(text='ID: ' + str(member.id))
        await channel.send(embed=embed)
        invites[member.guild.id] = invites_after_join
    # Anti-raid rule on one guild: very young accounts get the "rip" role.
    if member.guild.id == 679870413140262944:
        # FIX: the original parsed int(str(timedelta).split()[0]), which
        # raises ValueError for accounts younger than one day (str(timedelta)
        # has no "N days," prefix then). timedelta.days is the robust form.
        if (datetime.datetime.now() - member.created_at).days <= 20:
            role = discord.utils.get(member.guild.roles, name="rip")
            await member.add_roles(role)
            return
    data = functions.getsqldata(server=member.guild.id, joinrole=True)
    try:
        role = discord.utils.get(member.guild.roles, id=data)
        await member.add_roles(role)
    except Exception:
        pass
#logs events
@bot.event
async def on_message_delete(message):
guild = message.guild
if functions.checker(guild=guild.id):
ch = functions.getsqldata(server=guild.id, logs=True)
if ch:
date_format = "%a, %d %b %Y %I:%M %p"
a = str("""```diff\n-{}{}```""".format(message.content, ' ' * (100 - len(message.content))))
channel = discord.utils.get(guild.channels, id=ch)
embed = discord.Embed(title='Message was deleted!',
description='**Content:** \n' + a, color=0xfb0350)
embed.set_author(name=str(message.author), icon_url=message.author.avatar_url)
embed.add_field(name='Channel:', value=f'{message.channel}\n<#{message.channel.id}>')
embed.set_footer(text=str(datetime.datetime.now().__format__(date_format)))
await channel.send(embed=embed)
@bot.event
async def on_message_edit(before, after):
    """Log message edits (before/after content) to the configured log channel."""
    guild = after.guild
    if functions.checker(guild=guild.id):
        ch = functions.getsqldata(server=guild.id, logs=True)
        if ch:
            date_format = "%a, %d %b %Y %I:%M %p"
            # FIX: the "before" block was padded using len(after.content);
            # each block must be padded by its own content length.
            b = str("""```fix\n{}{}```""".format(before.content, ' ' * (100 - len(before.content))))
            a = str("""```yaml\n{}{}```""".format(after.content, ' ' * (100 - len(after.content))))
            channel = discord.utils.get(guild.channels, id=ch)
            embed = discord.Embed(description=f'[Message](https://discord.com/channels/{guild.id}/{after.channel.id}/{after.id}) was edited!',
                                  color=0x5ce8c2)
            embed.add_field(name='Content before:',
                            value=b, inline=False)
            embed.add_field(name='Content after:',
                            value=a, inline=False)
            embed.set_author(name=str(after.author), icon_url=after.author.avatar_url)
            embed.add_field(name='Channel:', value=f'{after.channel}\n<#{after.channel.id}>')
            embed.set_footer(text=str(datetime.datetime.now().__format__(date_format)))
            await channel.send(embed=embed)
@bot.event
async def on_member_ban(guild, member):
    """Log bans (moderator and reason, from the audit log) to the log channel."""
    if not functions.checker(guild=guild.id):
        return
    ch = functions.getsqldata(server=guild.id, logs=True)
    if not ch:
        return
    date_format = "%a, %d %b %Y %I:%M %p"
    channel = discord.utils.get(guild.channels, id=ch)
    entries = await guild.audit_logs(limit=1, action=discord.AuditLogAction.ban).flatten()
    entry = entries[0]
    # Only report when the newest audit entry matches this ban.
    if entry.target == member:
        embed = discord.Embed(title=f'{member} was banned!', color=0xbf1562)
        embed.add_field(name='Moderator:', value=entry.user.mention)
        embed.add_field(name='Reason:', value=entry.reason)
        embed.set_footer(text=str(entry.created_at.__format__(date_format)))
        await channel.send(embed=embed)
@bot.event
async def on_member_kick(member):
    """Intended to log kicks (moderator and reason) to the log channel."""
    # NOTE(review): discord.py does not dispatch an `on_member_kick` event —
    # kicks arrive via `on_member_remove` — so this handler likely never
    # fires. Confirm against the discord.py version in use before relying
    # on kick logging.
    guild = member.guild
    if functions.checker(guild=guild.id):
        ch = functions.getsqldata(server=guild.id, logs=True)
        if ch:
            date_format = "%a, %d %b %Y %I:%M %p"
            channel = discord.utils.get(guild.channels, id=ch)
            # Pull the most recent kick entry from the audit log.
            logs = await guild.audit_logs(limit=1, action=discord.AuditLogAction.kick).flatten()
            logs = logs[0]
            if logs.target == member:
                embed = discord.Embed(title=f'{member} was kicked!', color=0x17838a)
                embed.add_field(name='Moderator:', value=logs.user)
                embed.add_field(name='Reason:', value=logs.reason)
                embed.set_footer(text=str(logs.created_at.__format__(date_format)))
                await channel.send(embed=embed)
@bot.event
async def on_ready():
    """Startup hook: enable components, cache guild ids and invite snapshots,
    and set the bot's presence."""
    DiscordComponents(bot)
    for g in bot.guilds:
        try:
            guild_ids_list.append(g.id)
            invites[g.id] = await g.invites()
        except:
            # Guilds where the bot lacks invite permissions are skipped.
            pass
    await bot.change_presence(activity=discord.Game('..help for help'))
# Blocking call: connect to Discord and run the event loop until shutdown.
bot.run(token)
|
"""Classes for handling telescope and eyepiece properties."""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib

# Consolidate all plot style overrides into a single rcParams update.
matplotlib.rcParams.update({
    'font.size': 14,
    'xtick.direction': 'in',
    'ytick.direction': 'in',
    'xtick.minor.visible': True,
    'ytick.minor.visible': True,
    'xtick.top': True,
    'ytick.right': True,
    'legend.frameon': False,
    'lines.dashed_pattern': [8, 3],
    'figure.figsize': [12, 6],
})
from TCalc.functions import focal_ratio, dawes_lim, resolving_power
from TCalc.functions import Min_magnification, Max_magnification, Min_eyepiece, Max_eyepiece
from TCalc.functions import Lmag_limit
from TCalc.functions import magnification, true_fov, exit_pupil, surface_brightness
from TCalc.age_eye import age_to_eye_diameter, eye_to_age

# Reference wavelengths (nm) and a grid spanning the visible range.
blue = 400
green = 550
red = 700
wavelengths_list = np.linspace(350, 800, 46)
# Representative observer ages and the corresponding pupil diameters.
age_list = np.array([10, 20, 30, 35, 45, 60, 70])
eye_diameter_list = np.array([age_to_eye_diameter(age) for age in age_list])
class eyepiece:
    """A single telescope eyepiece.

    Args:
        f_e: focal length of the eyepiece (mm)
        fov_e: field of view of the eyepiece (deg). Defaults to 50 degrees.
    """

    def __init__(self, f_e, fov_e=50):
        # Reject non-physical (non-positive) values up front.
        for value, message in ((f_e, "f_e must be larger than 0"),
                               (fov_e, "fov must be larger than 0")):
            if value <= 0:
                raise ValueError(message)
        self.f_e = f_e
        self.fov_e = fov_e
class focal_reducer:
    """A focal reducer placed in the telescope's optical path.

    Args:
        P_reducer (float between 0 and 1): the power of the focal reducer
    """

    def __init__(self, P_reducer):
        # A reducer shortens the focal length, so its power is in (0, 1].
        in_range = 0 < P_reducer <= 1
        if not in_range:
            raise ValueError("P_reducer must be between 0 and 1")
        self.P = P_reducer
        self.optic_type = 'focal reducer'
class barlow_lens:
    """A Barlow lens placed in the telescope's optical path.

    Args:
        barlow (float greater than 1): the Barlow factor, default is 2
    """

    def __init__(self, barlow=2):
        # A Barlow multiplies the focal length, so its factor must be >= 1.
        if barlow < 1:
            raise ValueError("barlow must be at least 1")
        self.optic_type = 'Barlow lens'
        self.P = barlow
class telescope:
"""Class representing a telescope
Args:
D_o: the size of the telescope opening (mm)
f_o: focal length of the telescope (mm)
user_D_eye: diameter of telescope user's eye in mm. Default is 7 mm.
user_age: age of the telescope user. Will be used to compute user_D_eye if none is specified.
"""
    def __init__(self, D_o, f_o, user_D_eye=None, user_age=None):
        """Validate inputs, store telescope/user parameters, and precompute
        all eyepiece-independent optical properties.

        Args:
            D_o: the size of the telescope opening (mm)
            f_o: focal length of the telescope (mm)
            user_D_eye: diameter of the user's eye pupil (mm); default 7 mm class behavior
            user_age: age of the user, used to derive user_D_eye if not given
        """
        # Check that inputs make sense then save them as class atributes
        if D_o <= 0:
            raise ValueError("aperature must be larger than 0")
        if f_o <= 0:
            raise ValueError("f_o must be larger than 0")
        self.D_o = D_o
        self.f_o = f_o
        # f_o_true preserves the unreduced focal length so optics (Barlow /
        # reducer) can be applied and removed without drift.
        self.f_o_true = f_o
        # Some stuff about the user
        if user_D_eye is None:
            if user_age is None:
                print("No user_age or user_D_eye specified, using defaults (25 year old eye)")
                self.user_age = 25
                self.user_D_eye = age_to_eye_diameter(self.user_age)
            else:
                if user_age <= 0:
                    raise ValueError("user_age must be larger than 0")
                self.user_age = user_age
                self.user_D_eye = age_to_eye_diameter(self.user_age)
        else:
            if user_D_eye <= 0:
                raise ValueError("user_eye_aperature must be larger than 0")
            self.user_D_eye = user_D_eye
            if user_age is not None:
                print("Specified user_age and user_eye_aperature. The user_eye_aperature will be used for calculations.")
                self.user_age = user_age
        # Compute basic properties derived from telescope information alone
        # (these helpers are defined elsewhere in the class).
        self._compute_focal_ratio()
        self._compute_dawes_limit()
        self._compute_resolving_power()
        self._compute_min_mag()
        self._compute_max_mag()
        self._compute_min_eye()
        self._compute_max_eye()
        self._compute_magnitude_limit()
        # Initialize eyepiece information
        self.eyepieces = {}
        self.current_eyepiece_id = None
        self.current_eyepiece = None
        # Set properties that depend on eyepiece selection to NaNs
        self.M = np.nan
        self.compatible_eyepiece = False
        self.fov = np.nan
        self.D_EP = np.nan
        self.SB = np.nan
        # Initialize optic information
        self.optics = {}
        self.current_optic_id = None
        self.current_optic = None
    def list_eyepiece(self):
        """List the eyepieces and other optics availabe to the telescope
        Args:
            None
        Returns:
            Prints out a list of eyepiece objects and the
            current eyepiece being used.
        """
        # Eyepiece table: name, focal length, field of view.
        print("\n Currently included eyepieces:")
        print("   Name           Focal Length    FOV")
        print("   -------------- -------------- --------------")
        names = self.eyepieces.keys()
        for name in names:
            print("   {: <14} {: <14} {: <14} ".format("\'"+name+"\'", str(self.eyepieces[name].f_e)+" mm", str(self.eyepieces[name].fov_e)+" degrees"))
        if self.current_eyepiece is None:
            print("\n No eyepiece is selected\n")
        else:
            print("\n The currently selected eyepiece is '{}'\n".format(self.current_eyepiece_id))
        # Optics table: focal reducers and Barlow lenses.
        print("\n Additional optical parts available:")
        print("   Name           Type            Power")
        print("   -------------- -------------- --------------")
        names = self.optics.keys()
        for name in names:
            print("   {: <14} {: <14} {: <14}".format("\'"+name+"\'", self.optics[name].optic_type, self.optics[name].P))
        if self.current_optic is None:
            print("\n No optical part is selected\n")
        else:
            print("\n The currently selected optical part is '{}'\n".format(self.current_optic_id))
def select_eyepiece(self,id=None):
"""Set the current eyepiece
Args:
id: The id of the eyepiece to include. Default is None, which selects no eyepiece
Returns:
None
"""
# If the ID is None, we'll get rid of the eyepiece
if id is None:
self.current_eyepiece = None
self.current_eyepiece_id = None
# Reset eyepiece dependent quantities to NaN
self.M = np.nan
self.compatible_eyepiece = False
self.fov = np.nan
self.D_EP = np.nan
self.SB = np.nan
return
# Check that id is a valid input
if ~isinstance(id,str):
try:
id = str(id)
except:
raise ValueError("id must be castable to type 'str'")
# Check that id is in the eyepieces available
if id not in self.eyepieces.keys():
raise ValueError("id does not correspond to an eyepiece. Try self.list_eyepiece.")
# Update eyepiece selection
self.current_eyepiece_id = id
self.current_eyepiece = self.eyepieces[id]
# Update quantities dependent on eyepiece
self._compute_magnification()
if self.f_e_min <= self.current_eyepiece.f_e <= self.f_e_max:
self.compatible_eyepiece = True
else:
self.compatible_eyepiece = False
print("Note: The magnification produced by this eyepiece is not compatible with the telescope.")
self._compute_true_fov()
self._compute_exit_pupil()
self._compute_surface_brightness_sensitivity()
def select_optic(self,id=None):
"""Set the current optical part
Args:
id: The id of the optical part to include. Default is None, which selects no optical part
Returns:
None
"""
# If the ID is None, we'll get rid of the eyepiece
if id is None:
self.current_optic = None
self.current_optic_id = None
# Update f_o
self.f_o = self.f_o_true
# Check that id is a valid input
else:
if ~isinstance(id,str):
try:
id = str(id)
except:
raise ValueError("id must be castable to type 'str'")
# Check that id is in the optics available
if id not in self.optics.keys():
raise ValueError("id does not correspond to an optical part. Try self.list_eyepiece.")
# Update optic selection
self.current_optic_id = id
self.current_optic = self.optics[id]
# Update f_o
self.f_o = self.f_o_true * self.current_optic.P
# Update other quantities
self._compute_focal_ratio()
self._compute_min_eye()
self._compute_max_eye()
if self.current_eyepiece is not None:
self._compute_magnification()
self._compute_true_fov()
self._compute_exit_pupil()
self._compute_surface_brightness_sensitivity()
if self.f_e_min <= self.current_eyepiece.f_e <= self.f_e_max:
self.compatible_eyepiece = True
else:
self.compatible_eyepiece = False
print("Note: The magnification produced by this eyepiece is not compatible with the telescope.")
def add_eyepiece(self, piece, id=None, select=True):
"""Attach an eyepiece to the telescope class
The telescope class can have multiple eyepieces associated
with it, this method allows you to add a single eyepiece
object to the list.
Args:
piece (eyepiece class instance): the eyepiece object to add
id (string): the name to give the eyepiece - it will be identified by this name
when selecting and analyzing eyepiece configurations. If unspecified, it will be set to a number.
select (bool): if True (default) the added eyepiece will be selected by calling the select_eyepiece method.
Returns:
None
"""
# If no name is given for eyepiece, just give it the index number as a name
if id is None:
id = str(len(self.eyepieces))
# Check that inputs are formatted correctly
elif ~isinstance(id,str):
try:
id = str(id)
except:
raise ValueError("id must be castable to type 'str'")
if not isinstance(piece,eyepiece):
raise ValueError("piece must be an instance of eyepiece class")
# Add eyepiece to list
self.eyepieces[id] = piece
# If select==True, we'll make the new eyepiece the current eyepiece
if select:
self.select_eyepiece(id)
def add_optic(self, optic, id=None, select=True):
"""Attach an optical part to the telescope class
The telescope class can have multiple optical parts (focal reducers and Barlow lenses)
associated with it, this method allows you to add a single part to the list.
Args:
optic (focal_reducer or barlow_lens class instance): the optical part object to add
id (string): the name to give the part - it will be identified by this name
when selecting and analyzing optical configurations. If unspecified, it will be set to a number.
select (bool): if True (default) the added optical part will be selected by calling the select_eyepiece method.
Returns:
None
"""
# If no name is given for optic, just give it the index number as a name
if id is None:
id = str(len(self.optics))
# Check that inputs are formatted correctly
elif ~isinstance(id,str):
try:
id = str(id)
except:
raise ValueError("id must be castable to type 'str'")
if not isinstance(optic,barlow_lens):
if not isinstance(optic,focal_reducer):
raise ValueError("optic must be an instance of barlow_lens or focal_reducer class")
# Add eyepiece to list
self.optics[id] = optic
# If select==True, we'll make the new eyepiece the current eyepiece
if select:
self.select_optic(id)
def change_user_age(self,new_age):
"""Update the age of the user and the corresponding eye size
Args:
new_age (float > 0): the age of the user
Returns:
None
"""
# Some stuff about the user
if new_age <= 0:
raise ValueError("user_age must be larger than 0")
self.user_age = new_age
self.user_D_eye = age_to_eye_diameter(self.user_age)
# Update limits
self._compute_min_mag()
self._compute_max_eye()
# Update quantities dependent on eyepiece
if self.current_eyepiece is not None:
if self.f_e_min <= self.current_eyepiece.f_e <= self.f_e_max:
self.compatible_eyepiece = True
else:
self.compatible_eyepiece = False
print("Note: The magnification of the current eyepiece is not compatible.")
def say_configuration(self):
"""List properties of the telescope eyepiece pair
Args:
None
Returns:
Writes out the properties of the telescope
"""
print("\n The telescope has the following layout:")
print(" Aperture diameter: {} mm".format(self.D_o))
print(" Focal length: {} mm, corresponding to a focal ratio of {}".format(self.f_o_true,self.f_R_true))
if self.current_optic is not None:
if self.current_optic.optic_type == 'Barlow lens':
action = 'increases'
else:
action = 'decreases'
print(" '{}', a {}, has been added to the optical path. This {} the focal length by {}".format(self.current_optic_id,self.current_optic.optic_type,action,self.current_optic.P))
print(" This results in")
print(" Focal length: {} mm, corresponding to a focal ratio of {}".format(self.f_o,self.f_R))
print("")
print(" In good atmospheric conditions, the resolution of the telescope (Dawes limit) is {:.1f} arcseconds".format(self.Dawes_lim))
print(" By wavelength, the resolution is")
print(" {} nm (blue): {:.1f} arcsec".format(blue,self.blue_P_R))
print(" {} nm (green): {:.1f} arcsec".format(green,self.green_P_R))
print(" {} nm (red): {:.1f} arcsec".format(red,self.red_P_R))
print("")
age = eye_to_age(self.user_D_eye)
print(" The maximum possible magnification factor is {:.1f}".format(self.M_max))
print(" This means the minimum compatible eyepiece focal length is {:.1f} mm".format(self.f_e_min))
print("")
print(" The minimum magnification factor and corresponding maximum eyepiece focal length depend on the diameter of the observer's eye.")
print(" For a telescope user with an eye diameter of {} mm (apropriate for an age around {} years):".format(self.user_D_eye,age))
print(" The minimum magnification factor is {:.1f}".format(self.M_min))
print(" This means the maximum compatible eyepiece focal length is {:.1f} mm".format(self.M_max))
print("")
print(" The faintest star that can be seen by this telescope is {:.1f} mag".format(self.Lmag_limit))
if self.current_eyepiece is not None:
print("")
print(" The currently selected eyepiece is '{}', which has the following layout:".format(self.current_eyepiece_id))
print(" Focal length: {} mm".format(self.current_eyepiece.f_e))
print(" Field of view: {} degrees".format(self.current_eyepiece.fov_e))
print("")
if self.compatible_eyepiece:
compatible = 'is'
else:
compatible = 'IS NOT'
print(" With this eyepiece:")
print(" The magnification factor is {:.1f}. This {} compatible with the telescope limits.".format(self.M,compatible))
print(" The true field of view is {:.0f} degrees".format(self.fov))
print(" The exit pupil diameter is {:.1f} mm".format(self.D_EP))
print("")
print(" The faintest surface brightness that can be seen by this telescope is {:.2f}".format(self.SB))
print("")
def show_resolving_power(self,seeing=2.5):
"""Plots the resolution performance of the telescope for a specific seeing value
Args:
seeing (float): Seeing factor of sky. Default to 2.5
Returns:
A plot depicting variation of chromatic resolution or simply the resolution at different wavelengths
with respect to Dawes Limit and Limit due to seeing
"""
fig,ax = plt.subplots()
ax.set(xlabel='Wavelength [nm]', ylabel='Resolution [arcsec]',xlim=(380,750))
ax.title.set_text('Resolution performance of the telescope-eyepiece pair')
ax.plot(wavelengths_list,self.P_R,label='Chromatic Resolution')
ax.axhline(self.Dawes_lim,color='C0',ls='--',label='Dawes limit')
ax.axhline(seeing,color='.5',ls='--',label='Limit due to seeing')
ax.legend()
plt.show()
def show_magnification_limits(self):
"""Plots the magnification limits for a telescope-eyepiece pair according to user's age
Args:
None
Returns:
Plot of maximum achievable magnification as a function of pupil's diameter
which varies according to user's age. Also, plots the magnification strength's
of the current selected eyepice.
"""
fig,ax = plt.subplots()
ax.set(xlabel='Eye Diameter [mm]', ylabel='Magnification Factor',xlim=(5,7.5),yscale='log')
ax.title.set_text('Magnification Limits of the telescope-eyepiece pair')
ax.plot(eye_diameter_list,self.M_min_by_age,ls='--',label='Minimum')
ax.axhline(self.M_max,color='C0',label='Maximum')
ax.axhline(self.M,color='k',label='Current Eyepiece')
ax.legend()
plt.show()
def show_eyepiece_limits(self):
"""Plots the eyepiece limits for a telescope-eyepiece pair according to user's age and pupil diameter
Args:
None
Returns:
Plot of minimum achievable magnification as a function of pupil's diameter
which varies according to user's age. Also, plots the power of the current selected eyepice.
"""
fig,ax = plt.subplots()
ax.set(xlabel='Eye Diameter [mm]', ylabel='Eyepiece Focal Length [mm]',xlim=(5,7.5))
ax.title.set_text('Eyepiece Limits of the telescope-eyepiece pair')
ax.plot(eye_diameter_list,self.f_e_max_by_age,ls='--',label='Maximum')
ax.axhline(self.f_e_min,color='C0',label='Minimum')
ax.axhline(self.current_eyepiece.f_e,color='k',label='Current Eyepiece')
ax.legend()
plt.show()
    # The rest of these are internal wrappers for running calculations in
    # functions.py. They get called automatically when something
    # about the telescope/eyepiece changes.
def _compute_focal_ratio(self):
"""Compute the focal ratio of the telescope
Args:
None
Returns:
Updates the state of self.f_R
"""
self.f_R = focal_ratio(self.f_o,self.D_o)
self.f_R_true = focal_ratio(self.f_o_true,self.D_o)
def _compute_dawes_limit(self):
"""Compute the Dawes limit of the telescope
Args:
None
Returns:
Updates the state of self.Dawes_lim
"""
self.Dawes_lim = dawes_lim(self.D_o)
def _compute_resolving_power(self):
"""Compute the resolving power of the telescope vs wavelength
Args:
None
Returns:
Updates the state of self.resolving_power, and self.[color]_resolving_power
"""
self.P_R = resolving_power(wavelengths_list,self.D_o)
self.blue_P_R = resolving_power(blue,self.D_o)
self.green_P_R = resolving_power(green,self.D_o)
self.red_P_R = resolving_power(red,self.D_o)
def _compute_min_mag(self):
"""Compute the minimum magnification of the telescope
Args:
None
Returns:
Updates the state of self.M_min and self.M_min_by_age
"""
self.M_min = Min_magnification(self.D_o,self.user_D_eye)
self.M_min_by_age = np.zeros(len(age_list))
for i in range(len(age_list)):
self.M_min_by_age[i] = Min_magnification(self.D_o,age=age_list[i])
def _compute_max_mag(self):
"""Compute the maximum magnification of the telescope
Args:
None
Returns:
Updates the state of self.M_max
"""
self.M_max = Max_magnification(self.D_o)
def _compute_min_eye(self):
"""Compute the minimum eyepiece focal length compatible with the telescope
Args:
None
Returns:
Updates the state of self.f_e_min
"""
self.f_e_min = Min_eyepiece(self.f_o,self.M_max)
def _compute_max_eye(self):
"""Compute the maximum eyepiece focal length compatible with the telescope
Args:
None
Returns:
Updates the state of self.f_e_max and self.f_e_max_by_age
"""
self.f_e_max = Max_eyepiece(self.f_R,self.user_D_eye)
self.f_e_max_by_age = np.zeros(len(age_list))
for i in range(len(age_list)):
self.f_e_max_by_age[i] = Max_eyepiece(self.f_R,age=age_list[i])
def _compute_magnitude_limit(self):
"""Compute the magnitude limit of the telescope
Args:
None
Returns:
Updates the state of self.Lmag_limit
"""
self.Lmag_limit = Lmag_limit(self.D_o)
def _compute_magnification(self):
"""Compute the magnification for the current telescope-eyepiece combo
Args:
None
Returns:
Updates the state of self.M
"""
if self.current_eyepiece is None:
raise ValueError("No eyepiece selected, cannot compute magnification")
self.M = magnification(self.f_o,self.current_eyepiece.f_e)
def _compute_true_fov(self):
"""Compute the true field of view of the telescope/eyepiece combo
Args:
None
Returns:
Updates the state of self.fov
"""
if self.current_eyepiece is None:
raise ValueError("No eyepiece selected, cannot compute magnification")
self.fov = true_fov(self.M,self.current_eyepiece.fov_e)
def _compute_exit_pupil(self):
"""Compute the exit pupil of the telescope/eyepiece combo
Args:
None
Returns:
Updates the state of self.D_EP
"""
if self.current_eyepiece is None:
raise ValueError("No eyepiece selected, cannot compute magnification")
self.D_EP = exit_pupil(self.current_eyepiece.f_e,self.f_R)
def _compute_surface_brightness_sensitivity(self):
"""Compute the surface brightness limit of the telescope/eyepiece combo
Args:
None
Returns:
Updates the state of self.SB
"""
if self.current_eyepiece is None:
raise ValueError("No eyepiece selected, cannot compute magnification")
self.SB = surface_brightness(self.D_EP)
|
def interp(a, b, t, d=1) -> float:
    """Interpolate from a to b by eased fraction t**d (ease-in; linear at d=1)."""
    eased = t ** d
    return a + (b - a) * eased
def inv_interp(a, b, t, d=1) -> float:
    """Interpolate from a to b by eased fraction 1-(1-t)**d (ease-out)."""
    eased = 1 - (1 - t) ** d
    return a + (b - a) * eased
def lerp_angle(a, b, t) -> float:
    """Interpolate between angles a and b (degrees) along the shortest arc.

    The signed difference is normalized into (-180, 180] so the blend never
    takes the long way around the circle.
    """
    raw = (b - a) % 360
    shortest = raw - 360 if raw > 180 else raw
    return a + shortest * t
def delta_angle(a, b):
    """Return the signed shortest angular difference b - a in (-180, 180] degrees."""
    offset = (b - a) % 360
    if offset > 180:
        offset -= 360
    return offset
"""
Author: Will Hanstedt
Filename: TMmain.py
Project: Research for Irina Mazilu, Ph.D.
A file to run single iterations of temperature-dependent Cayley Tree simulations.
"""
import Cayley as cy
import Cayley.graphics as cg
def main():
    """Interactively build a Cayley tree, run a temperature-dependent
    Monte Carlo simulation on it, and export the results to an Excel file.

    Prompts for: tree size (generations/links), optional k and J constants,
    the output filename, and one temperature per generation.
    """
    generations = int(input("Number of generations: "))
    links = int(input("Number of links: "))
    network = cy.CayleyTree(generations,links)
    print("\n" + "The default values for k and J are 1.")
    custom = input("Do you want to customize these values? [Y/N]: ")
    if(custom.upper()=='Y'):
        k = float(input("Value for k: "))
        J = float(input("Value for J: "))
    else:
        k = J = 1
    monte = cy.MonteCarlo(network) # Edit later
    print("\n" + "Enter Excel file name \n"
          + "Example: monteCarloData")
    filename = str(input("Filename: "))
    full_filename = filename + ".xlsx"
    monte.randomSpins()
    # One temperature per generation; the root counts as generation 0.
    for i in range (generations+1):
        temp = float(input("Temperature of generation %s: " %(i)))
        network.addMultipleNodes(network.nodesPerGen(i),temperature=temp)
    # NOTE(review): presumably one simulation step per node in the network —
    # confirm against cy.MonteCarlo.simulateTemp's contract.
    for x in range(len(network)):
        monte.simulateTemp(k,J)
    monte.sendExcel(full_filename)
    # Draw the resulting tree.
    cayley = cg.CayleyGraphics(generations, links)
    cayley.drawCayley()
if __name__ == "__main__":
    main()
## Needs parameters, different temp settings, etc.
## de-parameterize simulate() methods?
|
# coding: utf-8
from abc import ABCMeta, abstractmethod
class Strategy(object, metaclass=ABCMeta):
    """Strategy is an abstract base class providing an interface for
    all subsequent (inherited) trading strategies.

    The goal of a (derived) Strategy object is to output a list of signals,
    which has the form of a time series indexed pandas DataFrame.

    In this instance only a single symbol/instrument is supported.
    """
    # BUG FIX: the class previously set `__metaclass__ = ABCMeta`, which is
    # Python 2 syntax and is silently ignored by Python 3, so @abstractmethod
    # was not enforced. Declaring the metaclass via the class keyword makes
    # Strategy a real ABC that cannot be instantiated directly.

    @abstractmethod
    def generate_signals(self):
        """An implementation is required to return the DataFrame of symbols
        containing the signals to go long, short or hold (1, -1 or 0)."""
        raise NotImplementedError("Should implement generate_signals()!")
class Portfolio(object, metaclass=ABCMeta):
    """An abstract base class representing a portfolio of
    positions (including both instruments and cash), determined
    on the basis of a set of signals provided by a Strategy."""
    # BUG FIX: the class previously set `__metaclass__ = ABCMeta`, which is
    # Python 2 syntax and is silently ignored by Python 3, so @abstractmethod
    # was not enforced. Declaring the metaclass via the class keyword makes
    # Portfolio a real ABC that cannot be instantiated directly.

    @abstractmethod
    def generate_positions(self):
        """Provides the logic to determine how the portfolio
        positions are allocated on the basis of forecasting
        signals and available cash."""
        raise NotImplementedError("Should implement generate_positions()!")

    @abstractmethod
    def backtest_portfolio(self):
        """Provides the logic to generate the trading orders
        and subsequent equity curve (i.e. growth of total equity),
        as a sum of holdings and cash, and the bar-period returns
        associated with this curve based on the 'positions' DataFrame.

        Produces a portfolio object that can be examined by
        other classes/functions."""
        raise NotImplementedError("Should implement backtest_portfolio()!")
|
import numpy as np
class Variable:
    """Container pairing a value with its (eventually computed) gradient."""

    def __init__(self, data):
        self.data = data   # wrapped payload (e.g. an np.ndarray)
        self.grad = None   # gradient slot, populated during backprop
class Function:
    """Callable base class for ops on Variables.

    Subclasses implement forward() (and backward()); __call__ unwraps the
    input Variable, applies forward, re-wraps the result, and caches the
    input so backward() can use it later.
    """

    def __call__(self, input):
        result = self.forward(input.data)
        output = Variable(result)
        self.input = input  # cached for backward()
        return output

    def forward(self, x):
        raise NotImplementedError()

    def backward(self, gy):
        raise NotImplementedError()
class Square(Function):
    """Elementwise square: y = x**2 with derivative dy/dx = 2x."""

    def forward(self, x):
        return x ** 2

    def backward(self, gy):
        # Chain rule: gx = dy/dx * gy = 2x * gy, using the cached input
        return 2 * self.input.data * gy
# Demo: calling A(x) caches x on A, so A.backward(gy) can use it.
A = Square()
x = Variable(np.array(2.0))
a = A(x)
print(A.backward(500))  # 2 * 2.0 * 500 = 2000.0
B=Square()
B.input=Variable(np.array(2.0))  # manually seed the cached input
print(B.backward(1))  # 2 * 2.0 * 1 = 4.0
#print(Square().backward(1))
#AttributeError
|
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for HTMLReader
"""
import os
import shutil
import tempfile
import unittest
from pathlib import Path
from ddt import ddt, data
from forte.pipeline import Pipeline
from forte.data.span import Span
from forte.data.data_pack import DataPack
from forte.data.data_utils import maybe_download
from forte.data.readers import HTMLReader
@ddt
class HTMLReaderPipelineTest(unittest.TestCase):
    """End-to-end tests for HTMLReader run through a Pipeline.

    Covers text extraction, recovery of the original HTML text and spans
    (replace-back), cache round-trips, and reading from directories,
    file paths and lists of strings.
    """

    def setUp(self):
        # pl1 parses raw HTML and appends the resulting packs to an
        # on-disk cache; pl2 re-reads those same packs from the cache.
        self._cache_directory = Path(os.path.join(os.getcwd(), "cache_html"))
        self.reader = HTMLReader(cache_directory=self._cache_directory,
                                 append_to_cache=True)
        self.pl1 = Pipeline()
        self.pl1.set_reader(self.reader)
        self.pl1.initialize()
        self.pl2 = Pipeline()
        self.pl2.set_reader(HTMLReader(from_cache=True,
                                       cache_directory=self._cache_directory))
        self.pl2.initialize()

    def tearDown(self):
        # Drop the on-disk cache created during the test.
        shutil.rmtree(self._cache_directory)

    # Each tuple is (html_input, expected_extracted_text).
    @data(
        ("<title>The Original Title </title>",
         "The Original Title "),
        ("<!DOCTYPE html><html><title>Page Title</title><body><p>This is a "
         "paragraph</p></body></html>",
         "Page TitleThis is a paragraph"),
        ('''<!DOCTYPE html>
        <html>
        <head>
        <title>Page Title</title>
        </head>
        <body>
        <h1>This is a Heading</h1>
        <p>This is a paragraph.</p>
        </body>
        </html>
        ''', '''
        \n \n Page Title\n \n \n This is a Heading
        This is a paragraph.\n \n \n '''),
        ('''<!DOCTYPE html>
        <h1 id="section1" class="bar">Section 1</h1>
        <p class="foo">foo bar\nbaz blah </p>
        <!-- cool beans! -->
        <hr/>
        <br>
        <p><em>The <strong>End!</strong></em></p>
        <p><em>error</p></em>weird < q <abc@example.com>
        ''', '''
        Section 1
        foo bar\nbaz blah \n \n \n \n The End!
        errorweird < q \n '''),
        ('''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
        <html<head>
        <title//
        <p ltr<span id=p>Text</span</p>
        </>''',
         '''\n \n Text
        ''')
    )
    def test_reader(self, value):
        # Also writes to cache so that we can read from cache directory
        # during caching test
        html_input, expected_output = value
        for pack in self.pl1.process_dataset(html_input):
            self.assertEqual(expected_output, pack.text)

    # Malformed inputs included on purpose: replace-back must reproduce
    # the raw input byte-for-byte regardless of HTML validity.
    @data(
        ('<title>The Original Title </title>'),
        ('''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
        <html<head>
        <title//
        <p ltr<span id=p>Text</span</p>
        </>'''),
        ('''<!DOCTYPE html>
        <h1 id="section1" class="bar">Section 1</h1>
        <p class="foo">foo bar\nbaz blah </p>
        <!-- cool beans! -->
        <hr/>
        <br>
        <p><em>The <strong>End!</strong></em></p>
        <p><em>error</p></em>weird < q <abc@example.com>
        ''')
    )
    def test_reader_replace_back(self, value):
        input = value
        for pack in self.pl1.process_dataset(input):
            original_text = pack.get_original_text()
            self.assertEqual(original_text, input)

    # Each tuple is (new_span, expected_original_span, html_input, mode).
    @data(
        (Span(0, 3), Span(7, 10), '<title>The Original Title </title>',
         "strict"),
        (Span(18, 22), Span(101, 105), '''<!DOCTYPE html PUBLIC "-//W34.01//EN">
        <html<head>
        <title//
        <p ltr<span id=p>Text</span</p>
        </>''', "relaxed"),
        # # cases ending to or starting from between the span
        (Span(15, 30), Span(60, 95), '''<!DOCTYPE html>
        <h1 id="section1" class="bar">Section 1</h1>
        <p class="foo">foo bar\nbaz blah </p>
        <!-- cool beans! -->
        <hr/>
        <br>
        <p><em>The <strong>End!</strong></em></p>
        <p><em>error</p></em>weird < q <abc@example.com>''',
         "forward"),
        # before span starts
        (Span(0, 3), Span(0, 3), 'Some text<title>The Original Title </title>',
         "relaxed"),
        (Span(0, 3), Span(0, 3), 'Some text<title>The Original Title </title>',
         "strict"),
        # after span ends # There's an issue with this #TODO (assign) mansi
        # returns a span of (43, 35) which is wrong.
        # (Span(28, 28), Span(43, 43),
        # 'Some text<title>The Original Title </title>T',
        # "strict"),
        # same begin and end
        (Span(14, 14), Span(21, 21),
         'Some text<title>The Original Title </title>',
         "strict"),
        (Span(14, 14), Span(21, 21),
         'Some text<title>The Original Title </title>',
         "relaxed"),
        (Span(14, 14), Span(21, 21),
         'Some text<title>The Original Title </title>',
         "backward"),
        (Span(14, 14), Span(21, 21),
         'Some text<title>The Original Title </title>',
         "forward")
    )
    def test_reader_original_span(self, value):
        new_span, expected_orig_span, html_input, mode = value
        for pack in self.pl1.process_dataset(html_input):
            # Retrieve original text
            original_text = pack.get_original_text()
            self.assertEqual(original_text, html_input)
            # Retrieve original span
            original_span = pack.get_original_span(new_span, mode)
            self.assertEqual(expected_orig_span, original_span)

    @data(
        ["<title>The Original Title </title>",
         "<!DOCTYPE html><html><title>Page Title</title><body><p>This is a "
         "paragraph</p></body></html>"],
        ["<html>Test1</html>", "<html>Test12</html>", "<html>Test3</html>"]
    )
    def test_reader_caching(self, value):
        # First pass (pl1) populates the cache: one cached file per pack.
        count_orig = 0
        content = []
        for pack in self.pl1.process_dataset(value):
            content.append(pack.text)
            count_orig = count_orig + 1
        num_files = len(os.listdir(self._cache_directory))
        self.assertEqual(num_files, count_orig)
        # Test Caching: pl2 must reproduce the same packs from the cache.
        count_cached = 0
        content_cached = []
        for pack in self.pl2.process_dataset(value):
            content_cached.append(pack.text)
            count_cached = count_cached + 1
        self.assertEqual(count_cached, count_orig)
        self.assertEqual(content_cached, content)

    def test_reader_with_dir(self):
        # Reading a directory of downloaded .html files yields DataPacks.
        tmp_dir = tempfile.TemporaryDirectory()
        maybe_download('https://en.wikipedia.org/wiki/Machine_learning',
                       tmp_dir.name, 'test_wikipedia.html')
        maybe_download('https://www.yahoo.com/',
                       tmp_dir.name, 'test_yahoo.html')
        for pack in self.pl1.process_dataset(tmp_dir.name):
            self.assertIsInstance(pack, DataPack)
        tmp_dir.cleanup()

    def test_reader_with_filepath(self):
        # Reading a single .html file path yields a DataPack.
        tmp_dir = tempfile.TemporaryDirectory()
        filepath = maybe_download('https://www.yahoo.com/',
                                  tmp_dir.name, 'test_yahoo.html')
        for pack in self.pl1.process_dataset(filepath):
            self.assertIsInstance(pack, DataPack)
        tmp_dir.cleanup()

    @data(
        ["<title>The Original Title </title>",
         "<!DOCTYPE html><html><title>Page Title</title><body><p>This is a "
         "paragraph</p></body></html>"],
        ["<html>Test1</html>", "<html>Test12</html>", "<html>Test3</html>"]
    )
    def test_reader_with_list(self, value):
        # A list of HTML strings yields one pack per string.
        count_orig = 0
        for _ in self.pl1.process_dataset(value):
            count_orig = count_orig + 1
        self.assertEqual(count_orig, len(value))

if __name__ == '__main__':
    unittest.main()
|
# Generated by Django 3.2.5 on 2021-08-03 03:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2.5): adds 'diamonds' to achievementcode
    and 'eligible' to psetsubmission."""

    dependencies = [
        ('dashboard', '0024_achievementcode_level'),
    ]

    operations = [
        migrations.AddField(
            model_name='achievementcode',
            name='diamonds',
            field=models.PositiveSmallIntegerField(default=1, help_text='Amount of diamonds for this achievement'),
        ),
        migrations.AddField(
            model_name='psetsubmission',
            name='eligible',
            field=models.BooleanField(default=True, help_text='Whether to count this for leveling up'),
        ),
    ]
|
import unittest
# Importing load_tests exposes the load_tests protocol hook, so
# unittest.main() collects the test.test_json suite from this module.
from test.test_json import load_tests
unittest.main()
|
from flask_wtf import FlaskForm, Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField, DateField, SelectField, SelectMultipleField, \
TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from app.models import User,Artist, Venue, Event, ArtistToEvent
class ArtistForm(FlaskForm):
    """Form for creating an artist: name, hometown and free-text description."""
    artistName = StringField('ArtistName', validators=[DataRequired()])
    HomeTown = StringField('Hometown', validators=[DataRequired()])
    description = TextAreaField('Description', validators=[DataRequired()])
    submit = SubmitField('CREATE NEW ARTIST')
class LoginForm(FlaskForm):
    """Sign-in form: username/password plus a remember-me flag."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """Sign-up form with uniqueness checks against the User table."""
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')

    def validate_username(self, username):
        """Reject usernames that already exist (WTForms inline validator)."""
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please use a different username.')

    def validate_email(self, email):
        """Reject email addresses that already exist (WTForms inline validator)."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Please use a different email address.')
class addNewVenue(FlaskForm):
    """Form for creating a venue (name and location, both required)."""
    venue_name = StringField('Venue Name', validators=[DataRequired()])
    location = StringField('Location Name', validators=[DataRequired()])
    submit = SubmitField('Add New Venue')

    # BUG FIX: WTForms only auto-invokes inline validators named
    # `validate_<fieldname>`. The field is `venue_name`, so the previous
    # `validate_venueName` spelling was never called during validation.
    def validate_venue_name(self, venue_name):
        """Reject venue names that already exist."""
        venue = Venue.query.filter_by(venue_name=venue_name.data).first()
        if venue is not None:
            raise ValidationError('Venue already exists')
class addNewEvent(FlaskForm):
    """Form for creating an event: name, date, artists and venue."""
    event_name = StringField('Event Name', validators=[DataRequired()])
    datetime = DateField('Event Date', format='%Y-%m-%d', validators=[DataRequired()])
    artist = SelectMultipleField("Artists", coerce=int, choices=[])
    venue_name = SelectField('Venue', coerce=int, choices=[])
    submit = SubmitField('Add New Event')

    def validate_datetime(self, datetime):
        """Reject a date on which an event already exists (inline validator)."""
        # Don't rebind the `datetime` field argument; use a separate local.
        existing = Event.query.filter_by(datetime=datetime.data).first()
        if existing is not None:
            # BUG FIX: the old message ('No eventDate name entered') did not
            # describe the actual failure, which is a duplicate event date.
            raise ValidationError('An event already exists on this date')
# class EditProfileForm(FlaskForm):
# username = StringField('Username', validators=[DataRequired()])
# about_me = TextAreaField('About me', validators=[Length(min=0, max=140)])
# submit = SubmitField('Submit')
#
# def __init__(self, original_username, *args, **kwargs):
# super(EditProfileForm, self).__init__(*args, **kwargs)
# self.original_username = original_username
#
# def validate_username(self, username):
# if username.data != self.original_username:
# user = User.query.filter_by(username=self.username.data).first()
# if user is not None:
# raise ValidationError('Please use a different username.')
|
"""
File: best_photoshop_award.py
----------------------------------
This file creates a photoshopped image
that is going to compete for the Best
Photoshop Award for SC001.
Please put all the images you will use in the image_contest folder
and make sure to choose the right folder when loading your images.
"""
from simpleimage import SimpleImage
THRESHOLD = 1.3
BLACK_PIXEL = 160
def main():
    """
    This function conducts green screen replacement
    which is able to photoshop a person onto any background

    Idea: My dream is to live in LA and enjoy the culture. I love
    Hollywood movies so I will absolutely visit the 'Hollywood
    sign' in the future. This combined image is my imagination of
    the future.
    """
    img = SimpleImage('image_contest/James.jpg')
    background = SimpleImage('image_contest/Holly_wood.jpg')
    # Resize the background to the figure's dimensions so pixels align 1:1
    background.make_as_big_as(img)
    # img.show()
    # background.show()
    combined_img = combine(img, background)
    combined_img.show()
def combine(fg, bg):
    """
    :param fg: SimpleImage, green screen figure image.
    :param bg: SimpleImage, the background image.
    :return: SimpleImage, the green screen pixels are replaced by pixels of background image
    """
    for row in range(bg.height):
        for col in range(bg.width):
            pixel = fg.get_pixel(col, row)
            total = pixel.red + pixel.blue + pixel.green
            avg = total // 3
            # Treat the pixel as green-screen when green dominates the
            # channel average and the pixel is not near-black.
            if pixel.green > avg * THRESHOLD and total > BLACK_PIXEL:
                replacement = bg.get_pixel(col, row)
                pixel.red = replacement.red
                pixel.blue = replacement.blue
                pixel.green = replacement.green
    return fg


if __name__ == '__main__':
    main()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import logging
from msrest.service_client import ServiceClient
from msrest import Configuration, Deserializer
from msrest.exceptions import HttpOperationError
from ..user.user_manager import UserManager
from ..base.base_manager import BaseManager
from . import models
class PoolManager(BaseManager):
    """ Manage DevOps Pools

    Attributes:
        See BaseManager
    """

    def __init__(self, base_url='https://{}.visualstudio.com', creds=None, organization_name="", project_name=""):
        """Inits PoolManager"""
        super(PoolManager, self).__init__(creds, organization_name=organization_name, project_name=project_name)
        # Bake the organization into the service base URL
        base_url = base_url.format(organization_name)
        self._config = Configuration(base_url=base_url)
        self._client = ServiceClient(creds, self._config)
        # Deserializer needs the model classes exported by .models
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._deserialize = Deserializer(client_models)
        self._user_mgr = UserManager(creds=self._creds)

    def list_pools(self):
        """List what pools this project has"""
        project = self._get_project_by_name(self._project_name)
        # Construct URL
        # NOTE(review): actionFilter=16 is undocumented here — see the Azure
        # DevOps distributedtask/queues REST reference for its meaning.
        url = "/" + project.id + "/_apis/distributedtask/queues?actionFilter=16"
        #construct header parameters
        header_paramters = {}  # NOTE(review): local name is a typo of 'header_parameters'
        if self._user_mgr.is_msa_account():
            # Presumably required for MSA (personal) accounts — confirm
            # against UserManager / VSTS auth docs.
            header_paramters['X-VSS-ForceMsaPassThrough'] = 'true'
        header_paramters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, headers=header_paramters)
        response = self._client.send(request)
        # Handle Response: any non-2xx status is logged and raised
        deserialized = None
        if response.status_code // 100 != 2:
            logging.error("GET %s", request.url)
            logging.error("response: %s", response.status_code)
            logging.error(response.text)
            raise HttpOperationError(self._deserialize, response)
        else:
            deserialized = self._deserialize('Pools', response)
        return deserialized

    def close_connection(self):
        # Release the underlying HTTP session
        self._client.close()
|
# Minimal Django settings module used for testing drf_recaptcha.
SECRET_KEY = "TEST_SECRET_KEY"  # test-only value, not a real secret
INSTALLED_APPS = [
    "drf_recaptcha",
]
# Throwaway on-disk SQLite database for the test run
DATABASES = {
    "default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "test.sqlite3"}
}
DRF_RECAPTCHA_SECRET_KEY = "TEST_DRF_RECAPTCHA_SECRET_KEY"  # test-only value
|
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def get_special_paths(dir):
    """Return absolute paths of the 'special' files (names containing
    a __word__ pattern) found directly inside dir."""
    special = re.compile(r'__\w+__')
    return [os.path.abspath(os.path.join(dir, name))
            for name in os.listdir(dir) if special.search(name)]
def copy_to(paths, dir):
    """Copy each file in paths into dir, creating dir first if needed."""
    if not os.path.exists(dir):
        os.makedirs(dir)
    for source in paths:
        shutil.copy(source, dir)
def zip_to(paths, zippath):
    """Zip the given files (flattened with -j) into the archive at zippath.

    Exits the whole process with the command's status code on failure.
    NOTE(review): the command line is built by joining paths with spaces and
    run through a shell; paths containing spaces or shell metacharacters
    will break or be interpreted by the shell.
    """
    pathsString = ' '.join(paths)
    cmd = 'zip -j %s %s' % (zippath, pathsString)
    print "About to execute command %s" % cmd
    (status, output) = commands.getstatusoutput(cmd)
    if status:
        print 'Failed to execute command because %s' % (output)
        sys.exit(status)
    print "Zipped all the files in %s" % zippath
def main():
    """Command-line entry point.

    usage: [--todir dir][--tozip zipfile] dir [dir ...]
    With --todir/--tozip the special files of one source dir are copied or
    zipped; with neither flag, each dir's special paths are printed.
    """
    # The basic command line argument parsing code is provided.
    # Add code to call your functions below.

    # Make a list of command line arguments, omitting the [0] element
    # which is the script itself.
    args = sys.argv[1:]
    if not args:
        print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
        sys.exit(1)

    # todir and tozip are either set from command line
    # or left as the empty string.
    # The args array is left just containing the dirs.
    todir = ''
    if args[0] == '--todir':
        todir = args[1]
        fromdir = args[2]
        paths = get_special_paths(fromdir)
        copy_to(paths, todir)
        # NOTE(review): dead code — the return below makes this del
        # pointless, and only args[2] (a single dir) is ever processed.
        del args[0:2]
        return

    tozip = ''
    if args[0] == '--tozip':
        tozip = args[1]
        fromdir = args[2]
        paths = get_special_paths(fromdir)
        zip_to(paths, tozip)
        del args[0:2]  # NOTE(review): dead code, see above
        return

    if len(args) == 0:
        print "error: must specify one or more dirs"
        sys.exit(1)
    # Default mode: print the special paths found in every dir given.
    for dirName in args:
        filenames = get_special_paths(dirName)
        for filename in filenames:
            print filename
    # +++your code here+++
    # Call your functions
|
import bottle
# WSGI entry point: Apache/mod_wsgi looks for a module-level 'application'.
application = bottle.default_app()

@bottle.route('/')
def home():
    """Handle GET / with a plain-text greeting."""
    return "apache and wsgi, sitting in a tree"
|
import streamlit as st
import pandas as pd
import sys
sys.path.insert(0, '../scripts')
from results_pickler import ResultPickler
def app():
    """Streamlit page: renders the saved user-satisfaction analysis results.

    All figures come from a pre-computed pickle; nothing is recomputed here.
    """
    # Load Saved Results Data
    results = ResultPickler()
    results.load_data(file_name='./data/satisfaction_results.pickle')
    st.title("User Satisfaction analysis")
    st.header("Engagement Score")
    st.subheader("Sample Users")
    # .sample(10) shows a random 10-row preview on every rerun.
    st.dataframe(results.data['engagement_score'].sample(10))
    st.header("Experience Score")
    st.subheader("Sample Users")
    st.dataframe(results.data['experience_score'].sample(10))
    st.header("Top 10 Satisfied Customers")
    st.subheader(
        "satisfaction score = average of both engagement & experience scores")
    st.dataframe(results.data['top_10_satisfied_customers'])
    st.header("Trained Model")
    st.subheader("Details")
    data = results.data['model_desc']
    # model_desc is a mapping; emit one "key: value" text line per entry.
    for key in data:
        text = f'{key}: {data[key]}'
        st.text(text)
    st.header("User Clustering based on Engagement and Experience Score")
    st.image('./data/satisfaction_cluster.png')
    st.header('Aggregate Metrics of all Users')
    # NOTE(review): 'statisfaction' is misspelled but is the actual pickle
    # key -- do not "fix" it here without also changing the writer.
    st.dataframe(results.data['statisfaction_agg_cluster'])
|
# -*- encoding:utf8 -*-
from collections import OrderedDict
from collections import namedtuple
from math import log
from soynlp.tokenizer import MaxScoreTokenizer
from ._dictionary import Dictionary
# Default linear weights used by LREvaluator: feature name -> coefficient.
# Insertion order matters: it defines the field order of ScoreTable below.
default_profile= OrderedDict([
    ('cohesion_l', 0.5),
    ('droprate_l', 0.5),
    ('log_count_l', 0.1),
    ('prob_l2r', 0.1),
    ('log_count_l2r', 0.1),
    ('known_LR', 1.0),
    ('R_is_syllable', -0.1),
    ('log_length', 0.5)
])
# Per-candidate feature vector; fields mirror default_profile's keys.
ScoreTable = namedtuple('ScoreTable', list(default_profile))
# Raw candidate record handed from LRMaxScoreTagger to the evaluator.
Table = namedtuple('Table', 'L R begin end length, lr_prop lr_count cohesion_l droprate_l lcount')
class LREvaluator:
    """Scores (L, R) word candidates as a weighted sum of their features."""

    def __init__(self, profile=None):
        # Fall back to the module-level default weight profile.
        self.profile = profile or default_profile

    def evaluate(self, candidates, preference=None):
        """Return [(candidate, score), ...] sorted by descending score."""
        scored = []
        for cand in candidates:
            table = self.make_scoretable(
                cand.L[0], cand.L[1], cand.R[0], cand.R[1],
                cand.cohesion_l, cand.droprate_l, cand.lcount,
                cand.lr_prop, cand.lr_count, cand.length)
            total = self._evaluate(table)
            if preference:
                total += self._preference_bonus(cand, preference)
            scored.append((cand, total))
        # Stable descending sort: ties keep candidate order.
        scored.sort(key=lambda pair: pair[-1], reverse=True)
        return scored

    @staticmethod
    def _preference_bonus(cand, preference):
        # Extra user-supplied weight for preferred (word, tag) pairs.
        bonus = 0
        for word, tag in (cand.L, cand.R):
            if tag and tag in preference:
                bonus += preference[tag].get(word, 0)
        return bonus

    def make_scoretable(self, l, pos_l, r, pos_r, cohesion, droprate, lcount, lr_prop, lr_count, len_LR):
        """Build the per-candidate feature vector (field order = ScoreTable)."""
        return ScoreTable(
            cohesion,
            droprate,
            log(lcount + 1),
            lr_prop,
            log(lr_count + 1),
            1 if (pos_l and pos_r) else 0,   # both parts known to the dictionary
            1 if len(r) == 1 else 0,         # single-syllable R penalty feature
            log(len_LR),
        )

    def _evaluate(self, scoretable):
        """Dot-product of the feature vector with the weight profile."""
        total = 0
        for name, value in scoretable._asdict().items():
            total += value * self.profile.get(name, 0)
        return total
class LRMaxScoreTagger:
    """Dictionary + corpus-statistics L-R tagger.

    Each whitespace token (eojeol) is split into an L part and an R part;
    candidate splits are scored by ``self.evaluator`` and the best
    non-overlapping set is kept.  Statistics (cohesion, droprate, L-R
    transition counts) come from ``lrgraph``, built from ``sents`` when not
    supplied directly.
    """

    def __init__(self, domain_dictionary_folders=None, use_base_dictionary=True,
                 dictionary_word_mincount=3,
                 evaluator=None, sents=None, lrgraph=None,
                 lrgraph_lmax=12, lrgraph_rmax=8,
                 base_tokenizer=None, preference=None, verbose=False
                ):
        self.dictionary = Dictionary(domain_dictionary_folders, use_base_dictionary, dictionary_word_mincount, verbose=verbose)
        self.evaluator = evaluator if evaluator else LREvaluator()
        self.preference = preference if preference else {}
        self.lrgraph = lrgraph if lrgraph else {}
        if (not self.lrgraph) and (sents):
            # BUG FIX: _build_lrgraph is a method; the original called it as
            # a free function, which raised NameError at construction time.
            self.lrgraph = self._build_lrgraph(sents, lrgraph_lmax, lrgraph_rmax)
        self.lrgraph_norm, self.lcount, self.cohesion_l, self.droprate_l\
            = self._initialize_scores(self.lrgraph)
        self.base_tokenizer = base_tokenizer if base_tokenizer else lambda x:x.split()
        if not base_tokenizer:
            try:
                self.base_tokenizer = MaxScoreTokenizer(scores=self.cohesion_l)
            except Exception as e:
                # NOTE(review): on failure base_tokenizer stays the split
                # lambda, which has no .tokenize() and would break
                # _base_tokenizing_subword later -- confirm upstream.
                print('MaxScoreTokenizer(cohesion) exception: {}'.format(e))

    def _build_lrgraph(self, sents, lmax=12, rmax=8):
        """Count every (L, R) split of every eojeol: lrgraph[l][r] = count."""
        from collections import Counter
        from collections import defaultdict
        eojeols = Counter((eojeol for sent in sents for eojeol in sent.split() if eojeol))
        lrgraph = defaultdict(lambda: defaultdict(int))
        for eojeol, count in eojeols.items():
            n = len(eojeol)
            # L takes at least one character; R may be empty.
            for i in range(1, min(n, lmax)+1):
                (l, r) = (eojeol[:i], eojeol[i:])
                if len(r) > rmax:
                    continue
                lrgraph[l][r] += count
        return lrgraph

    def _initialize_scores(self, lrgraph):
        """Derive (normalized graph, L counts, cohesion, droprate) from lrgraph."""
        def to_counter(dd):
            # Total frequency of each L over all its R continuations.
            return {k:sum(d.values()) for k,d in dd.items()}
        def to_normalized_graph(dd):
            # Per-L probability distribution over R continuations.
            normed = {}
            for k,d in dd.items():
                sum_ = sum(d.values())
                normed[k] = {k1:c/sum_ for k1,c in d.items()}
            return normed
        lrgraph_norm = to_normalized_graph(lrgraph)
        lcount = to_counter(lrgraph)
        # cohesion(w) = (count(w) / count(first char)) ^ (1/(len-1))
        cohesion_l = {w:pow(c/lcount[w[0]], 1/(len(w)-1)) for w, c in lcount.items() if len(w) > 1}
        # droprate(w) = count(w) / count(w minus its last char)
        droprate_l = {w:c/lcount[w[:-1]] for w, c in lcount.items() if len(w) > 1 and w[:-1] in lcount}
        return lrgraph_norm, lcount, cohesion_l, droprate_l

    def pos(self, sent, flatten=True, debug=False):
        """Tag a sentence; returns a flat word list unless flatten=False."""
        sent_ = [self._pos(eojeol, debug) for eojeol in sent.split() if eojeol]
        if flatten:
            sent_ = [word for words in sent_ for word in words]
        return sent_

    def _pos(self, eojeol, debug=False):
        """Tag a single eojeol: generate, score, select, then fill gaps."""
        candidates = self._initialize(eojeol)
        scores = self._scoring(candidates)
        best = self._find_best(scores)
        if best:
            post = self._postprocessing(eojeol, best)
        else:
            # Nothing matched the dictionary: fall back to the base tokenizer.
            post = self._base_tokenizing_subword(eojeol, 0)
        if not debug:
            # Keep only non-empty (word, tag) pairs.
            post = [w for lr in post for w in lr[:2] if w[0]]
        return post

    def _initialize(self, t):
        """Build all (L, R) candidates for token t."""
        candidates = self._initialize_L(t)
        candidates = self._initialize_LR(t, candidates)
        return candidates

    def _initialize_L(self, t):
        """Collect dictionary-known L substrings (length >= 2) of t."""
        n = len(t)
        candidates = []
        for b in range(n):
            for e in range(b+2, min(n, b+self.dictionary._lmax)+1):
                l = t[b:e]
                l_pos = self.dictionary.pos_L(l)
                if not l_pos:
                    continue
                candidates.append([l,     # 0: L string
                                   l_pos, # 1: L part-of-speech
                                   b,     # 2: begin index
                                   e,     # 3: end index
                                   e-b,   # 4: length
                                  ])
        candidates = self._remove_l_subsets(candidates)
        return sorted(candidates, key=lambda x:x[2])

    def _remove_l_subsets(self, candidates):
        """Within each POS, drop L candidates fully contained in a longer one."""
        candidates_ = []
        for pos in ['Noun', 'Verb', 'Adjective', 'Adverb', 'Exclamation']:
            # Sort by len_L (longest first) so subsets are removed greedily.
            sorted_ = sorted(filter(lambda x:x[1] == pos, candidates), key=lambda x:-x[4])
            while sorted_:
                candidates_.append(sorted_.pop(0))
                (b, e) = (candidates_[-1][2], candidates_[-1][3])
                # removals = [i for i, c in enumerate(sorted_) if b < c[3] and e > c[2]] # Overlap
                removals = [i for i, c in enumerate(sorted_) if b <= c[2] and e >= c[3]] # Subset (Contain)
                for idx in reversed(removals):
                    del sorted_[idx]
        return candidates_

    def _initialize_LR(self, t, candidates, threshold_prop=0.001, threshold_count=2):
        """Extend each L candidate with every statistically plausible R."""
        n = len(t)
        expanded = []
        for (l, pos, b, e, len_l) in candidates:
            for len_r in range(min(self.dictionary._rmax, n-e)+1):
                r = t[e:e+len_r]
                lr_prop = self.lrgraph_norm.get(l, {}).get(r, 0)
                lr_count = self.lrgraph.get(l, {}).get(r, 0)
                # Empty R is always kept; non-empty R must clear both thresholds.
                if (r) and ((lr_prop <= threshold_prop) or (lr_count <= threshold_count)):
                    continue
                expanded.append([(l, pos),
                                 (r, None if not r else self.dictionary.pos_R(r)),
                                 b,          # begin of L
                                 e,          # end of L / begin of R
                                 e + len_r,  # end of R
                                 len_r,
                                 len_l + len_r,
                                 lr_prop,
                                 lr_count
                                ])
        expanded = self._remove_r_subsets(expanded)
        return sorted(expanded, key=lambda x:x[2])

    def _remove_r_subsets(self, expanded):
        """Within each R POS, drop candidates fully contained in a longer R."""
        expanded_ = []
        for pos in ['Josa', 'Verb', 'Adjective', None]:
            # Sort by len_R (longest first).
            sorted_ = sorted(filter(lambda x:x[1][1] == pos, expanded), key=lambda x:-x[5])
            while sorted_:
                expanded_.append(sorted_.pop(0))
                (b, e) = (expanded_[-1][3], expanded_[-1][4])
                # removals = [i for i, c in enumerate(sorted_) if b < c[3] and e > c[2]] # Overlap
                removals = [i for i, c in enumerate(sorted_) if b <= c[3] and e >= c[4]] # Subset (Contain)
                for idx in reversed(removals):
                    del sorted_[idx]
        # Collapse to the 7-field layout consumed by _to_table.
        expanded_ = [[L, R, p0, p2, len_LR, prop, count] for L, R, p0, p1, p2, len_R, len_LR, prop, count in expanded_]
        return expanded_

    def _scoring(self, candidates):
        """Convert candidates to Tables and score them with the evaluator."""
        candidates = [self._to_table(c) for c in candidates]
        scores = self.evaluator.evaluate(candidates, self.preference if self.preference else None)
        return scores

    def _to_table(self, c):
        """Wrap a raw candidate list into a Table with its L statistics."""
        return Table(c[0], c[1], c[2], c[3], c[4], c[5], c[6],
                     self.cohesion_l.get(c[0][0], 0),
                     self.droprate_l.get(c[0][0], 0),
                     self.lcount.get(c[0][0], 0)
                    )

    def _find_best(self, scores):
        """Greedy selection: take highest-scored, drop overlapping, repeat."""
        best = []
        sorted_ = sorted(scores, key=lambda x:-x[-1])
        while sorted_:
            best.append(sorted_.pop(0)[0])
            (b, e) = (best[-1][2], best[-1][3])
            removals = [i for i, (c, _) in enumerate(sorted_) if b < c[3] and e > c[2]] # Overlap
            for idx in reversed(removals):
                del sorted_[idx]
        return sorted(best, key=lambda x:x[2])

    def _postprocessing(self, t, words):
        """Fill uncovered spans of t (head, gaps, tail) via the base tokenizer."""
        n = len(t)
        adds = []
        if words and words[0][2] > 0:
            adds += self._add_first_subword(t, words)
        if words and words[-1][3] < n:
            adds += self._add_last_subword(t, words, n)
        adds += self._add_inter_subwords(t, words)
        post = [w for w in words] + [self._to_table(a) for a in adds]
        return sorted(post, key=lambda x:x[2])

    def _infer_subword_information(self, subword):
        """Best-effort (pos, prop, count) for a subword not chosen as L-R."""
        pos = self.dictionary.pos_L(subword)
        prop = self.lrgraph_norm.get(subword, {}).get('', 0.0)
        count = self.lrgraph.get(subword, {}).get('', 0)
        if not pos:
            pos = self.dictionary.pos_R(subword)
        return (pos, prop, count)

    def _add_inter_subwords(self, t, words):
        """Tokenize each gap between two consecutive selected words."""
        adds = []
        for i, base in enumerate(words[:-1]):
            if base[3] == words[i+1][2]:
                continue
            b = base[3]
            e = words[i+1][2]
            subword = t[b:e]
            adds += self._base_tokenizing_subword(subword, b)
        return adds

    def _add_last_subword(self, t, words, n):
        """Tokenize the uncovered tail of t."""
        b = words[-1][3]
        subword = t[b:]
        return self._base_tokenizing_subword(subword, b)

    def _add_first_subword(self, t, words):
        """Tokenize the uncovered head of t."""
        e = words[0][2]
        subword = t[0:e]
        return self._base_tokenizing_subword(subword, 0)

    def _base_tokenizing_subword(self, t, b):
        """Run the base tokenizer on t, offsetting positions by b."""
        subwords = []
        _subwords = self.base_tokenizer.tokenize(t, flatten=False)
        if not _subwords:
            return []
        for w in _subwords[0]:
            (pos, prop, count) = self._infer_subword_information(w[0])
            subwords.append([(w[0], pos), ('', None), b+w[1], b+w[2], w[2]-w[1], prop, count, 0.0])
        return subwords

    def add_words_into_dictionary(self, words, tag):
        """Add words under an existing tag of the dictionary."""
        if not (tag in self.dictionary._pos):
            raise ValueError('{} does not exist base dictionary'.format(tag))
        self.dictionary.add_words(words, tag)

    def remove_words_from_dictionary(self, words, tag):
        """Remove words from an existing tag of the dictionary."""
        if not (tag in self.dictionary._pos):
            raise ValueError('{} does not exist base dictionary'.format(tag))
        self.dictionary.remove_words(words, tag)

    def save_domain_dictionary(self, folder, head=None):
        """Persist only the domain (user-added) dictionary."""
        self.dictionary.save_domain_dictionary(folder, head)

    def set_word_preferance(self, words, tag, preference=10):
        """Boost the score of specific (word, tag) pairs during evaluation."""
        if type(words) == str:
            words = {words}
        preference_table = self.preference.get(tag, {})
        preference_table.update({word:preference for word in words})
        self.preference[tag] = preference_table

    def save_tagger(self, fname):
        """Persisting the full tagger state is not implemented yet."""
        # BUG FIX: `raise NotImplemented` raises TypeError (NotImplemented is
        # a constant, not an exception); use the proper exception class.
        raise NotImplementedError
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from otp.ai.MagicWordGlobal import *
# Most recently clicked avatar nametag; default target for the '~~' prefix.
# NOTE(review): nothing in this chunk assigns it, so it is presumably set by
# a nametag click handler elsewhere -- confirm.
lastClickedNametag = None
class MagicWordManager(DistributedObject.DistributedObject):
    """Client-side dispatcher for chat "magic words" (~ commands).

    Prefix tiers (checked longest first -- the order of the startswith
    checks below is load-bearing):
      ~~~~ word -> sendGlobalMagicWord [word, True, True]
      ~~~  word -> sendGlobalMagicWord [word, True, False]
      ~~   word -> targets the last clicked nametag (or the local avatar)
      ~    word -> always targets the local avatar
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('MagicWordManager')
    neverDisable = 1
    def generate(self):
        # Start listening for the chat system's 'magicWord' event.
        DistributedObject.DistributedObject.generate(self)
        self.accept('magicWord', self.handleMagicWord)
    def disable(self):
        self.ignore('magicWord')
        DistributedObject.DistributedObject.disable(self)
    def rejectWord(self, resp):
        # Server-rejected word: surface the reason to the player.
        self.sendMagicWordResponse(resp)
    def handleMagicWord(self, magicWord):
        # Refuse while the local avatar is mid-teleport.
        if base.localAvatar.getIsTeleporting():
            self.sendMagicWordResponse('You cannot use magic words while teleporting!')
            return
        if not self.cr.wantMagicWords:
            return
        # Global tiers: strip the prefix and hand off to the server.
        if magicWord.startswith('~~~~'):
            magicWord = magicWord[4:]
            self.sendUpdate('sendGlobalMagicWord', [magicWord, True, True])
            return
        if magicWord.startswith('~~~'):
            magicWord = magicWord[3:]
            self.sendUpdate('sendGlobalMagicWord', [magicWord, True, False])
            return
        # '~~': target whoever was last clicked, falling back to self.
        if magicWord.startswith('~~'):
            if lastClickedNametag == None:
                target = base.localAvatar
            else:
                target = lastClickedNametag
            magicWord = magicWord[2:]
        # '~': always the local avatar (also consumes the residual '~'
        # left after the '~~' strip above).
        if magicWord.startswith('~'):
            target = base.localAvatar
            magicWord = magicWord[1:]
        # Refuse targets that are currently teleporting.
        if hasattr(target, 'animFSM') and target.animFSM.getCurrentState().getName() in ('TeleportIn',
         'TeleportOut',
         'TeleportedOut'):
            self.sendMagicWordResponse('You cannot use magic words on people who are teleporting!')
            return
        targetId = target.doId
        if target == base.localAvatar:
            # Try to run the word locally first; only fall through to the
            # server when the local spellbook does not fully handle it.
            response = spellbook.process(base.localAvatar, target, magicWord)
            if response[1]:
                if response[0]:
                    self.sendMagicWordResponse(response[0])
                self.sendUpdate('sendMagicWord', [magicWord, targetId, False])
                return
            self.sendUpdate('sendMagicWord', [magicWord, targetId, True])
            return
    def sendMagicWordResponse(self, response):
        self.notify.info(response)
        base.localAvatar.setSystemMessage(0, 'Magic Mille: ' + str(response))
from django.views.generic import View
from django.shortcuts import render, get_object_or_404
from django.http import Http404
from django.core.exceptions import PermissionDenied
from accounts.views import LoginRequiredMixin
from home.models import Text, Photo, Audio, Video, File, Sticker, Link
from operator import attrgetter
from itertools import chain
def get_content_or_404(content_type, content_id):
    """Resolve (content_type, content_id) to a model instance or raise Http404."""
    model_map = {
        "text": Text,
        "photo": Photo,
        "audio": Audio,
        "video": Video,
        "file": File,
        "sticker": Sticker,
        "link": Link,
    }
    model = model_map.get(content_type)
    if model is None:
        raise Http404
    content = get_object_or_404(model, id=content_id)
    # Links are rendered from their Text counterpart (same telegram_id).
    if content.type == "link":
        return get_object_or_404(Text, telegram_id=content.telegram_id)
    return content
def is_content_accessible(content, user):
    """True when the user belongs to the group that owns the content."""
    memberships = user.memberships.all()
    return content.group in memberships
def get_content_older_than(group, timestamp, limit=10):
    """Newest-first window of up to limit+1 items at or before timestamp.

    The extra item lets callers detect whether more pages exist.
    """
    filters = {"group": group, "timestamp__lte": timestamp}
    models = (Text, Photo, Audio, Video, File, Sticker)
    items = chain.from_iterable(m.objects.filter(**filters) for m in models)
    feed = sorted(items, key=attrgetter("timestamp"), reverse=True)
    return feed[:limit + 1]
def get_content_newer_than(group, timestamp, limit=10):
    """Oldest-first window of up to limit+1 items at or after timestamp.

    The extra item lets callers detect whether more pages exist.
    """
    filters = {"group": group, "timestamp__gte": timestamp}
    models = (Text, Photo, Audio, Video, File, Sticker)
    items = chain.from_iterable(m.objects.filter(**filters) for m in models)
    feed = sorted(items, key=attrgetter("timestamp"))
    return feed[:limit + 1]
class ChatlogView(LoginRequiredMixin, View):
    """Renders the chat log centered on a single content item."""

    def get(self, request, content_type, content_id):
        content = get_content_or_404(content_type, content_id)
        if not is_content_accessible(content, request.user):
            raise PermissionDenied
        older = get_content_older_than(content.group, content.timestamp)
        newer = get_content_newer_than(content.group, content.timestamp)
        # The pivot item can appear in both windows; dedupe before sorting.
        items = set(chain(older, [content], newer))
        feed = sorted(items, key=attrgetter("timestamp"))
        context = {"content": content, "feed": feed}
        return render(request, "chatlog/chatlog.html", context)
class ChatlogPagerView(LoginRequiredMixin, View):
    """Returns one page of items older or newer than the given content."""

    def get(self, request, content_type, content_id, direction):
        content = get_content_or_404(content_type, content_id)
        if not is_content_accessible(content, request.user):
            raise PermissionDenied
        fetch = get_content_older_than if direction == "older" else get_content_newer_than
        window = fetch(content.group, content.timestamp)
        # Drop the pivot itself; it is already rendered on the page.
        feed = sorted((item for item in window if item != content),
                      key=attrgetter("timestamp"))
        return render(request, "chatlog/chatlog_items.html", {
            "content": content,
            "feed": feed,
        })
|
#!/usr/bin/env python3
# Count 0..4, printing the counter and the loop flag on each pass.
x = 0
running = True  # FIX: renamed from `bool`, which shadowed the builtin type
while running:
    print(x)
    x = x + 1
    if x == 5:
        running = False
    print(running)
print("done")
|
import rdflib
import pytest
from project import graph_utils
# Module-level fixture: expected values below describe the bundled "travel"
# ontology.  Judging by the assertions, get_graph_info returns a sequence of
# (name, node_count, edge_count, label_set) -- TODO confirm in graph_utils.
my_graph = graph_utils.get_graph_info("travel")
def test_name():
    assert my_graph[0] == "travel"
def test_nodes():
    assert my_graph[1] == 131
def test_edges():
    assert my_graph[2] == 277
def test_labels():
    # Full set of predicate URIs expected in travel.owl.
    assert my_graph[3] == {
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#unionOf"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#disjointWith"),
        rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#subClassOf"),
        rdflib.term.URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#first"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#onProperty"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#someValuesFrom"),
        rdflib.term.URIRef("http://www.owl-ontologies.com/travel.owl#hasPart"),
        rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#domain"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#inverseOf"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#oneOf"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#hasValue"),
        rdflib.term.URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#rest"),
        rdflib.term.URIRef("http://www.owl-ontologies.com/travel.owl#hasAccommodation"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#complementOf"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#differentFrom"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#minCardinality"),
        rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#comment"),
        rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#range"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#equivalentClass"),
        rdflib.term.URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#intersectionOf"),
        rdflib.term.URIRef("http://www.w3.org/2002/07/owl#versionInfo"),
    }
|
"""
Objective
In this challenge, we're reinforcing what we've learned today. In case you've missed them, today's tutorials are on Conditional Probability and Combinations and Permutations.
Task
A bag contains 3 red marbles and 4 blue marbles. Then, 2 marbles are drawn from the bag, at random, without replacement. If the first marble drawn is red, what is the probability that the second marble is blue?
1 / 12
7 / 12
1 / 6
2 / 3 -------> Correct
"""
|
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
'''
MIT License
Copyright (c) 2018 USAKU Takahashi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
----------------------------------------------------------------------------
The MIT License (MIT)
Copyright (c) 2018 Johan Hanssen Seferidis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import logging
from websocket_server import WebsocketServer
import re #regexp
import sys
class MyClient:
    """Registry wrapper around one raw websocket client record.

    Every instance registers itself in the class-level ``allClients`` list.
    NOTE(review): "roll" throughout this API presumably means "role"; the
    names are kept because HubServer and remote peers depend on them.
    """
    allClients = []  # class variable: every live client instance

    def __init__(self, client_websocket_server, server_websocket_server):
        self.pClient = client_websocket_server  # raw client dict from WebsocketServer
        self.pServer = server_websocket_server  # the WebsocketServer itself
        self.pRoll = None                       # claimed roll, None until set
        MyClient.allClients.append(self)

    def speak_to_myself(self, message):
        """Send message back to this client only."""
        self.pServer.send_message(self.pClient, message)

    def say_to_roll(self, roll, message):
        """Send message to every client currently holding the given roll."""
        for peer in MyClient.allClients:
            if peer.pRoll == roll:
                self.pServer.send_message(peer.pClient, message)

    def say_to_all(self, message):
        """Broadcast message to every connected client."""
        self.pServer.send_message_to_all(message)

    def get_roll(self):
        return self.pRoll

    def set_roll(self, roll):
        self.pRoll = roll

    def set_roll_exclusively(self, roll):
        """Claim a roll if nobody else holds it; report the outcome back."""
        if self.pRoll == roll:
            self.pServer.send_message(self.pClient, "success : you have already been " + roll)
            return True
        if MyClient.exist_roll(roll):
            # FIX: message typo -- the original said "falure".
            self.pServer.send_message(self.pClient, "failure : " + roll + " already exists")
            return False
        self.set_roll(roll)
        self.pServer.send_message(self.pClient, "success : you became " + roll)
        return True

    @classmethod
    def get_rolls_of_all(cls):
        """Return the roll of every registered client (None when unset)."""
        rolls = [peer.pRoll for peer in cls.allClients]
        print('rolls =', rolls)  # debug trace kept from the original
        return rolls

    @classmethod
    def exist_roll(cls, roll):
        """True when some registered client already holds this roll."""
        return roll in cls.get_rolls_of_all()

    @classmethod
    def remove(cls, client_websocket_server):
        """Unregister every wrapper whose raw client record matches.

        BUG FIX: the original deleted by index while iterating a range
        computed before deletion, which raised IndexError whenever the
        removed client was not the last one.  Rebuild the list instead.
        """
        cls.allClients = [peer for peer in cls.allClients
                          if peer.pClient != client_websocket_server]

    @classmethod
    def convertFrom(cls, client_websocket_server):
        """Return the MyClient wrapping the given raw client record.

        NOTE(review): like the original, falls back to the last registered
        client when no match exists -- confirm callers never hit that path.
        """
        for peer in cls.allClients:
            if peer.pClient == client_websocket_server:
                return peer
        return cls.allClients[-1]
class HubServer:
    """Websocket hub: currently relays every received message to all clients.

    The role-based (CONTROLLER <-> MAX31856) routing is commented out below;
    only the broadcast path is active.
    """
    ANONYMOUS = 'anonymous'
    rollnameMAX31856 = 'MAX31856'
    rollnameCONTROLLER = 'CONTROLLER'
    def __init__(self,PORT,HOST,LOGGING):
        # Wire up callbacks, then block forever in the socket service loop.
        server = WebsocketServer(PORT,HOST,LOGGING)
        server.set_fn_new_client(self.new_client)
        server.set_fn_client_left(self.client_left)
        server.set_fn_message_received(self.message_received)
        server.run_forever()
    # Called for every client connecting (after handshake)
    def new_client(self,client_websocket_server, server):
        print("New client connected and was given id %d" % client_websocket_server['id'])
        print(client_websocket_server)
        server.send_message_to_all('new client joined')
        # Registers itself in MyClient.allClients as a side effect.
        MyClient(client_websocket_server,server)
    # Called for every client disconnecting
    def client_left(self,client, server):
        print("Client(%d) disconnected" % client['id'])
        MyClient.remove(client)
    # Called when a client sends a message
    def message_received(self,client_websocket_server, server, message):
        ### #print('messge from ',client_websocket_server)
        ### #print("Client({}) said: {}".format(client_websocket_server['id'], message))
        client = MyClient.convertFrom(client_websocket_server)
        # Broadcast-only mode; the block below is the old roll-based routing.
        client.say_to_all(message)
    #     if message.strip() == "co6":
    #         client.set_roll_exclusively(HubServer.rollnameCONTROLLER)
    #     elif message.strip() == "ma6":
    #         client.set_roll_exclusively(HubServer.rollnameMAX31856)
    #
    #     roll = client.get_roll()
    #     if roll == HubServer.rollnameCONTROLLER:
    #         if re.search('toM',message):#pass phrase
    #             client.say_to_roll(HubServer.rollnameMAX31856,message)
    #     elif roll == HubServer.rollnameMAX31856:
    #         if re.search('toC',message):#pass phrase
    #             client.say_to_roll(HubServer.rollnameCONTROLLER,message)
    #     else:
    #         client.say_to_all(message)
if __name__ == '__main__':
    # Port comes from argv[1] when supplied; defaults to 8801.
    args = sys.argv
    if len(args) == 2:
        port = int(args[1])
    else:
        port = 8801
    print("port={}".format(port))
    #HubServer(port,'garameki.com',logging.INFO)
    # NOTE(review): bind address is hard-coded to a LAN IP; blocks forever.
    HubServer(port,'192.168.3.6',logging.INFO)
|
import pygame
pygame.init()
# Window and timing configuration.
WIDTH = 700
HEIGHT = 700
FPS = 60
# Colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (80, 175, 90)
BLUE = (60, 160, 200)
# Brick-wall grid dimensions (COLS across, ROWS down).
COLS = 10
ROWS = 6
win = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption("Breakout KD")
clock = pygame.time.Clock()
# Brick class
class Brick():
    """The brick wall: a ROWS x COLS grid of pygame.Rect cells."""

    def __init__(self):
        # Bricks tile the full window width.
        self.width = int(WIDTH / COLS)
        self.height = 30

    def create_bricks(self):
        """(Re)build the 2-D grid of brick rectangles."""
        self.bricks = [
            [pygame.Rect(col * self.width, row * self.height,
                         self.width, self.height)
             for col in range(COLS)]
            for row in range(ROWS)
        ]

    def draw_bricks(self):
        """Draw every brick as a filled green cell with a black outline."""
        for line in self.bricks:
            for cell in line:
                pygame.draw.rect(win, GREEN, cell)
                pygame.draw.rect(win, BLACK, cell, 2)
# Paddle Class
class Paddle():
    """Player paddle, moved horizontally with the arrow keys."""

    def __init__(self):
        self.width = int(WIDTH / COLS)
        self.height = 20
        # Start horizontally centered, near the bottom edge.
        self.x = int(WIDTH / 2) - int(self.width / 2)
        self.y = HEIGHT - 40
        self.speed = 10
        self.rect = pygame.Rect(self.x, self.y, self.width, self.height)

    def draw_paddle(self):
        pygame.draw.rect(win, WHITE, self.rect)

    def move_paddle(self):
        """Shift the paddle by its speed, clamped to the window edges."""
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_LEFT] and self.rect.left > 0:
            self.rect.x -= self.speed
        if pressed[pygame.K_RIGHT] and self.rect.right < WIDTH:
            self.rect.x += self.speed
# Ball Class
class Ball():
    """The ball: position, velocity, and all collision handling."""

    def __init__(self, x, y):
        self.radius = 10
        self.x = x
        self.y = y
        self.rect = pygame.Rect(self.x, self.y, self.radius*2, self.radius*2)
        # Velocity components, pixels per frame.
        self.dx = 3
        self.dy = -3
        # 0 = playing, -1 = game over, 1 = all bricks cleared.
        self.game_status = 0

    def draw_ball(self):
        # NOTE(review): the circle is centered on the rect's top-left corner,
        # not its center -- kept as-is to preserve the original visuals.
        pygame.draw.circle(win, BLUE, (self.rect.x, self.rect.y), self.radius)

    def move_ball(self):
        """Advance one frame: walls, paddle, bricks, then move; return status."""
        # Wall collision: bounce off the sides and the top.
        if self.rect.right > WIDTH or self.rect.left < 0:
            self.dx *= -1
        if self.rect.top < 0:
            self.dy *= -1
        if self.rect.bottom > HEIGHT:
            self.game_status = -1  # ball fell past the paddle: game over
        # Paddle collision: only bounce while moving downward.
        # (pygame resolves `paddle` through its .rect attribute -- confirm.)
        if self.rect.colliderect(paddle) and self.dy > 0:
            self.dy *= -1
            sound = pygame.mixer.Sound("sounds//bounce.wav")
            sound.play()
        # Brick collision.
        all_done = True
        row_num = 0
        for row in brick_wall.bricks:
            col_num = 0
            for br in row:
                if self.rect.colliderect(br):
                    hit_sound = pygame.mixer.Sound("sounds//hit.wav")
                    hit_sound.play()
                    # Vertical hits reverse dy.
                    if abs(self.rect.bottom - br.top) < 5 and self.dy > 0:
                        self.dy *= -1
                    if abs(self.rect.top - br.bottom) < 5 and self.dy < 0:
                        self.dy *= -1
                    # BUG FIX: side hits must reverse the *horizontal*
                    # velocity; the original flipped self.dy here too.
                    if abs(self.rect.left - br.right) < 5 and self.dx < 0:
                        self.dx *= -1
                    if abs(self.rect.right - br.left) < 5 and self.dx > 0:
                        self.dx *= -1
                    # Destroyed bricks become an empty zero-size rect tuple.
                    brick_wall.bricks[row_num][col_num] = (0, 0, 0, 0)
                if brick_wall.bricks[row_num][col_num] != (0, 0, 0, 0):
                    all_done = False
                col_num += 1
            row_num += 1
        if all_done:
            self.game_status = 1  # every brick cleared: player wins
        self.rect.x += self.dx
        self.rect.y += self.dy
        return self.game_status
# Game objects and the main frame loop.
paddle = Paddle()
ball = Ball(paddle.x + int(paddle.width/2), paddle.y - 10)
brick_wall = Brick()
brick_wall.create_bricks()
run = True
end_sound_played = False  # FIX: the end jingle now plays once, not per frame
while run:
    clock.tick(FPS)
    win.fill(BLACK)
    paddle.draw_paddle()
    paddle.move_paddle()
    ball.draw_ball()
    brick_wall.draw_bricks()
    game_status = ball.move_ball()
    # End states: -1 = game over, 1 = win. Both draw a caption + credit line.
    if game_status in (-1, 1):
        win.fill(BLACK)
        font = pygame.font.SysFont(None, 50)
        font1 = pygame.font.SysFont(None, 25)
        caption = 'GAME OVER' if game_status == -1 else 'YOU WIN'
        text = font.render(caption, True, BLUE)
        text1 = font1.render('\u00A9 Copyright 2021 Rifky', True, WHITE)
        text_rect = text.get_rect(center=(WIDTH / 2, HEIGHT / 2))
        # BUG FIX: the credit line's rect was taken from the caption surface
        # (text.get_rect) in the original, misplacing the blit.
        text_rect1 = text1.get_rect(center=(WIDTH / 2, HEIGHT - 20))
        if not end_sound_played:
            name = "sounds//game_over.wav" if game_status == -1 else "sounds//game_win.wav"
            pygame.mixer.Sound(name).play()
            end_sound_played = True
        win.blit(text, text_rect)
        win.blit(text1, text_rect1)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    pygame.display.update()
pygame.quit()
|
# Fuel-cost calculator (Turkish UI): asks for the distance driven (km) and
# the per-km fuel cost (TL), then prints the total bill.
print("ARACINIZ KM NE KADAR YAKIYOR ? ")
print("-------------------------------")
yapilan_km = int(input("Kaç Km Yol Yaptınız (KM): "))  # distance driven, km
yakilan_yakit = float(input("Km'de Araç Ne Kadar Yakıyor (TL) : "))  # cost per km, TL
ödeme=yapilan_km*yakilan_yakit  # total cost = distance * cost-per-km
print("Tüketilen Yakıt Bedeli {}'dir".format(ödeme))
|
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
import livestream.routing
# ASGI entry point: HTTP is handled by the default Django application;
# websocket connections pass through session auth, then the livestream
# URL patterns.
application = ProtocolTypeRouter({ # includes index by default
    'websocket': AuthMiddlewareStack(
        URLRouter(
            livestream.routing.websocket_urlpatterns
        )
    )
})
|
# coding: utf-8
"""
Social Graph API
Pho Networks REST API
OpenAPI spec version: 1.1.1
Contact: emre@phonetworks.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class DefaultApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Auto-generated (swagger-codegen): use the supplied ApiClient, else
        # fall back to (lazily creating) the shared client held on the
        # global Configuration singleton.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
    def add_attribute(self, **kwargs):
        """
        updates (or creates) an attribute
        Works with all entities, including nodes and edges. Given its key, updates an attribute value, or creates it, if it doesn't yet exist.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.add_attribute(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str value: The value to update the key with.
        :return: InlineResponse2004
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Auto-generated (swagger-codegen) delegator: with a callback the
        # *_with_http_info variant returns the async request thread; without
        # one we return just the deserialized response body.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.add_attribute_with_http_info(**kwargs)
        else:
            (data) = self.add_attribute_with_http_info(**kwargs)
            return data
    def add_attribute_with_http_info(self, **kwargs):
        """
        updates (or creates) an attribute
        Works with all entities, including nodes and edges. Given its key, updates an attribute value, or creates it, if it doesn't yet exist.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.add_attribute_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str value: The value to update the key with.
        :return: InlineResponse2004
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Auto-generated (swagger-codegen): validate kwargs against the
        # declared parameter list, then build and dispatch the HTTP call.
        all_params = ['value']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_attribute" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        # NOTE(review): the path template has {uuid}/{key} placeholders but
        # path_params stays empty here -- presumably filled by the caller or
        # a defaulting layer; confirm against the generated client docs.
        resource_path = '/{uuid}/attribute/{key}'.replace('{format}', 'json')
        path_params = {}
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'value' in params:
            body_params = params['value']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = []
        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='InlineResponse2004',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def del_attribute(self, **kwargs):
"""
deletes an attribute
Works with all entities, including nodes and edges. Given its key, deletes an attribute.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.del_attribute(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: InlineResponse2004
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.del_attribute_with_http_info(**kwargs)
else:
(data) = self.del_attribute_with_http_info(**kwargs)
return data
    def del_attribute_with_http_info(self, **kwargs):
        """
        deletes an attribute
        Works with all entities, including nodes and edges. Given its key, deletes an attribute.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.del_attribute_with_http_info(callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :return: InlineResponse2004
        If the method is called asynchronously,
        returns the request thread.
        """
        # Only the generic transport-control keywords are accepted here.
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot locals() BEFORE the loop so named locals and **kwargs
        # can be handled through one dict; order of these statements matters.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method del_attribute" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        # NOTE(review): resource_path contains {uuid} and {key} placeholders
        # but path_params is left empty — presumably they are substituted
        # elsewhere before dispatch; confirm against api_client.call_api.
        # The '{format}' replace is a swagger-codegen artifact (always json).
        resource_path = '/{uuid}/attribute/{key}'.replace('{format}', 'json')
        path_params = {}

        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept` (dropped entirely when negotiation yields none)
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting (none required by this endpoint)
        auth_settings = []

        # Delegate the actual HTTP DELETE to the shared API client.
        return self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='InlineResponse2004',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def del_entity(self, **kwargs):
"""
deletes an entity
Works with all entities, including nodes and edges.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.del_entity(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.del_entity_with_http_info(**kwargs)
else:
(data) = self.del_entity_with_http_info(**kwargs)
return data
def del_entity_with_http_info(self, **kwargs):
"""
deletes an entity
Works with all entities, including nodes and edges.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.del_entity_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method del_entity" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/{uuid}'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_edges(self, uuid, **kwargs):
"""
retrieves the edges of a node
By passing in a node ID, you can fetch all the edges of the node in question; including incoming and outgoing.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_edges(uuid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str uuid: The node ID (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_edges_with_http_info(uuid, **kwargs)
else:
(data) = self.get_all_edges_with_http_info(uuid, **kwargs)
return data
    def get_all_edges_with_http_info(self, uuid, **kwargs):
        """
        retrieves the edges of a node
        By passing in a node ID, you can fetch all the edges of the node in question; including incoming and outgoing.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_all_edges_with_http_info(uuid, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str uuid: The node ID (required)
        :return: InlineResponse2003
        If the method is called asynchronously,
        returns the request thread.
        """
        # Positional `uuid` plus the generic transport-control keywords.
        all_params = ['uuid']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot locals() BEFORE the loop so named args and **kwargs can
        # be handled through one dict; order of these statements matters.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_all_edges" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'uuid' is set
        if ('uuid' not in params) or (params['uuid'] is None):
            raise ValueError("Missing the required parameter `uuid` when calling `get_all_edges`")

        collection_formats = {}

        # '{format}' replace is a swagger-codegen artifact (always json).
        resource_path = '/{uuid}/edges/all'.replace('{format}', 'json')
        path_params = {}
        if 'uuid' in params:
            path_params['uuid'] = params['uuid']

        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept` (dropped when negotiation yields none)
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting (none required by this endpoint)
        auth_settings = []

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='InlineResponse2003',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_attribute(self, uuid, key, **kwargs):
"""
retrieves the value of an entity attribute
Attribute key must be case-sensitive.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_attribute(uuid, key, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str uuid: The node ID (required)
:param str key: The attribute key (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_attribute_with_http_info(uuid, key, **kwargs)
else:
(data) = self.get_attribute_with_http_info(uuid, key, **kwargs)
return data
def get_attribute_with_http_info(self, uuid, key, **kwargs):
"""
retrieves the value of an entity attribute
Attribute key must be case-sensitive.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_attribute_with_http_info(uuid, key, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str uuid: The node ID (required)
:param str key: The attribute key (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['uuid', 'key']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_attribute" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in params) or (params['uuid'] is None):
raise ValueError("Missing the required parameter `uuid` when calling `get_attribute`")
# verify the required parameter 'key' is set
if ('key' not in params) or (params['key'] is None):
raise ValueError("Missing the required parameter `key` when calling `get_attribute`")
collection_formats = {}
resource_path = '/{uuid}/attribute/{key}'.replace('{format}', 'json')
path_params = {}
if 'uuid' in params:
path_params['uuid'] = params['uuid']
if 'key' in params:
path_params['key'] = params['key']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_attributes(self, uuid, **kwargs):
"""
retrieves the existing attribute keys of an entity (edge or node)
Attribute keys are case-sensitive, and they will be listed in an array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_attributes(uuid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str uuid: The node ID (required)
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_attributes_with_http_info(uuid, **kwargs)
else:
(data) = self.get_attributes_with_http_info(uuid, **kwargs)
return data
    def get_attributes_with_http_info(self, uuid, **kwargs):
        """
        retrieves the existing attribute keys of an entity (edge or node)
        Attribute keys are case-sensitive, and they will be listed in an array.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_attributes_with_http_info(uuid, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str uuid: The node ID (required)
        :return: list[str]
        If the method is called asynchronously,
        returns the request thread.
        """
        # Positional `uuid` plus the generic transport-control keywords.
        all_params = ['uuid']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot locals() BEFORE the loop; order of statements matters.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_attributes" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'uuid' is set
        if ('uuid' not in params) or (params['uuid'] is None):
            raise ValueError("Missing the required parameter `uuid` when calling `get_attributes`")

        collection_formats = {}

        # '{format}' replace is a swagger-codegen artifact (always json).
        resource_path = '/{uuid}/attributes'.replace('{format}', 'json')
        path_params = {}
        if 'uuid' in params:
            path_params['uuid'] = params['uuid']

        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept` (dropped when negotiation yields none)
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting (none required by this endpoint)
        auth_settings = []

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[str]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_edge(self, uuid, **kwargs):
"""
retrieves an edge
By passing in an ID, you can search for available edges in the system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_edge(uuid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str uuid: The edge ID (required)
:return: Edge
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_edge_with_http_info(uuid, **kwargs)
else:
(data) = self.get_edge_with_http_info(uuid, **kwargs)
return data
    def get_edge_with_http_info(self, uuid, **kwargs):
        """
        retrieves an edge
        By passing in an ID, you can search for available edges in the system.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_edge_with_http_info(uuid, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str uuid: The edge ID (required)
        :return: Edge
        If the method is called asynchronously,
        returns the request thread.
        """
        # Positional `uuid` plus the generic transport-control keywords.
        all_params = ['uuid']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot locals() BEFORE the loop; order of statements matters.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_edge" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'uuid' is set
        if ('uuid' not in params) or (params['uuid'] is None):
            raise ValueError("Missing the required parameter `uuid` when calling `get_edge`")

        collection_formats = {}

        # '{format}' replace is a swagger-codegen artifact (always json).
        resource_path = '/edge/{uuid}'.replace('{format}', 'json')
        path_params = {}
        if 'uuid' in params:
            path_params['uuid'] = params['uuid']

        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept` (dropped when negotiation yields none)
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting (none required by this endpoint)
        auth_settings = []

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Edge',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_edge_getters(self, uuid, **kwargs):
"""
retrieves the edge getter methods of a node
By passing in a node UUID that exists in the database, you can fetch the edge getter methods of the node in question.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_edge_getters(uuid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str uuid: The node ID (required)
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_edge_getters_with_http_info(uuid, **kwargs)
else:
(data) = self.get_edge_getters_with_http_info(uuid, **kwargs)
return data
    def get_edge_getters_with_http_info(self, uuid, **kwargs):
        """
        retrieves the edge getter methods of a node
        By passing in a node UUID that exists in the database, you can fetch the edge getter methods of the node in question.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_edge_getters_with_http_info(uuid, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str uuid: The node ID (required)
        :return: list[str]
        If the method is called asynchronously,
        returns the request thread.
        """
        # Positional `uuid` plus the generic transport-control keywords.
        all_params = ['uuid']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot locals() BEFORE the loop; order of statements matters.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_edge_getters" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'uuid' is set
        if ('uuid' not in params) or (params['uuid'] is None):
            raise ValueError("Missing the required parameter `uuid` when calling `get_edge_getters`")

        collection_formats = {}

        # '{format}' replace is a swagger-codegen artifact (always json).
        resource_path = '/{uuid}/edges/getters'.replace('{format}', 'json')
        path_params = {}
        if 'uuid' in params:
            path_params['uuid'] = params['uuid']

        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept` (dropped when negotiation yields none)
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting (none required by this endpoint)
        auth_settings = []

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[str]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_edge_setters(self, uuid, **kwargs):
"""
retrieves the edge setter methods of a node
By passing in a node UUID that exists in the database, you can fetch the edge setter methods of the node in question. These setters may or may not be formative. If they are formative, a new node is created in result.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_edge_setters(uuid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str uuid: The node ID (required)
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_edge_setters_with_http_info(uuid, **kwargs)
else:
(data) = self.get_edge_setters_with_http_info(uuid, **kwargs)
return data
    def get_edge_setters_with_http_info(self, uuid, **kwargs):
        """
        retrieves the edge setter methods of a node
        By passing in a node UUID that exists in the database, you can fetch the edge setter methods of the node in question. These setters may or may not be formative. If they are formative, a new node is created in result.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_edge_setters_with_http_info(uuid, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str uuid: The node ID (required)
        :return: list[str]
        If the method is called asynchronously,
        returns the request thread.
        """
        # Positional `uuid` plus the generic transport-control keywords.
        all_params = ['uuid']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot locals() BEFORE the loop; order of statements matters.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_edge_setters" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'uuid' is set
        if ('uuid' not in params) or (params['uuid'] is None):
            raise ValueError("Missing the required parameter `uuid` when calling `get_edge_setters`")

        collection_formats = {}

        # '{format}' replace is a swagger-codegen artifact (always json).
        resource_path = '/{uuid}/edges/setters'.replace('{format}', 'json')
        path_params = {}
        if 'uuid' in params:
            path_params['uuid'] = params['uuid']

        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept` (dropped when negotiation yields none)
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])

        # Authentication setting (none required by this endpoint)
        auth_settings = []

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[str]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_founder(self, **kwargs):
"""
retrieves the Graph Founder
The Founder must be a \\Pho\\Framework\\Actor object. This method returns the object type as well as object ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_founder(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_founder_with_http_info(**kwargs)
else:
(data) = self.get_founder_with_http_info(**kwargs)
return data
def get_founder_with_http_info(self, **kwargs):
"""
retrieves the Graph Founder
The Founder must be a \\Pho\\Framework\\Actor object. This method returns the object type as well as object ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_founder_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_founder" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/founder'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse200',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_graph(self, **kwargs):
"""
retrieves the main Graph
The Graph must be a \\Pho\\Lib\\Graph\\SubGraph and \\Pho\\Framework\\Graph object. This method returns the object type as well as object ID. The Graph contains all nodes and edges in the system. Though it is contained by Space, its one and only container.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_graph(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_graph_with_http_info(**kwargs)
else:
(data) = self.get_graph_with_http_info(**kwargs)
return data
def get_graph_with_http_info(self, **kwargs):
"""
retrieves the main Graph
The Graph must be a \\Pho\\Lib\\Graph\\SubGraph and \\Pho\\Framework\\Graph object. This method returns the object type as well as object ID. The Graph contains all nodes and edges in the system. Though it is contained by Space, its one and only container.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_graph_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_graph" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/graph'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2001',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_incoming_edges(self, uuid, **kwargs):
    """
    retrieves the incoming edges of a node

    By passing in a node ID, you can fetch the incoming edges of the
    node in question. Synchronous by default; pass a `callback`
    function to run the request asynchronously.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_incoming_edges(uuid, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: the node ID (required)
    :return: list[NodeEdge]
        If the method is called asynchronously, returns the request
        thread.
    """
    # This convenience wrapper always wants only the response payload,
    # never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the implementation returns the worker thread;
    # without one it returns the deserialized data — either way its
    # result is exactly what we pass back, so no branching is needed.
    return self.get_incoming_edges_with_http_info(uuid, **kwargs)
def get_incoming_edges_with_http_info(self, uuid, **kwargs):
    """
    retrieves the incoming edges of a node

    By passing in a node ID, you can fetch the incoming edges of the
    node in question. Synchronous by default; pass a `callback`
    function to run the request asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: the node ID (required)
    :return: list[NodeEdge]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Every keyword argument this endpoint accepts; anything else is a
    # programming error and is rejected below.
    all_params = ['uuid', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Fold **kwargs into the flat params dict, rejecting unknown names.
    # dict.items() works on both Python 2 and 3, so six.iteritems is
    # not needed here.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_incoming_edges" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'uuid' is set
    if ('uuid' not in params) or (params['uuid'] is None):
        raise ValueError("Missing the required parameter `uuid` when calling `get_incoming_edges`")

    collection_formats = {}

    resource_path = '/{uuid}/edges/in'.replace('{format}', 'json')
    path_params = {}
    if 'uuid' in params:
        path_params['uuid'] = params['uuid']

    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='list[NodeEdge]',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_node(self, uuid, **kwargs):
    """
    retrieves a node

    By passing in an ID, you can search for available nodes in the
    system. Please note, this function will not return edges. This
    method is reserved for nodes only. Synchronous by default; pass a
    `callback` function to run the request asynchronously.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_node(uuid, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: The node ID (required)
    :return: Node
        If the method is called asynchronously, returns the request
        thread.
    """
    # This convenience wrapper always wants only the response payload.
    kwargs['_return_http_data_only'] = True
    # The implementation returns either the worker thread (callback
    # given) or the deserialized data; forward its result unchanged.
    return self.get_node_with_http_info(uuid, **kwargs)
def get_node_with_http_info(self, uuid, **kwargs):
    """
    retrieves a node

    By passing in an ID, you can search for available nodes in the
    system. Please note, this function will not return edges. This
    method is reserved for nodes only. Synchronous by default; pass a
    `callback` function to run the request asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: The node ID (required)
    :return: Node
        If the method is called asynchronously, returns the request
        thread.
    """
    # Every keyword argument this endpoint accepts.
    all_params = ['uuid', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Fold **kwargs into the flat params dict, rejecting unknown names.
    # dict.items() works on both Python 2 and 3 (no six needed).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_node" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'uuid' is set
    if ('uuid' not in params) or (params['uuid'] is None):
        raise ValueError("Missing the required parameter `uuid` when calling `get_node`")

    collection_formats = {}

    resource_path = '/{uuid}'.replace('{format}', 'json')
    path_params = {}
    if 'uuid' in params:
        path_params['uuid'] = params['uuid']

    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Node',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_node_edge(self, uuid, edge, **kwargs):
    """
    edge getter

    Fetches edge results, whether as edge IDs or node IDs, depending
    on edge's characteristics. Synchronous by default; pass a
    `callback` function to run the request asynchronously.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_node_edge(uuid, edge, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: The node ID (required)
    :param str edge: The edge getter label (required)
    :return: list[str]
        If the method is called asynchronously, returns the request
        thread.
    """
    # This convenience wrapper always wants only the response payload.
    kwargs['_return_http_data_only'] = True
    # The implementation returns either the worker thread (callback
    # given) or the deserialized data; forward its result unchanged.
    return self.get_node_edge_with_http_info(uuid, edge, **kwargs)
def get_node_edge_with_http_info(self, uuid, edge, **kwargs):
    """
    edge getter

    Fetches edge results, whether as edge IDs or node IDs, depending
    on edge's characteristics. Synchronous by default; pass a
    `callback` function to run the request asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: The node ID (required)
    :param str edge: The edge getter label (required)
    :return: list[str]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Every keyword argument this endpoint accepts.
    all_params = ['uuid', 'edge', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Fold **kwargs into the flat params dict, rejecting unknown names.
    # dict.items() works on both Python 2 and 3 (no six needed).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_node_edge" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'uuid' is set
    if ('uuid' not in params) or (params['uuid'] is None):
        raise ValueError("Missing the required parameter `uuid` when calling `get_node_edge`")
    # verify the required parameter 'edge' is set
    if ('edge' not in params) or (params['edge'] is None):
        raise ValueError("Missing the required parameter `edge` when calling `get_node_edge`")

    collection_formats = {}

    resource_path = '/{uuid}/{edge}'.replace('{format}', 'json')
    path_params = {}
    if 'uuid' in params:
        path_params['uuid'] = params['uuid']
    if 'edge' in params:
        path_params['edge'] = params['edge']

    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='list[str]',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_outgoing_edges(self, uuid, **kwargs):
    """
    retrieves the outgoing edges of a node

    By passing in a node ID, you can fetch the outgoing edges of the
    node in question. Synchronous by default; pass a `callback`
    function to run the request asynchronously.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_outgoing_edges(uuid, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: the node ID (required)
    :return: list[NodeEdge]
        If the method is called asynchronously, returns the request
        thread.
    """
    # This convenience wrapper always wants only the response payload.
    kwargs['_return_http_data_only'] = True
    # The implementation returns either the worker thread (callback
    # given) or the deserialized data; forward its result unchanged.
    return self.get_outgoing_edges_with_http_info(uuid, **kwargs)
def get_outgoing_edges_with_http_info(self, uuid, **kwargs):
    """
    retrieves the outgoing edges of a node

    By passing in a node ID, you can fetch the outgoing edges of the
    node in question. Synchronous by default; pass a `callback`
    function to run the request asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: the node ID (required)
    :return: list[NodeEdge]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Every keyword argument this endpoint accepts.
    all_params = ['uuid', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Fold **kwargs into the flat params dict, rejecting unknown names.
    # dict.items() works on both Python 2 and 3 (no six needed).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_outgoing_edges" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'uuid' is set
    if ('uuid' not in params) or (params['uuid'] is None):
        raise ValueError("Missing the required parameter `uuid` when calling `get_outgoing_edges`")

    collection_formats = {}

    resource_path = '/{uuid}/edges/out'.replace('{format}', 'json')
    path_params = {}
    if 'uuid' in params:
        path_params['uuid'] = params['uuid']

    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='list[NodeEdge]',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_space(self, **kwargs):
    """
    retrieves the Space

    The Space must be a \\Pho\\Lib\\Graph\\Graph object. This method
    returns the object type as well as object uuid. Space always comes
    with the nil ID; 00000000000000000000000000000000, and under normal
    circumstances its class is always Pho\\Kernel\\Standards\\Space.
    Synchronous by default; pass a `callback` function to run the
    request asynchronously.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_space(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: InlineResponse2002
        If the method is called asynchronously, returns the request
        thread.
    """
    # This convenience wrapper always wants only the response payload.
    kwargs['_return_http_data_only'] = True
    # The implementation returns either the worker thread (callback
    # given) or the deserialized data; forward its result unchanged.
    return self.get_space_with_http_info(**kwargs)
def get_space_with_http_info(self, **kwargs):
    """
    retrieves the Space

    The Space must be a \\Pho\\Lib\\Graph\\Graph object. This method
    returns the object type as well as object uuid. Space always comes
    with the nil ID; 00000000000000000000000000000000, and under normal
    circumstances its class is always Pho\\Kernel\\Standards\\Space.
    Synchronous by default; pass a `callback` function to run the
    request asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: InlineResponse2002
        If the method is called asynchronously, returns the request
        thread.
    """
    # Every keyword argument this endpoint accepts (no positional
    # parameters for this endpoint).
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Fold **kwargs into the flat params dict, rejecting unknown names.
    # dict.items() works on both Python 2 and 3 (no six needed).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_space" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    resource_path = '/space'.replace('{format}', 'json')
    path_params = {}

    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='InlineResponse2002',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_type(self, uuid, **kwargs):
    """
    fetches entity type

    Possible values are; \"Space\", \"Node\", \"Graph Node\",
    \"Graph\", \"Actor Node\" \"Object Node\", \"Edge\",
    \"Read Edge\", \"Write Edge\", \"Subscribe Edge\",
    \"Mention Edge\", \"Unidentified\". Synchronous by default; pass a
    `callback` function to run the request asynchronously.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_type(uuid, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: the node (required)
    :return: str
        If the method is called asynchronously, returns the request
        thread.
    """
    # This convenience wrapper always wants only the response payload.
    kwargs['_return_http_data_only'] = True
    # The implementation returns either the worker thread (callback
    # given) or the deserialized data; forward its result unchanged.
    return self.get_type_with_http_info(uuid, **kwargs)
def get_type_with_http_info(self, uuid, **kwargs):
    """
    fetches entity type

    Possible values are; \"Space\", \"Node\", \"Graph Node\",
    \"Graph\", \"Actor Node\" \"Object Node\", \"Edge\",
    \"Read Edge\", \"Write Edge\", \"Subscribe Edge\",
    \"Mention Edge\", \"Unidentified\". Synchronous by default; pass a
    `callback` function to run the request asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: the node (required)
    :return: str
        If the method is called asynchronously, returns the request
        thread.
    """
    # Every keyword argument this endpoint accepts.
    all_params = ['uuid', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Fold **kwargs into the flat params dict, rejecting unknown names.
    # dict.items() works on both Python 2 and 3 (no six needed).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_type" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'uuid' is set
    if ('uuid' not in params) or (params['uuid'] is None):
        raise ValueError("Missing the required parameter `uuid` when calling `get_type`")

    collection_formats = {}

    resource_path = '/{uuid}/type'.replace('{format}', 'json')
    path_params = {}
    if 'uuid' in params:
        path_params['uuid'] = params['uuid']

    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='str',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def make_actor(self, **kwargs):
    """
    creates an Actor object

    Fetches whatever set as \"default_object\"=>\"actor\" while
    determining what Actor object to construct. If it doesn't exist,
    uses \"default_object\"=>\"founder\" class. Otherwise fails.
    Synchronous by default; pass a `callback` function to run the
    request asynchronously.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.make_actor(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str param1: Actor constructor argument. More parameters may be passed via param2, param3 ... param50.
    :return: str
        If the method is called asynchronously, returns the request
        thread.
    """
    # This convenience wrapper always wants only the response payload.
    kwargs['_return_http_data_only'] = True
    # The implementation returns either the worker thread (callback
    # given) or the deserialized data; forward its result unchanged.
    return self.make_actor_with_http_info(**kwargs)
def make_actor_with_http_info(self, **kwargs):
    """
    creates an Actor object

    Fetches whatever set as \"default_object\"=>\"actor\" while
    determining what Actor object to construct. If it doesn't exist,
    uses \"default_object\"=>\"founder\" class. Otherwise fails.
    Synchronous by default; pass a `callback` function to run the
    request asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str param1: Actor constructor argument. More parameters may be passed via param2, param3 ... param50.
    :return: str
        If the method is called asynchronously, returns the request
        thread.
    """
    # Every keyword argument this endpoint accepts.
    all_params = ['param1', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Fold **kwargs into the flat params dict, rejecting unknown names.
    # dict.items() works on both Python 2 and 3 (no six needed).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method make_actor" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    resource_path = '/actor'.replace('{format}', 'json')
    path_params = {}

    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}

    # The optional constructor argument travels as the request body.
    body_params = None
    if 'param1' in params:
        body_params = params['param1']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='str',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def make_edge(self, **kwargs):
    """
    creates an edge

    Used to set new edges. If the edge is formative, then a node is
    also formed. Synchronous by default; pass a `callback` function to
    run the request asynchronously.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.make_edge(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str param1: The value to update the key with. There can be 50 of those. For example; param1=\"value1\", param2 =\"another value\" depending on the edge's default constructor variable count.
    :return: str
        If the method is called asynchronously, returns the request
        thread.
    """
    # This convenience wrapper always wants only the response payload.
    kwargs['_return_http_data_only'] = True
    # The implementation returns either the worker thread (callback
    # given) or the deserialized data; forward its result unchanged.
    return self.make_edge_with_http_info(**kwargs)
def make_edge_with_http_info(self, **kwargs):
    """
    creates an edge

    Used to set new edges. If the edge is formative, then a node is
    also formed. Synchronous by default; pass a `callback` function to
    run the request asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str param1: The value to update the key with. There can be 50 of those. For example; param1=\"value1\", param2 =\"another value\" depending on the edge's default constructor variable count.
    :return: str
        If the method is called asynchronously, returns the request
        thread.
    """
    # Every keyword argument this endpoint accepts.
    all_params = ['param1', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Fold **kwargs into the flat params dict, rejecting unknown names.
    # dict.items() works on both Python 2 and 3 (no six needed).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method make_edge" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    # NOTE(review): the template contains {uuid} and {edge} placeholders
    # but this method accepts no uuid/edge parameters and path_params
    # stays empty, so the placeholders are sent literally — looks like a
    # generator defect; confirm against the API spec before relying on it.
    resource_path = '/{uuid}/{edge}'.replace('{format}', 'json')
    path_params = {}

    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}

    # The optional constructor argument travels as the request body.
    body_params = None
    if 'param1' in params:
        body_params = params['param1']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='str',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def set_attribute(self, **kwargs):
    """
    updates (or creates) an attribute

    Works with all entities, including nodes and edges. Given its key,
    updates an attribute value, or creates it, if it doesn't yet exist.
    Synchronous by default; pass a `callback` function to run the
    request asynchronously.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.set_attribute(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str value: The value to update the key with.
    :return: InlineResponse2004
        If the method is called asynchronously, returns the request
        thread.
    """
    # This convenience wrapper always wants only the response payload.
    kwargs['_return_http_data_only'] = True
    # The implementation returns either the worker thread (callback
    # given) or the deserialized data; forward its result unchanged.
    return self.set_attribute_with_http_info(**kwargs)
def set_attribute_with_http_info(self, **kwargs):
    """
    updates (or creates) an attribute

    Works with all entities, including nodes and edges. Given its key,
    updates an attribute value, or creates it, if it doesn't yet exist.
    Synchronous by default; pass a `callback` function to run the
    request asynchronously.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str value: The value to update the key with.
    :return: InlineResponse2004
        If the method is called asynchronously, returns the request
        thread.
    """
    # Every keyword argument this endpoint accepts.
    all_params = ['value', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = locals()
    # Fold **kwargs into the flat params dict, rejecting unknown names.
    # dict.items() works on both Python 2 and 3 (no six needed).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method set_attribute" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    # NOTE(review): the template contains {uuid} and {key} placeholders
    # but this method accepts no uuid/key parameters and path_params
    # stays empty, so the placeholders are sent literally — looks like a
    # generator defect; confirm against the API spec before relying on it.
    resource_path = '/{uuid}/attribute/{key}'.replace('{format}', 'json')
    path_params = {}

    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}

    # The new attribute value travels as the request body.
    body_params = None
    if 'value' in params:
        body_params = params['value']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='InlineResponse2004',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
|
# Import only the names actually used instead of a wildcard import,
# so the module's namespace stays explicit and lintable.
from behave import step, then, use_step_matcher

# Use the simple "parse"-style matcher ({name} placeholders) for every
# step definition in this module.
use_step_matcher("parse")


@then("The URL is {url}")
def step_impl(context, url):
    # Verify the browser landed on exactly the expected URL.
    assert context.driver.current_url == url


@step("I open {url}")
def step_impl(context, url):  # noqa: F811 — behave dispatches by pattern, not by function name
    # Navigate the Selenium driver to the given URL.
    context.driver.get(url)
|
from dlutils.models.gans.softmax.softmax import SoftmaxGAN, update_fn
|
from __future__ import absolute_import
import argparse
import csv
import logging
import os
import random
import sys
from io import open
import pandas as pd
import numpy as np
import torch
import time
import collections
import torch.nn as nn
from collections import defaultdict
import gc
import itertools
from multiprocessing import Pool
import functools
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset, Dataset)
from typing import Callable, Dict, List, Generator, Tuple
from torch.utils.data.distributed import DistributedSampler
# from tqdm.notebook import tqdm_notebook as tqdm
from tqdm import tqdm
from sklearn.metrics import f1_score
import json
import math
from transformers import BertTokenizer, AdamW, BertModel, BertPreTrainedModel, BertConfig
from modeling_nq import BertForQuestionAnswering, loss_fn
from prepare_data_version2 import Example, Test_Example, convert_data, convert_test_data, Result, JsonChunkReader, Test_Json_Reaser
from itertools import cycle
# Configure root logging once at import time: timestamped, level-tagged
# messages at INFO and above.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)

# Maps a model-type key to its (model class, tokenizer class) pair;
# only BERT is wired up here.
MODEL_CLASSES = {
    'bert': (BertForQuestionAnswering, BertTokenizer),
}

# Module-level logger for this training script.
logger = logging.getLogger(__name__)

# Dataset split sizes. train_size is the remainder of the full corpus
# after carving out the validation split (currently zero).
# NOTE(review): 245899 and 346 appear to be hard-coded corpus counts —
# confirm they match the actual data files.
valid_size = 0
train_size = 245899 - valid_size
test_size = 346
class TextDataset(Dataset):
    """Dataset for [TensorFlow 2.0 Question Answering](https://www.kaggle.com/c/tensorflow2-question-answering).

    Each item is a list of candidate Examples for one document; sampling
    prefers candidates that carry a real annotation.

    Parameters
    ----------
    examples : list of List[Example]
        The whole Dataset.
    """

    def __init__(self, examples: "List[List[Example]]"):
        self.examples = examples

    def __len__(self) -> int:
        return len(self.examples)

    def __getitem__(self, index):
        candidates = self.examples[index]
        # Prefer candidates that carry an actual annotation.
        annotated = [ex for ex in candidates if ex.class_label != 'unknown']
        if annotated:
            return random.choice(annotated)
        # No annotated candidate for this document — sample from all.
        return random.choice(candidates)
class TestDataset(Dataset):
    """Dataset for test data.

    A thin sequence wrapper: returns the stored examples verbatim, one
    per index, with no sampling.
    """

    def __init__(self, examples):
        self.examples = examples

    def __len__(self) -> int:
        return len(self.examples)

    def __getitem__(self, index):
        # Test-time items are served as-is.
        return self.examples[index]
def collate_fn(examples: "List[Example]") -> List[List[torch.Tensor]]:
    """Batch training examples into padded input tensors plus label tensors.

    Returns ``[inputs, labels]`` where ``inputs`` is
    ``[token_ids, attention_mask, token_type_ids]`` and ``labels`` is
    ``[start_positions, end_positions, class_labels]``.
    """
    batch_size = len(examples)
    max_len = max(len(ex.input_ids) for ex in examples)

    token_ids = np.zeros((batch_size, max_len), dtype=np.int64)
    segment_ids = np.ones((batch_size, max_len), dtype=np.int64)
    for row_idx, ex in enumerate(examples):
        ids = ex.input_ids
        token_ids[row_idx, :len(ids)] = ids
        sep_at = ids.index(102)  # 102 corresponds to [SEP]: question segment ends here
        segment_ids[row_idx, :len(ids)] = [0 if pos <= sep_at else 1
                                           for pos in range(len(ids))]
    attention_mask = token_ids > 0  # token id 0 is padding
    inputs = [torch.from_numpy(token_ids),       # input_id
              torch.from_numpy(attention_mask),  # mask_id
              torch.from_numpy(segment_ids)]     # segment_id

    # Output labels: class index follows this fixed (sorted) label order.
    all_labels = ['long', 'no', 'short', 'unknown', 'yes']
    starts = np.array([ex.start_position for ex in examples])
    ends = np.array([ex.end_position for ex in examples])
    classes = [all_labels.index(ex.class_label) for ex in examples]
    # Spans that fall outside the padded window are marked invalid (-1).
    starts = np.where(starts >= max_len, -1, starts)
    ends = np.where(ends >= max_len, -1, ends)
    labels = [torch.LongTensor(starts),
              torch.LongTensor(ends),
              torch.LongTensor(classes)]
    return [inputs, labels]
def eval_collate_fn(examples: "List[Example]") -> "Tuple[List[torch.Tensor], List[Example]]":
    """Batch dev examples: padded input tensors plus the raw example objects.

    Labels are not tensorised here — scoring happens later via the examples.
    """
    batch_size = len(examples)
    max_len = max(len(ex.input_ids) for ex in examples)

    token_ids = np.zeros((batch_size, max_len), dtype=np.int64)
    segment_ids = np.ones((batch_size, max_len), dtype=np.int64)
    for row_idx, ex in enumerate(examples):
        ids = ex.input_ids
        token_ids[row_idx, :len(ids)] = ids
        sep_at = ids.index(102)  # 102 corresponds to [SEP]
        segment_ids[row_idx, :len(ids)] = [0 if pos <= sep_at else 1
                                           for pos in range(len(ids))]
    attention_mask = token_ids > 0
    inputs = [torch.from_numpy(token_ids),
              torch.from_numpy(attention_mask),
              torch.from_numpy(segment_ids)]
    return inputs, examples
def test_collate_fn(examples: List[Test_Example]) -> Tuple[List[torch.Tensor], List[Test_Example]]:
# input tokens
max_len = max([len(example.input_ids) for example in examples])
tokens = np.zeros((len(examples), max_len), dtype=np.int64)
token_type_ids = np.ones((len(examples), max_len), dtype=np.int64)
for i, example in enumerate(examples):
row = example.input_ids
tokens[i, :len(row)] = row
token_type_id = [0 if i <= row.index(102) else 1
for i in range(len(row))] # 102 corresponds to [SEP]
token_type_ids[i, :len(row)] = token_type_id
attention_mask = tokens > 0
inputs = [torch.from_numpy(tokens),
torch.from_numpy(attention_mask),
torch.from_numpy(token_type_ids)]
return inputs, examples
def set_seed(args):
    """Seed the python, numpy and torch RNGs (plus all CUDA devices when
    ``args.n_gpu > 0``) so runs are reproducible."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
# def accuracy(out, labels):
# outputs = np.argmax(out, axis=1)
# return f1_score(labels,outputs,labels=[0,1,2],average='macro')
class Test_Result(object):
    """Accumulates per-window predictions for the test set, grouped by
    example id, and resolves them into one (long, short, class) answer per
    example.

    Windows belonging to the same example are ranked by their span logit;
    the highest-ranked window with a usable prediction wins.
    """

    def __init__(self):
        self.examples = {}     # example_id -> list of Test_Example windows
        self.results = {}      # example_id -> list of [start, end] token indices
        self.logits = {}       # example_id -> list of span scores
        self.class_pred = {}   # example_id -> list of predicted label names
        # Index order must match the model's 5-way answer-type head.
        self.class_labels = ['LONG', 'NO', 'SHORT', 'UNKNOWN', 'YES']

    @staticmethod
    def is_valid_index(example: "Test_Example", index: "List[int]") -> bool:
        """Return whether (start, end) is a usable answer span: start must not
        exceed end, and must lie past the question + special tokens."""
        start_index, end_index = index
        if start_index > end_index:
            return False
        if start_index <= example.question_len + 2:
            return False
        return True

    def update(self, examples, logits, indices, class_pred):
        """Record one batch of predictions.

        ``class_pred`` is a (batch, 5) tensor of class scores; the argmax is
        translated to a label name. ``logits``/``indices`` are indexable
        per-row span scores and [start, end] pairs.
        """
        class_pred = torch.max(class_pred, dim=1)[1].numpy()  # (batch,)
        for i, example in enumerate(examples):
            self.examples.setdefault(example.example_id, []).append(example)
            self.results.setdefault(example.example_id, []).append(indices[i])
            self.logits.setdefault(example.example_id, []).append(logits[i])
            self.class_pred.setdefault(example.example_id, []).append(
                self.class_labels[class_pred[i]])

    def generate_prediction(self):
        """Resolve the accumulated windows into final answers.

        Returns ``(long_answers, short_answers, class_answers)`` dicts keyed
        by example id; span answers are "start:end" strings or None.
        """
        long_answers = {}
        short_answers = {}
        class_answers = {}
        for item in self.results.keys():
            # Rank this example's windows by span logit, best first.
            sorted_index = sorted(range(len(self.logits[item])),
                                  key=lambda k: self.logits[item][k],
                                  reverse=True)
            # FIX: previously these names were only bound inside the branches
            # below; if no window produced a usable prediction (e.g. every
            # window predicted SHORT/LONG with an invalid span), the code
            # raised UnboundLocalError. Default to "no answer".
            short_answer = None
            long_answer = None
            class_answer = 'UNKNOWN'
            for j in sorted_index:
                pred = self.class_pred[item][j]
                example = self.examples[item][j]
                span = self.results[item][j]
                if pred in ['YES', 'NO']:
                    # Yes/no questions: the "short" answer is the label itself;
                    # the long answer is the candidate's byte range.
                    short_answer = pred
                    long_answer = str(example.candidate[2]) + ':' + str(example.candidate[3])
                    class_answer = pred
                    break
                elif pred == 'SHORT' and self.is_valid_index(example, span):
                    # Map window-relative token positions back to original
                    # document token indices.
                    offset = example.doc_token_start - example.question_len - 2
                    short_answer_s = example.tokenized_to_original_index[span[0] + offset]
                    short_answer_e = example.tokenized_to_original_index[span[1] + offset]
                    long_answer = str(example.candidate[2]) + ':' + str(example.candidate[3])
                    short_answer = str(short_answer_s) + ':' + str(short_answer_e)
                    class_answer = pred
                    break
                elif pred == 'LONG' and self.is_valid_index(example, span):
                    short_answer = None
                    long_answer = str(example.candidate[2]) + ':' + str(example.candidate[3])
                    class_answer = pred
                    break
                elif pred == 'UNKNOWN' and not self.is_valid_index(example, span):
                    short_answer = None
                    long_answer = None
                    class_answer = pred
                    break
            long_answers[item] = long_answer
            short_answers[item] = short_answer
            class_answers[item] = class_answer
        return (long_answers, short_answers, class_answers)
def write_csv_line(content, csv_file, flag_row=1, flag_title=0):
    """Write prediction rows to ``csv_file`` as CSV.

    Parameters
    ----------
    content : list
        A single row (``flag_row=0``) or a list of rows (``flag_row=1``).
    csv_file : str
        Path of the submission file.
    flag_row : int
        Non-zero -> ``content`` is a list of rows (uses ``writerows``).
    flag_title : int
        Non-zero -> (re)create the file with the submission header first.
    """
    # FIX: `csv` was referenced without ever being imported (NameError at
    # runtime); import it locally so this helper is self-contained.
    import csv
    if flag_title:
        # newline='' is required by the csv module to avoid spurious blank
        # rows on Windows.
        with open(csv_file, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['example_id', 'PredictionString'])
    with open(csv_file, 'a', newline='') as f:
        writer = csv.writer(f)
        if flag_row:
            writer.writerows(content)
        else:
            writer.writerow(content)
def write_answer_public(long_answers, short_answers, sample_file, submit_file):
    """Fill the Kaggle sample submission with predicted long/short answers.

    ``long_answers``/``short_answers`` map a bare example id to its prediction
    string (or None); row ids in the sample file carry a ``_long``/``_short``
    suffix that is stripped before lookup.
    """
    # FIX: `pd` was referenced without ever being imported (NameError at
    # runtime); import pandas locally so this helper is self-contained.
    import pandas as pd
    sample_submission = pd.read_csv(sample_file)
    long_mask = sample_submission["example_id"].str.contains("_long")
    short_mask = sample_submission["example_id"].str.contains("_short")
    long_prediction_strings = sample_submission[long_mask].apply(
        lambda q: long_answers[q["example_id"].replace("_long", "")], axis=1)
    short_prediction_strings = sample_submission[short_mask].apply(
        lambda q: short_answers[q["example_id"].replace("_short", "")], axis=1)
    # .loc assignment aligns on the filtered rows' original index.
    sample_submission.loc[long_mask, "PredictionString"] = long_prediction_strings
    sample_submission.loc[short_mask, "PredictionString"] = short_prediction_strings
    sample_submission.to_csv(submit_file, index=False)
    print('write done')
def write_answer_private(long_answers, short_answers, submit_file):
    """Write a full submission CSV for the private test set: a header plus
    one ``_long`` and one ``_short`` row per example id."""
    rows = []
    for example_id in long_answers.keys():
        rows.append([example_id + '_long', long_answers[example_id]])
        rows.append([example_id + '_short', short_answers[example_id]])
    write_csv_line(rows, submit_file, flag_title=1)
def main():
    """Command-line entry point: parse arguments, set up (optionally
    distributed) CUDA, then fine-tune BertForQuestionAnswering on chunked
    jsonl training data, periodically evaluating on the dev set and
    checkpointing the best model by micro-F1.
    """
    #argparse start
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--max_seq_length", default=384, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer, than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--max_question_len", default=64, type=int,
                        help="")
    parser.add_argument("--doc_stride", default=128, type=int,
                        help="")
    parser.add_argument("--chunksize", default=1000, type=int,
                        help="")
    parser.add_argument("--num_labels", default=5, type=int,
                        help="")
    parser.add_argument("--epoch", default=1, type=int,
                        help="")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_test", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--max_answer_length", default=30, type=int,
                        help="")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    # parser.add_argument("--train_steps", default=-1, type=int,
    #                     help="")
    # NOTE(review): with the default eval_steps=-1, the conditions below of
    # the form `global_step % (args.eval_steps * gas) == 0` are ALWAYS true
    # (in Python, x % -1 == 0), i.e. evaluation/logging fires every step
    # unless a positive --eval_steps is supplied — confirm intent.
    parser.add_argument("--eval_steps", default=-1, type=int,
                        help="")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    #argparse end
    args = parser.parse_args()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    # Setup logging (warn-only on non-primary ranks)
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
    # Set seed
    set_seed(args)
    # Best-effort creation of the output directory (already-exists is fine).
    try:
        os.makedirs(args.output_dir)
    except:
        pass
    # define nq model
    # NOTE(review): tokenizer/config paths are hard-coded to ./bert_large/
    # rather than derived from --model_name_or_path — confirm this is intended.
    tokenizer = BertTokenizer.from_pretrained(os.path.join('./bert_large/', 'vocab.txt'), do_lower_case=args.do_lower_case)
    config = BertConfig.from_pretrained(os.path.join('./bert_large/', 'bert_config.json'), num_labels=args.num_labels)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    if args.do_train:
        model = BertForQuestionAnswering.from_pretrained(args.model_name_or_path, config=config)
        model = model.to(args.device)
        args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
        # CUDA distribute
        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Prepare data loader: training data is streamed chunk by chunk.
        train_dir = os.path.join(args.data_dir, 'train.jsonl')
        dev_dir = os.path.join(args.data_dir, 'dev_example.jsonl')
        # NOTE(review): file handle is not closed here.
        dev_size = len(open(dev_dir, 'r').readlines())
        convert_func = functools.partial(convert_data,
                                         tokenizer=tokenizer,
                                         max_seq_len=args.max_seq_length,
                                         max_question_len=args.max_question_len,
                                         doc_stride=args.doc_stride)
        data_reader = JsonChunkReader(train_dir, convert_func, chunksize=args.chunksize)
        # Prepare optimizer
        param_optimizer = list(model.named_parameters())
        # hack to remove pooler, which is not used
        # thus it produce None grad that break apex
        param_optimizer = [n for n in param_optimizer]
        # Weight decay is disabled for biases and LayerNorm parameters.
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        # scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=args.train_steps)
        global_step = 0
        tr_loss = 0
        nb_tr_steps = 0
        best_acc = 0
        # new output file for periodic eval results
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as f:
            f.write('*'*80 + '\n')
        bar_epoch = tqdm(range(args.epoch), total=args.epoch)
        for epoch in bar_epoch:
            bar_epoch.set_description('epoch_{}'.format(epoch))
            logger.info("***** training epoch: epoch_" + str(epoch) + " *****")
            bar_dataset = tqdm(data_reader, total=int(np.ceil(train_size / args.chunksize)))
            chunk_index = 1
            for examples in bar_dataset:
                bar_dataset.set_description('chunk_{}'.format(chunk_index))
                train_dataset = TextDataset(examples)
                logger.info('***** chunk_train_examples:' + str(len(train_dataset)) + ' *****')
                if args.local_rank == -1:
                    train_sampler = RandomSampler(train_dataset)
                else:
                    train_sampler = DistributedSampler(train_dataset)
                train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps, collate_fn=collate_fn)
                num_train_optimization_steps = math.ceil(len(train_dataset)/args.train_batch_size)
                logger.info("***** Running training {}*****".format(chunk_index))
                logger.info("  Num examples = %d", len(train_dataset))
                logger.info("  Batch size = %d", args.train_batch_size)
                logger.info("  Num steps = %d", num_train_optimization_steps)
                chunk_index += 1
                model.train()
                bar = tqdm(range(num_train_optimization_steps), total=num_train_optimization_steps)
                # cycle() lets the fixed-length step loop below outlast one
                # pass over the (sub-batched) dataloader.
                train_dataloader = itertools.cycle(train_dataloader)
                for step in bar:
                    batch = next(train_dataloader)
                    # batch = tuple(t.to(device) for t in batch)
                    inputs, labels = batch
                    input_ids, mask_ids, segment_ids = inputs
                    # Generator: labels are moved to device lazily when
                    # loss_fn consumes them.
                    y_label = (y.to(device) for y in labels)
                    y_pred = model(input_ids=input_ids.to(device),  # tuple
                                   attention_mask=mask_ids.to(device),
                                   token_type_ids=segment_ids.to(device))
                    loss = loss_fn(y_pred, y_label)
                    if args.n_gpu > 1:
                        loss = loss.mean()  # average on multi-gpu
                    if args.gradient_accumulation_steps > 1:
                        loss = loss / args.gradient_accumulation_steps
                    tr_loss += loss.item()
                    train_loss = round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1), 4)
                    bar.set_description("loss {}".format(train_loss))
                    nb_tr_steps += 1
                    global_step += 1
                    loss.backward()
                    # Optimizer step only every gradient_accumulation_steps.
                    if global_step % args.gradient_accumulation_steps == 0:
                        optimizer.step()
                        optimizer.zero_grad()
                    if global_step % (args.eval_steps*args.gradient_accumulation_steps) == 0:
                        # Reset running-loss counters at each report point.
                        tr_loss = 0
                        nb_tr_steps = 0
                        logger.info("***** Report result *****")
                        logger.info("  %s = %s", 'global_step', str(global_step))
                        logger.info("  %s = %s", 'train loss', str(train_loss))
                    if args.do_eval and global_step % (args.eval_steps*args.gradient_accumulation_steps) == 0:
                        # ---- periodic dev-set evaluation ----
                        dev_data_reader = JsonChunkReader(dev_dir, convert_func, chunksize=args.chunksize)
                        dev_bar = tqdm(dev_data_reader, total=math.ceil(dev_size/args.chunksize))
                        dev_chunk_index = 1
                        chunk_result = {'train_loss': train_loss,
                                        'global_step': global_step,
                                        'micro_F1': 0}
                        for dev_examples in dev_bar:
                            dev_bar.set_description('chunk_{}'.format(dev_chunk_index))
                            # Flatten List[List[Example]] -> List[Example]:
                            # every window of every dev document is evaluated.
                            all_eval_examples = []
                            for _ in dev_examples:
                                for exam in _:
                                    all_eval_examples.append(exam)
                            dev_dataset = TestDataset(all_eval_examples)
                            logger.info('***** chunk_dev_examples: %d', len(dev_examples))
                            logger.info("***** Running evaluation {}*****".format(dev_chunk_index))
                            logger.info("  Num examples = %d", len(all_eval_examples))
                            logger.info("  Eval Batch size = %d", args.eval_batch_size)
                            dev_chunk_index += 1
                            # Run prediction for full data
                            dev_sampler = SequentialSampler(dev_dataset)
                            dev_dataloader = DataLoader(dev_dataset, sampler=dev_sampler, batch_size=args.eval_batch_size, collate_fn=eval_collate_fn)
                            model.eval()
                            with torch.no_grad():
                                result = Result(max_answer_length=args.max_answer_length)
                                for inputs, examples in tqdm(dev_dataloader):  # batch
                                    input_ids, input_mask, segment_ids = inputs
                                    y_preds = model(input_ids=input_ids.to(device), attention_mask=input_mask.to(device), token_type_ids=segment_ids.to(device))
                                    # (batch, seq) (batch, seq) (batch, 5)
                                    start_preds, end_preds, class_preds = (p.detach().cpu() for p in y_preds)
                                    result.update(examples, start_preds.numpy(), end_preds.numpy(), class_preds.numpy())
                            microf1_score = result.score()  # chunk_score
                            logger.info("  %s = %f", 'micro_f1', microf1_score)
                            # write to output file
                            with open(output_eval_file, "a") as writer:
                                for key in sorted(chunk_result.keys()):
                                    if key == 'micro_F1':
                                        writer.write("%s = %s\n" % (key, microf1_score))
                                    else:
                                        logger.info("  %s = %s", key, str(chunk_result[key]))
                                        writer.write("%s = %s\n" % (key, str(chunk_result[key])))
                                writer.write('*'*80)
                                writer.write('\n')
                            # Checkpoint whenever this chunk's score beats the
                            # best seen so far.
                            if microf1_score > best_acc:
                                best_acc = microf1_score
                                logger.info("  %s = %f", 'best_f1', best_acc)
                                print("Saving Model......")
                                model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
                                output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
                                torch.save(model_to_save.state_dict(), output_model_file)
                        # NOTE(review): this also deletes the name
                        # `data_reader` while the enclosing chunk loop is
                        # still iterating it (the iterator itself survives
                        # via tqdm's reference) — confirm intent.
                        del train_dataloader, train_dataset, dev_dataset, dev_dataloader, data_reader, dev_data_reader
                        gc.collect()
    # The offline test/inference path is kept below for reference but disabled:
    # if args.do_test:
    #     args.do_train=False
    #     model = BertForQuestionAnswering.from_pretrained(os.path.join(args.output_dir, "pytorch_model.bin"), config=config)
    #     model = model.to(args.device)
    #     #CUDA distribute
    #     if args.n_gpu > 1:
    #         model = torch.nn.DataParallel(model)
    #     test_dir = os.path.join(args.data_dir, 'test.jsonl')
    #     test_size = len(open(test_dir,'r').readlines())
    #     public_dataset, private_dataset = False, False
    #     if test_size == 346:
    #         public_dataset = True
    #         print('public dataset')
    #     elif test_size > 346:
    #         private_dataset = True
    #         print('private dataset')
    #     test_convert_func = functools.partial(convert_test_data,
    #                                           tokenizer=tokenizer,
    #                                           max_seq_len=args.max_seq_length,
    #                                           max_question_len=args.max_question_len)
    #     test_examples = Test_Json_Reaser(test_dir, test_convert_func) #List[List[Test_Example]]
    #     # print(len(test_examples))
    #     all_test_examples = []
    #     for _ in test_examples:
    #         for exam in _:
    #             all_test_examples.append(exam)
    #     # print(len(all_test_examples))
    #     Test_dataset = TestDataset(all_test_examples)
    #     logger.info('***** test_examples:' + str(len(Test_dataset))+' *****')
    #     logger.info("***** Running Testing *****")
    #     logger.info("  Test Batch size = %d", args.eval_batch_size)
    #     # Run prediction for full data
    #     test_sampler = SequentialSampler(Test_dataset)
    #     Test_dataloader = DataLoader(Test_dataset, sampler=test_sampler, batch_size=args.eval_batch_size, collate_fn=test_collate_fn)
    #     model.eval()
    #     with torch.no_grad():
    #         test_result = Test_Result()
    #         for inputs, examples in tqdm(Test_dataloader):#batch
    #             input_ids, input_mask, segment_ids = inputs
    #             y_preds = model(input_ids=input_ids.to(device), attention_mask=input_mask.to(device), token_type_ids=segment_ids.to(device))
    #             start_preds, end_preds, class_preds = (p.detach().cpu() for p in y_preds)
    #             start_logits, start_index = torch.max(start_preds, dim=1)
    #             # print(start_logits.size())
    #             #(batch,)&(batch,)
    #             end_logits, end_index = torch.max(end_preds, dim=1)
    #             cls_logits = start_preds[:, 0] + end_preds[:, 0]#[cls] logits
    #             logits = start_logits+end_logits-cls_logits # (batch,)
    #             indices = torch.stack((start_index, end_index)).transpose(0,1)#(batch,2)
    #             test_result.update(examples, logits.numpy(), indices.numpy(), class_preds)
    #     long_answers, short_answers, class_answers = test_result.generate_prediction()
    #     print('long & short answers predict done!')
    #     if public_dataset:
    #         write_answer_public(long_answers, short_answers, os.path.join(args.data_dir, 'sample_submission.csv'), 'submission.csv')
    #     elif private_dataset:
    #         write_answer_private(long_answers, short_answers, 'submission.csv')
# Script entry point.
if __name__ == "__main__":
    main()
from src.alerter.alert_code.alert_code import AlertCode
from src.alerter.alert_code.github_alert_code import GithubAlertCode
from src.alerter.alert_code.internal_alert_code import InternalAlertCode
from src.alerter.alert_code.system_alert_code import SystemAlertCode
|
# Minimal Tkinter (Python 2) demo: a LabelFrame containing a single Label.
from Tkinter import *

window = Tk()
frame = LabelFrame(window, text="This is a LabelFrame")
frame.pack(fill="both", expand="yes")
inner_label = Label(frame, text="Inside the LabelFrame")
inner_label.pack()
window.mainloop()
# Generated by Django 2.0.7 on 2018-08-01 06:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: inverts several relationships. Forward FKs on
    # Site/User are removed and replaced by FKs on the child models pointing
    # back at Site / the user model (nullable, CASCADE on delete).

    dependencies = [
        ('hsse_api', '0002_auto_20180801_0032'),
    ]

    operations = [
        # Drop the old forward FK fields from Site.
        migrations.RemoveField(
            model_name='site',
            name='activity',
        ),
        migrations.RemoveField(
            model_name='site',
            name='employee_com_activity',
        ),
        migrations.RemoveField(
            model_name='site',
            name='environmental_indicator',
        ),
        migrations.RemoveField(
            model_name='site',
            name='user',
        ),
        # Drop the old forward FK fields from User.
        migrations.RemoveField(
            model_name='user',
            name='audit_inspection',
        ),
        migrations.RemoveField(
            model_name='user',
            name='corrective_action',
        ),
        migrations.RemoveField(
            model_name='user',
            name='report',
        ),
        # Add the reversed FKs: child models now reference the user model...
        migrations.AddField(
            model_name='audit_inspection',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='corrective_action',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # ...and Site.
        migrations.AddField(
            model_name='employee_community_activity',
            name='site',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hsse_api.Site'),
        ),
        migrations.AddField(
            model_name='environmental_indicators',
            name='site',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hsse_api.Site'),
        ),
        migrations.AddField(
            model_name='report',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='safety_activity',
            name='site',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hsse_api.Site'),
        ),
        migrations.AddField(
            model_name='user',
            name='site',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hsse_api.Site'),
        ),
    ]
|
from memory_profiler import profile


@profile
def load():
    """Memory-profiling benchmark for udapi: load a CoNLL-U file, traverse
    it in several ways, rewrite every deprel, recompute sentence text and
    store the result."""
    from udapi.core.document import Document

    doc = Document()
    doc.load_conllu('cs-ud-train-l.conllu')

    # Pass 1: touch every node's form and lemma.
    for bundle in doc:
        for tree in bundle:
            for node in tree.descendants:
                combined = node.form + node.lemma

    # Pass 2: collect nodes whose parent is a determiner attached to an object.
    for bundle in doc:
        for tree in bundle:
            matches = [n for n in tree.descendants
                       if n.parent.deprel == "det" and n.parent.parent.deprel == "obj"]

    # Pass 3: rewrite every dependency relation.
    for bundle in doc:
        for tree in bundle:
            for node in tree.descendants:
                node.deprel = 'dep'

    # Pass 4: recompute the sentence text from the node forms.
    for bundle in doc:
        for tree in bundle:
            tree.compute_text()

    doc.store_conllu('hello.conllu')


if __name__ == '__main__':
    load()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.