text stringlengths 8 6.05M |
|---|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from tqdm import tqdm
import time
import torch
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from Utils.utils import compute_metrics, softmax
def train(args, train_dataloader, dev_dataloader, model, optimizer, scheduler, tokenizer):
    """Fine-tune *model*, periodically evaluating on dev data and checkpointing.

    Batch layout (by position, inferred from usage here and in evaluate()):
    0=example idx, 1=input_ids, 2=attention_mask, 3=token_type_ids, 4=labels.

    Returns (global_step, epoch_loss / global_step).
    """
    # NOTE(review): ':' in the timestamp makes this path invalid on Windows.
    writer = SummaryWriter(log_dir=args.log_dir + os.sep +
                           time.strftime("%Y:%m:%d:%H:%M:%S", time.localtime(time.time())))
    global_step = 0
    best_dev_loss = float('inf')
    epoch_loss = 0.0
    # NOTE(review): logging_loss is never updated, so the logged train_loss is
    # a running average since training start rather than since the last log.
    logging_loss = 0.0
    train_step = 0
    for epoch in range(int(args.num_train_epochs)):
        print('---------------- Epoch: {}s start ----------'.format(epoch))
        all_preds = np.array([], dtype=int)
        all_labels = np.array([], dtype=int)
        epoch_iterator = tqdm(train_dataloader, desc="Iteration")
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[1], "attention_mask": batch[2], "labels": batch[4]}
            # BUG FIX: token_type_ids is batch[3]; batch[2] (the attention
            # mask) was being passed twice -- evaluate() already uses batch[3].
            inputs["token_type_ids"] = (
                batch[3] if args.model_type in [
                    "bert", "xlnet", "albert"] else None
            )
            outputs = model(**inputs)
            loss, logits = outputs[:2]
            if args.n_gpu > 1:
                loss = loss.mean()  # average over data-parallel replicas
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            train_step += 1
            epoch_loss += loss.item()
            preds = logits.detach().cpu().numpy()
            preds = np.argmax(preds, axis=1)
            label_ids = batch[4].to('cpu').numpy()
            all_labels = np.append(all_labels, label_ids)
            all_preds = np.append(all_preds, preds)
            # optimizer step only every gradient_accumulation_steps batches
            if (step+1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                if global_step != 0 and global_step % args.save_step == 0:
                    # log train/dev metrics for tensorboard
                    dev_loss, dev_metric = evaluate(args, model, dev_dataloader)
                    # NOTE(review): argument order differs from evaluate(),
                    # which calls compute_metrics(all_labels, all_preds) --
                    # confirm the expected signature and make the two agree.
                    train_metric = compute_metrics(all_preds, all_labels)
                    train_loss = (epoch_loss-logging_loss) / train_step
                    learn_rate = scheduler.get_lr()[0]
                    logs = {}
                    logs['loss'+ os.sep +'train'] = train_loss
                    logs['loss' + os.sep + 'dev'] = dev_loss
                    logs['learning_rate'] = learn_rate
                    for key, val in train_metric.items():
                        logs[key+os.sep+'train'] = val
                    for key, val in dev_metric.items():
                        logs[key+os.sep+'dev'] = val
                    for key, val in logs.items():
                        writer.add_scalar(key, val, global_step//args.save_step)
                    # save the checkpoint using best dev-loss
                    if dev_loss < best_dev_loss:
                        best_dev_loss = dev_loss
                        output_dir = args.output_dir
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        model_to_save = (
                            model.module if hasattr(model, "module") else model
                        )
                        model_to_save.save_pretrained(args.output_dir)
                        tokenizer.save_pretrained(output_dir)
                        torch.save(args, os.path.join(
                            output_dir, "training_args.bin"))
                        torch.save(optimizer.state_dict(), os.path.join(
                            output_dir, "optimizer.pt"))
                        torch.save(scheduler.state_dict(), os.path.join(
                            output_dir, "scheduler.pt"))
    writer.close()
    return global_step, epoch_loss/global_step
def evaluate(args, model, dataloader, do_predict=False):
    """Evaluate *model* over *dataloader*.

    When do_predict is True, also dump one line per example
    (idx, pred, label, positive-class probability) to eval_results.txt.
    Returns (mean eval loss, metrics dict).
    """
    model.eval()
    all_preds = np.array([], dtype=int)
    all_labels = np.array([], dtype=int)
    all_idxs = np.array([], dtype=int)
    all_confidences = []
    eval_loss = 0.0
    eval_step = 0
    for batch in tqdm(dataloader, desc='Eval'):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[1],
                "attention_mask": batch[2],
                "labels": batch[4],
                # segment ids only apply to these model families
                "token_type_ids": (batch[3] if args.model_type in
                                   ["bert", "xlnet", "albert"] else None),
            }
            batch_loss, logits = model(**inputs)[:2]
        eval_loss += batch_loss.mean().item()
        eval_step += 1
        raw_scores = logits.detach().cpu().numpy()
        all_confidences.extend(raw_scores.tolist())
        all_preds = np.append(all_preds, np.argmax(raw_scores, axis=1))
        all_labels = np.append(all_labels, batch[4].detach().cpu().numpy())
        all_idxs = np.append(all_idxs, batch[0].detach().cpu().numpy())
    eval_loss = eval_loss / eval_step
    if do_predict:
        eval_result_file = os.path.join(args.output_dir, 'eval_results.txt')
        # positive-class probability from the raw logits
        probs = [softmax(x)[-1] for x in all_confidences]
        with open(eval_result_file, 'w') as f:
            for idx, pred, label, prob in zip(all_idxs, all_preds, all_labels, probs):
                f.write(str(idx) + '\t' + str(pred) + '\t' + str(label) + '\t' + str(prob) + '\n')
    metrics = compute_metrics(all_labels, all_preds)
    return eval_loss, metrics
|
from copy import deepcopy
from functools import partial
from tinymongo import TinyMongoClient
from . import io
from datatypes import BankAccountTransaction, BankCreditCardTransaction, LocalAccountTransaction
from datatypes import LocalAccount, Card
from collections import namedtuple
def load(database_folder):
    """Open the TinyMongo 'banking' database stored under *database_folder*."""
    return TinyMongoClient(database_folder).banking
def get_account_access_code(db, account):
    """Return the stored access code for *account*."""
    return io.get_account_access_code(db, account.id)
def update_account_access_code(db, account, access_code):
    """Store *access_code* for *account* and return the io-layer result."""
    result = io.update_account_access_code(db, account.id, access_code)
    return result
def get_bank_access_code(db, bank_config):
    """Return the stored access code for the bank identified by *bank_config*."""
    return io.get_bank_access_code(db, bank_config.id)
def update_bank_access_code(db, bank_config, access_code):
    """Store *access_code* for the bank identified by *bank_config*."""
    result = io.update_bank_access_code(db, bank_config.id, access_code)
    return result
def last_account_transaction_date(db, account_number):
    """Return the newest stored account transaction date, or None when empty."""
    results = io.find_account_transactions(
        db.account_transactions, account_number, sort_field='transaction_date.date')
    if not results:
        return None
    return results[-1].transaction_date
def last_credit_card_transaction_date(db, credit_card_number):
    """Return the newest stored credit-card transaction date, or None when empty."""
    results = io.find_credit_card_transactions(
        db.credit_card_transactions, credit_card_number, sort_field='transaction_date.date')
    if not results:
        return None
    return results[-1].transaction_date
def find_transactions(db, account, **query):
    """Look up transactions for a LocalAccount or a Card.

    Returns None (implicitly) for any other account type.
    """
    if isinstance(account, LocalAccount):
        results = io.find_local_account_transactions(db, account_id=account.id, **query)
    elif isinstance(account, Card):
        results = io.find_credit_card_transactions(db, credit_card_number=account.number, **query)
    else:
        results = None
    return results
def get_account_balance(db, account):
    """Return the latest balance of a LocalAccount (0 when no transactions).

    Non-LocalAccount types yield None, matching the original behavior.
    """
    if not isinstance(account, LocalAccount):
        return None
    results = io.find_local_account_transactions(db, account.id)
    if results:
        return results[-1].balance
    return 0
def insert_transaction(db, transaction):
    """Persist *transaction* when it belongs to a LocalAccount."""
    if not isinstance(transaction.account, LocalAccount):
        return None
    return io.insert_local_account_transaction(db, transaction)
def remove_transactions(db, transactions):
    """Remove every transaction in *transactions* from the database.

    A plain loop replaces list(map(partial(...))), which built a throwaway
    list purely for its side effects.
    """
    for transaction in transactions:
        remove_transaction(db, transaction)
def remove_transaction(db, transaction):
    """Remove a single credit-card transaction; other types are ignored."""
    if not isinstance(transaction, BankCreditCardTransaction):
        return None
    return io.remove_credit_card_transaction(db, transaction)
def update_credit_card_transactions(db, credit_card_number, raw_fetched_transactions):
    """Merge fetched credit-card transactions into storage.

    Raises io.DatabaseError when the merge leaves duplicated sequence numbers.
    Returns a (removed, inserted, updated) count tuple.
    """
    ops = namedtuple(
        'TransactionOperations',
        'insert, update, find, find_one, find_matching, count, remove'
    )(
        io.insert_credit_card_transaction,
        io.update_credit_card_transaction,
        io.find_credit_card_transactions,
        io.find_one_credit_card_transaction,
        io.find_matching_credit_card_transaction,
        io.count_credit_card_transactions,
        io.remove_credit_card_transaction,
    )
    removed, inserted, updated = update_transactions(
        db,
        TransactionDataclass=BankCreditCardTransaction,
        transaction_grouping_id=credit_card_number,
        transaction_key_fields=['transaction_date', 'amount', 'type.name'],
        operations=ops,
        raw_fetched_transactions=raw_fetched_transactions,
    )
    # sanity check: sequence numbers must stay unique after the merge
    duplicated_sequence_found = io.check_credit_card_sequence_numbering_consistency(db, credit_card_number)
    if duplicated_sequence_found:
        raise io.DatabaseError('Duplicated sequence numbers detected: {}'.format(
            str(duplicated_sequence_found))
        )
    return (removed, inserted, updated)
def update_account_transactions(db, account_number, raw_fetched_transactions):
    """Merge fetched bank-account transactions into storage.

    Raises io.DatabaseError when the merge leaves an inconsistent balance or
    duplicated sequence numbers.  Returns (removed, inserted, updated).
    """
    ops = namedtuple(
        'TransactionOperations',
        'insert, update, find, find_one, find_matching, count, remove'
    )(
        io.insert_account_transaction,
        io.update_account_transaction,
        io.find_account_transactions,
        io.find_one_account_transaction,
        io.find_matching_account_transaction,
        io.count_account_transactions,
        io.remove_account_transaction,
    )
    removed, inserted, updated = update_transactions(
        db,
        TransactionDataclass=BankAccountTransaction,
        transaction_grouping_id=account_number,
        transaction_key_fields=['transaction_date', 'amount', 'balance'],
        operations=ops,
        raw_fetched_transactions=raw_fetched_transactions,
    )
    # sanity check: running balance must agree with the amounts
    inconsistent_transaction = io.check_balance_consistency(db, account_number)
    if inconsistent_transaction:
        raise io.DatabaseError(
            'Balance is inconsistent at {transaction.transaction_date} [balance={transaction.balance}, amount={transaction.amount}]'.format(
                transaction=inconsistent_transaction)
        )
    # sanity check: sequence numbers must stay unique after the merge
    duplicated_sequence_found = io.check_account_sequence_numbering_consistency(db, account_number)
    if duplicated_sequence_found:
        raise io.DatabaseError('Duplicated sequence numbers detected: {}'.format(
            str(duplicated_sequence_found))
        )
    return (removed, inserted, updated)
def update_transactions(db, TransactionDataclass, transaction_grouping_id, transaction_key_fields, operations, raw_fetched_transactions):
    """Merge freshly fetched transactions into the stored history.

    db                       --- database handle
    TransactionDataclass     --- class used to wrap each raw fetched transaction
    transaction_grouping_id  --- account number / credit-card number
    transaction_key_fields   --- fields used to match fetched vs stored rows
    operations               --- namedtuple of io callables:
                                 insert, update, find, find_one,
                                 find_matching, count, remove
    raw_fetched_transactions --- transactions as fetched from the bank

    Returns a (removed, inserted, updated) count tuple.
    NOTE(review): returns None (not a 3-tuple) when raw_fetched_transactions
    is empty -- callers that unpack three values should guard for this.
    Raises io.DivergedHistoryError when the fetched block overlaps stored
    history without any matching transaction.
    """
    if not raw_fetched_transactions:
        return
    # queues of pending database operations, flushed by process_actions()
    actions = {
        'remove': [],
        'insert': [],
        'update': []
    }
    def sequence_transactions(transactions, first_seq):
        # Yield deep copies numbered consecutively from first_seq via _seq.
        for seq, transaction in enumerate(transactions, first_seq):
            _transaction = deepcopy(transaction)
            _transaction._seq = seq
            yield _transaction
    def process_actions():
        # Flush the queued remove/insert/update actions; report counts.
        removed = list(map(partial(operations.remove, db), actions['remove']))
        inserted = list(map(partial(operations.insert, db), actions['insert']))
        updated = list(map(partial(operations.update, db), actions['update']))
        return (len(removed), len(inserted), len(updated))
    fetched_transactions = list(map(
        lambda transaction: TransactionDataclass(**transaction.__dict__),
        raw_fetched_transactions
    ))
    # First use case: All fetched transactions are new
    # (in other words, we don't have any stored transaction yet)
    transaction_count = operations.count(db, transaction_grouping_id)
    if transaction_count == 0:
        actions['insert'].extend(
            sequence_transactions(fetched_transactions, first_seq=0)
        )
        return process_actions()
    # Next we process all use cases that we add a block of completely new
    # transactions either on the head or on the tail, no overlaps
    first_stored_transaction = operations.find_one(db, transaction_grouping_id, sort_seq=1)
    last_stored_transaction = operations.find_one(db, transaction_grouping_id, sort_seq=-1)
    first_fetched_transaction = fetched_transactions[0]
    last_fetched_transaction = fetched_transactions[-1]
    # All fetched transactions are newer: append after the last stored _seq
    if first_fetched_transaction.transaction_date > last_stored_transaction.transaction_date:
        actions['insert'].extend(
            sequence_transactions(
                fetched_transactions,
                first_seq=last_stored_transaction._seq + 1
            )
        )
        return process_actions()
    # All fetched transactions are older: insert them at the head and
    # renumber the existing transactions to follow them
    if last_fetched_transaction.transaction_date < first_stored_transaction.transaction_date:
        existing_transactions = operations.find(
            db, transaction_grouping_id
        )
        actions['insert'].extend(
            sequence_transactions(
                fetched_transactions,
                first_seq=0
            )
        )
        actions['update'].extend(
            sequence_transactions(
                existing_transactions,
                first_seq=actions['insert'][-1]._seq + 1
            )
        )
        return process_actions()
    # At this point, we will have some kind of overlap. This overlap can match
    # all, some or none of the fetched transactions on the database:
    overlapping_transactions = list(filter(
        bool,
        map(
            partial(operations.find_matching, db, transaction_grouping_id),
            fetched_transactions
        )
    ))
    # All transactions are newer and neither in the tail or head
    # so we have a diverged history
    if not overlapping_transactions:
        raise io.DivergedHistoryError(first_fetched_transaction, 'All transactions overlap without matches')
    # We have at least one overlapping, so at this point, the
    # diff algorithm will take care of extracting the insertions, updates or
    # diverged history events as needed
    existing_transactions = operations.find(
        db, transaction_grouping_id,
        since_date=overlapping_transactions[0].transaction_date
    )
    for action, transaction in io.select_new_transactions(fetched_transactions, existing_transactions, transaction_key_fields):
        actions[action].append(transaction)
    return process_actions()
def update_transaction(db, transaction):
    """Update a stored transaction, dispatching on its exact class.

    Raises KeyError for any class not in the handler table (matches the
    original dict-lookup behavior, including for subclasses).
    """
    handlers = {
        BankAccountTransaction: io.update_account_transaction,
        BankCreditCardTransaction: io.update_credit_card_transaction,
    }
    handler = handlers[transaction.__class__]
    handler(db, transaction)
|
def pairs(ar):
    """Count pairs (ar[0],ar[1]), (ar[2],ar[3]), ... whose members are
    consecutive integers (differ by exactly 1).

    A trailing unpaired element is ignored.  Replaces len(ar[:-1]),
    which built a throwaway copy just to get a length.

    >>> pairs([1, 2, 5, 8, -4, -3, 7, 6, 5])
    3
    """
    return sum(1 for a, b in zip(ar[::2], ar[1::2]) if abs(a - b) == 1)
'''
In this Kata your task will be to return the count of pairs that have consecutive numbers as follows:
pairs([1,2,5,8,-4,-3,7,6,5]) = 3
The pairs are selected as follows [(1,2),(5,8),(-4,-3),(7,6),5]
--the first pair is (1,2) and the numbers in the pair are consecutive; Count = 1
--the second pair is (5,8) and are not consecutive
--the third pair is (-4,-3), consecutive. Count = 2
--the fourth pair is (7,6), also consecutive. Count = 3.
--the last digit has no pair, so we ignore.
'''
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# create_iru_bias_plots.py: create iru bias data plot #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 12, 2021 #
# #
#########################################################################################
import os
import sys
import re
import string
import math
import numpy
import astropy.io.fits as pyfits
import time
from datetime import datetime
import Chandra.Time
import unittest
#
#--- plotting routine
#
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
# read the house-keeping dir_list file; each line is "<value> : <name>"
path = '/data/mta/Script/IRU/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # SECURITY NOTE(review): exec() on file contents -- dir_list is a
    # house-keeping file assumed trusted; never point this at user input
    exec("%s = %s" %(var, line))
#
#--- append paths to private folders to the python module search path
#--- (bin_dir / mta_dir are defined by the exec above)
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import functions
#
import mta_common_functions as mcf
#
#--- temp writing file name
#--- NOTE(review): time*random is collision-prone; tempfile would be safer
#
import random
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- some data
#
bias_list = ['aogbias1', 'aogbias2', 'aogbias3']
# column names found in the yearly iru_gyro_bias fits files
col_name = ['time', 'roll_bias_avg', 'roll_bias_std', 'pitch_bias_avg',\
            'pitch_bias_std', 'yaw_bias_avg', 'yaw_bias_std']
# radians/sec -> arcsec/sec conversion factor
rad2sec = 360.0 * 60. * 60. /(2.0 * math.pi)
m_list = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
#----------------------------------------------------------------------------------
#-- create_iru_bias_plots: create iru bias data plots ---
#----------------------------------------------------------------------------------
def create_iru_bias_plots(run=['w', 'm', 'y', 't'], stime =''):
    """
    create iru bias data plots
    input:  run   --- list of plot kinds to create:
                      'w' weekly (Fri-Fri), 'm' monthly (calendar month),
                      'y' yearly (calendar year), 't' full range
            stime --- time around which the plot(s) are made, either in
                      seconds from 1998.1.1 or <yyyy>:<ddd>:<hh>:<mm>:<ss>
                      (for the yearly plot a bare year also works)
    output: <web_dir>/Plots_new/<year>/<head>_bias.png and _hist.png
            (the full-range plots are written without the <year> directory)
    """
    for kind in run:
        if kind == 'w':
            tstart, tstop, head = set_weekly_range(stime)
            print("Weekly: " + str(tstart) + '<-->' + str(tstop))
            plot_selected_period(tstart, tstop, head)
        elif kind == 'm':
            tstart, tstop, head = set_monthly_range(stime)
            print("Monthly: " + str(tstart) + '<-->' + str(tstop))
            plot_selected_period(tstart, tstop, head)
            # early in the month only the previous month's plots exist,
            # so publish a placeholder page
            add_empty_plot_page('mon')
        elif kind == 'y':
            tstart, tstop, head = set_yearly_range(stime)
            print("Yearly: " + str(tstart) + '<-->' + str(tstop))
            plot_selected_period(tstart, tstop, head)
            # same placeholder logic for the first days of a new year
            add_empty_plot_page('year')
        elif kind == 't':
            print("Full period")
            tstart, tstop, head = set_full_range()
            plot_selected_period(tstart, tstop, head, tunit=1)
#----------------------------------------------------------------------------------
#-- plot_selected_period: create a plot for a specified peiod --
#----------------------------------------------------------------------------------
def plot_selected_period(tstart, tstop, head, tunit=0):
    """
    create bias and histogram plots for a specified period
    input:  tstart --- starting time in seconds from 1998.1.1
            tstop  --- stopping time in seconds from 1998.1.1
            head   --- prefix of the plot files
            tunit  --- 0 (default): period plot under a <year> directory;
                       otherwise full-range plot at the top level
    output: <web_dir>/Plots_new/<year>/<head>_bias.png and _hist.png
            (without <year> for the full-range case); returns False when
            bias_plot fails (e.g. no data in the period)
    """
    # +1 hr so the boundary time falls safely inside the intended year
    year = find_year(tstart + 3600.0)
    data = read_data(tstart, tstop)
    dtime = data[0]
    #
    #--- bias plot
    #
    if tunit == 0:
        outdir = web_dir + 'Plots_new/' + str(year)
        os.system('mkdir -p ' + outdir)
        outname = outdir + '/' + head + '_bias.png'
    else:
        outname = web_dir + 'Plots_new/' + head + '_bias.png'
    out = bias_plot(dtime, data[1]*rad2sec, data[3]*rad2sec, data[5]*rad2sec, outname, tunit)
    if out == False:
        return out
    #
    #--- histogram plot of point-to-point shifts
    #
    if tunit == 0:
        outdir = web_dir + 'Plots_new/' + str(year)
        os.system('mkdir -p ' + outdir)
        outname = outdir + '/' + head + '_hist.png'
    else:
        outname = web_dir + 'Plots_new/' + head + '_hist.png'
    shift = find_shift(data)
    hist_plot(shift[0], shift[1], shift[2], outname)
#----------------------------------------------------------------------------------
#-- read_data: for the give period, find data fits files and read it --
#----------------------------------------------------------------------------------
def read_data(tstart, tstop):
    """
    for the given period, find yearly data fits files and read them
    input:  tstart --- starting time in seconds from 1998.1.1
            tstop  --- stopping time in seconds from 1998.1.1
    output: out    --- a list of arrays of data, restricted to the period
                       (see col_name for which columns are extracted)
    """
    byear = find_year(tstart)
    eyear = find_year(tstop)
    c_len = len(col_name)
    chk = 0
    save = []
    for year in range(byear, eyear+1):
        fits = data_dir + 'iru_gyro_bias_year' + str(year) + '.fits'
        try:
            fout = pyfits.open(fits)
            data = fout[1].data
            fout.close()
        # BUG FIX: was a bare "except:", which also swallowed SystemExit
        # and KeyboardInterrupt; a yearly file may simply not exist yet.
        except Exception:
            continue
        #
        #--- for the case, first year or only one year data file to read:
        #
        if chk == 0:
            for k in range(0, c_len):
                save.append(data[col_name[k]])
            chk = 1
        #
        #--- reading the following years and appending the data
        #
        else:
            for k in range(0, c_len):
                temp = numpy.append(save[k], data[col_name[k]])
                save[k] = temp
    out = select_out_data(save, tstart, tstop)
    return out
#----------------------------------------------------------------------------------
#-- find_year: find year from the Chandra time --
#----------------------------------------------------------------------------------
def find_year(stime):
    """
    find the calendar year of a Chandra time
    input:  stime --- time in seconds from 1998.1.1
    output: year  --- year as int
    """
    date = Chandra.Time.DateTime(stime).date
    return int(date.split(':')[0])
#----------------------------------------------------------------------------------
#-- select_out_data: extract data for a specific period ---
#----------------------------------------------------------------------------------
def select_out_data(data, tstart, tstop):
    """
    extract data for a specific period
    input:  data   --- a list of arrays of data; data[0] is the time column
            tstart --- starting time in seconds from 1998.1.1
            tstop  --- stopping time in seconds from 1998.1.1
    output: save   --- a list of arrays restricted to [tstart, tstop)
    """
    # drop entries whose time value is nan
    dtime = numpy.array(data[0])
    valid = ~numpy.isnan(dtime)
    dtime = dtime[valid]
    # then keep only points inside the requested window
    in_period = (dtime >= tstart) & (dtime < tstop)
    save = []
    for column in data:
        cleaned = numpy.array(column)[valid]
        save.append(cleaned[in_period])
    return save
#----------------------------------------------------------------------------------
#-- find_shift: compute the shift from the previous data points --
#----------------------------------------------------------------------------------
def find_shift(data):
    """
    compute point-to-point shifts of the avg-bias columns (1, 3, 5)
    input:  data --- a list of data arrays as returned by read_data
    output: save --- a list of three shift lists, each one element shorter
                     than its input column; values in arcsec/sec
    """
    save = []
    for k in (1, 3, 5):
        adata = data[k] * rad2sec
        deltas = [curr - prev for prev, curr in zip(adata, adata[1:])]
        save.append(deltas)
    return save
#----------------------------------------------------------------------------------
#-- bias_plot: create three panel plots of bias time trend --
#----------------------------------------------------------------------------------
def bias_plot(time, data1, data2, data3, outname, tunit=0):
    """
    create three panel plots of bias time trend (roll / pitch / yaw)
    input:  time    --- a list of time data in seconds from 1998.1.1
            data1   --- a list of data for panel 1 (roll bias, arcsec/sec)
            data2   --- a list of data for panel 2 (pitch bias, arcsec/sec)
            data3   --- a list of data for panel 3 (yaw bias, arcsec/sec)
            outname --- output png file name
            tunit   --- 0: x axis in day of year (default); else fractional year
    output: outname --- a png file; returns True on success, False when the
                        time list is empty or the x range cannot be set

    NOTE(review): the parameter name "time" shadows the stdlib time module;
    the module is not used inside this function so nothing breaks here, but
    a rename would be safer.
    """
    #
    #--- convert time format; nothing to plot for an empty time list
    #
    if len(time) == 0:
        return False
    [btime, byear] = convert_time_format(time, tunit)
    #
    #--- set plotting range (set_x_range can raise on bad/empty data)
    #
    try:
        [xmin, xmax] = set_x_range(btime)
    except:
        return False
    #
    #--- set sizes
    #
    fsize = 8
    weight = 'bold'
    color = 'blue'
    color2 = 'red'      # NOTE(review): unused in this function
    marker = '.'
    psize = 0
    lw = 1
    width = 7.0         # figure size in inches
    height = 5.0
    resolution = 200    # dpi of the saved png
    #
    #--- close everything opened before
    #
    plt.close('all')
    #
    #--- set font size / weight
    #
    mpl.rcParams['font.size'] = fsize
    mpl.rcParams['font.weight'] = weight
    props = font_manager.FontProperties(size=fsize)   # NOTE(review): overwritten on the next line
    props = font_manager.FontProperties(weight=weight)
    plt.subplots_adjust(hspace=0.05)
    #
    #--- first panel: roll bias
    #
    ax1 = plt.subplot(311)
    [ymin, ymax] = set_y_range(data1, tunit)
    ax1.set_autoscale_on(False)
    ax1.set_xbound(xmin,xmax)
    ax1.set_xlim(xmin=xmin, xmax=xmax, auto=False)
    ax1.set_ylim(ymin=ymin, ymax=ymax, auto=False)
    p, = plt.plot(btime, data1, color=color, lw=lw, marker=marker, markersize=psize)
    ax1.set_ylabel("Roll Bias (arcsec/sec)", fontweight=weight)
    #
    #--- don't plot tick labels on the shared x axis
    #
    line = ax1.get_xticklabels()
    for label in line:
        label.set_visible(False)
    #
    #--- second panel: pitch bias
    #
    ax2 = plt.subplot(312)
    [ymin, ymax] = set_y_range(data2, tunit)
    ax2.set_autoscale_on(False)
    ax2.set_xbound(xmin,xmax)
    ax2.set_xlim(xmin=xmin, xmax=xmax, auto=False)
    ax2.set_ylim(ymin=ymin, ymax=ymax, auto=False)
    p, = plt.plot(btime, data2, color=color, lw=lw, marker=marker, markersize=psize)
    ax2.set_ylabel("Pitch Bias (arcsec/sec)", fontweight=weight)
    #
    #--- don't plot tick labels
    #
    line = ax2.get_xticklabels()
    for label in line:
        label.set_visible(False)
    #
    #--- third panel: yaw bias
    #
    ax3 = plt.subplot(313)
    [ymin, ymax] = set_y_range(data3, tunit)
    ax3.set_autoscale_on(False)
    ax3.set_xbound(xmin,xmax)
    ax3.set_xlim(xmin=xmin, xmax=xmax, auto=False)
    ax3.set_ylim(ymin=ymin, ymax=ymax, auto=False)
    p, = plt.plot(btime, data3, color=color, lw=lw, marker=marker, markersize=psize)
    ax3.set_ylabel("Yaw Bias (arcsec/sec)", fontweight=weight)
    # xlabel presumably comes from the module-level "from pylab import *" -- confirm
    if tunit == 0:
        line = 'Year Date (' + str(byear) + ')'
        xlabel(line, fontweight=weight)
    else:
        xlabel('Time (Year)', fontweight=weight)
    #
    #--- save the figure
    #
    plt.subplots_adjust(bottom=0.05,hspace=0.01)
    fig = matplotlib.pyplot.gcf()   # "matplotlib" name presumably from the pylab star import -- confirm
    fig.set_size_inches(width, height)
    plt.tight_layout(h_pad=0.05)
    plt.savefig(outname, format='png', dpi=resolution)
    plt.close('all')
    return True
#----------------------------------------------------------------------------------
#-- hist_plot: create three panel histogram plot --
#----------------------------------------------------------------------------------
def hist_plot(data1, data2, data3, outname):
    """
    create three panel histogram plot of bias drift shifts
    input:  data1   --- a list of data for panel 1 (roll shifts)
            data2   --- a list of data for panel 2 (pitch shifts)
            data3   --- a list of data for panel 3 (yaw shifts)
            outname --- the output file name
    output: outname --- png file
    """
    #
    #--- fixed plotting range so histograms are comparable between periods
    #
    xmin = -0.02
    xmax = 0.02
    #
    #--- set sizes
    #
    fsize = 8
    weight = 'bold'
    color = 'blue'
    color2 = 'red'      # NOTE(review): unused in this function
    marker = '.'        # NOTE(review): unused in this function
    psize = 0           # NOTE(review): unused in this function
    lw = 1              # NOTE(review): unused in this function
    width = 7.0
    height = 5.0
    resolution = 300
    alpha = 0.5
    bins = 300
    #
    #--- close everything opened before
    #
    plt.close('all')
    #
    #--- set font size / weight
    #
    mpl.rcParams['font.size'] = fsize
    mpl.rcParams['font.weight'] = weight
    props = font_manager.FontProperties(size=fsize)   # NOTE(review): overwritten on the next line
    props = font_manager.FontProperties(weight=weight)
    #
    #--- first panel: roll
    #
    ax1 = plt.subplot(311)
    n1, bins1, patches1 = plt.hist(data1, bins=bins, range=[xmin, xmax], facecolor=color,\
                                   alpha=alpha, histtype='stepfilled', log=True)
    #
    #--- fix the plotting range
    #
    plt.xlim(xmin, xmax)
    ax1.text(0.05, 0.95, "Roll", transform=ax1.transAxes, fontsize=fsize, verticalalignment='top')
    #
    #--- remove the tick labels for this plot
    #
    line = ax1.get_xticklabels()
    for label in line:
        label.set_visible(False)
    #
    #--- second panel: pitch
    #
    ax2 = plt.subplot(312)
    n2, bins2, patches2 = plt.hist(data2, bins=bins, range=[xmin, xmax], facecolor=color,\
                                   alpha=alpha, histtype='stepfilled', log=True)
    plt.xlim(xmin, xmax)
    ax2.text(0.05, 0.95, "Pitch", transform=ax2.transAxes, fontsize=fsize, verticalalignment='top')
    ax2.set_ylabel("Freq (arcsec/3600s shift)", fontweight=weight)
    #
    #--- remove the tick labels for this plot
    #
    line = ax2.get_xticklabels()
    for label in line:
        label.set_visible(False)
    #
    #--- third panel: yaw
    #
    ax3 = plt.subplot(313)
    n3, bins3, patches3 = plt.hist(data3, bins=bins, range=[xmin, xmax], facecolor=color, \
                                   alpha=alpha, histtype='stepfilled', log=True)
    ax3.text(0.05, 0.95, "Yaw", transform=ax3.transAxes, fontsize=fsize, verticalalignment='top')
    plt.xlim(xmin, xmax)
    # xlabel presumably comes from the module-level "from pylab import *" -- confirm
    xlabel('Bias Drift Shift over 3600 sec', fontweight=weight)
    #
    #--- save the figure
    #
    plt.subplots_adjust(bottom=0.05,hspace=0.01)
    fig = matplotlib.pyplot.gcf()   # "matplotlib" name presumably from the pylab star import -- confirm
    fig.set_size_inches(width, height)
    plt.tight_layout(h_pad=0.05)
    plt.savefig(outname, format='png', dpi=resolution)
    plt.close('all')
#----------------------------------------------------------------------------------
#-- convert_time_format: convert chandra time into either fractional year or day of year
#----------------------------------------------------------------------------------
def convert_time_format(t_list, dformat=0):
    """
    convert chandra time into either fractional year or day of year
    input:  t_list  --- a list of time in seconds from 1998.1.1
            dformat --- 0: day of year (default); otherwise fractional year
    output: save    --- a list of time in the specified format
            byear   --- the year of the first data point; useful with day of year
    note:   if the data span two years, byear is the starting year and day
            of year keeps increasing beyond 365/366.
    """
    save = []
    byear = 0
    for ent in t_list:
        out = Chandra.Time.DateTime(ent).date
        atemp = re.split(':', out)
        year = int(atemp[0])
        if byear == 0:
            byear = year
            if mcf.is_leapyear(byear):
                base = 366
            else:
                base = 365
        yday = float(atemp[1])
        hh = float(atemp[2])
        mm = float(atemp[3])
        ss = float(atemp[4])
        # BUG FIX: a day has 86400 seconds, not 84600
        yday += hh / 24.0 + mm / 1440.0 + ss / 86400.0
        #
        #--- for the case that the time is in yday; assume that the range is
        #--- always smaller than two years
        #
        if dformat == 0:
            if year > byear:
                yday += base
            save.append(yday)
        #
        #--- for the case that the time is in fractional year
        #
        else:
            if year > byear:
                # BUG FIX: use the day count of the *current* year; it was
                # computed from the previous byear, giving a wrong divisor
                # across a leap-year boundary
                if mcf.is_leapyear(year):
                    base = 366
                else:
                    base = 365
                byear = year
            fyear = year + yday / base
            save.append(fyear)
    return [save, byear]
#----------------------------------------------------------------------------------
#-- set_x_range: set x plotting range --
#----------------------------------------------------------------------------------
def set_x_range(x, ichk=0):
    """
    set x plotting range
    input:  x    --- a list of x values
            ichk --- 0 (default): integer day-of-year limits;
                     otherwise limits made symmetric about zero
    output: [xmin, xmax]
    """
    lo = min(x)
    hi = max(x)
    pad = 0.05 * (hi - lo)
    lo -= pad
    hi += pad
    if ichk == 0:
        lo = int(lo)
        hi = int(hi) + 1
    elif abs(lo) > hi:
        hi = -lo
    else:
        lo = -hi
    return [lo, hi]
#----------------------------------------------------------------------------------
#-- set_y_range: set y plotting range ---
#----------------------------------------------------------------------------------
def set_y_range(y, chk=0):
    """
    set y plotting range
    input:  y   --- a list of y data
            chk --- 1: fixed full-range limits; otherwise percentile-based
    output: [ymin, ymax]
    """
    # fixed range for full-range plots
    if chk == 1:
        return [-3.5, 1.0]
    # drop extreme values by clipping to the 2nd/98th percentiles,
    # then pad by 10% of the spread
    ymin = numpy.percentile(y, 2.0)
    ymax = numpy.percentile(y, 98.0)
    margin = 0.1 * (ymax - ymin)
    return [ymin - margin, ymax + margin]
#----------------------------------------------------------------------------------
#-- set_weekly_range: create a weekly plot range (Fri - Fri) --
#----------------------------------------------------------------------------------
def set_weekly_range(tday =''):
    """
    create a weekly plot range (Fri - Fri)
    input:  tday --- seconds from 1998.1.1 or date in yyyy:ddd:hh:mm:ss format;
                     if empty, today's date is used
    output: tstart --- starting time in seconds from 1998.1.1
            tstop  --- stopping time in seconds from 1998.1.1
            pref   --- file header for the weekly plot: <year>_<start ddd>_<stop ddd>

    NOTE(review): datetime.date(...) and datetime.datetime.strptime(...) need
    the datetime *module*, but the visible import is "from datetime import
    datetime" (the class).  Presumably a later star import rebinds the name --
    confirm this path actually runs.
    """
    #
    #--- find today's date and week day (%w: 0=Sunday .. 6=Saturday)
    #
    if tday == '':
        tday = time.strftime("%Y:%j:23:59:59", time.gmtime())
        tday = Chandra.Time.DateTime(tday).secs
        tday2 = time.strftime("%Y:%m:%d", time.gmtime())
        tm = re.split(':', tday2)
        wday = int(datetime.date(int(tm[0]), int(tm[1]), int(tm[2])).strftime('%w'))
    else:
        #
        #--- if the date is given in Chandra time format
        #
        try:
            tday = float(tday)
            out = Chandra.Time.DateTime(tday).date
            atemp = re.split('\.', out)
            # guard against a ':60' seconds field from the time conversion
            out = atemp[0].replace(':60', ':59')
            wday = int(datetime.datetime.strptime(out, "%Y:%j:%H:%M:%S").strftime('%w'))
        #
        #--- if the date is given in yyyy:ddd:hh:mm:ss
        #
        except:
            wday = int(datetime.datetime.strptime(tday, "%Y:%j:%H:%M:%S").strftime('%w'))
            tday = Chandra.Time.DateTime(tday).secs
    #
    #--- find time interval from Friday the last week to Friday this week
    #--- (4 == Friday in %w numbering)
    #
    tdiff = 4 - wday
    tstop = tday + tdiff * 86400.0
    tstart = tstop - 7.0 * 86400.0
    #
    #--- create a file header; 1 hr is added/subtracted to make sure that the
    #--- boundary dates fall inside the interval
    #
    lstop = Chandra.Time.DateTime(tstop - 3600.0).date
    atemp = re.split(':', lstop)
    ydate1 = atemp[1]
    lstart = Chandra.Time.DateTime(tstart + 3600.0).date
    atemp = re.split(':', lstart)
    year = atemp[0]
    ydate2 = atemp[1]
    pref = year + '_' + ydate2 + '_' + ydate1
    return [int(tstart), int(tstop), pref]
#----------------------------------------------------------------------------------
#-- set_monthly_range: create a mnonthly plot range ---
#----------------------------------------------------------------------------------
def set_monthly_range(tday=''):
    """
    create a monthly plot range
    input:  tday    --- seconds from 1998.1.1; if it is given, create for that month interval dates
                        or date in yyyy:ddd:hh:mm:ss format
    output: tstart  --- starting time in seconds from 1998.1.1
            tstop   --- stopping time in seconds from 1998.1.1
            pref    --- file header for the monthly plot (e.g. 'jul18')
    """
#
#--- find today's date and week day
#
    if tday == '':
        tday = time.strftime("%Y:%m:%d", time.gmtime())
        atemp = re.split(':', tday)
        year = int(atemp[0])
        mon = int(atemp[1])
        mday = int(atemp[2])
    else:
#
#--- if the time is given in Chandra time
#
        try:
            tday = float(tday)
            out = Chandra.Time.DateTime(tday).date
#
#--- use a raw string for the regex: '\.' is an invalid escape sequence
#
            atemp = re.split(r'\.', out)
#
#--- Chandra.Time can report leap seconds (':60'); strptime rejects them
#
            out = atemp[0].replace(':60', ':59')
            out = datetime.datetime.strptime(out, "%Y:%j:%H:%M:%S").strftime('%Y:%m:%d')
#
#--- if the time is given in yyyy:ddd:hh:mm:ss (float() above raises ValueError)
#
        except (ValueError, TypeError):
            out = datetime.datetime.strptime(tday, "%Y:%j:%H:%M:%S").strftime('%Y:%m:%d')
        atemp = re.split(':', out)
        year = int(atemp[0])
        mon = int(atemp[1])
        mday = int(atemp[2])
#
#--- if "today" is less than first 5 day of the month, give back the last month's date interval
#
    if mday < 5:
        mon -= 1
        if mon < 1:
            mon = 12
            year -= 1
    emon = mon + 1
    eyear = year
    if emon > 12:
        emon = 1
        eyear += 1
    start = str(year) + '-' + str(mon) + '-01'
    start = (datetime.datetime.strptime(start, "%Y-%m-%d").strftime('%Y:%j:00:00:00'))
    start = Chandra.Time.DateTime(start).secs
    stop = str(eyear) + '-' + str(emon) + '-01'
    stop = (datetime.datetime.strptime(stop, "%Y-%m-%d").strftime('%Y:%j:00:00:00'))
    stop = Chandra.Time.DateTime(stop).secs
#
#--- m_list is a module-level month-name list ('jan', 'feb', ...) -- defined elsewhere in the file
#
    lmon = m_list[mon - 1]
    lyear = str(year)
    pref = lmon + lyear[2] + lyear[3]
    return [int(start), int(stop), pref]
#----------------------------------------------------------------------------------
#-- set_yearly_range: set time interval for the yearly data extraction --
#----------------------------------------------------------------------------------
def set_yearly_range(year=''):
    """
    set time interval for the yearly data extraction
    input:  year    --- year of the data interval; '' (default) uses the current year.
                        a Chandra time (seconds from 1998.1.1) or a
                        yyyy:ddd:hh:mm:ss string is also accepted
    output: start   --- starting time in seconds from 1998.1.1
            stop    --- stopping time in seconds from 1998.1.1
            pref    --- header of the file (the year as a string)
    """
#
#--- find year of today
#
    if year == '':
        out = time.strftime("%Y:%j", time.gmtime())
        atemp = re.split(':', out)
        year = int(float(atemp[0]))
        yday = int(float(atemp[1]))
#
#--- today is less than the 5th day of the year, use the previous year
#
        if yday < 5:
            year -= 1
    else:
        try:
            check = float(year)
#
#--- if the year is accidentally given as a Chandra time, extract the year.
#--- compare the converted value: the original compared the raw argument, so a
#--- numeric *string* raised TypeError and fell through to the except branch,
#--- producing a bogus year
#
            if check > 3000:
                out = Chandra.Time.DateTime(check).date
                atemp = re.split(':', out)
                year = int(atemp[0])
            else:
                year = int(check)
#
#--- the year is given in yyyy:ddd:hh:mm:ss format (float() raises ValueError)
#
        except (ValueError, TypeError):
            atemp = re.split(':', year)
            year = int(atemp[0])
    year = int(year)
    nyear = year + 1
    start = str(year) + ':001:00:00:00'
    start = Chandra.Time.DateTime(start).secs
    stop = str(nyear) + ':001:00:00:00'
    stop = Chandra.Time.DateTime(stop).secs
    return [int(start), int(stop), str(year)]
#----------------------------------------------------------------------------------
#-- set_full_range: set time inteval for the full range plots --
#----------------------------------------------------------------------------------
def set_full_range():
    """
    set time interval for the full range plots
    input:  none
    output: start   --- starting time in seconds from 1998.1.1 (mission start, 1999:244)
            stop    --- stopping time in seconds from 1998.1.1 (today, 00:00)
            pref    --- header of the file ('total')
    """
#
#--- fixed mission start: 1999:244:00:00:00 in seconds from 1998.1.1
#
    tstart = 52531199
    today = time.strftime("%Y:%j:00:00:00", time.gmtime())
    tstop = Chandra.Time.DateTime(today).secs
    return [int(tstart), int(tstop), 'total']
#----------------------------------------------------------------------------------
#--find_w_date: make a list of dates of a specific week yda in Chandara time ---
#----------------------------------------------------------------------------------
def find_w_date(year, w=3):
    """
    make a list of dates of a specific weekday in a given year, in Chandra time
    input:  year    --- year
            w       --- weekday mon = 0; default 3: thu
    output: list of times in seconds from 1998.1.1, one per matching date
    """
    results = []
    for day in findallwdays(year, w):
#
#--- convert each yyyy-mm-dd date to yyyy:ddd:00:00:00, then to Chandra seconds
#
        ctime = datetime.datetime.strptime(str(day), "%Y-%m-%d").strftime('%Y:%j:00:00:00')
        results.append(int(Chandra.Time.DateTime(ctime).secs))
    return results
#----------------------------------------------------------------------------------
#-- findallwdays: return a list of date in yyyy-mm-dd for a given year and weekday
#----------------------------------------------------------------------------------
def findallwdays(year, w):
    """
    generate all dates (datetime.date) of a given weekday in a given year
    input:  year    --- year
            w       --- weekday mon = 0; default 3: thu
    output: yields one datetime.date per matching day, all within `year`
    """
    d = datetime.date(year, 1, 1)
#
#--- step forward (never backward) to the first requested weekday of the year;
#--- the plain (w - d.weekday()) offset can be negative and would start the
#--- sequence with a date from the previous year
#
    d += datetime.timedelta(days=(w - d.weekday()) % 7)
    while d.year == year:
        yield d
        d += datetime.timedelta(days=7)
#----------------------------------------------------------------------------------
#-- add_empty_plot_page: create empty plots if the regular plots are created for the previous period
#----------------------------------------------------------------------------------
def add_empty_plot_page(period):
    """
    create empty plots if the regular plots are created for the previous period
    input:  period  --- 'mon' or 'year'; which period to create the empty plots
    output: <web_dir>/Plots_new/<year>/<head>_<hist/bias>.png (copies of no_plot.png)
    """
#
#--- today's date: 2-digit year, month, month day, 4-digit year, day of year
#
    now = re.split(':', time.strftime('%y:%m:%d:%Y:%j', time.gmtime()))
    yy = now[0]
    month = int(float(now[1]))
    month_day = int(float(now[2]))
    full_year = now[3]
    day_of_year = int(float(now[4]))
    head = ''
    make_plots = False
#
#--- for month plots: only within the first 5 days of the month (plots cover last month)
#
    if period == 'mon':
        if month_day < 5:
            head = m_list[month - 1] + yy
            make_plots = True
#
#--- for year plots: only within the first 5 days of the year (plots cover last year)
#
    elif day_of_year < 5:
        head = fyear if False else full_year
        make_plots = True
#
#--- if so, create the empty plots by copying the placeholder image
#
    if make_plots:
        odir = web_dir + 'Plots_new/' + str(full_year) + '/'
        for tail in ('bias', 'hist'):
            target = odir + head + '_' + tail + '.png'
            os.system('cp ' + house_keeping + 'no_plot.png ' + target)
#-----------------------------------------------------------------------------------------
#-- TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST ---
#-----------------------------------------------------------------------------------------
class TestFunctions(unittest.TestCase):
    """
    smoke tests: print computed ranges next to the expected values
    """
    #------------------------------------------------------------
    def test_set_weekly_range(self):
        ctime = 647740794
        thead = '2018_187_193'
        start, stop, pref = set_weekly_range(ctime)
        print(f"Weekly setting: {start}<-->{stop}<--->{pref}")
        print(f"Expected: {647222394}<-->{647827194}<--->{thead}")
        dtime = '2018:001:00:00:00'
        start, stop, pref = set_weekly_range(dtime)
        print(f"\nWeekly setting2: {start}<-->{stop}<--->{pref}")
        print("\n\n")
    #------------------------------------------------------------
    def test_set_monthly_range(self):
        ctime = 647740794
        start, stop, pref = set_monthly_range(ctime)
        print(f"Monthly setting: {start}<-->{stop}<--->{pref}")
        print("Expected: 646790469<-->649468869<--->jul18")
        dtime = '2018:001:00:00:00'
        start, stop, pref = set_monthly_range(dtime)
        print(f"\nMonthly setting2: {start}<-->{stop}<--->{pref}")
        print("\n\n")
    #------------------------------------------------------------
    def test_set_yearly_range(self):
        year = 2017
        start, stop, pref = set_yearly_range(year)
        print(f"Yearly setting: {start}<-->{stop}<--->{pref}")
#----------------------------------------------------------------------------------
if __name__ == "__main__":
#
#--- one command-line argument: a directive ('test', 't', help) or a time value
#
    if len(sys.argv) == 2:
        if sys.argv[1] == 'test':
#
#--- run the unit tests; argv must be cleared so unittest does not parse it
#
            sys.argv = [sys.argv[0]]
            unittest.main()
            exit(1)
        elif sys.argv[1] == 't':
#
#--- create all plot types for the most recent period
#
            run = ['w', 'm', 'y', 't']
            stime = ''
        elif sys.argv[1].lower() in ['h', '-h','-help']:
            print("Usage: create_iru_bias_plots.py <ind>")
            print("<ind>: test --- run unit test")
            print("       t    --- create all plots")
            print("       time --- time where you want to create plots (week, month, year)")
            print("       \"\"   --- create plots for the most recent period for all\n\n")
            print("create_iru_bias_plots.py <ind> <time>")
            print("       ind  --- string of combination of: 'w', 'm', 'y', 't' e.g, wmy")
            print("       time --- time in Chandra time or format of <yyyy>:<ddd>:<hh>:<mm>:<ss>")
            exit(1)
        else:
#
#--- a single numeric argument is a Chandra time; anything else falls back to ''
#
            try:
                stime = float(sys.argv[1])
            except:
                stime = ''
            run = ['w', 'm', 'y']
#
#--- two arguments: <ind> (combination of w/m/y/t letters) and <time>
#
    elif len(sys.argv) == 3:
        ind = sys.argv[1]
        try:
            stime = float(sys.argv[2])
        except:
            stime = sys.argv[2]
        run = []
        for ent in sys.argv[1]:
            run.append(ent)
#
#--- no argument: create every plot type for the most recent period
#
    else:
        run = ['w', 'm', 'y', 't']
        stime = ''
    create_iru_bias_plots(run=run, stime= stime)
##
##---- recovering the plots (kept as a reference recipe; Python 2 print syntax)
##
#
#    for year in range(1999, 2019):
#        print "YEAR: " + str(year)
#        run = ['y',]
#        create_iru_bias_plots(run=run, stime=year)
#
#    for year in range(1999, 2019):
#        out  = find_w_date(year)
#        for stime in out:
#            print "TIME: " + Chandra.Time.DateTime(stime).date
#
#            if stime < 52531199:
#                continue
#            if stime > 648691194:
#                break
#
#            run = ['w', 'm']
#            create_iru_bias_plots(run=run, stime= stime)
#
|
import src.basic_assignment as ba
import pandas as pd
from pathlib import Path
from src.optimal_assignment import optimise_assignment
def get_manual_precinct_allocation():
    """
    Load the manually reassigned precinct allocation spreadsheet.

    Returns the raw pandas DataFrame read from
    data/02_optimisation_input/assigned_precincts_reassigned_lawyerup.xlsx,
    resolved relative to this file's directory.
    """
    precinct = pd.read_excel(
        Path(__file__).parent
        / "../data/02_optimisation_input/assigned_precincts_reassigned_lawyerup.xlsx"
    )
    return precinct
if __name__ == "__main__":
    # Load the observer roster and the manually curated precinct allocation;
    # blank out NaNs so the empty-string comparisons below behave uniformly.
    observers = ba.get_observer_dataset()
    precinct = get_manual_precinct_allocation().fillna("")

    # Each mask below selects a disjoint slice of the manual assignment and
    # re-optimises it independently.  Every mask requires the observer column
    # to be non-empty, so the optimiser re-shuffles existing assignments
    # rather than filling unassigned slots.

    # inside legal
    mask = precinct["inside_legal"] & (precinct["inside_observer"] != "")
    precinct_subset = precinct[mask]
    precinct.loc[mask, "inside_observer"] = optimise_assignment(
        precinct_subset, observers, "inside_observer"
    )
    # inside not-legal
    mask = (~precinct["inside_legal"]) & (precinct["inside_observer"] != "")
    precinct_subset = precinct[mask]
    precinct.loc[mask, "inside_observer"] = optimise_assignment(
        precinct_subset, observers, "inside_observer"
    )
    # outside legal all day: am and pm are the same person, keep them paired
    mask = (
        (precinct["outside_am_legal"])
        & (precinct["outside_am_observer"] == precinct["outside_pm_observer"])
        & (precinct["outside_am_observer"] != "")
    )
    precinct_subset = precinct[mask]
    optimised_observer_list = optimise_assignment(
        precinct_subset, observers, "outside_am_observer"
    )
    # same optimised person covers both the am and pm shift
    precinct.loc[mask, "outside_am_observer"] = optimised_observer_list
    precinct.loc[mask, "outside_pm_observer"] = optimised_observer_list
    # outside am only legal: am differs from pm, optimise the am shift alone
    mask = (
        (precinct["outside_am_legal"])
        & (precinct["outside_am_observer"] != precinct["outside_pm_observer"])
        & (precinct["outside_am_observer"] != "")
    )
    precinct_subset = precinct[mask]
    optimised_observer_list = optimise_assignment(
        precinct_subset, observers, "outside_am_observer"
    )
    precinct.loc[mask, "outside_am_observer"] = optimised_observer_list
    # outside pm only legal
    mask = (
        (precinct["outside_pm_legal"])
        & (precinct["outside_am_observer"] != precinct["outside_pm_observer"])
        & (precinct["outside_pm_observer"] != "")
    )
    precinct_subset = precinct[mask]
    optimised_observer_list = optimise_assignment(
        precinct_subset, observers, "outside_pm_observer"
    )
    precinct.loc[mask, "outside_pm_observer"] = optimised_observer_list
    # outside not-legal all day (mirror of the legal all-day case)
    mask = (
        (~precinct["outside_am_legal"])
        & (precinct["outside_am_observer"] == precinct["outside_pm_observer"])
        & (precinct["outside_am_observer"] != "")
    )
    precinct_subset = precinct[mask]
    optimised_observer_list = optimise_assignment(
        precinct_subset, observers, "outside_am_observer"
    )
    precinct.loc[mask, "outside_am_observer"] = optimised_observer_list
    precinct.loc[mask, "outside_pm_observer"] = optimised_observer_list
    # outside am only not-legal
    mask = (
        (~precinct["outside_am_legal"])
        & (precinct["outside_am_observer"] != precinct["outside_pm_observer"])
        & (precinct["outside_am_observer"] != "")
    )
    precinct_subset = precinct[mask]
    optimised_observer_list = optimise_assignment(
        precinct_subset, observers, "outside_am_observer"
    )
    precinct.loc[mask, "outside_am_observer"] = optimised_observer_list
    # Outside pm only not-legal
    mask = (
        (~precinct["outside_pm_legal"])
        & (precinct["outside_am_observer"] != precinct["outside_pm_observer"])
        & (precinct["outside_pm_observer"] != "")
    )
    precinct_subset = precinct[mask]
    optimised_observer_list = optimise_assignment(
        precinct_subset, observers, "outside_pm_observer"
    )
    precinct.loc[mask, "outside_pm_observer"] = optimised_observer_list

    # Back-fill each observer's assigned polling-place name by joining the
    # observer roster against the (now optimised) precinct columns.  The merge
    # preserves row order, so the merged column can be copied back positionally.
    observers_allocated = observers.merge(
        precinct[["inside_observer", "Polling Place Name"]],
        left_on="name",
        right_on="inside_observer",
        how="left",
    )
    observers["inside_location"] = observers_allocated["Polling Place Name"].values
    observers_allocated = observers.merge(
        precinct[["outside_am_observer", "Polling Place Name"]],
        left_on="name",
        right_on="outside_am_observer",
        how="left",
    )
    observers["outside_am_location"] = observers_allocated["Polling Place Name"].values
    observers_allocated = observers.merge(
        precinct[["outside_pm_observer", "Polling Place Name"]],
        left_on="name",
        right_on="outside_pm_observer",
        how="left",
    )
    observers["outside_pm_location"] = observers_allocated["Polling Place Name"].values

    # Persist the optimised allocation and the LBJ export.
    # NOTE(review): the `encoding` argument of DataFrame.to_excel is deprecated
    # (removed in newer pandas) -- confirm the pinned pandas version accepts it.
    precinct.to_excel(
        Path(__file__).parent
        / "../data/01_output/manual_optimised_assigned_precincts.xlsx",
        index=False,
        encoding="utf-8",
    )
    observers.to_excel(
        Path(__file__).parent
        / "../data/01_output/manual_optimised_assigned_observers.xlsx",
        index=False,
        encoding="utf-8",
    )
    lbj_output = ba.get_lbj_csv(precinct, observers)
    lbj_output.to_excel(
        Path(__file__).parent / "../data/01_output/lbj_output_manual.xlsx",
        index=False,
        encoding="utf-8",
    )
    print(lbj_output)
|
def large_sum_pos(a, b):
    """Add two non-negative decimal digit strings.

    Returns the digit-string sum with the final carry digit (possibly '0')
    prepended; the caller strips leading zeros afterwards.
    """
    width = max(len(a), len(b))
    a = a.rjust(width, '0')
    b = b.rjust(width, '0')
    digits = []
    carry = 0
    # column-by-column addition, least-significant digit first
    for da, db in zip(reversed(a), reversed(b)):
        carry, digit = divmod(int(da) + int(db) + carry, 10)
        digits.append(str(digit))
    digits.append(str(carry))
    return ''.join(reversed(digits))
def large_sum_neg(a, b):
    """Subtract decimal digit string b from a (requires int(a) >= int(b)).

    b is zero-padded to a's width; the result keeps a's width including any
    leading zeros, which the caller strips.
    """
    b = b.rjust(len(a), '0')
    out = []
    borrow = 0
    # column-by-column subtraction with borrow, least-significant digit first
    for da, db in zip(reversed(a), reversed(b)):
        diff = int(da) - int(db) - borrow
        if diff < 0:
            diff += 10
            borrow = 1
        else:
            borrow = 0
        out.append(str(diff))
    return ''.join(reversed(out))
def post_process(res):
    """Strip artificial leading zeros from an add/subtract result.

    Normalises an all-zero result to '0' (the original returned '' because
    lstrip removed every digit) and never returns '-0'.
    """
    if '-' not in res:
        stripped = res.lstrip('0')
        return stripped if stripped else '0'
    stripped = res.replace('-', '').lstrip('0')
    return '-' + stripped if stripped else '0'
def large_sum(a, b):
    """Add two (possibly negative) arbitrary-precision decimal strings."""
    a_neg = '-' in a
    b_neg = '-' in b
    if not a_neg and not b_neg:
        raw = large_sum_pos(a, b)
    elif a_neg and b_neg:
        # both negative: add magnitudes, negate
        raw = '-' + large_sum_pos(a.replace('-', ''), b.replace('-', ''))
    else:
        # exactly one negative operand: make `a` the positive one
        if a_neg:
            a, b = b, a
        mag_b = b.replace('-', '')
        if int(a) >= int(mag_b):
            raw = large_sum_neg(a, mag_b)
        else:
            raw = '-' + large_sum_neg(mag_b, a)
    return post_process(raw)
if __name__ == '__main__':
    # Read comma-separated pairs of decimal strings, one pair per line, and
    # write the sums.  Open in text mode: the original 'rb'/'wb' modes mix
    # bytes with the str operations below and crash under Python 3.
    results = []
    with open('input', 'r') as f:
        lines = f.readlines()
    for line in lines:
        line = line.strip()
        a, b = line.split(',')
        results.append(large_sum(a, b))
    with open('output', 'w') as f:
        f.write('\n'.join(results))
|
import Chapter3.DownloadMNIST_1

# Load the MNIST train/test split prepared in chapter 3.
X_train, X_test, y_train, y_test = Chapter3.DownloadMNIST_1.splitData()
y_train_5 = (y_train == 5)  # 5 is True, every other digit is False
y_test_5 = (y_test == 5)
def getBinaryClassifier():
    """Train and return a binary classifier that detects the digit 5 only."""
    ## binary classifier that identifies only the digit 5 ##
    from sklearn.linear_model import SGDClassifier
    # SGD handles very large data sets efficiently and suits online learning
    sgd_clf = SGDClassifier(max_iter=500, random_state=42)
    sgd_clf.fit(X_train,y_train_5)
    return sgd_clf
def predict5(sgd_clf):
    """Predict on ten random test samples and print prediction vs. true label."""
    ## make predictions ##
    import numpy as np
    for snum in np.random.permutation(10):
        result = sgd_clf.predict(X_test[snum].reshape(1, -1))
        # printed labels (Korean): "prediction result" / "label of test sample"
        print("예측결과 : ",result,"\t예측데이터 라벨 :",y_test[snum])
if __name__ == '__main__':
    # Train the 5-detector, then spot-check it on random test digits.
    sgd_clf = getBinaryClassifier()
    predict5(sgd_clf)
from __future__ import unicode_literals
from django.db import models
class Users(models.Model):
    """Minimal user record keyed by an email string with audit timestamps."""

    # NOTE(review): a plain CharField -- presumably holds an email address;
    # no uniqueness constraint or validator is applied here
    email = models.CharField(max_length=255)
    # timestamps maintained automatically by Django on insert / every save
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
|
import sys
import pygame
from pygame.locals import *
import startgame
img_basic_address = './img/'  # base directory for the game's image assets
class UNOGame():
    """Top-level UNO application: owns the pygame window and the menu screens.

    Stores the user-selected options (player count, difficulty) and passes
    them to startgame.game when a round is started from the main menu.
    """

    def __init__(self):
        """Create the 800x600 window, load the title background, set defaults."""
        pygame.init()
        self.background = pygame.image.load(img_basic_address+'background.png')
        self.screen_width = 800
        self.screen_height = 600
        self.background_Color = (0,66,0)
        self.playernum = 2          # number of players (2-4), set via set_players()
        self.difficulty = 1         # 1 = basic mode, 2 = advanced mode
        self.font = 'Berlin Sans FB'
        self.clock = pygame.time.Clock()
        self.FPS = 30
        self.screen = pygame.display.set_mode((self.screen_width, self.screen_height))
        self.screen.fill(self.background_Color)
        self.screen.blit(self.background, (-30, -30))
        pygame.display.update()

    def text_format(self, message, textFont, textSize, textColor):
        """Render `message` with the given system font/size/color; returns a Surface.

        NOTE(review): the second argument to Font.render is the antialias flag;
        K_0 (keycode constant, value 48) is merely truthy here -- presumably
        True was intended.  Behaviorally equivalent, so left unchanged.
        """
        newFont = pygame.font.SysFont(textFont, textSize)
        newText = newFont.render(message, K_0, textColor)
        return newText

    def set_players(self):
        """Menu loop for choosing the number of players (2/3/4) or going back.

        Arrow keys move the highlight; RETURN commits the choice, restores the
        main background and returns to the caller.
        """
        pygame.init()
        self.background = pygame.image.load('./img/default.png')
        self.screen.blit(self.background, (-100, -70))
        selected = 1
        menu = True
        while menu:
            # NOTE(review): pre_init/init and reloading the sound every frame
            # is redundant but harmless -- kept as-is
            pygame.mixer.pre_init(44100, -16, 1, 512)
            pygame.init()
            sound = pygame.mixer.Sound('./sound/menu.wav')
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == KEYDOWN:
                    # move the highlight, clamped to [1, 4]
                    if event.key == K_UP:
                        sound.play()
                        if selected <=1:
                            selected = 1
                        else:
                            selected = selected-1
                    elif event.key == K_DOWN:
                        sound.play()
                        if selected >=4:
                            selected = 4
                        else:
                            selected = selected+1
                    if event.key == K_RETURN:
                        # entries 1-3 pick 2/3/4 players; entry 4 is BACK
                        if selected <= 1:
                            self.playernum = 2
                            self.background = pygame.image.load('./img/background.png')
                            return
                        if selected == 2:
                            self.playernum = 3
                            self.background = pygame.image.load('./img/background.png')
                            return
                        if selected == 3:
                            self.playernum = 4
                            self.background = pygame.image.load('./img/background.png')
                            return
                        if selected >= 4:
                            self.background = pygame.image.load('./img/background.png')
                            return
            # redraw the menu: the highlighted entry is red, the rest black
            if selected == 1:
                text_two = self.text_format("2 PLAYERS", self.font, 50, (255,24,0))
            else:
                text_two = self.text_format("2 PLAYERS", self.font, 50, (0,0,0))
            if selected == 2:
                text_three = self.text_format("3 PLAYERS", self.font, 50, (255,24,0))
            else:
                text_three = self.text_format("3 PLAYERS", self.font, 50, (0,0,0))
            if selected == 3:
                text_four = self.text_format("4 PLAYERS", self.font, 50, (255,24,0))
            else:
                text_four = self.text_format("4 PLAYERS", self.font, 50, (0,0,0))
            if selected == 4:
                text_quit = self.text_format("BACK", self.font, 50, (255,24,0))
            else:
                text_quit = self.text_format("BACK", self.font, 50, (0,0,0))
            two_rect = text_two.get_rect()
            three_rect = text_three.get_rect()
            four_rect = text_four.get_rect()
            quit_rect=text_quit.get_rect()
            # horizontally centered (rect[2] is the surface width)
            self.screen.blit(text_two, (self.screen_width/2 - (two_rect[2]/2), 180))
            self.screen.blit(text_three, (self.screen_width/2 - (three_rect[2]/2), 240))
            self.screen.blit(text_four, (self.screen_width/2 - (four_rect[2]/2), 300))
            self.screen.blit(text_quit, (self.screen_width/2 - (quit_rect[2]/2), 360))
            pygame.display.update()

    def set_difficulty(self):
        """Menu loop for choosing basic (1) or advanced (2) mode, or going back."""
        self.background = pygame.image.load('./img/default.png')
        self.screen.blit(self.background, (-100, -70))
        selected = 1
        menu = True
        while menu:
            pygame.mixer.pre_init(44100, -16, 1, 512)
            pygame.init()
            sound = pygame.mixer.Sound('./sound/menu.wav')
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == KEYDOWN:
                    # move the highlight, clamped to [1, 3]
                    if event.key == K_UP:
                        sound.play()
                        if selected <=1:
                            selected = 1
                        else:
                            selected = selected-1
                    elif event.key == K_DOWN:
                        sound.play()
                        if selected >=3:
                            selected = 3
                        else:
                            selected = selected+1
                    if event.key == K_RETURN:
                        # entry 1 = basic, entry 2 = advanced, entry 3 = BACK
                        if selected <= 1:
                            self.difficulty = 1
                            self.background = pygame.image.load('./img/background.png')
                            return
                        if selected == 2:
                            self.difficulty = 2
                            self.background = pygame.image.load('./img/background.png')
                            return
                        if selected >= 3:
                            self.background = pygame.image.load('./img/background.png')
                            return
            # redraw the menu: the highlighted entry is red, the rest black
            if selected == 1:
                text_basic = self.text_format("BASIC MODE", self.font, 50, (255,24,0))
            else:
                text_basic = self.text_format("BASIC MODE", self.font, 50, (0,0,0))
            if selected == 2:
                text_advanced = self.text_format("ADVANCED MODE", self.font, 50, (255,24,0))
            else:
                text_advanced = self.text_format("ADVANCED MODE", self.font, 50, (0,0,0))
            if selected == 3:
                text_quit = self.text_format("BACK", self.font, 50, (255,24,0))
            else:
                text_quit = self.text_format("BACK", self.font, 50, (0,0,0))
            basic_rect = text_basic.get_rect()
            advanced_rect = text_advanced.get_rect()
            quit_rect=text_quit.get_rect()
            self.screen.blit(text_basic, (self.screen_width/2 - (basic_rect[2]/2), 200))
            self.screen.blit(text_advanced, (self.screen_width/2 - (advanced_rect[2]/2), 260))
            self.screen.blit(text_quit, (self.screen_width/2 - (quit_rect[2]/2), 320))
            pygame.display.update()

    def main_menu(self):
        """Main menu loop: START / PLAYERS SET / DIFFICULTY SET / QUIT."""
        menu = True
        selected = 1
        while menu:
            pygame.mixer.pre_init(44100, -16, 1, 512)
            pygame.init()
            sound = pygame.mixer.Sound('./sound/menu.wav')
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == KEYDOWN:
                    # move the highlight, clamped to [1, 4]
                    if event.key == K_UP:
                        sound.play()
                        if selected <=1:
                            selected = 1
                        else:
                            selected = selected-1
                    elif event.key == K_DOWN:
                        sound.play()
                        if selected >=4:
                            selected = 4
                        else:
                            selected = selected+1
                    if event.key == K_RETURN:
                        if selected <= 1:
                            # START: run one game round, then fall back to the menu
                            self.background = pygame.image.load('./img/default.png')
                            self.screen.blit(self.background, (-30, -30))
                            game = startgame.game(self.playernum, self.difficulty)
                            game.startgame()
                            self.background = pygame.image.load('./img/background.png')
                            self.screen.blit(self.background, (-30, -30))
                        if selected == 2:
                            self.set_players()
                            self.screen.blit(self.background, (-30, -30))
                        if selected == 3:
                            self.set_difficulty()
                            self.screen.blit(self.background, (-30, -30))
                        if selected >= 4:
                            pygame.quit()
                            sys.exit()
            # redraw the menu: the highlighted entry is red, the rest black
            if selected == 1:
                text_start = self.text_format("START", self.font, 50, (255,24,0))
            else:
                text_start = self.text_format("START", self.font, 50, (0,0,0))
            if selected == 2:
                text_player = self.text_format("PLAYERS SET", self.font, 50, (255,24,0))
            else:
                text_player = self.text_format("PLAYERS SET", self.font, 50, (0,0,0))
            if selected == 3:
                text_dfficulty = self.text_format("DIFFICULTY SET", self.font, 50, (255,24,0))
            else:
                text_dfficulty = self.text_format("DIFFICULTY SET", self.font, 50, (0,0,0))
            if selected == 4:
                text_quit = self.text_format("QUIT", self.font, 50, (255,24,0))
            else:
                text_quit = self.text_format("QUIT", self.font, 50, (0,0,0))
            start_rect = text_start.get_rect()
            player_rect = text_player.get_rect()
            difficulty_rect = text_dfficulty.get_rect()
            quit_rect=text_quit.get_rect()
            self.screen.blit(text_start, (self.screen_width/2+70 - (start_rect[2]/2), 200))
            self.screen.blit(text_player, (self.screen_width/2+70 - (player_rect[2]/2), 260))
            self.screen.blit(text_dfficulty, (self.screen_width/2+70 - (difficulty_rect[2]/2), 320))
            self.screen.blit(text_quit, (self.screen_width/2+70 - (quit_rect[2]/2), 380))
            pygame.display.update()
            self.clock.tick(self.FPS)
            # assumed to sit inside the menu loop (indentation was lost in the
            # source) -- setting the caption repeatedly is harmless
            pygame.display.set_caption("UNO!")
if __name__ == '__main__':
    # Create the application window and hand control to the main menu loop.
    uno = UNOGame()
    uno.main_menu()
# Exercise solutions for chapter 3 (lists): each numbered section prints its
# own output, separated by blank lines.

# 3-1: access list elements by index
print('3-1\n')
friends = ['Farand', 'Daryl', 'Eunice', 'Dina']
print(friends[0])
print(friends[1])
print(friends[2])
print(friends[3]+'\n')
# 3-2: greet each friend in a loop
print('3-2\n')
for friend in friends:
    print('Hey ' + friend + ', hope to see you soon!')
print('\r')
# 3-4, 3-5, 3-6, 3-7: build, modify, grow and shrink a guest list
print('3-4, 3-5, 3-6, 3-7\n')
guests = ['Schrodinger', 'Einstein', 'Planck', 'Jankunas']
for guest in guests:
    print('Dear Dr. ' + guest + ', you are cordially invited to dinner at the Bartlett home.')
# one guest cannot make it -- replace them
guests[1] = 'Feynmann'
print('\r')
for guest in guests:
    print('Dear Dr. ' + guest + ', you are cordially invited to dinner at the Bartlett home.')
# a bigger table: insert at the start, the middle, and the end
guests.insert(0,'Raman')
guests.insert(3,'Zare')
guests.append('Rakitzkis')
print('\r')
for guest in guests:
    print('Dear Dr. ' + guest + ', you are cordially invited to dinner at the Bartlett home.')
print('\r')
print("We're very sorry, but we can now only accompany two guests.\n")
# shrink the list back down to two, apologising to each removed guest
while len(guests) > 2:
    booted = guests.pop(-1)
    print('Dear Dr. ' + booted + ', please accept our apologies. We will no longer be able to accomodate you at our dinner party.')
# NOTE(review): bare expression below has no effect (its value is discarded);
# presumably a leftover from an interactive session
len(guests)
for guest in guests:
    print('Dear Dr. ' + guest + ', you are cordially invited to dinner at the Bartlett home.')
# 3-8: sorted() leaves the list untouched; reverse()/sort() mutate in place
print('\n3-8')
vacay = ['hawaii', 'paris', 'barcelona', 'sweden', 'iceland', 'new york']
print(vacay)
print(sorted(vacay))
print(vacay)
print(sorted(vacay, reverse = True))
print(vacay)
vacay.reverse()
print(vacay)
vacay.reverse()
print(vacay)
vacay.sort()
print(vacay)
vacay.sort(reverse = True)
print(vacay)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.codegen.avro.tailor import PutativeAvroTargetsRequest
from pants.backend.codegen.avro.tailor import rules as tailor_rules
from pants.backend.codegen.avro.target_types import AvroSourcesGeneratorTarget
from pants.core.goals.tailor import AllOwnedSources, PutativeTarget, PutativeTargets
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Build a RuleRunner wired with the Avro tailor rules and query support."""
    rules = [
        *tailor_rules(),
        QueryRule(PutativeTargets, (PutativeAvroTargetsRequest, AllOwnedSources)),
    ]
    return RuleRunner(rules=rules, target_types=[])
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """Tailor proposes one avro_sources target per directory of unowned files."""
    files = {
        "avro/foo/f.avsc": "",
        "avro/foo/bar/baz1.avdl": "",
        "avro/foo/bar/baz2.avpr": "",
        "avro/foo/bar/baz3.avsc": "",
    }
    rule_runner.write_files(files)
    # baz1.avdl is already owned, so it must not trigger a new target
    request = PutativeAvroTargetsRequest(("avro/foo", "avro/foo/bar"))
    owned = AllOwnedSources(["avro/foo/bar/baz1.avdl"])
    pts = rule_runner.request(PutativeTargets, [request, owned])
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                AvroSourcesGeneratorTarget,
                path="avro/foo",
                name=None,
                triggering_sources=["f.avsc"],
            ),
            PutativeTarget.for_target_type(
                AvroSourcesGeneratorTarget,
                path="avro/foo/bar",
                name=None,
                triggering_sources=["baz2.avpr", "baz3.avsc"],
            ),
        ]
    )
    assert expected == pts
def test_find_putative_targets_subset(rule_runner: RuleRunner) -> None:
    """Only the requested directories get putative targets, not every dir."""
    files = {
        "avro/foo/f.avsc": "",
        "avro/foo/bar/bar.avsc": "",
        "avro/foo/baz/baz.avsc": "",
        "avro/foo/qux/qux.avsc": "",
    }
    rule_runner.write_files(files)
    # request only bar/ and qux/; foo/ and baz/ must be ignored
    request = PutativeAvroTargetsRequest(("avro/foo/bar", "avro/foo/qux"))
    pts = rule_runner.request(PutativeTargets, [request, AllOwnedSources([])])
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                AvroSourcesGeneratorTarget,
                path="avro/foo/bar",
                name=None,
                triggering_sources=["bar.avsc"],
            ),
            PutativeTarget.for_target_type(
                AvroSourcesGeneratorTarget,
                path="avro/foo/qux",
                name=None,
                triggering_sources=["qux.avsc"],
            ),
        ]
    )
    assert expected == pts
|
import BayesianNetwork
import math
class FrontierModel:
    """One candidate local model for a vertex: its CPT under a trial parent set.

    The trial set is the vertex's current parents with `parentInQuestion`
    either added (add=True) or removed (add=False); parentInQuestion=None
    with add=True represents the current parent set unchanged.
    """

    def __init__(self, vertex, currParentSet, parentInQuestion, add):
        self.vertex = vertex
        self.parentInQuestion = parentInQuestion
        self.currParentSet = currParentSet
        self.counts = {}        # parent-value joint string -> {vertex value -> count}
        self.currCPTs = {}      # parent-value joint string -> {vertex value -> probability}
        self.add = add

    def addDataToCounts(self, data):
        """Accumulate co-occurrence counts for every data point in `data`."""
        for point in data:
            joint = ''
            for parent in self.vertex.parents:
                # for a removal model, leave the parent being removed out of the key
                if self.add or parent != self.parentInQuestion:
                    joint += str(point[parent.name])
            # for an addition model, append the candidate parent's value
            if self.add and self.parentInQuestion is not None:
                joint += str(point[self.parentInQuestion.name])
            if joint not in self.counts:
                self.counts[joint] = {str(v): 0 for v in self.vertex.vals}
            self.counts[joint][str(point[self.vertex.name])] += 1

    def normalizeCounts(self):
        """Convert the raw counts into conditional probability tables."""
        for joint, valCounts in self.counts.items():
            total = float(sum(valCounts[str(v)] for v in self.vertex.vals))
            self.currCPTs[joint] = {
                str(v): float(valCounts[str(v)]) / total for v in self.vertex.vals
            }
class IncrementalNetwork:
    """Incrementally learned Bayesian-network structure.

    For every vertex it keeps the current local model plus a 'frontier' of
    candidate models that each add or remove exactly one possible parent.
    Each data batch updates all counts; the best-scoring frontier model (by a
    BIC-style score: log-likelihood minus a complexity penalty) replaces the
    current model when it improves on it.
    """

    def __init__(self, topologicalOrdering):
        self.topologicalOrdering = topologicalOrdering;
        self.frontierModels = {};       # vertex name -> list of candidate FrontierModels
        self.jointDataPoints = {};      # vertex name -> data points seen since last structure change
        self.currentModels = {};        # vertex name -> FrontierModel for the current parent set
        for vertex in topologicalOrdering:
            self.frontierModels[vertex.name] = [];
            self.initializeNewFrontierModels(vertex);
            self.jointDataPoints[vertex.name] = [];
            # add=True with parentInQuestion=None means "current parents unchanged"
            self.currentModels[vertex.name] = FrontierModel(vertex, vertex.parents, None, True);

    def modelComplexity(self, model, vert):
        """Return the BIC complexity penalty 0.5 * log(N) * #free parameters.

        NOTE(review): the `vert` parameter is immediately shadowed by the loop
        variable below and never used -- confirm this is intended.
        """
        numFreeParameters = 1;
        for vert in model.currParentSet:
            # removal models exclude the parent being removed; the second
            # clause is only evaluated when add is False, so parentInQuestion
            # is not None there
            if (model.add or not vert.name == model.parentInQuestion.name):
                numFreeParameters *= len(vert.vals);
        numFreeParameters *= (len(model.vertex.vals) - 1);
        if (model.add and model.parentInQuestion != None):
            numFreeParameters *= len(model.parentInQuestion.vals);
        return 0.5 * math.log(len(self.jointDataPoints[model.vertex.name])) * numFreeParameters;

    def getDiffMutualInfo(self, originalModel, frontierModel, vert):
        """Return score(frontier) - score(original) over the stored data,
        where score = log-likelihood - complexity penalty."""
        originalMutualInfo = 0;
        frontierMutualInfo = 0;
        for dataPoint in self.jointDataPoints[vert.name]:
            # log-likelihood of the data point under the current parent set
            jointOriginal = '';
            for parent in vert.parents:
                jointOriginal += str(dataPoint[parent.name]);
            originalMutualInfo += math.log(originalModel.currCPTs[jointOriginal][str(dataPoint[vert.name])]);
            # log-likelihood under the frontier model's trial parent set
            jointFrontier = '';
            for parent in vert.parents:
                if(frontierModel.add or parent != frontierModel.parentInQuestion):
                    jointFrontier += str(dataPoint[parent.name]);
            if (frontierModel.add):
                jointFrontier += str(dataPoint[frontierModel.parentInQuestion.name]);
            frontierMutualInfo += math.log(frontierModel.currCPTs[jointFrontier][str(dataPoint[vert.name])]);
        return (frontierMutualInfo - self.modelComplexity(frontierModel, vert)) - (originalMutualInfo- self.modelComplexity(originalModel, vert));

    def addDataToModels(self, data):
        """Fold a new data batch into every current and frontier model."""
        for vertex in self.topologicalOrdering:
            self.currentModels[vertex.name].addDataToCounts(data);
            self.currentModels[vertex.name].normalizeCounts();
            for frontierModel in self.frontierModels[vertex.name]:
                frontierModel.addDataToCounts(data);
                frontierModel.normalizeCounts();
            for dataPoint in data:
                self.jointDataPoints[vertex.name].append(dataPoint);

    def selectIdealFrontierModel(self):
        """Return the frontier model with the largest positive score
        improvement, or None when no candidate beats its current model."""
        maxImprovement = 0;
        idealFrontierModel = None;
        for vert in self.topologicalOrdering:
            for model in self.frontierModels[vert.name]:
                score = self.getDiffMutualInfo(self.currentModels[vert.name], model, vert);
                if score > maxImprovement:
                    maxImprovement = score;
                    idealFrontierModel = model;
        return idealFrontierModel;

    def updateFrontier(self, idealFrontierModel):
        """Adopt the winning frontier model: mutate the vertex's parent set,
        reset its data buffer and rebuild its current/frontier models."""
        vertForIdealFrontModel = idealFrontierModel.vertex;
        self.jointDataPoints[vertForIdealFrontModel.name] = [];
        parentInQuestion = idealFrontierModel.parentInQuestion;
        if (idealFrontierModel.add):
            #need to update frontier models to allow an add none option
            vertForIdealFrontModel.parents.append(parentInQuestion);
        else:
            vertForIdealFrontModel.parents.remove(parentInQuestion);
        self.currentModels[vertForIdealFrontModel.name] = FrontierModel(vertForIdealFrontModel, vertForIdealFrontModel.parents, None, True);
        self.frontierModels[vertForIdealFrontModel.name] = [];
        self.initializeNewFrontierModels(vertForIdealFrontModel);

    def updateModel(self, data):
        """Main entry point: absorb a data batch, then apply at most one
        single-parent structure change if it improves the score."""
        self.addDataToModels(data);
        idealFrontierModel = self.selectIdealFrontierModel();
        #keep current model
        if idealFrontierModel == None:
            return;
        self.updateFrontier(idealFrontierModel);

    def initializeNewFrontierModels(self, vertex):
        """Create one add-candidate per absent possible parent and one
        remove-candidate per present parent."""
        for vert in vertex.possibleParents:
            if vert in vertex.parents:
                self.frontierModels[vertex.name].append(FrontierModel(vertex, vertex.parents, vert, False));
            else:
                self.frontierModels[vertex.name].append(FrontierModel(vertex, vertex.parents, vert, True));
|
import numpy as np
import pandas as pd
from bokeh.models import Band, HoverTool
from tqdm import tqdm
import timeit
import warnings
from copy import deepcopy
from scipy.stats import norm
import time
import multiprocessing
from joblib import Parallel, delayed
from copy import deepcopy, copy
from bokeh.plotting import ColumnDataSource, figure
import scipy
from scipy import interp
from sklearn import metrics
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.utils import resample
from ..utils import binary_metrics, dict_median, smooth
from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label
import numpy as np
import pandas as pd
from bokeh.models import Band, HoverTool
from tqdm import tqdm
import timeit
from copy import deepcopy
from scipy.stats import norm
import time
import multiprocessing
from joblib import Parallel, delayed
from copy import deepcopy, copy
from bokeh.plotting import ColumnDataSource, figure
import scipy
from scipy import interp
from sklearn import metrics
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.utils import resample
from ..utils import binary_evaluation
def roc(Y, stat, test=None, bootnum=100, legend=True, grid_line=False, label_font_size="10pt", xlabel="1 - Specificity", ylabel="Sensitivity", width=320, height=315, method='BCA', plot='data', legend_basic=False):
    """Plot a ROC curve for (Y, stat) with a bootstrapped confidence band.

    Parameters
    ----------
    Y : array-like of binary labels.
    stat : array-like of scores for the positive class.
    test : optional (y_true, y_score) pair overlaid as an extra "Test" curve.
    bootnum : number of bootstrap resamples; <= 1 disables the CI band.
    method : CI method — 'BCA' (adds a jackknife pass), 'Per', or 'CPer'.
    plot : 'data' plots the observed curve as the midline, 'median' keeps the
        bootstrap midline.
    legend_basic : if True, return (fig, stat_ib) instead of just fig.

    Returns
    -------
    bokeh figure, plus the binary-metric CI table when legend_basic is True.

    Bug fixes vs. previous revision:
    - jackknife loop tested stale ``auc_boot`` instead of ``auc_jack``;
    - jackknife fpr=0 anchor used leftover ``tpr_boot``/``fpr_boot``;
    - bootstrap path never appended the terminal (1, 1) row to ``tpr_ib``,
      leaving it one row shorter than ``fpr_linspace``;
    - ValueError message said 'Perc' while the code checks 'Per'.
    """
    # Choose the positive label so the training AUC is >= 0.5
    auc_check = roc_auc_score(Y, stat)
    if auc_check > 0.5:
        pos = 1
    else:
        pos = 0
    # Common FPR grid used for vertical averaging of all curves
    fpr_linspace = np.linspace(0, 1, 1000)
    # ROC of the observed statistic, resampled onto the common grid
    fpr_stat, tpr_stat, _ = metrics.roc_curve(Y, stat, pos_label=pos, drop_intermediate=False)
    tpr_stat = interp(fpr_linspace, fpr_stat, tpr_stat)
    tpr_list = tpr_stat
    binary_stats_train_dict = binary_evaluation(Y, stat)
    binary_stats_train = []
    for key, value in binary_stats_train_dict.items():
        binary_stats_train.append(value)
    binary_stats_train = np.array(binary_stats_train)
    binary_stats_train_boot = []
    tpr_bootstat = []
    if bootnum > 1:
        for i in range(bootnum):
            bootidx = resample(list(range(len(Y))), stratify=Y)  # stratified bootstrap
            Yscore_boot = stat[bootidx]
            Ytrue_boot = Y[bootidx]
            fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=pos, drop_intermediate=False)
            auc_boot = metrics.auc(fpr_boot, tpr_boot)
            if auc_boot < 0.5:
                # Flip the positive label for degenerate resamples
                fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=abs(1 - pos), drop_intermediate=False)
            bstat_loop = binary_evaluation(Ytrue_boot, Yscore_boot)
            bstat_list = []
            for key, value in bstat_loop.items():
                bstat_list.append(value)
            binary_stats_train_boot.append(bstat_list)
            # Drop intermediates when fpr = 0
            tpr0_boot = tpr_boot[fpr_boot == 0][-1]
            tpr_boot = np.concatenate([[tpr0_boot], tpr_boot[fpr_boot > 0]])
            fpr_boot = np.concatenate([[0], fpr_boot[fpr_boot > 0]])
            # Vertical averaging onto the common grid
            idx = [np.abs(i - fpr_boot).argmin() for i in fpr_linspace]
            tpr_bootstat.append(np.array(tpr_boot[idx]))
        binary_stats_train_boot = np.array(binary_stats_train_boot)
        if method == 'BCA':
            # Jackknife (leave-one-out) pass for the BCA acceleration term
            binary_stats_jack_boot = []
            jackidx = []
            base = np.arange(0, len(Y))
            for i in base:
                jackidx.append(np.delete(base, i))
            tpr_jackstat = []
            for i in jackidx:
                Yscore_jack = stat[i]
                Ytrue_jack = Y[i]
                fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=pos, drop_intermediate=False)
                auc_jack = metrics.auc(fpr_jack, tpr_jack)
                # BUGFIX: was testing the stale `auc_boot` left over from the bootstrap loop
                if auc_jack < 0.5:
                    fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=abs(1 - pos), drop_intermediate=False)
                jstat_loop = binary_evaluation(Ytrue_jack, Yscore_jack)
                jstat_list = []
                for key, value in jstat_loop.items():
                    jstat_list.append(value)
                binary_stats_jack_boot.append(jstat_list)
                # Drop intermediates when fpr = 0
                # BUGFIX: anchor with the jackknife curve itself, not leftover
                # `tpr_boot`/`fpr_boot` from the previous loop
                tpr0_jack = tpr_jack[fpr_jack == 0][-1]
                tpr_jack = np.concatenate([[tpr0_jack], tpr_jack[fpr_jack > 0]])
                fpr_jack = np.concatenate([[0], fpr_jack[fpr_jack > 0]])
                # Vertical averaging onto the common grid
                idx = [np.abs(i - fpr_jack).argmin() for i in fpr_linspace]
                tpr_jackstat.append(np.array(tpr_jack[idx]))
            binary_stats_jack_boot = np.array(binary_stats_jack_boot)
    if bootnum > 1:
        # CI helpers presumably return (lower, upper, mid) columns per grid
        # point — confirm against their definitions in this module.
        if method == 'BCA':
            tpr_ib = bca_method(tpr_bootstat, tpr_list, tpr_jackstat)
            stat_ib = bca_method(binary_stats_train_boot, binary_stats_train, binary_stats_jack_boot)
        elif method == 'Per':
            tpr_ib = per_method(tpr_bootstat, tpr_list)
            stat_ib = list(per_method(binary_stats_train_boot, binary_stats_train))
        elif method == 'CPer':
            tpr_ib = cper_method(tpr_bootstat, tpr_list)
            stat_ib = list(cper_method(binary_stats_train_boot, binary_stats_train))
        else:
            # BUGFIX: message previously said 'Perc' while the code checks 'Per'
            raise ValueError("bootmethod has to be 'BCA', 'Per', or 'CPer'.")
        tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0)  # Add starting 0
        # BUGFIX: the terminal (1, 1) row was missing in the bootstrap path,
        # leaving tpr_ib one row shorter than fpr_linspace (which gains both a
        # leading 0 and a trailing 1 below). roc_boot adds both rows.
        tpr_ib = np.concatenate((tpr_ib, np.ones((1, 3))), axis=0)  # Add end 1
        # Midline = midpoint of the band (except pure percentile)
        if method != 'Per':
            tpr_ib[:, 2] = (tpr_ib[:, 0] + tpr_ib[:, 1]) / 2
            for i in range(len(stat_ib)):
                stat_ib[i][2] = binary_stats_train[i]
    else:
        # No bootstrap: use the observed curve for low/upp/mid alike
        tpr_ib = np.array([tpr_list, tpr_list, tpr_list]).T
        tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0)  # Add starting 0
        tpr_ib = np.concatenate((tpr_ib, np.ones((1, 3))), axis=0)  # Add end 1
        binary_stats_train_dict = binary_evaluation(Y, stat)
        binary_stats_train = []
        for key, value in binary_stats_train_dict.items():
            binary_stats_train.append(value)
        stat_ib = []
        stat_ib.append(binary_stats_train)
        stat_ib.append(binary_stats_train)
        stat_ib.append(binary_stats_train)
    # Overlay an external test set if provided
    if test is not None:
        test_y = test[0]
        test_ypred = test[1]
        fpr_test, tpr_test, _ = metrics.roc_curve(test_y, test_ypred, pos_label=pos, drop_intermediate=False)
        auc_test = metrics.auc(fpr_test, tpr_test)
        binary_stats_test_dict = binary_evaluation(test_y, test_ypred)
        binary_stats_test = []
        for key, value in binary_stats_test_dict.items():
            binary_stats_test.append(value)
        stat_ib.append(binary_stats_test)
        tpr_test = interp(fpr_linspace, fpr_test, tpr_test)
        tpr_test = np.insert(tpr_test, 0, 0)  # Add starting 0
        tpr_test = np.concatenate([tpr_test, [1]])  # Add end 1
    fpr_linspace = np.insert(fpr_linspace, 0, 0)  # Add starting 0
    fpr_linspace = np.concatenate((fpr_linspace, [1]))  # Add end 1
    # If 'data', plot the observed curve as the midline instead of the bootstrap mid
    if plot == 'data':
        tpr_list_linspace = np.concatenate([[0], tpr_list])
        tpr_list_linspace = np.concatenate([tpr_list_linspace, [1]])
        tpr_ib[:, 2] = tpr_list_linspace
    elif plot == 'median':
        pass
    else:
        raise ValueError("plot must be 'data' or 'median'")
    # Clamp the band so the midline never escapes it
    for row in tpr_ib:
        if row[0] > row[2]:
            row[0] = row[2]
        if row[1] < row[2]:
            row[1] = row[2]
    # AUC of the lower, upper and mid curves
    auc_ib_low = metrics.auc(fpr_linspace, tpr_ib[:, 0])
    auc_ib_upp = metrics.auc(fpr_linspace, tpr_ib[:, 1])
    auc_ib_mid = metrics.auc(fpr_linspace, tpr_ib[:, 2])
    auc_ib = np.array([auc_ib_low, auc_ib_upp, auc_ib_mid])
    # Plot
    spec = 1 - fpr_linspace
    ci_ib = (tpr_ib[:, 1] - tpr_ib[:, 0]) / 2
    fig = figure(title="",
                 plot_width=width,
                 plot_height=height,
                 x_axis_label=xlabel,
                 y_axis_label=ylabel,
                 x_range=(-0.06, 1.06),
                 y_range=(-0.06, 1.06))
    fig.line([0, 1], [0, 1], color="black", line_dash="dashed", alpha=0.8, line_width=1)  # Equal Distribution Line
    # Train curve data source
    data_ib = {"x": fpr_linspace,
               "y": tpr_ib[:, 2],
               "lowci": tpr_ib[:, 0],
               "uppci": tpr_ib[:, 1],
               "spec": spec,
               "ci": ci_ib}
    source_ib = ColumnDataSource(data=data_ib)
    if bootnum > 1:
        if legend_basic == True:
            legend_ib = "Train"
        else:
            legend_ib = "Train (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
        figline_ib = fig.line("x",
                              "y",
                              color="green",
                              line_width=2.5,
                              alpha=0.7,
                              legend=legend_ib,
                              source=source_ib)
        fig.add_tools(HoverTool(renderers=[figline_ib],
                                tooltips=[("Specificity", "@spec{1.111}"),
                                          ("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
        # CI band around the train curve
        figband_ib = Band(base="x",
                          lower="lowci",
                          upper="uppci",
                          level="underlay",
                          fill_alpha=0.1,
                          line_width=0.5,
                          line_color="black",
                          fill_color="green",
                          source=source_ib)
        fig.add_layout(figband_ib)
    else:
        if legend_basic == True:
            legend_ib = "Train"
        else:
            legend_ib = "Train (AUC = {:.2f})".format(auc_ib[2])
        figline_ib = fig.line("x",
                              "y",
                              color="green",
                              line_width=2.5,
                              alpha=0.7,
                              legend=legend_ib,
                              source=source_ib)
        fig.add_tools(HoverTool(renderers=[figline_ib],
                                tooltips=[("Specificity", "@spec{1.111}"),
                                          ("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
    # Test curve
    if test is not None:
        if legend_basic == True:
            legend_oob = "Test"
        else:
            legend_oob = "Test (AUC = {:.2f})".format(auc_test)
        data_test = {"x": fpr_linspace,
                     "y": tpr_test,
                     "spec": spec}
        source_test = ColumnDataSource(data=data_test)
        figline_test = fig.line("x",
                                "y",
                                color="orange",
                                line_width=2.5,
                                alpha=0.7,
                                legend=legend_oob,
                                source=source_test)
        fig.add_tools(HoverTool(renderers=[figline_test],
                                tooltips=[("Specificity", "@spec{1.111}"),
                                          ("Sensitivity", "@y{1.111}"), ]))
    if grid_line == False:
        fig.xgrid.visible = False
        fig.ygrid.visible = False
    # Legend drawn manually with Labels (bokeh legend workaround)
    fig.legend.visible = False
    if legend == True:
        if legend_basic == True:
            fig.legend.visible = True
            fig.legend.location = "bottom_right"
        else:
            if test is None:
                oob_text = "Train (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
                oob_text_add = Label(x=0.38, y=0.02,
                                     text=oob_text, render_mode='css', text_font_size='9pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.12, bottom=0, left=0.30, right=1, color='white', alpha=1, line_color='black')
                fig.circle(0.34, 0.06, color='green', size=8)
            else:
                ib_text = "Train (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
                oob_text = "Test (AUC = {:.2f})".format(auc_test)
                ib_text_add = Label(x=0.38, y=0.10,
                                    text=ib_text, render_mode='canvas', text_font_size='9pt')
                fig.add_layout(ib_text_add)
                oob_text_add = Label(x=0.38, y=0.02,
                                     text=oob_text, render_mode='canvas', text_font_size='9pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.20, bottom=0, left=0.30, right=1, color='white', alpha=1, line_color='black')
                fig.circle(0.34, 0.14, color='green', size=8)
                fig.circle(0.34, 0.06, color='purple', size=8)
    if legend_basic == True:
        return fig, stat_ib
    else:
        return fig
def roc_boot(Y,
             stat,
             bootstat,
             bootstat_oob,
             bootidx,
             bootidx_oob,
             method,
             smoothval=0,
             jackstat=None,
             jackidx=None,
             xlabel="1 - Specificity",
             ylabel="Sensitivity",
             width=320,
             height=315,
             label_font_size="10pt",
             legend=True,
             grid_line=False,
             plot_num=0,
             plot='data',
             test=None,
             legend_basic=False,
             train=None,
             ci_only=False):
    """Plot ROC curves with in-bag (IB) and out-of-bag (OOB) bootstrap bands.

    Parameters
    ----------
    Y, stat : training labels and scores.
    bootstat, bootidx : in-bag bootstrap scores and the index sets they score.
    bootstat_oob, bootidx_oob : out-of-bag counterparts.
    method : CI method — 'BCA' (requires jackstat/jackidx), 'Per', or 'CPer'.
    smoothval : smoothing window; > 1 enables curve smoothing.
    plot_num : which curves to draw (0/1/4 both, 2 IB only, 3 OOB only).
    test : optional (y_true, y_score) whose curve replaces the OOB midline.
    train : optional (y_true, y_score) used for the IB midline when plot='data'.
    ci_only : draw only the CI bands, no midlines.

    Returns
    -------
    (fig, auc_ib, auc_oob)

    Bug fixes vs. previous revision:
    - ``tpr_test`` was smoothed even when ``test is None`` (NameError);
    - the OOB hover CI was computed from the IB band;
    - a duplicated AUC computation and a dead identical if/else at the
      return were removed.
    """
    # Choose the positive label so the training AUC is >= 0.5
    auc_check = roc_auc_score(Y, stat)
    if auc_check > 0.5:
        pos = 1
    else:
        pos = 0
    # Common FPR grid for vertical averaging
    fpr_linspace = np.linspace(0, 1, 1000)
    # ROC of the observed statistic on the common grid
    fpr_stat, tpr_stat, _ = metrics.roc_curve(Y, stat, pos_label=pos, drop_intermediate=False)
    auc_stat = metrics.auc(fpr_stat, tpr_stat)
    tpr_stat = interp(fpr_linspace, fpr_stat, tpr_stat)
    tpr_list = tpr_stat
    # In-bag bootstrap curves
    pos_loop = []
    tpr_bootstat = []
    for i in range(len(bootidx)):
        Yscore_boot = bootstat[i]
        Ytrue_boot = Y[bootidx[i]]
        fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=pos, drop_intermediate=False)
        auc_boot = metrics.auc(fpr_boot, tpr_boot)
        if auc_boot > 0.5:
            pos_loop.append(pos)
        else:
            # Flip the positive label for degenerate resamples
            fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=abs(1 - pos), drop_intermediate=False)
            pos_loop.append(abs(1 - pos))
        # Drop intermediates when fpr = 0
        tpr0_boot = tpr_boot[fpr_boot == 0][-1]
        tpr_boot = np.concatenate([[tpr0_boot], tpr_boot[fpr_boot > 0]])
        fpr_boot = np.concatenate([[0], fpr_boot[fpr_boot > 0]])
        # Vertical averaging
        idx = [np.abs(i - fpr_boot).argmin() for i in fpr_linspace]
        tpr_bootstat.append(np.array(tpr_boot[idx]))
    if method == 'BCA':
        # Jackknife curves for the BCA acceleration term
        tpr_jackstat = []
        for i in range(len(jackidx)):
            Yscore_jack = jackstat[i]
            Ytrue_jack = Y[jackidx[i]]
            fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=pos, drop_intermediate=False)
            auc_jack = metrics.auc(fpr_jack, tpr_jack)
            # Drop intermediates when fpr = 0
            tpr0_jack = tpr_jack[fpr_jack == 0][-1]
            tpr_jack = np.concatenate([[tpr0_jack], tpr_jack[fpr_jack > 0]])
            fpr_jack = np.concatenate([[0], fpr_jack[fpr_jack > 0]])
            # Vertical averaging
            idx = [np.abs(i - fpr_jack).argmin() for i in fpr_linspace]
            tpr_jackstat.append(np.array(tpr_jack[idx]))
    if method == 'BCA':
        tpr_ib = bca_method(tpr_bootstat, tpr_list, tpr_jackstat)
    if method == 'Per':
        tpr_ib = per_method(tpr_bootstat, tpr_list)
    if method == 'CPer':
        tpr_ib = cper_method(tpr_bootstat, tpr_list)
    tpr_ib = np.array(tpr_ib)
    # Force the ROC band to be monotonically non-decreasing
    if method != 'Per':
        for i in range(len(tpr_ib.T)):
            for j in range(1, len(tpr_ib)):
                if tpr_ib[j, i] < tpr_ib[j - 1, i]:
                    tpr_ib[j, i] = tpr_ib[j - 1, i]
    # Clamp the band so it contains the observed curve
    if method != 'Per':
        for i in range(len(tpr_ib)):
            if tpr_ib[i][0] > tpr_list[i]:
                tpr_ib[i][0] = tpr_list[i]
            if tpr_ib[i][1] < tpr_list[i]:
                tpr_ib[i][1] = tpr_list[i]
    tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0)  # Add starting 0
    tpr_ib = np.concatenate((tpr_ib, np.ones((1, 3))), axis=0)  # Add end 1
    # Midline = midpoint of the band (except pure percentile)
    if method != 'Per':
        tpr_ib[:, 2] = (tpr_ib[:, 0] + tpr_ib[:, 1]) / 2
    # Out-of-bag curves
    auc_bootstat_oob = []
    tpr_bootstat_oob = []
    for i in range(len(bootidx_oob)):
        Yscore_boot_oob = bootstat_oob[i]
        Ytrue_boot_oob = Y[bootidx_oob[i]]
        fpr_boot_oob, tpr_boot_oob, _ = metrics.roc_curve(Ytrue_boot_oob, Yscore_boot_oob, pos_label=pos, drop_intermediate=False)
        # (previously computed twice — once is enough)
        auc_boot_oob = metrics.auc(fpr_boot_oob, tpr_boot_oob)
        auc_bootstat_oob.append(auc_boot_oob)
        # Drop intermediates when fpr = 0
        tpr0_boot_oob = tpr_boot_oob[fpr_boot_oob == 0][-1]
        tpr_boot_oob = np.concatenate([[tpr0_boot_oob], tpr_boot_oob[fpr_boot_oob > 0]])
        fpr_boot_oob = np.concatenate([[0], fpr_boot_oob[fpr_boot_oob > 0]])
        # Vertical averaging
        idx_oob = [np.abs(i - fpr_boot_oob).argmin() for i in fpr_linspace]
        tpr_bootstat_oob.append(np.array(tpr_boot_oob[idx_oob]))
    # Percentile CI for the OOB band
    tpr_oob_lowci = np.percentile(tpr_bootstat_oob, 2.5, axis=0)
    tpr_oob_medci = np.percentile(tpr_bootstat_oob, 50, axis=0)
    tpr_oob_uppci = np.percentile(tpr_bootstat_oob, 97.5, axis=0)
    tpr_oob = np.array([tpr_oob_lowci, tpr_oob_uppci, tpr_oob_medci]).T
    auc_oob = per_method(auc_bootstat_oob, auc_stat)
    tpr_oob = np.concatenate((np.zeros((1, 3)), tpr_oob), axis=0)  # Add starting 0
    tpr_oob = np.concatenate((tpr_oob, np.ones((1, 3))), axis=0)  # Add end 1
    # Force the OOB band to be monotonically non-decreasing as well
    if method != 'Per':
        for i in range(len(tpr_oob.T)):
            for j in range(1, len(tpr_oob)):
                if tpr_oob[j, i] < tpr_oob[j - 1, i]:
                    tpr_oob[j, i] = tpr_oob[j - 1, i]
    # External test set replaces the OOB midline
    if test is not None:
        test_y = test[0]
        test_ypred = test[1]
        fpr_test, tpr_test, _ = metrics.roc_curve(test_y, test_ypred, pos_label=pos, drop_intermediate=False)
        auc_test = metrics.auc(fpr_test, tpr_test)
        tpr_test = interp(fpr_linspace, fpr_test, tpr_test)
        tpr_test = np.insert(tpr_test, 0, 0)  # Add starting 0
        tpr_test = np.concatenate((tpr_test, [1]))  # Add end 1
        tpr_oob[:, 2] = tpr_test
    # Optionally recompute the observed curve from an explicit train set
    if train is not None:
        fpr_stat, tpr_stat, _ = metrics.roc_curve(train[0], train[1], pos_label=pos, drop_intermediate=False)
        tpr_stat = interp(fpr_linspace, fpr_stat, tpr_stat)
        tpr_list = tpr_stat
    # If 'data', plot the observed curve as the IB midline
    if plot == 'data':
        tpr_list_linspace = np.concatenate([[0], tpr_list])  # Add starting 0
        tpr_list_linspace = np.concatenate([tpr_list_linspace, [1]])  # Add end 1
        tpr_ib[:, 2] = tpr_list_linspace
    elif plot == 'median':
        pass
    else:
        # NOTE(review): unknown `plot` values were silently accepted here; kept as-is.
        pass
    fpr_linspace = np.insert(fpr_linspace, 0, 0)  # Add starting 0
    fpr_linspace = np.concatenate((fpr_linspace, [1]))  # Add end 1
    # AUCs of the lower/upper/mid curves
    auc_ib_low = metrics.auc(fpr_linspace, tpr_ib[:, 0])
    auc_ib_upp = metrics.auc(fpr_linspace, tpr_ib[:, 1])
    auc_ib_mid = metrics.auc(fpr_linspace, tpr_ib[:, 2])
    auc_ib = np.array([auc_ib_low, auc_ib_upp, auc_ib_mid])
    auc_oob_low = metrics.auc(fpr_linspace, tpr_oob[:, 0])
    auc_oob_upp = metrics.auc(fpr_linspace, tpr_oob[:, 1])
    auc_oob_mid = metrics.auc(fpr_linspace, tpr_oob[:, 2])
    auc_oob = np.array([auc_oob_low, auc_oob_upp, auc_oob_mid])
    # Smooth if requested
    if smoothval > 1:
        tpr_ib[:, 0] = smooth(tpr_ib[:, 0], smoothval)
        tpr_ib[:, 1] = smooth(tpr_ib[:, 1], smoothval)
        tpr_ib[:, 2] = smooth(tpr_ib[:, 2], smoothval)
        tpr_oob[:, 0] = smooth(tpr_oob[:, 0], smoothval)
        tpr_oob[:, 1] = smooth(tpr_oob[:, 1], smoothval)
        tpr_oob[:, 2] = smooth(tpr_oob[:, 2], smoothval)
        # BUGFIX: tpr_test only exists when a test set was supplied
        if test is not None:
            tpr_test = smooth(tpr_test, smoothval)
    # Plot
    spec = 1 - fpr_linspace
    ci_ib = (tpr_ib[:, 1] - tpr_ib[:, 0]) / 2
    # BUGFIX: the OOB hover CI was computed from the IB band by mistake
    ci_oob = (tpr_oob[:, 1] - tpr_oob[:, 0]) / 2
    fig = figure(title="",
                 plot_width=width,
                 plot_height=height,
                 x_axis_label=xlabel,
                 y_axis_label=ylabel,
                 x_range=(-0.06, 1.06),
                 y_range=(-0.06, 1.06))
    fig.line([0, 1], [0, 1], color="black", line_dash="dashed", alpha=0.8, line_width=1)
    # IB curve + band
    data_ib = {"x": fpr_linspace,
               "y": tpr_ib[:, 2],
               "lowci": tpr_ib[:, 0],
               "uppci": tpr_ib[:, 1],
               "spec": spec,
               "ci": ci_ib}
    source_ib = ColumnDataSource(data=data_ib)
    if plot_num in [0, 1, 2, 4]:
        if legend_basic == True:
            legend_text = "Train"
        else:
            legend_text = "IB (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
        if ci_only == False:
            figline_ib = fig.line("x",
                                  "y",
                                  color="green",
                                  line_width=2.5,
                                  alpha=0.7,
                                  legend=legend_text,
                                  source=source_ib)
            fig.add_tools(HoverTool(renderers=[figline_ib],
                                    tooltips=[("Specificity", "@spec{1.111}"),
                                              ("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
        figband_ib = Band(base="x",
                          lower="lowci",
                          upper="uppci",
                          level="underlay",
                          fill_alpha=0.1,
                          line_width=0.5,
                          line_color="black",
                          fill_color="green",
                          source=source_ib)
        fig.add_layout(figband_ib)
        figlegend_ib = fig.rect([10], [20], [5], [5], color="green", fill_alpha=0.1, line_width=0.5, line_color="grey", legend="IB (95% CI)")
    # OOB curve + band
    data_oob = {"x": fpr_linspace,
                "y": tpr_oob[:, 2],
                "lowci": tpr_oob[:, 0],
                "uppci": tpr_oob[:, 1],
                "spec": spec,
                "ci": ci_oob}
    source_oob = ColumnDataSource(data=data_oob)
    if plot_num in [0, 1, 3, 4]:
        if legend_basic == True:
            legend_text = "Test"
        else:
            legend_text = "OOB (AUC = {:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
        if ci_only == False:
            figline = fig.line("x",
                               "y",
                               color="orange",
                               line_width=2.5,
                               alpha=0.7,
                               legend=legend_text,
                               source=source_oob)
            fig.add_tools(HoverTool(renderers=[figline],
                                    tooltips=[("Specificity", "@spec{1.111}"),
                                              ("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
        figband_oob = Band(base="x",
                           lower="lowci",
                           upper="uppci",
                           level="underlay",
                           fill_alpha=0.1,
                           line_width=0.5,
                           line_color="black",
                           fill_color="orange",
                           source=source_oob)
        fig.add_layout(figband_oob)
        figlegend_ib = fig.rect([10], [20], [5], [5], color="orange", fill_alpha=0.1, line_width=0.5, line_color="grey", legend="OOB (95% CI)")
    if grid_line == False:
        fig.xgrid.visible = False
        fig.ygrid.visible = False
    # Legend drawn manually with Labels (bokeh legend-visibility workaround);
    # Label positions are tuned per figure width.
    ib_text = "IB (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
    oob_text = "OOB (AUC = {:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
    fig.legend.visible = False
    if legend_basic == True:
        fig.legend.location = "bottom_right"
        fig.legend.visible = True
    else:
        if test is not None:
            if legend == True:
                ib_text_add = Label(x=0.38, y=0.18,
                                    text=ib_text, render_mode='canvas', text_font_size='9pt')
                fig.add_layout(ib_text_add)
                oob_text_add = Label(x=0.38, y=0.10,
                                     text=oob_text, render_mode='canvas', text_font_size='9pt')
                fig.add_layout(oob_text_add)
                test_text = "Test (AUC = {:.2f})".format(auc_test)
                test_text_add = Label(x=0.38, y=0.02,
                                      text=test_text, render_mode='canvas', text_font_size='9pt')
                fig.add_layout(test_text_add)
                fig.quad(top=0.28, bottom=0, left=0.30, right=1, color='white', alpha=1, line_color='lightgrey')
                fig.circle(0.34, 0.22, color='green', size=8)
                fig.circle(0.34, 0.14, color='orange', size=8)
                fig.circle(0.34, 0.06, color='purple', size=8)
        else:
            if legend == True:
                if plot_num in [0, 1, 4]:
                    if width == 320:
                        ib_text_add = Label(x=0.38, y=0.10,
                                            text=ib_text, render_mode='canvas', text_font_size='9pt')
                        fig.add_layout(ib_text_add)
                        oob_text_add = Label(x=0.38, y=0.02,
                                             text=oob_text, render_mode='canvas', text_font_size='9pt')
                        fig.add_layout(oob_text_add)
                        fig.quad(top=0.20, bottom=0, left=0.30, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.34, 0.14, color='green', size=8)
                        fig.circle(0.34, 0.06, color='orange', size=8)
                    elif width == 475:
                        ib_text_add = Label(x=0.52, y=0.15,
                                            text=ib_text, render_mode='canvas', text_font_size='10pt')
                        fig.add_layout(ib_text_add)
                        oob_text_add = Label(x=0.52, y=0.05,
                                             text=oob_text, render_mode='canvas', text_font_size='10pt')
                        fig.add_layout(oob_text_add)
                        fig.quad(top=0.25, bottom=0, left=0.42, right=1, color='white', alpha=0.4, line_color='lightgrey')
                        fig.circle(0.47, 0.17, color='green', size=8)
                        fig.circle(0.47, 0.07, color='orange', size=8)
                    elif width == 316:
                        ib_text_add = Label(x=0.22, y=0.15,
                                            text=ib_text, render_mode='canvas', text_font_size='10pt')
                        fig.add_layout(ib_text_add)
                        oob_text_add = Label(x=0.22, y=0.05,
                                             text=oob_text, render_mode='canvas', text_font_size='10pt')
                        fig.add_layout(oob_text_add)
                        fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.17, 0.18, color='green', size=8)
                        fig.circle(0.17, 0.08, color='orange', size=8)
                    elif width == 237:
                        ib_text_1 = "IB (AUC = {:.2f}".format(auc_ib[2])
                        ib_text_2 = "+/- {:.2f})".format((auc_ib[1] - auc_ib[0]) / 2)
                        oob_text_1 = "OOB (AUC ="
                        oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
                        ib_text_add_1 = Label(x=0.38, y=0.28,
                                              text=ib_text_1, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(ib_text_add_1)
                        ib_text_add_2 = Label(x=0.38, y=0.19,
                                              text=ib_text_2, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(ib_text_add_2)
                        oob_text_add_1 = Label(x=0.38, y=0.09,
                                               text=oob_text_1, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(oob_text_add_1)
                        oob_text_add_2 = Label(x=0.38, y=0.00,
                                               text=oob_text_2, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(oob_text_add_2)
                        fig.quad(top=0.4, bottom=0, left=0.20, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.27, 0.30, color='green', size=8)
                        fig.circle(0.27, 0.10, color='orange', size=8)
                    elif width == 190:
                        ib_text_1 = "IB (AUC ="
                        ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
                        oob_text_1 = "OOB (AUC ="
                        oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
                        ib_text_add_1 = Label(x=0.28, y=0.32,
                                              text=ib_text_1, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(ib_text_add_1)
                        ib_text_add_2 = Label(x=0.28, y=0.23,
                                              text=ib_text_2, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(ib_text_add_2)
                        oob_text_add_1 = Label(x=0.28, y=0.09,
                                               text=oob_text_1, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(oob_text_add_1)
                        oob_text_add_2 = Label(x=0.28, y=0.00,
                                               text=oob_text_2, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(oob_text_add_2)
                        fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.20, 0.30, color='green', size=8)
                        fig.circle(0.20, 0.10, color='orange', size=8)
                    elif width == 158:
                        ib_text_1 = "IB (AUC ="
                        ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
                        oob_text_1 = "OOB (AUC ="
                        oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
                        ib_text_add_1 = Label(x=0.28, y=0.32,
                                              text=ib_text_1, render_mode='canvas', text_font_size='6pt')
                        fig.add_layout(ib_text_add_1)
                        ib_text_add_2 = Label(x=0.28, y=0.23,
                                              text=ib_text_2, render_mode='canvas', text_font_size='6pt')
                        fig.add_layout(ib_text_add_2)
                        oob_text_add_1 = Label(x=0.28, y=0.09,
                                               text=oob_text_1, render_mode='canvas', text_font_size='6pt')
                        fig.add_layout(oob_text_add_1)
                        oob_text_add_2 = Label(x=0.28, y=0.00,
                                               text=oob_text_2, render_mode='canvas', text_font_size='6pt')
                        fig.add_layout(oob_text_add_2)
                        fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.20, 0.30, color='green', size=8)
                        fig.circle(0.20, 0.10, color='orange', size=8)
                    elif width == 135:
                        ib_text_1 = "IB (AUC ="
                        ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
                        oob_text_1 = "OOB (AUC ="
                        oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
                        ib_text_add_1 = Label(x=0.28, y=0.32,
                                              text=ib_text_1, render_mode='canvas', text_font_size='5pt')
                        fig.add_layout(ib_text_add_1)
                        ib_text_add_2 = Label(x=0.28, y=0.23,
                                              text=ib_text_2, render_mode='canvas', text_font_size='5pt')
                        fig.add_layout(ib_text_add_2)
                        oob_text_add_1 = Label(x=0.28, y=0.09,
                                               text=oob_text_1, render_mode='canvas', text_font_size='5pt')
                        fig.add_layout(oob_text_add_1)
                        oob_text_add_2 = Label(x=0.28, y=0.00,
                                               text=oob_text_2, render_mode='canvas', text_font_size='5pt')
                        fig.add_layout(oob_text_add_2)
                        fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.20, 0.30, color='green', size=8)
                        fig.circle(0.20, 0.10, color='orange', size=8)
                    else:
                        fig.legend.location = "bottom_right"
                        fig.legend.visible = True
                elif plot_num == 2:
                    if width == 475:
                        ib_text_add = Label(x=0.52, y=0.03,
                                            text=ib_text, render_mode='canvas', text_font_size='10pt')
                        fig.add_layout(ib_text_add)
                        fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4, line_color='lightgrey')
                        fig.circle(0.47, 0.05, color='green', size=8)
                    elif width == 316:
                        ib_text_add = Label(x=0.30, y=0.02,
                                            text=ib_text, render_mode='canvas', text_font_size='10pt')
                        fig.add_layout(ib_text_add)
                        fig.quad(top=0.10, bottom=0, left=0.20, right=1, color='white', alpha=0.4, line_color='lightgrey')
                        fig.circle(0.25, 0.05, color='green', size=8)
                    elif width == 237:
                        ib_text_1 = "IB (AUC = {:.2f}".format(auc_ib[2])
                        ib_text_2 = "+/- {:.2f})".format((auc_ib[1] - auc_ib[0]) / 2)
                        ib_text_add_1 = Label(x=0.38, y=0.09,
                                              text=ib_text_1, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(ib_text_add_1)
                        ib_text_add_2 = Label(x=0.38, y=0.00,
                                              text=ib_text_2, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(ib_text_add_2)
                        fig.quad(top=0.2, bottom=0, left=0.20, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.27, 0.10, color='green', size=8)
                    elif width == 190:
                        ib_text_1 = "IB (AUC ="
                        ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
                        ib_text_add_1 = Label(x=0.28, y=0.09,
                                              text=ib_text_1, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(ib_text_add_1)
                        ib_text_add_2 = Label(x=0.28, y=0.00,
                                              text=ib_text_2, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(ib_text_add_2)
                        fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.20, 0.10, color='green', size=8)
                    elif width == 158:
                        ib_text_1 = "IB (AUC ="
                        ib_text_2 = "{:.2f}+/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
                        ib_text_add_1 = Label(x=0.28, y=0.09,
                                              text=ib_text_1, render_mode='canvas', text_font_size='6pt')
                        fig.add_layout(ib_text_add_1)
                        ib_text_add_2 = Label(x=0.28, y=0,
                                              text=ib_text_2, render_mode='canvas', text_font_size='6pt')
                        fig.add_layout(ib_text_add_2)
                        fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.20, 0.10, color='green', size=8)
                    elif width == 135:
                        ib_text_1 = "IB (AUC ="
                        ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
                        ib_text_add_1 = Label(x=0.28, y=0.09,
                                              text=ib_text_1, render_mode='canvas', text_font_size='5pt')
                        fig.add_layout(ib_text_add_1)
                        ib_text_add_2 = Label(x=0.28, y=0.00,
                                              text=ib_text_2, render_mode='canvas', text_font_size='5pt')
                        fig.add_layout(ib_text_add_2)
                        fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.20, 0.10, color='green', size=8)
                    else:
                        fig.legend.location = "bottom_right"
                        fig.legend.visible = True
                elif plot_num == 3:
                    if width == 475:
                        oob_text_add = Label(x=0.52, y=0.03,
                                             text=oob_text, render_mode='canvas', text_font_size='10pt')
                        fig.add_layout(oob_text_add)
                        fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4, line_color='lightgrey')
                        fig.circle(0.47, 0.05, color='orange', size=8)
                    elif width == 316:
                        oob_text_add = Label(x=0.22, y=0.02,
                                             text=oob_text, render_mode='canvas', text_font_size='10pt')
                        fig.add_layout(oob_text_add)
                        fig.quad(top=0.10, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.17, 0.05, color='orange', size=8)
                    elif width == 237:
                        oob_text_1 = "OOB (AUC ="
                        oob_text_2 = "{:.2f}+/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
                        oob_text_add_1 = Label(x=0.38, y=0.09,
                                               text=oob_text_1, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(oob_text_add_1)
                        oob_text_add_2 = Label(x=0.38, y=0.00,
                                               text=oob_text_2, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(oob_text_add_2)
                        fig.quad(top=0.2, bottom=0, left=0.20, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.27, 0.10, color='orange', size=8)
                    elif width == 190:
                        oob_text_1 = "OOB (AUC ="
                        oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
                        oob_text_add_1 = Label(x=0.28, y=0.09,
                                               text=oob_text_1, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(oob_text_add_1)
                        oob_text_add_2 = Label(x=0.28, y=0.00,
                                               text=oob_text_2, render_mode='canvas', text_font_size='6.8pt')
                        fig.add_layout(oob_text_add_2)
                        fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.20, 0.10, color='orange', size=8)
                    elif width == 158:
                        oob_text_1 = "OOB (AUC ="
                        oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
                        oob_text_add_1 = Label(x=0.28, y=0.09,
                                               text=oob_text_1, render_mode='canvas', text_font_size='6pt')
                        fig.add_layout(oob_text_add_1)
                        oob_text_add_2 = Label(x=0.28, y=0.00,
                                               text=oob_text_2, render_mode='canvas', text_font_size='6pt')
                        fig.add_layout(oob_text_add_2)
                        fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.20, 0.10, color='orange', size=8)
                    elif width == 135:
                        oob_text_1 = "OOB (AUC ="
                        oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
                        oob_text_add_1 = Label(x=0.28, y=0.09,
                                               text=oob_text_1, render_mode='canvas', text_font_size='5pt')
                        fig.add_layout(oob_text_add_1)
                        oob_text_add_2 = Label(x=0.28, y=0.00,
                                               text=oob_text_2, render_mode='canvas', text_font_size='5pt')
                        fig.add_layout(oob_text_add_2)
                        fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                        fig.circle(0.20, 0.10, color='orange', size=8)
                    else:
                        fig.legend.location = "bottom_right"
                        fig.legend.visible = True
    # Both branches of the old `if train is None` returned the same tuple;
    # the dead conditional is collapsed into a single return.
    return fig, auc_ib, auc_oob
def roc_cv(Y_predfull, Y_predcv, Ytrue, width=450, height=350, xlabel="1-Specificity", ylabel="Sensitivity", legend=True, label_font_size="13pt", show_title=True, title_font_size="13pt", title="", plot_num=0, grid_line=False):
    """Build a bokeh ROC figure for a full model plus cross-validated curves.

    Draws the full-data ROC curve, a vertically-averaged median CV curve with
    a 95% CI band (bootstrapped from Y_predcv), hover tools, and a manually
    drawn legend whose geometry is hard-coded per known figure width.

    Parameters
    ----------
    Y_predfull : array-like
        Predicted scores from the model fitted on all data.
    Y_predcv : sequence of array-like
        One predicted-score vector per CV/bootstrap round.
    Ytrue : array-like
        Binary ground-truth labels.
    width, height : int
        Figure size in pixels; the legend layout below switches on exact
        width values (475/316/237/190/158/135), presumably matching the
        grid layouts used elsewhere in this package — TODO confirm.
    plot_num : int
        Selects which curves/legend entries are shown (FULL, CV, or both).
    grid_line : bool
        Show/hide background grid lines.

    Returns
    -------
    bokeh Figure
    """
    # If the full-model AUC is below 0.5 the score is anti-correlated with
    # the label, so flip the positive label before building the curves.
    auc_check = roc_auc_score(Ytrue, Y_predfull)
    if auc_check > 0.5:
        pos = 1
    else:
        pos = 0
    fprf, tprf, thresholdf = metrics.roc_curve(Ytrue, Y_predfull, pos_label=pos, drop_intermediate=False)
    specf = 1 - fprf
    auc_full = metrics.auc(fprf, tprf)
    # Constant column so the AUC is available to the HoverTool at every point.
    auc_full_hover = [auc_full] * len(tprf)
    # Figure
    data = {"x": fprf, "y": tprf, "spec": specf, "aucfull": auc_full_hover}
    source = ColumnDataSource(data=data)
    fig = figure(title=title, plot_width=width, plot_height=height, x_axis_label=xlabel, y_axis_label=ylabel, x_range=(-0.06, 1.06), y_range=(-0.06, 1.06))
    # Figure: add line
    # fig.line([0, 1], [0, 1], color="black", line_dash="dashed", line_width=2.5, legend="Equal Distribution Line")
    fig.line([0, 1], [0, 1], color="black", line_dash="dashed", alpha=0.8, line_width=1)
    # Full-data ROC curve (green), shown unless plot_num == 3 (CV only).
    if plot_num in [0, 1, 2, 4]:
        figline = fig.line("x", "y", color="green", line_width=2.5, alpha=0.8, legend="FULL (AUC = {:.2f})".format(auc_full), source=source)
        fig.add_tools(HoverTool(renderers=[figline], tooltips=[("Specificity", "@spec{1.111}"), ("Sensitivity", "@y{1.111}")]))
    else:
        pass
    # ADD CV
    # bootstrap using vertical averaging
    # fpr, tpr with drop_intermediates for fpr = 0 (useful for plot... since we plot specificity on x-axis, we don't need intermediates when fpr=0)
    fpr = fprf
    tpr = tprf
    # Keep only the last tpr where fpr == 0, then the strictly-positive fpr part.
    tpr0 = tpr[fpr == 0][-1]
    tpr = np.concatenate([[tpr0], tpr[fpr > 0]])
    fpr = np.concatenate([[0], fpr[fpr > 0]])
    tpr_boot = []
    boot_stats = []
    auc_cv = []
    for i in range(len(Y_predcv)):
        # Resample and get tpr, fpr
        Yscore_res = Y_predcv[i]
        fpr_res, tpr_res, threshold_res = metrics.roc_curve(Ytrue, Yscore_res, pos_label=pos, drop_intermediate=False)
        auc_cv.append(metrics.auc(fpr_res, tpr_res))
        # Drop intermediates when fpr=0
        tpr0_res = tpr_res[fpr_res == 0][-1]
        tpr_res = np.concatenate([[tpr0_res], tpr_res[fpr_res > 0]])
        fpr_res = np.concatenate([[0], fpr_res[fpr_res > 0]])
        # Vertical averaging... use closest fpr_res to fpr, and append the corresponding tpr
        idx = [np.abs(i - fpr_res).argmin() for i in fpr]
        tpr_list = tpr_res[idx]
        tpr_boot.append(tpr_list)
    # Get CI for tpr (pointwise 2.5/97.5/50 percentiles across rounds)
    tpr_lowci = np.percentile(tpr_boot, 2.5, axis=0)
    tpr_uppci = np.percentile(tpr_boot, 97.5, axis=0)
    tpr_medci = np.percentile(tpr_boot, 50, axis=0)
    # Add the starting 0 so every curve is anchored at the origin
    tpr = np.insert(tpr, 0, 0)
    fpr = np.insert(fpr, 0, 0)
    tpr_lowci = np.insert(tpr_lowci, 0, 0)
    tpr_uppci = np.insert(tpr_uppci, 0, 0)
    tpr_medci = np.insert(tpr_medci, 0, 0)
    # Get CI for cv
    auc_lowci = np.percentile(auc_cv, 2.5, axis=0)
    auc_uppci = np.percentile(auc_cv, 97.5, axis=0)
    auc_medci = np.percentile(auc_cv, 50, axis=0)
    # Half-width of the 95% interval, displayed as "+/-" in labels.
    auc_ci = (auc_uppci - auc_lowci) / 2
    auc_ci_hover = [auc_ci] * len(tpr_medci)
    auc_med_hover = [auc_medci] * len(tpr_medci)
    # Concatenate tpr_ci
    tpr_ci = np.array([tpr_lowci, tpr_uppci, tpr_medci])
    # specificity and ci-interval for HoverTool
    spec2 = 1 - fpr
    ci2 = (tpr_uppci - tpr_lowci) / 2
    data2 = {"x": fpr, "y": tpr_medci, "lowci": tpr_lowci, "uppci": tpr_uppci, "spec": spec2, "ci": ci2}
    source2 = ColumnDataSource(data=data2)
    # CV median ROC curve (orange) with its 95% CI band, unless plot_num == 2.
    if plot_num in [0, 1, 3, 4]:
        figline = fig.line("x", "y", color="orange", line_width=2.5, alpha=0.8, legend="CV (AUC = {:.2f} +/- {:.2f})".format(auc_medci, auc_ci,), source=source2)
        fig.add_tools(HoverTool(renderers=[figline], tooltips=[("Specificity", "@spec{1.111}"), ("Sensitivity", "@y{1.111} (+/- @ci{1.111})")]))
        # Figure: add 95CI band
        figband = Band(base="x", lower="lowci", upper="uppci", level="underlay", fill_alpha=0.1, line_width=0.5, line_color="black", fill_color="orange", source=source2)
        fig.add_layout(figband)
    else:
        pass
    # Change font size
    if show_title is True:
        fig.title.text = "AUC FULL ({}) & AUC CV ({} +/- {})".format(np.round(auc_full, 2), np.round(auc_medci, 2), np.round(auc_ci, 2))
        fig.title.text_font_size = title_font_size
    fig.xaxis.axis_label_text_font_size = label_font_size
    fig.yaxis.axis_label_text_font_size = label_font_size
    # Extra padding
    fig.min_border_left = 20
    fig.min_border_right = 20
    fig.min_border_top = 20
    fig.min_border_bottom = 20
    # Edit legend
    fig.legend.location = "bottom_right"
    # fig.legend.label_text_font_size = "1pt"
    # fig.legend.label_text_font = "1pt"
    # if legend is False:
    #     fig.legend.visible = False
    if grid_line == False:
        fig.xgrid.visible = False
        fig.ygrid.visible = False
    # Legend Manually because of bokeh issue
    # The built-in legend is hidden and replaced by Labels/quads/circles whose
    # positions are hand-tuned per exact figure width.
    auc_full = np.round(auc_full, 2)
    auc_cv1 = np.round(auc_medci, 2)
    auc_cv2 = np.round(auc_ci, 2)
    ib_text = "FULL (AUC = {:.2f})".format(auc_full)
    oob_text = "CV (AUC = {:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
    fig.legend.visible = False
    if legend == True:
        # Both curves present: two-line legend (FULL + CV).
        if plot_num in [0,1,4]:
            if width == 475:
                ib_text_add = Label(x=0.52, y=0.15,
                                    text=ib_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(ib_text_add)
                oob_text_add = Label(x=0.52, y=0.05,
                                     text=oob_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.25, bottom=0, left=0.42, right=1, color='white', alpha=0.4,line_color='lightgrey')
                fig.circle(0.47,0.17,color='green',size=8)
                fig.circle(0.47,0.07,color='orange',size=8)
            elif width == 316:
                ib_text_add = Label(x=0.30, y=0.15,
                                    text=ib_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(ib_text_add)
                oob_text_add = Label(x=0.30, y=0.05,
                                     text=oob_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.25, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.25,0.18,color='green',size=8)
                fig.circle(0.25,0.08,color='orange',size=8)
            elif width == 237:
                ib_text_add = Label(x=0.30, y=0.15,
                                    text=ib_text, render_mode='canvas', text_font_size= '6.4pt')
                fig.add_layout(ib_text_add)
                oob_text_add = Label(x=0.30, y=0.05,
                                     text=oob_text, render_mode='canvas', text_font_size= '6.4pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.25, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.25,0.18,color='green',size=8)
                fig.circle(0.25,0.08,color='orange',size=8)
            # Smaller widths: each legend entry is split over two text lines.
            elif width == 190:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                ib_text_add_1 = Label(x=0.28, y=0.32,
                                      text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.23,
                                      text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
                                       text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
                                       text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                fig.circle(0.20,0.30,color='green',size=8)
                fig.circle(0.20,0.10,color='orange',size=8)
            elif width == 158:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                ib_text_add_1 = Label(x=0.28, y=0.32,
                                      text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.23,
                                      text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(ib_text_add_2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
                                       text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
                                       text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.30,color='green',size=8)
                fig.circle(0.20,0.10,color='orange',size=8)
            elif width == 135:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                ib_text_add_1 = Label(x=0.28, y=0.32,
                                      text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.23,
                                      text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(ib_text_add_2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
                                       text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
                                       text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.30,color='green',size=8)
                fig.circle(0.20,0.10,color='orange',size=8)
            else:
                # Unknown width: fall back to the built-in bokeh legend.
                fig.legend.location = "bottom_right"
                fig.legend.visible = True
        # FULL curve only: single-entry legend.
        elif plot_num == 2:
            if width == 475:
                ib_text_add = Label(x=0.52, y=0.03,
                                    text=ib_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(ib_text_add)
                fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4,line_color='lightgrey')
                fig.circle(0.47,0.05,color='green',size=8)
            elif width == 316:
                ib_text_add = Label(x=0.40, y=0.02,
                                    text=ib_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(ib_text_add)
                fig.quad(top=0.12, bottom=0, left=0.30, right=1, color='white', alpha=0.4,line_color='lightgrey')
                fig.circle(0.35,0.05, color='green',size=8)
            elif width == 237:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                ib_text_add_1 = Label(x=0.38, y=0.09,
                                      text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.38, y=0.00,
                                      text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_2)
                fig.quad(top=0.21, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.27,0.10,color='green',size=8)
            elif width == 190:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                ib_text_add_1 = Label(x=0.28, y=0.09,
                                      text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.00,
                                      text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_2)
                fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='green',size=8)
            elif width == 158:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                ib_text_add_1 = Label(x=0.28, y=0.09,
                                      text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0,
                                      text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(ib_text_add_2)
                fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='green',size=8)
            elif width == 135:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                ib_text_add_1 = Label(x=0.28, y=0.09,
                                      text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.00,
                                      text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(ib_text_add_2)
                fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='green',size=8)
            else:
                fig.legend.location = "bottom_right"
                fig.legend.visible = True
        # CV curve only: single-entry legend.
        elif plot_num == 3:
            if width == 475:
                oob_text_add = Label(x=0.52, y=0.03,
                                     text=oob_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4,line_color='lightgrey')
                fig.circle(0.47,0.05,color='orange',size=8)
                # fig.circle(0.47,0.07,color='orange',size=8)
            elif width == 316:
                oob_text_add = Label(x=0.27, y=0.02,
                                     text=oob_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.11, bottom=0, left=0.17, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.22,0.05,color='orange',size=8)
            elif width == 237:
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                oob_text_add_1 = Label(x=0.38, y=0.09,
                                       text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.38, y=0.00,
                                       text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.21, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.27,0.10,color='orange',size=8)
            elif width == 190:
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
                                       text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
                                       text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='orange',size=8)
            elif width == 158:
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
                                       text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
                                       text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='orange',size=8)
            elif width == 135:
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
                                       text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
                                       text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='orange',size=8)
            else:
                fig.legend.location = "bottom_right"
                fig.legend.visible = True
    return fig
def per_method(bootstat, stat):
    """Calculates bootstrap confidence intervals using the percentile bootstrap interval.

    For each component, returns [2.5th percentile, 97.5th percentile, median]
    of the bootstrap replicates. Arrays with ndim > 1 are handled by recursing
    over the second axis.
    """
    if stat.ndim == 0:
        # Scalar statistic: a single [lower, upper, mid] triple.
        return np.array([np.percentile(bootstat, 2.5),
                         np.percentile(bootstat, 97.5),
                         np.percentile(bootstat, 50)])
    if stat.ndim == 1:
        # One triple per component (peak); columns of the stacked replicates.
        replicate_matrix = np.asarray(bootstat)
        triples = [[np.percentile(column, 2.5),
                    np.percentile(column, 97.5),
                    np.percentile(column, 50)]
                   for column in replicate_matrix.T]
        return np.array(triples)
    # Recursive component (reduce to ndim == 1 one slice at a time).
    per_component = []
    for k in range(stat.shape[1]):
        sliced_boot = [replicate[:, k] for replicate in bootstat]
        per_component.append(per_method(sliced_boot, stat[:, k]))
    return np.array(per_component)
def cper_method(bootstat, stat):
    """Calculates bootstrap confidence intervals using the bias-corrected bootstrap interval.

    The bias-correction factor z0 is derived from the proportion of bootstrap
    replicates at or above the observed statistic; the percentile levels are
    then shifted accordingly before being read off the replicates.
    """
    if stat.ndim == 1:
        nboot = len(bootstat)
        zalpha = norm.ppf(0.05 / 2)
        # Proportion of replicates >= observed value, per component.
        replicate_matrix = np.asarray(bootstat)
        prop = np.sum(replicate_matrix >= stat, axis=0) / float(nboot)
        z0 = -norm.ppf(prop)
        # Bias-corrected percentile levels (lower / upper / mid).
        pct_low = 100 * norm.cdf((2 * z0 + zalpha))
        pct_upp = 100 * norm.cdf((2 * z0 - zalpha))
        pct_mid = 100 * norm.cdf((2 * z0))
        triples = []
        for idx in range(len(pct_low)):
            column = replicate_matrix[:, idx]
            triples.append([np.percentile(column, pct_low[idx]),
                            np.percentile(column, pct_upp[idx]),
                            np.percentile(column, pct_mid[idx])])
        boot_ci = np.array(triples)
    # Recursive component (to get ndim = 1, and append)
    else:
        per_component = []
        for k in range(stat.shape[1]):
            sliced_boot = [replicate[:, k] for replicate in bootstat]
            per_component.append(cper_method(sliced_boot, stat[:, k]))
        boot_ci = np.array(per_component)
    return boot_ci
def bca_method(bootstat, stat, jackstat):
    """Calculates bootstrap confidence intervals using the bias-corrected and accelerated bootstrap interval.

    Parameters
    ----------
    bootstat : list of np.ndarray
        Bootstrap replicates of the statistic.
    stat : np.ndarray
        The observed statistic (per component).
    jackstat : list of np.ndarray
        Jackknife replicates, used to estimate the acceleration factor.

    Returns
    -------
    np.ndarray of [lower, upper, mid] per component (consistent with
    per_method / cper_method, which already return arrays).
    """
    if stat.ndim == 1:
        nboot = len(bootstat)
        zalpha = norm.ppf(0.05 / 2)
        obs = stat  # Observed mean
        meansum = np.zeros((1, len(obs))).flatten()
        for i in range(len(obs)):
            for j in range(len(bootstat)):
                if bootstat[j][i] >= obs[i]:
                    meansum[i] = meansum[i] + 1
        prop = meansum / nboot  # Proportion of times boot mean > obs mean
        z0 = -norm.ppf(prop, loc=0, scale=1)  # bias-correction factor
        # Acceleration factor from the skewness of the jackknife replicates.
        jmean = np.mean(jackstat, axis=0)
        num = np.sum((jmean - jackstat) ** 3, axis=0)
        den = np.sum((jmean - jackstat) ** 2, axis=0)
        ahat = num / (6 * den ** (3 / 2))
        # Ignore warnings (div-by-zero / NaN); invalid percentiles are dealt
        # with by the try/except fallback below.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            zL = z0 + norm.ppf(0.05 / 2, loc=0, scale=1)
            pct1 = 100 * norm.cdf((z0 + zL / (1 - ahat * zL)))
            zU = z0 + norm.ppf((1 - 0.05 / 2), loc=0, scale=1)
            pct2 = 100 * norm.cdf((z0 + zU / (1 - ahat * zU)))
            zM = z0 + norm.ppf((0.5), loc=0, scale=1)
            pct3 = 100 * norm.cdf((z0 + zM / (1 - ahat * zM)))
        boot_ci = []
        for i in range(len(pct1)):
            bootstat_i = [item[i] for item in bootstat]
            try:
                append_low = np.percentile(bootstat_i, pct1[i])
                append_upp = np.percentile(bootstat_i, pct2[i])
                append_mid = np.percentile(bootstat_i, pct3[i])
            except ValueError:
                # BCa percentiles invalid (e.g. NaN from ahat): fall back to
                # the bias-corrected (CPerc) interval, which has no skewness term.
                # BUGFIX: the original assigned pct2 twice (never pct3) and
                # used pct2 for the midpoint, returning upper == mid.
                pct1 = 100 * norm.cdf((2 * z0 + zalpha))
                pct2 = 100 * norm.cdf((2 * z0 - zalpha))
                pct3 = 100 * norm.cdf((2 * z0))
                append_low = np.percentile(bootstat_i, pct1[i])
                append_upp = np.percentile(bootstat_i, pct2[i])
                append_mid = np.percentile(bootstat_i, pct3[i])
            boot_ci.append([append_low, append_upp, append_mid])
        boot_ci = np.array(boot_ci)
    # Recursive component (to get ndim = 1, and append)
    else:
        ncomp = stat.shape[1]
        boot_ci = []
        for k in range(ncomp):
            var = []
            var_jstat = []
            for j in range(len(bootstat)):
                var.append(bootstat[j][:, k])
            for m in range(len(jackstat)):
                var_jstat.append(jackstat[m][:, k])
            var_boot = bca_method(var, stat[:, k], var_jstat)
            boot_ci.append(var_boot)
        boot_ci = np.array(boot_ci)
    return boot_ci
def get_sens_spec(Ytrue, Yscore, cuttoff_val):
    """Return (sensitivity, specificity) after binarising Yscore at cuttoff_val.

    Scores strictly greater than the cutoff are predicted positive.
    """
    predicted = (np.asarray(Yscore) > cuttoff_val).astype(int)
    tn, fp, fn, tp = metrics.confusion_matrix(Ytrue, predicted).ravel()
    # sensitivity = TP rate, specificity = TN rate
    return tp / (tp + fn), tn / (tn + fp)
def get_sens_cuttoff(Ytrue, Yscore, specificity_val):
    """Return (sensitivity, cutoff) at the requested specificity.

    Picks the ROC operating point whose FPR is closest to 1 - specificity_val.
    """
    target_fpr = 1 - specificity_val
    fpr, sensitivity, thresholds = metrics.roc_curve(Ytrue, Yscore, pos_label=1, drop_intermediate=False)
    idx = np.abs(fpr - target_fpr).argmin()  # closest operating point to the target
    # Perfect ROC curve special case: at specificity == 1 report sensitivity 1
    # (with a 0.5 cutoff) instead of 0.
    if specificity_val == 1 and sensitivity[idx] == 0:
        for fpr_i, sens_i in zip(fpr, sensitivity):
            if fpr_i == 1 and sens_i == 1:
                return 1, 0.5
    return sensitivity[idx], thresholds[idx]
def get_spec_sens_cuttoff(Ytrue, Yscore, metric, val):
    """Return specificity, sensitivity, cutoff value provided the metric and value used.

    Parameters
    ----------
    metric : str
        Either "specificity" (val is the target specificity) or
        "cutoffscore" (val is the score threshold).

    Raises
    ------
    ValueError
        For an unknown metric. (The original fell through to the return and
        crashed with an uninformative UnboundLocalError.)
    """
    if metric == "specificity":
        specificity = val
        sensitivity, threshold = get_sens_cuttoff(Ytrue, Yscore, val)
    elif metric == "cutoffscore":
        threshold = val
        sensitivity, specificity = get_sens_spec(Ytrue, Yscore, val)
    else:
        raise ValueError("metric must be 'specificity' or 'cutoffscore', got {!r}".format(metric))
    return specificity, sensitivity, threshold
def get_stats(Ytrue, Yscore, specificity, parametric):
    """Calculates binary metrics at the cutoff implied by the given specificity."""
    _, cutoff = get_sens_cuttoff(Ytrue, Yscore, specificity)
    return binary_metrics(Ytrue, Yscore, cut_off=cutoff, parametric=parametric)
|
import pickle
def Append(l):
    """Append one student record l ([rno, name, marks]) to student1.dat.

    Creates the file if it does not exist ("ab" mode). The context manager
    guarantees the file is closed even if pickling fails; the original also
    misspelled "successfully" in the confirmation message.
    """
    with open("student1.dat", "ab") as fwb:
        pickle.dump(l, fwb)
    print("data written successfully to binary file")
def read():
    """Print every record stored in student1.dat.

    The original hard-coded exactly four pickle.load calls and crashed with
    an unhandled EOFError whenever the file held fewer (or silently skipped
    records when it held more); read until EOF instead.
    """
    with open("student1.dat", "rb") as frb:
        while True:
            try:
                print(pickle.load(frb))
            except EOFError:
                break
def readall(file):
    """Print every pickled record in the given file, stopping at EOF."""
    with open(file, "rb") as handle:
        while True:
            try:
                record = pickle.load(handle)
            except EOFError:
                break
            print(record)
def search_rno(r):
    """Print the record in student1.dat whose roll number equals r."""
    matched = False
    with open("student1.dat", "rb") as handle:
        while True:
            try:
                record = pickle.load(handle)
            except EOFError:
                break
            if record[0] == r:
                print(record)
                matched = True
                break  # roll numbers are unique, so stop at the first hit
    if not matched:
        print("Roll no", r, "does not exist.")
def search_name(n):
    """Print every record in student1.dat whose name field equals n.

    Names may repeat, so the whole file is scanned (no early break).
    """
    matched = False
    with open("student1.dat", "rb") as handle:
        while True:
            try:
                record = pickle.load(handle)  # record layout: [rno, name, marks]
            except EOFError:
                break
            if record[1] == n:
                print(record)
                matched = True
    if not matched:
        print("Name", n, "does not exist.")
def update(r):
    """Rewrite student1.dat, replacing the record with roll number r.

    Copies every record to temp.dat, prompting for a new name and marks when
    the matching record is found, then swaps temp.dat into place.

    BUGFIX: the original ended with a bare ``readall()`` call, but readall
    requires the file name — every successful update raised a TypeError.
    """
    import os
    found = 0
    with open("student1.dat", "rb") as frb, open("temp.dat", "wb") as fwb:
        while True:
            try:
                l = pickle.load(frb)
            except EOFError:
                break
            if r == l[0]:
                l[1] = input("Enter the new name")
                l[2] = int(input("Enter the new marks"))
                found = 1
            pickle.dump(l, fwb)
    # Atomically (as far as this demo goes) replace the master file.
    os.remove("student1.dat")
    os.rename("temp.dat", "student1.dat")
    if found == 0:
        print("Roll no", r, "does not exist.")
    else:
        print("Record updated....")
        readall("student1.dat")  # FIX: pass the file name
def delete(r):
    """Rewrite student1.dat without the record whose roll number equals r.

    Records that should survive are copied to temp.dat, which then replaces
    the master file.

    BUGFIX: the original ended with a bare ``readall()`` call, but readall
    requires the file name — every successful delete raised a TypeError.
    """
    import os
    found = 0
    with open("student1.dat", "rb") as frb, open("temp.dat", "wb") as fwb:
        while True:
            try:
                l = pickle.load(frb)
            except EOFError:
                break
            if r != l[0]:  # record to be kept
                pickle.dump(l, fwb)
            else:
                found = 1
    os.remove("student1.dat")
    os.rename("temp.dat", "student1.dat")
    if found == 0:
        print("Roll no", r, "does not exist.")
    else:
        print("Record deleted....")
        readall("student1.dat")  # FIX: pass the file name
#to copy records matching the criteria from student1.dat to hundred.dat : copy paste
def copy():
    """Copy records with marks == 100 from student1.dat into hundred.dat (copy-paste)."""
    copied = 0
    with open("student1.dat", "rb") as src, open("hundred.dat", "wb") as dst:
        while True:
            try:
                record = pickle.load(src)
            except EOFError:
                break
            if record[2] == 100:  # full-marks records only
                pickle.dump(record, dst)
                copied += 1
    print(copied, "records copied to hundred.dat")
#to move records matching the criteria from student1.dat to hundred1.dat : cut paste
def transfer():
    """Move marks == 100 records from student1.dat into hundred1.dat (cut-paste).

    Non-matching records are written to temp.dat, which then replaces the
    master file, so the moved records disappear from student1.dat.
    """
    import os
    moved = 0
    with open("student1.dat", "rb") as master, \
            open("hundred1.dat", "wb") as taken, \
            open("temp.dat", "wb") as kept:
        while True:
            try:
                record = pickle.load(master)
            except EOFError:
                break
            if record[2] == 100:
                pickle.dump(record, taken)
                moved += 1
            else:
                pickle.dump(record, kept)
    os.remove("student1.dat")
    os.rename("temp.dat", "student1.dat")
    print(moved, "records moved to hundred1.dat")
def searchmenu():
    """Interactive sub-menu: search records by roll number or by name."""
    while True:
        print("--------------SEARCH MENU----------------")
        print("1. Search by roll no")
        print("2. Search by name")
        print("3. Return to Main Menu")
        choice = int(input("Enter your choice:"))
        if choice == 3:
            break  # back to the main menu
        if choice == 1:
            search_rno(int(input("Enter the roll no whose record is to be searched:")))
        elif choice == 2:
            search_name(input("Enter the name whose record is to be searched:"))
def mainmenu():
    """Top-level interactive menu for the student binary-file operations demo."""
    while True:
        print("--------------BINARY FILE OPERATIONS----------------")
        print("1. Append records")
        print("2. Display all records")
        print("3. Search Records")
        print("4. Update Records")
        print("5. Delete Records")
        print("6. Copy Records")
        print("7. Transfer Records")
        print("8. EXIT")
        choice = int(input("Enter your choice:"))
        if choice == 8:
            break
        if choice == 1:
            print("Enter the student record")
            roll = int(input("Enter the roll no:"))
            name = input("Enter the name:")
            marks = int(input("Enter the marks:"))
            Append([roll, name, marks])
        elif choice == 2:
            readall('student1.dat')
        elif choice == 3:
            searchmenu()
        elif choice == 4:
            update(int(input("Enter the roll no whose record is to be updated:")))
        elif choice == 5:
            delete(int(input("Enter the roll no whose record is to be deleted:")))
        elif choice == 6:
            copy()
        elif choice == 7:
            transfer()
# Run the interactive menu only when executed as a script; the original
# unconditionally called mainmenu() at import time, which made the module
# impossible to import (e.g. for reuse or testing) without blocking on input().
if __name__ == "__main__":
    mainmenu()
    print("bye bye......")
#search_name('Aakriti')
#search_rno(1)
#update(4)
#copy()
|
import json
from test_utils import MockRequestResponse, TestPlatformClient
class TestCreateConverstaion(TestPlatformClient):
    """Tests for the platform client's create_conversation call.

    Each test monkeypatches ``requests.request`` with a verifier function that
    asserts the outgoing HTTP method, URL, headers and JSON body, then returns
    a canned MockRequestResponse. (NOTE(review): class name misspells
    "Conversation"; renaming would change test collection, so it is kept.)
    """
    def test_create_conversation(self, layerclient, monkeypatch):
        """Default call: participants only; metadata None, distinct True."""
        def verify_request_args(method, url, headers, data, params):
            # Stand-in for requests.request: validate the outgoing call.
            assert method == 'POST'
            assert url == (
                'https://api.layer.com/apps/TEST_APP_UUID/conversations'
            )
            assert headers == {
                'Accept': 'application/vnd.layer+json; version=1.0',
                'Authorization': 'Bearer TEST_BEARER_TOKEN',
                'Content-Type': 'application/json',
            }
            json_data = json.loads(data)
            assert json_data == {
                'participants': 'TEST_CONVERSATION_UUID',
                'metadata': None,
                'distinct': True,
            }
            return MockRequestResponse(
                True,
                {
                    'id': 'layer:///conversation/TEST_CONVERSATION_UUID',
                    'url': 'layer:///conversation/TEST_CONVERSATION_UUID',
                },
            )
        monkeypatch.setattr('requests.request', verify_request_args)
        layerclient.create_conversation('TEST_CONVERSATION_UUID')
    def test_create_conversation_with_options(self, layerclient, monkeypatch):
        """Explicit options: distinct=False and a metadata dict are forwarded."""
        def verify_request_args(method, url, headers, data, params):
            # Stand-in for requests.request: validate the outgoing call.
            assert method == 'POST'
            assert url == (
                'https://api.layer.com/apps/TEST_APP_UUID/conversations'
            )
            assert headers == {
                'Accept': 'application/vnd.layer+json; version=1.0',
                'Authorization': 'Bearer TEST_BEARER_TOKEN',
                'Content-Type': 'application/json',
            }
            json_data = json.loads(data)
            assert json_data == {
                'participants': 'TEST_CONVERSATION_UUID',
                'metadata': {
                    'Topic': 'A coffee conversation',
                    'Background': '#C0FFEE',
                },
                'distinct': False,
            }
            return MockRequestResponse(
                True,
                {
                    'id': 'layer:///conversation/TEST_CONVERSATION_UUID',
                    'url': 'layer:///conversation/TEST_CONVERSATION_UUID',
                },
            )
        monkeypatch.setattr('requests.request', verify_request_args)
        layerclient.create_conversation(
            'TEST_CONVERSATION_UUID',
            False,
            {
                'Topic': 'A coffee conversation',
                'Background': '#C0FFEE',
            },
        )
|
#!/usr/bin/env python
#Copyright (c) 2016, Eduard Broecker
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
#WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
#PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
#DAMAGE.
import sys
sys.path.append('..')
import canmatrix.importany as im
def createStoreMacro(signal, prefix="", frame="frame"):
    """Generate a C ``storeSignal<prefix><name>(value)`` macro that packs a
    signal into a byte array named by ``frame``.

    Parameters
    ----------
    signal : canmatrix signal object (needs getStartbit, _is_little_endian,
        _signalsize, _name, _is_signed).
    prefix : str
        Inserted into the macro name (typically "_<framename>_").
    frame : str
        Name of the C byte-array variable the macro writes into.

    Returns the macro source as a string ending in a newline.
    """
    startBit = signal.getStartbit(bitNumbering=1, startLittle=1)
    byteOrder = signal._is_little_endian
    length = signal._signalsize
    startByte = int(startBit / 8)
    startBitInByte = startBit % 8
    currentTargetLength = (8 - startBitInByte)
    mask = ((0xffffffffffffffff) >> (64 - length))
    code = "#define storeSignal%s%s(value) do{" % (prefix, signal._name)
    if signal._is_signed:
        # Replicate the 64-bit sign bit down to bit (length-1) before masking.
        # BUGFIX: the original emitted the literal text "length" into the C
        # source (no % formatting), so the generated macro referenced an
        # undefined C identifier and did not compile.
        code += "value|=((value&0x8000000000000000)>>%d);" % (64 - length)
    code += "value&=0x%X;" % (mask)
    code += "%s[%d]|=value<<%d;" % (frame, startByte, startBitInByte)
    # NOTE(review): the continuation bytes below use a LEFT shift, while the
    # matching decode macro reads them back with a left shift — storing into
    # subsequent bytes would normally need a right shift. Also the
    # little-endian loop bound lacks the (startBit+length)%8 adjustment that
    # createDecodeMacro applies. Preserved as-is; confirm against generated
    # output before changing.
    if byteOrder:
        endByte = int((startBit + length) / 8)
        for count in range(startByte + 1, endByte):
            code += "%s[%d]|=value<<%d;" % (frame, count, currentTargetLength)
            currentTargetLength += 8
    else:  # motorola / big-endian
        endByte = int((startByte * 8 + 8 - startBitInByte - length) / 8)
        for count in range(startByte - 1, endByte - 1, -1):
            code += "%s[%d]|=value<<%d;" % (frame, count, currentTargetLength)
            currentTargetLength += 8
    code += "}while(0);\n"
    return code
def createDecodeMacro(signal, prefix="", macrosource="source", source="source"):
    """Generate a C ``getSignal<prefix><name>(<macrosource>)`` macro that
    extracts a signal from a byte array named by ``source``.

    Signed signals get an XOR/subtract sign-extension suffix; unsigned ones
    are simply masked to the signal length.
    """
    start_bit = signal.getStartbit(bitNumbering=1, startLittle=1)
    sig_len = signal._signalsize
    bit_mask = 0xffffffffffffffff >> (64 - sig_len)
    first_byte = int(start_bit / 8)
    bit_in_byte = start_bit % 8
    parts = ["#define getSignal%s%s(%s) ((((%s[%d])>>%d"
             % (prefix, signal._name, macrosource, source, first_byte, bit_in_byte)]
    shift = 8 - bit_in_byte
    if signal._is_little_endian:
        last_byte = int((start_bit + sig_len) / 8)
        if (start_bit + sig_len) % 8 == 0:
            last_byte -= 1  # signal ends exactly on a byte boundary
        for idx in range(first_byte + 1, last_byte + 1):
            parts.append("|(%s[%d])<<%d" % (source, idx, shift))
            shift += 8
    else:  # motorola / big-endian
        last_byte = int((first_byte * 8 + 8 - bit_in_byte - sig_len) / 8)
        for idx in range(first_byte - 1, last_byte - 1, -1):
            parts.append("|%s[%d]<<%d" % (source, idx, shift))
            shift += 8
    parts.append(")&0x%X)" % bit_mask)
    if signal._is_signed:
        # Two's-complement sign extension: flip the sign bit, then subtract it.
        sign_bit = 1 << (sig_len - 1)
        parts.append("^0x%x)-0x%x " % (sign_bit, sign_bit))
    else:
        parts.append(")")
    parts.append("\n")
    return "".join(parts)
def createDecodeMacrosForFrame(Frame, prefix="", macrosource="source", source="source"):
    """Concatenate decode macros for every signal carried by the frame."""
    return "".join(createDecodeMacro(sig, prefix, macrosource, source)
                   for sig in Frame._signals)
def createStoreMacrosForFrame(Frame, prefix="", framename="frame"):
    """Concatenate store macros for every signal carried by the frame."""
    return "".join(createStoreMacro(sig, prefix, frame=framename)
                   for sig in Frame._signals)
def main():
    """Command-line entry point: generate C signal macros from a CAN database.

    Reads a CAN database (dbc/dbf/kcd/arxml/xls(x)/sym via canmatrix
    importany) and writes decode/store macros for the selected frames or
    ECUs to the target C file.
    """
    from optparse import OptionParser
    usage = """
    %prog [options] canDatabaseFile targetFile.c
    import-file: *.dbc|*.dbf|*.kcd|*.arxml|*.xls(x)|*.sym
    """
    parser = OptionParser(usage=usage)
    parser.add_option("", "--frame",
                      dest="exportframe", default=None,
                      help="create macros for Frame(s); Comma seperated list of Names ")
    parser.add_option("", "--ecu",
                      dest="exportecu", default=None,
                      help="create macros for Ecu(s) Comma seperated ")
    (cmdlineOptions, args) = parser.parse_args()
    if len(args) < 2:
        parser.print_help()
        sys.exit(1)
    infile = args[0]
    outfile = args[1]
    dbs = im.importany(infile)
    # importany may return several matrices; use the first one.
    db = next(iter(dbs.values()))
    sourceCode = ""
    # No filter given: emit macros for every frame in the database.
    if cmdlineOptions.exportframe == None and cmdlineOptions.exportecu == None:
        for frame in db._fl._list:
            sourceCode += createDecodeMacrosForFrame(frame, "_" + frame._name + "_")
            sourceCode += createStoreMacrosForFrame(frame, "_" + frame._name + "_")
    # --frame: entries may be numeric IDs or frame names (ValueError falls
    # back to a name lookup).
    if cmdlineOptions.exportframe != None:
        for frameId in cmdlineOptions.exportframe.split(','):
            try:
                frame = db.frameById(int(frameId))
            except ValueError:
                frame = db.frameByName(frameId)
            if frame != None:
                sourceCode += createDecodeMacrosForFrame(frame, "_" + frame._name + "_")
                sourceCode += createStoreMacrosForFrame(frame, "_" + frame._name + "_")
    # --ecu: store macros for frames the ECU transmits, decode macros for
    # the signals it receives.
    if cmdlineOptions.exportecu != None:
        ecuList = cmdlineOptions.exportecu.split(',')
        for frame in db._fl._list:
            for ecu in ecuList:
                if ecu in frame._Transmitter:
                    sourceCode += createStoreMacrosForFrame(frame, "_" + frame._name + "_")
                for signal in frame._signals:
                    if ecu in signal._receiver:
                        sourceCode += createDecodeMacro(signal, "_" + frame._name + "_")
    cfile = open(outfile, "w")
    cfile.write(sourceCode)
    cfile.close()
# Script entry point: the process exit status is main()'s return value
# (None maps to exit code 0).
if __name__ == '__main__':
    sys.exit(main())
|
import numpy as np
from keras.datasets import mnist
import matplotlib.pyplot as plt
def init_params(layers_shape):
    """Initialise the network parameters for each consecutive layer pair.

    Weights are small Gaussian values (scaled by 0.01); biases start at zero.
    Returns {"W": [W1, ...], "b": [b1, ...]} where Wi has shape
    (layers_shape[i+1], layers_shape[i]) and bi has shape (layers_shape[i+1], 1).
    """
    weights = [np.random.randn(n_out, n_in) * 0.01
               for n_in, n_out in zip(layers_shape[:-1], layers_shape[1:])]
    biases = [np.zeros([n_out, 1]) for n_out in layers_shape[1:]]
    return {"W": weights, "b": biases}
def relu(x):
    """Element-wise rectified linear unit: zero out negative entries."""
    return x * (x > 0)
def softmax(x):
    """Column-wise softmax over axis 0.

    Subtracts the per-column maximum before exponentiating so that large
    logits do not overflow np.exp — mathematically identical to
    exp(x) / sum(exp(x)) but numerically stable.
    """
    shifted = x - np.max(x, axis=0, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=0, keepdims=True)
# Shift raw pixel bytes (0..255) into the approximate range [-0.5, 0.5).
def normalize_inputs(x):
    """Center pixel values around zero by scaling with 1/256."""
    return (x - 128) / 256
#returns onehot representation of a number
def one_hot(y):
    """Convert a (1, m) row of digit labels (0-9) into a (10, m) one-hot matrix.

    Vectorized replacement for the original per-column Python loop: fancy
    indexing sets res[label, column] = 1 for all columns in one shot.
    """
    m = y.shape[1]
    res = np.zeros([10, m])
    res[y[0, :], np.arange(m)] = 1
    return res
# Backward pass: plain SGD gradients for the 3-layer relu/relu/softmax net.
def calculate_gradients(X, Y, params, cache):
    """Return {"dW": [dW1..dW3], "db": [db1..db3]} for cross-entropy loss."""
    Z1, Z2, Z3 = cache["Z"]
    A1, A2, A3 = cache["A"]
    _, W2, W3 = params["W"]
    m = Y.shape[1]
    inv_m = 1 / m
    # Output layer: softmax + cross-entropy collapses to (A3 - Y).
    delta3 = A3 - Y
    grad_W3 = inv_m * np.dot(delta3, A2.T)
    grad_b3 = inv_m * np.sum(delta3, axis=1, keepdims=True)
    # Hidden layers: back-propagate through the relu mask (A > 0).
    delta2 = np.dot(W3.T, delta3) * (A2 > 0)
    grad_W2 = inv_m * np.dot(delta2, A1.T)
    grad_b2 = inv_m * np.sum(delta2, axis=1, keepdims=True)
    delta1 = np.dot(W2.T, delta2) * (A1 > 0)
    grad_W1 = inv_m * np.dot(delta1, X.T)
    grad_b1 = inv_m * np.sum(delta1, axis=1, keepdims=True)
    return {"dW": [grad_W1, grad_W2, grad_W3],
            "db": [grad_b1, grad_b2, grad_b3]}
# Exponentially weighted moving average of the gradients (classic momentum).
def calculate_momentum(grads, mom_grads, iterations, beta1=0.9):
    """Return updated {"VdW": [...], "Vdb": [...]} momentum terms.

    `iterations` is accepted for interface compatibility but is unused
    (no bias correction is applied).
    """
    new_VdW = [beta1 * v + (1 - beta1) * g
               for v, g in zip(mom_grads["VdW"], grads["dW"])]
    new_Vdb = [beta1 * v + (1 - beta1) * g
               for v, g in zip(mom_grads["Vdb"], grads["db"])]
    return {"VdW": new_VdW, "Vdb": new_Vdb}
# Exponentially weighted moving average of squared gradients (RMSProp cache).
def calculate_rms(grads, rms_grads, iterations, beta2=0.9, epsilon=0.00000001):
    """Return updated {"SdW": [...], "Sdb": [...]} second-moment terms.

    `iterations` and `epsilon` are accepted for interface compatibility
    but are unused here (epsilon is applied at update time instead).
    """
    new_SdW = [beta2 * s + (1 - beta2) * np.square(g)
               for s, g in zip(rms_grads["SdW"], grads["dW"])]
    new_Sdb = [beta2 * s + (1 - beta2) * np.square(g)
               for s, g in zip(rms_grads["Sdb"], grads["db"])]
    return {"SdW": new_SdW, "Sdb": new_Sdb}
# Forward pass through the relu-relu-softmax network.
def forward_prop(inputs, parameters):
    """Return (output probabilities, cache of pre/post-activations)."""
    W1, W2, W3 = parameters["W"]
    b1, b2, b3 = parameters["b"]
    pre1 = np.dot(W1, inputs) + b1
    act1 = relu(pre1)
    pre2 = np.dot(W2, act1) + b2
    act2 = relu(pre2)
    pre3 = np.dot(W3, act2) + b3
    act3 = softmax(pre3)
    # Cache layout matches what calculate_gradients expects.
    cache = {"Z": [pre1, pre2, pre3], "A": [act1, act2, act3]}
    return act3, cache
# Cross-entropy loss averaged over the batch.
def loss(y_pred, y):
    """Mean cross-entropy; `y` is one-hot, `y_pred` predicted probabilities."""
    batch = y.shape[1]
    return -np.sum(y * np.log(y_pred)) / batch
'''
backpropagation with sgd or rms-prop or momentum or adam
sgd is obtained with rms_prop=False and beta1=0
momentum is obtained with rms_prop=False and beta1!=0
rms_prop is obained with beta1=0 and rms_prop=True
adam is obtained with rms_prop=True and beta1!=0
'''
def back_propagate(X, Y, cache, params, mom_grads, rms_grads, iteration, rms_prop=False, learning_rate=0.01, beta1=0.9, beta2=0.9, epsilon=0.00000001):
    # One optimizer update: plain SGD, momentum, RMSProp or Adam depending on
    # `rms_prop` and `beta1` (see the module comment string above).
    # NOTE: the weight/bias arrays inside `params` are updated IN PLACE via
    # `-=`; the returned dict wraps those same arrays.
    #getting params
    W1, W2, W3 = params["W"]
    b1, b2, b3 = params["b"]
    #sgd gradients
    grads = calculate_gradients(X, Y, params, cache)
    #momentum
    mom_grads = calculate_momentum(grads, mom_grads, iteration, beta1=beta1)
    VdW1, VdW2, VdW3 = mom_grads["VdW"]
    Vdb1, Vdb2, Vdb3 = mom_grads["Vdb"]
    #rms_prop
    if rms_prop:
        rms_grads = calculate_rms(
            grads, rms_grads, iteration, beta2=beta2, epsilon=epsilon)
        SdW1, SdW2, SdW3 = rms_grads["SdW"]
        Sdb1, Sdb2, Sdb3 = rms_grads["Sdb"]
    else:
        # With RMSProp disabled the denominators below become sqrt(1) = 1,
        # reducing the update to (momentum) SGD.
        SdW1, SdW2, SdW3 = [1, 1, 1]
        Sdb1, Sdb2, Sdb3 = [1, 1, 1]
    #updating parameters
    W1 -= learning_rate*VdW1/(np.sqrt(SdW1)+epsilon)
    W2 -= learning_rate*VdW2/(np.sqrt(SdW2)+epsilon)
    W3 -= learning_rate*VdW3/(np.sqrt(SdW3)+epsilon)
    b1 -= learning_rate*Vdb1/(np.sqrt(Sdb1)+epsilon)
    b2 -= learning_rate*Vdb2/(np.sqrt(Sdb2)+epsilon)
    b3 -= learning_rate*Vdb3/(np.sqrt(Sdb3)+epsilon)
    W = [W1, W2, W3]
    b = [b1, b2, b3]
    return {"W": W, "b": b}, mom_grads, rms_grads
#calculates accuracy as guesses/size of the set
def accuracy(y_pred, y):
    """Fraction of columns whose argmax prediction matches the one-hot label.

    Vectorized replacement for the original per-column Python loop;
    returns a plain float in [0, 1].
    """
    predictions = np.argmax(y_pred, axis=0)
    truth = np.argmax(y, axis=0)
    return float(np.mean(predictions == truth))
# Train the 784-128-128-10 network on MNIST with Adam (momentum + RMSProp),
# plotting the training loss per epoch and reporting test-set accuracy.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
m, h, w = x_train.shape
m_test = x_test.shape[0]
iteration = 0
# hyperparameters
epochs = 30
batch_size = 32
alpha_zero = 0.0001
# beta1 = 0 => no momentum
beta1 = 0.9
#not really necessary to tune epsilon it is used to be sure that we don't divide by 0
epsilon = 0.0000001
beta2 = 0.9
# decay_rate = 0 => no decay
decay_rate = 0
# inputs preprocessing: images flattened to columns, labels one-hot encoded
x_train = normalize_inputs(np.reshape(x_train, [m, h*w]).T)
y_train = one_hot(np.reshape(y_train, [1, m]))
x_test = normalize_inputs(np.reshape(x_test, [m_test, h*w]).T)
y_test = one_hot(np.reshape(y_test, [1, m_test]))
# Zeros here broadcast against the first real gradients on iteration 1.
mom_grads = {"VdW": [0, 0, 0], "Vdb": [0, 0, 0]}
rms_grads = {"SdW": [0, 0, 0], "Sdb": [0, 0, 0]}
params = init_params([h*w, 128, 128, 10])
# initializing graph
loss_train = []
ep = []
plt.plot([], [], [], [])
plt.ylabel('loss')
plt.xlabel('epochs')
y_pred, _ = forward_prop(x_train, params)
l = loss(y_pred, y_train)
ep.append(0)
loss_train.append(l)
plt.ion()
plt.show()
# start training
for i in range(epochs):
    # learning rate decay
    alpha = alpha_zero/(1+decay_rate*i)
    # shuffling the dataset
    randomize = np.arange(m)
    np.random.shuffle(randomize)
    X = x_train[:, randomize]
    Y = y_train[:, randomize]
    # mini-batch training (any tail smaller than batch_size is dropped)
    for j in range(m//batch_size):
        iteration += 1
        X_batch = X[:, batch_size * j:batch_size * (j + 1)]
        Y_batch = Y[:, batch_size * j:batch_size * (j + 1)]
        y_pred, cache = forward_prop(X_batch, params)
        params, mom_grads, rms_grads = back_propagate(
            X_batch, Y_batch, cache, params, mom_grads, rms_grads, iteration, rms_prop=True, learning_rate=alpha, beta1=beta1, beta2=beta2, epsilon=epsilon)
    #computing debugging data like loss accuracy and loss graph
    y_pred, _ = forward_prop(x_train, params)
    l = loss(y_pred, y_train)
    acc = accuracy(y_pred, y_train)
    print("epoch: " + str(i) + " loss:" + str(l) + " accuracy:" + str(acc))
    loss_train.append(l)
    ep.append(i)
    plt.plot(ep, loss_train)
    plt.draw()
    plt.pause(0.001)
plt.savefig("train.png")
plt.close()
#benchmarking model on test set
y_pred, _ = forward_prop(x_test, params)
acc = accuracy(y_pred, y_test)
print("accuracy on test set:", acc)
|
from PyQt5 import QtWidgets
from PlotCanvas import PlotCanvas
from SignalSendingPackage.Ui_VisualizerMainWindow import Ui_VisualizerMainWindow
from LoggersConfig import loggers
class VisualizerMainWindow(QtWidgets.QMainWindow):
    """Main window hosting the signal-sending visualization plot."""
    def __init__(self):
        super().__init__()
        # Polled by external code to detect that the user closed the window.
        self.window_is_closed = False
        self.VisualizerPlot = None
        self.user_interface = Ui_VisualizerMainWindow()
        self.setup_ui()
        self.setup_plot()
    def closeEvent(self, event):
        """Qt close hook: log the closure and raise the closed flag."""
        loggers['Debug'].debug(f'VisualizerMainWindow: closeEvent: signal sending visualization window was closed')
        loggers['Application'].info(
            f'VisualizerMainWindow: closeEvent: signal sending visualization window was closed'
        )
        self.window_is_closed = True
    def setup_plot(self):
        # NOTE(review): `frame` is presumably a promoted PlotCanvas widget in
        # the generated UI — confirm against the Designer .ui file.
        self.VisualizerPlot = self.user_interface.frame
    def setup_ui(self):
        """Apply the generated Qt Designer layout to this window."""
        self.user_interface.setupUi(self)
|
import re
def to_valid(url, VID_ID):
    """Extract the 11-character YouTube video id from `url`.

    :param url: candidate URL (youtube.com / youtu.be / embed / v forms).
    :param VID_ID: kept for interface compatibility; its incoming value is
        ignored (the original overwrote it unconditionally).
    :returns: the video id string.
    :raises Exception: with message 'NOT_VALID_URL' when `url` does not match.
    """
    youtube_regex = (
        r'(https?://)?(www\.)?'
        r'(youtube|youtu|youtube-nocookie)\.(com|be)/'
        r'(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?]{11})')
    youtube_regex_match = re.match(youtube_regex, url)
    # Bug fix: the original called .group(6) BEFORE the None check, so an
    # invalid URL crashed with AttributeError instead of raising the
    # intended NOT_VALID_URL exception.
    if youtube_regex_match is None:
        raise Exception('NOT_VALID_URL')
    return youtube_regex_match.group(6)
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials
import os
import data
def step1():
    """Score each document of eng_summary1.txt with Azure Text Analytics
    sentiment and write "Document Id / Sentiment Score" lines to score.txt.
    """
    # NOTE(security): the subscription key is hard-coded in source; it should
    # be loaded from an environment variable or a secret store instead.
    subscription_key = "cced4caa372c41deac94a069a20212f2"
    endpoint = "https://kardel2.cognitiveservices.azure.com/"
    credentials = CognitiveServicesCredentials(subscription_key)
    text_analytics = TextAnalyticsClient(endpoint=endpoint, credentials=credentials)
    documents = data.contents2docs(data.get_contents(filename="eng_summary1.txt"))
    response = text_analytics.sentiment(documents=documents)
    # `with` guarantees the file is closed even if formatting fails.
    with open(os.path.join(data.ROOT_DIR, "score.txt"), 'w') as f:
        for document in response.documents:
            print("Document Id: ", document.id, ", Sentiment Score: ",
                  "{:.2f}".format(document.score), file=f)
def step2():
    """Split the summary lines by sentiment score: lines scoring > 0.5 go to
    yygq.txt, the rest to insult.txt (scores read back from score.txt).
    """
    # `with` closes all four handles even if a line fails to parse; the
    # original leaked them on any exception.
    with open(os.path.join(data.ROOT_DIR, "score.txt"), 'r') as fscore, \
         open(os.path.join(data.ROOT_DIR, "eng_summary1.txt"), 'r') as fcontent, \
         open(os.path.join(data.ROOT_DIR, "yygq.txt"), 'w') as fyygq, \
         open(os.path.join(data.ROOT_DIR, "insult.txt"), 'w') as finsult:
        for scoreLine, content in zip(fscore, fcontent):
            # The score is the last whitespace-separated token of the line.
            score = float(scoreLine.split(" ")[-1])
            target = fyygq if score > 0.5 else finsult
            print(content, "score="+str(score), file=target)
# Script entry point: only step2 runs here; step1 must have been executed
# previously so that score.txt exists.
if __name__ == '__main__':
    step2()
class Client():
    """Simple mutable holder for a client record.

    Bug fix: the original assigned the undefined name `null`, which raised
    NameError on instantiation; Python's null value is `None`.
    """

    def __init__(self):
        self.id = None     # unique identifier, unset until assigned
        self.email = None  # contact e-mail address
        self.name = None   # display name
|
'''
Created on Jul 31, 2016
@author: mingtan
'''
import sys
# Per-feature minimum/maximum observed values, keyed by feature name.
# Filled by MinMaxValues() on the training set, read by Normalize().
minvaluemap = dict()
maxvaluemap = dict()
'''
compute the min and max value for every column (features).
'''
def MinMaxValues(oriTrainData):
    """Scan a tab-separated training file and record each feature's minimum
    and maximum value into the module-level minvaluemap / maxvaluemap.

    Each line: control<TAB>targetb<TAB>targetd<TAB>f1:v1 f2:v2 ...
    Fixes: Python 2 `print` statement converted to the print() function
    (also valid on Python 2 for a single argument), file handle managed
    with `with`, bound updates via the min()/max() builtins.
    """
    with open(oriTrainData, 'r') as fi:
        for ind, line in enumerate(fi):
            line = line.strip()
            (control, targetb, targetd, featurestr) = line.split('\t')
            featpairs = featurestr.strip().split()
            for fpair in featpairs:
                fn, fv = fpair.split(':')
                fv = float(fv)
                # First sighting seeds both bounds; later ones tighten them.
                if fn in minvaluemap:
                    minvaluemap[fn] = min(minvaluemap[fn], fv)
                else:
                    minvaluemap[fn] = fv
                if fn in maxvaluemap:
                    maxvaluemap[fn] = max(maxvaluemap[fn], fv)
                else:
                    maxvaluemap[fn] = fv
            # Progress output: one line number per input line (as before).
            print(ind)
'''
rescale the feature values by (feature-min)/(max-min)
'''
def Normalize(oriData, newData):
    """Rescale every feature to [0, 1] via (v - min) / (max - min) using the
    bounds previously collected by MinMaxValues(), writing the result to
    `newData` in the same control/target/features layout.

    NOTE(review): constant features (max == min) fall back to v / max,
    which raises ZeroDivisionError for a feature that is constantly 0 —
    confirm inputs never contain such a feature.
    Fixes: Python 2 `print` statement -> print(), handles managed by `with`.
    """
    with open(oriData, 'r') as fi, open(newData, 'w') as fo:
        for line in fi:
            line = line.strip()
            (control, targetb, targetd, featurestr) = line.split('\t')
            featpairs = featurestr.strip().split()
            fo.write(control + '\t' + targetb + '\t' + targetd + '\t')
            for fpair in featpairs:
                fn, fv = fpair.split(':')
                fv = float(fv)
                minfv = minvaluemap[fn]
                maxfv = maxvaluemap[fn]
                if maxfv == minfv:
                    fv = fv / maxfv
                elif maxfv > minfv:
                    fv = (fv - minfv) / (maxfv - minfv)
                else:
                    # Cannot happen if the bound maps are consistent.
                    print('ERROR')
                fo.write(fn + ':' + str(round(fv, 4)) + ' ')
            fo.write('\n')
if __name__ == '__main__':
    # usage: script <train-in> <dev-in> <train-out> <dev-out>
    oriTrainData = sys.argv[1]
    oriDevData = sys.argv[2]
    newTrainData = sys.argv[3]
    newDevData = sys.argv[4]
    # Bounds are learned from the training set only, then applied to both.
    MinMaxValues(oriTrainData)
    print('MinMax Value Computation Finished.')
    Normalize(oriTrainData, newTrainData)
    print('Normalized train set finished.')
    Normalize(oriDevData, newDevData)
    # Message typo fixed ("Noramal" -> "Normalized", matching the line above).
    print('Normalized validation set finished.')
|
import csv
import re
import zipfile
import io
from contextlib import contextmanager
from datetime import datetime
from datetime import timedelta
from beancount.core.number import D
from beancount.core import amount
from beancount.core import flags
from beancount.core import data
from beancount.ingest import importer
from .helpers import identify, make_posting, parse_amount
# Expected header columns of the bank's CSV export, in file order.
FIELDS = [
    "Date",
    "Date de valeur",
    "Débit",
    "Crédit",
    "Libellé",
    "Solde",
]
@contextmanager
def open_file(f):
    """Yield a text handle for the file behind `f` (an object exposing a
    `.name` path), opened with the iso-8859-15 encoding the export uses.
    """
    with open(f.name, encoding='iso-8859-15') as fd:
        yield fd
class Importer(importer.ImporterProtocol):
    """Beancount importer for the bank's semicolon-delimited CSV statements."""
    def __init__(self, checking_account, **kwargs):
        # Register the bank's dialect, derived from "excel" with ';' fields.
        csv.register_dialect("ccm", "excel", delimiter=";")
        self.checking_account = checking_account
    def identify(self, f):
        """Return True when `f` is a CSV whose header matches FIELDS."""
        if f.mimetype() != "text/csv":
            return False
        with open_file(f) as f:
            return identify(f, "ccm", FIELDS)
    def extract(self, f, existing_entries=None):
        """Parse every transaction row into a beancount Transaction, then
        append a Balance assertion dated the day after the last row."""
        entries = []
        row = None
        row_date = None
        with open_file(f) as fd:
            rd = csv.reader(fd, dialect="ccm")
            header = True
            line_index = 0
            for row in rd:
                # Check header
                if header:
                    # NOTE(review): InvalidFormatError is not defined in this
                    # module's visible imports — confirm it is provided
                    # elsewhere, otherwise this raises NameError instead.
                    if set(row) != set(FIELDS):
                        raise InvalidFormatError()
                    header = False
                    line_index += 1
                    continue
                if len(row) != 6:
                    continue
                # Extract data
                row_date = datetime.strptime(row[0], "%d/%m/%Y")
                label = row[4]
                # Amount is either in the debit column (2) or credit column (3).
                txn_amount = row[2]
                if txn_amount == '':
                    txn_amount = row[3]
                txn_amount = parse_amount(txn_amount)
                # Prepare the transaction
                meta = data.new_metadata(f.name, line_index)
                txn = data.Transaction(
                    meta=meta,
                    date=row_date.date(),
                    flag=flags.FLAG_OKAY,
                    payee="",
                    narration=label,
                    tags=set(),
                    links=set(),
                    postings=[],
                )
                # Create the postings.
                first_posting = make_posting(self.checking_account, txn_amount)
                txn.postings.append(first_posting)
                # Done
                entries.append(txn)
                line_index += 1
        # Balance check from the running-balance column of the last data row.
        if line_index > 0:
            balance_check = data.Balance(
                meta=data.new_metadata(f.name, line_index + 1),
                date=row_date.date() + timedelta(days=1),
                account=self.checking_account,
                amount=parse_amount(row[5]),
                diff_amount=None,
                tolerance=None,
            )
            entries.append(balance_check)
        return entries
|
import json

# Demonstrate JSON round-tripping: tuples encode to arrays and decode back
# as lists; sort_keys and indent control the output layout.
# Fix: the original used Python 2 print statements, a SyntaxError on Python 3.
data = {'a': 'A', 'c': 3.0, 'b': (2, 4, 'a', True)}
print("data: ", data)
# dump string
data_string = json.dumps(data)
print("encoded: ", data_string)
# load string
print("decoded: ", json.loads(data_string))
print("sort: ", json.dumps(data, sort_keys=True))
print("indent: ", json.dumps(data, sort_keys=True, indent=4))
"""
Expected output:
data:  {'a': 'A', 'c': 3.0, 'b': (2, 4, 'a', True)}
encoded:  {"a": "A", "c": 3.0, "b": [2, 4, "a", true]}
decoded:  {'a': 'A', 'c': 3.0, 'b': [2, 4, 'a', True]}
sort:  {"a": "A", "b": [2, 4, "a", true], "c": 3.0}
indent:  {
    "a": "A",
    "b": [
        2,
        4,
        "a",
        true
    ],
    "c": 3.0
}
"""
|
"""
Test values defined in constants module
"""
from django.test import SimpleTestCase
from api.constants import Limits
class ConstantsTest(SimpleTestCase):
    """
    Test constants
    """
    def test_account_manage_limits(self):
        """
        Test account manager limits in reasonable range
        """
        minimum, maximum = 1, 50
        # Both limits must fall within [minimum, maximum].
        for limit in (Limits.ACCOUNT_MANAGED, Limits.ACCOUNT_MANAGER):
            self.assertGreaterEqual(limit, minimum)
            self.assertLessEqual(limit, maximum)
|
"""
Module for working with tables of data, typically with at least one columns
containing gene accessions.
Results tables saved from DESeq are a perfect example of this, but any data can
be used.
"""
import sys
import tempfile
from matplotlib.mlab import csv2rec, rec2csv
import pybedtools
from pybedtools.featurefuncs import gff2bed, add_color, extend_fields
import gffutils
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
from matplotlib import cm
from scipy import stats
from rpy2.robjects import r
import rpy2.robjects as RO
import random
from gffutils.helpers import asinterval
import colormap_adjust
class ResultsTable(object):
    def __init__(self, data, dbfn=None, id_column='id', csv2rec_kwargs=None):
        """
        Generic class for handling tables of data.
        :param data: If a string, assume it's a filename and load that using
            `csv2rec_kwargs`. Otherwise, assume it's a record array.
        :param dbfn: Filename for a `gffutils.FeatureDB`. Optional, but really
            handy.
        :param id_column: Which column contains gene accessions that can be
            looked up in the `gffutils.FeatureDB`.
        :param csv2rec_kwargs: Kwargs passed to `matplotlib.mlab.csv2rec`.
            Default is dict(delimiter="\\t", missing="NA").
        """
        if csv2rec_kwargs is None:
            csv2rec_kwargs = dict(delimiter='\t', missing='NA')
        if isinstance(data, basestring):
            data = csv2rec(data, **csv2rec_kwargs)
        self.id_column = id_column
        self.data = data
        self.dbfn = dbfn
        self.gffdb = None
        if self.dbfn:
            self.gffdb = gffutils.FeatureDB(dbfn)
        # Lazily-built {accession: row index} map used by gene_ind().
        self._cached_lookup = None
    @property
    def colnames(self):
        # Column names of the underlying record array.
        return self.data.dtype.names
    def __repr__(self):
        return "<%s instance with %s items>" % (self.__class__.__name__,
                                                len(self.data))
    def __str__(self):
        # Pretty-print every row alongside its GFF feature fields.
        if not self.gffdb:
            raise ValueError('Please attach a GFF database created by '
                             'gffutils by setting the .gffdb attribute to the '
                             'database\'s path.')
        fields = ['chrom', 'source', 'featuretype', 'start', 'end', 'score',
                  'strand', 'frame', 'attributes']
        s = []
        for i, item in enumerate(self.data):
            d = dict(zip(self.colnames, item))
            d['_index'] = i
            try:
                # NOTE(review): uses the literal `id` attribute rather than
                # self.id_column — confirm tables always have an `id` column.
                feature = self.gffdb[item.id]
                d.update(zip(fields, feature.tostring().strip().split('\t')))
            except gffutils.FeatureNotFoundError:
                d.update({'attributes': 'Feature not found'})
            for key, val in d.items():
                s.append('%s: %s' % (key, val))
        return '\n'.join(s)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, ind):
        # Any indexing returns a NEW instance wrapping the selected row(s).
        orig_kwargs = dict(dbfn=self.dbfn, id_column=self.id_column)
        if isinstance(ind, int):
            if ind > len(self) - 1:
                raise IndexError
            new_instance = self.__class__(
                self.data[ind:ind + 1].copy(), **orig_kwargs)
        else:
            new_instance = self.__class__(self.data[ind].copy(), **orig_kwargs)
        return new_instance
    def __getattr__(self, attr):
        # Fall back to the record array for unknown attributes, which makes
        # columns accessible as attributes (e.g. self.padj).
        if attr in self.__dict__:
            return getattr(self, attr)
        return getattr(self.data, attr)
    def to_file(self, filename, **kwargs):
        """
        Saves results to file, which will be gzipped if `filename` has a .gz
        extension.
        kwargs are passed to matplotlib.mlab.rec2csv
        """
        rec2csv(self.data, filename, **kwargs)
    def strip_unknown_features(self):
        """
        Remove features not found in the `gffutils.FeatureDB`. This will
        typically include 'ambiguous', 'no_feature', etc, but can also be
        useful if the database was created from a different one than was used
        to create the table.
        """
        if not self.gffdb:
            return self
        ind = []
        for i, gene_id in enumerate(self.id):
            try:
                self.gffdb[gene_id]
                ind.append(i)
            except gffutils.FeatureNotFoundError:
                pass
        ind = np.array(ind)
        return self[ind]
    def random_subset(self, n, idx=True):
        """
        Random subset of all rows
        :param n: Number of rows to return
        :param idx: If True, return the index; if False, returns a subsetted
            version.
        """
        ind = random.sample(xrange(len(self.data)), n)
        if idx:
            return ind
        return self[ind]
    def sorted_by(self, attr, absolute=False, reverse=False):
        """
        Re-sort by an attribute and return a copy.
        :param attr: Attribute to sort by; must be a column in `self.colnames`
        :param absolute: If True, then ignore sign when sorting
        :param reverse: If True, highest values are first
        """
        vals = getattr(self, attr)
        if absolute:
            vals = abs(vals)
        ind = np.argsort(vals)
        if reverse:
            ind = ind[::-1]
        return self[ind]
    def histogram_of_hits(self, bed, field='log2foldchange',
                          log=False, labels=None):
        """
        Plots a histogram of data values indicated by `field` for all genes
        with and without peaks in `bed`.
        """
        # NOTE(review): relies on a `genes_with_peak` method that is not
        # defined in this class — confirm it is provided by a subclass.
        have_peaks = self.genes_with_peak(bed, as_ind=True)
        hits = getattr(self, field)[have_peaks]
        misses = getattr(self, field)[~have_peaks]
        if log:
            hits = np.log2(hits)
            misses = np.log2(misses)
        hits = hits[np.isfinite(hits)]
        misses = misses[np.isfinite(misses)]
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        mmax = max(hits.max(), misses.max())
        bins = np.linspace(0, mmax, 50)
        kwargs = dict(bins=bins, color=(.5, .5, .5))
        ax1.hist(hits, **kwargs)
        ax2.hist(misses, **kwargs)
        ax1.set_title('genes with peaks in promoter')
        ax2.set_title('genes without peaks in promoter')
        ax1.set_xlabel(field)
        ax2.set_xlabel(field)
        ax1.set_ylabel('Number of genes\n(total=%s)' % len(hits),
                       va='center')
        ax2.set_ylabel('Number of genes\n(total=%s)' % len(misses),
                       va='center')
        fig.subplots_adjust(hspace=0.5)
        # Null hypothesis is that the two samples come from populations having
        # the same location (Sokal & Rolf p.427).
        #
        # Result is one-tailed pval; multiply by 2 to get two-tailed pval.
        s = stats.mannwhitneyu(hits, misses)
        results = {'U': s[0],
                   'pval': s[1]}
        ax2.text(x=.7,
                 y=.7,
                 s='Mann-Whitney U\np=%.3f' % s[1],
                 transform=ax2.transAxes)
        return results, fig
    def features(self, ignore_unknown=False):
        """
        Generator of currently-selected features.
        Looks up each feature in the attached `gffutils.FeatureDB` and converts
        it into a `pybedtools.Interval` object for use with `pybedtools`.
        Raises a warning if you haven't yet attached a `gffutils.FeatureDB` to
        this instance.
        :param ignore_unknown: If `ignore_unknown=False` then an exception will
            be raised if a feature cannot be found; if `ignore_unknown=True`
            then silently ignore these cases. Consider using the
            `strip_unknown_features()` method to handle these cases up front.
        """
        if not self.gffdb:
            raise ValueError('Please attach a GFF database created by '
                             'gffutils by setting the .gffdb attribute to the '
                             'database\'s path.')
        for i in self.data[self.id_column]:
            try:
                yield asinterval(self.gffdb[i])
            except gffutils.FeatureNotFoundError:
                if ignore_unknown:
                    continue
                else:
                    # Bug fix: `i` is the accession string itself; the original
                    # `i.id` raised AttributeError instead of this exception.
                    raise gffutils.FeatureNotFoundError('%s not found' % i)
    def align_with(self, other):
        """
        Ensure identical sorting of this object's data with another.
        Returns `self`, sorted the same way as `other`.
        :param other: Another instance of a ResultsTable or ResultsTable
            subclass.
        """
        ind = self.gene_ind(other[other.id_column])
        return self[ind]
    def scatter(self, x, y, xfunc=None, yfunc=None, xscale=None,
                yscale=None, xlab=None, ylab=None, genes_to_highlight=None,
                label_genes=False, general_kwargs=None, marginal=True,
                offset_kwargs=None, label_kwargs=None, ax=None,
                one_to_one=None, callback=None, xlab_prefix=None,
                ylab_prefix=None):
        """
        Do-it-all method for making annotated scatterplots.
        :param x, y:
            Variables to plot -- say, "df.baseMeanA" and "df.baseMeanB"
        :param xfunc, yfunc:
            Functions to apply to `xvar` and `yvar` respectively. Default is
            log2; set to None to have no transformation.
        :param xlab, ylab:
            Labels for x and y axes; default is to use function names for
            `xfunc` and `yfunc` and variable names `xvar` and `yvar`, e.g.,
            "log2(baseMeanA)"
        :param ax:
            If `ax=None`, then makes a new fig and returns the Axes object,
            otherwise, plots onto `ax`
        :param general_kwargs:
            Kwargs for matplotlib.scatter; specifies how all points look.
            Defaults to dict(color="k", alpha=0.2, linewidths=0). (The
            mutable-default-argument has been replaced with a None sentinel.)
        :param genes_to_highlight:
            Provides lots of control to colors. It is a list of (`ind`,
            `kwargs`) tuples, where each `ind` specifies genes to plot with
            `kwargs`. Each dictionary updates a copy of `general_kwargs`. If
            `genes_to_highlight` has a "name" kwarg, this must be a list that't
            the same length as `ind`. It will be used to label the genes in
            `ind` using `label_kwargs`.
        :param marginal:
            Boolean, toggles display of non-finite cases where `x` or `y` is
            nonzero but the other one is zero (and `xfunc` or `yfunc` are log)
        :param callback:
            Function to call upon clicking a point. Default is to print the
            gene name, but an example of another useful callback would be
            a mini-browser connected to a genomic_signal object from which the
            expression data were calculated.
        :param one_to_one:
            If not None, a dictionary of matplotlib.plot kwargs that will be
            used to plot a 1:1 line.
        :param label_kwargs:
            Kwargs for labeled genes (e.g., dict=(style='italic')). Will only
            be used if an entry in `genes_to_highlight` has a `name` key.
        :param offset_kwargs:
            Kwargs to be passed to matplotlib.transforms.offset_copy, used for
            adjusting the positioning of gene labels in relation to the actual
            point.
        :param xlab_prefix, ylab_prefix:
            Optional label prefix that will be added to the beginning of `xlab`
            and/or `ylab`.
        """
        # Construct defaults---------------------------------------------------
        def identity(x):
            return x.copy()
        if xlab_prefix is None:
            xlab_prefix = ""
        if ylab_prefix is None:
            ylab_prefix = ""
        if xlab is None:
            try:
                xname = x.name
            except AttributeError:
                xname = 'x'
            if xfunc is not None:
                xlab = xlab_prefix + "%s(%s)" % (xfunc.__name__, xname)
            else:
                xlab = xlab_prefix + "%s" % xname
        if ylab is None:
            try:
                yname = y.name
            except AttributeError:
                yname = 'y'
            if yfunc is not None:
                ylab = ylab_prefix + "%s(%s)" % (yfunc.__name__, yname)
            else:
                ylab = ylab_prefix + "%s" % yname
        if xfunc is None:
            xfunc = identity
        if yfunc is None:
            yfunc = identity
        if general_kwargs is None:
            general_kwargs = dict(color="k", alpha=0.2, linewidths=0)
        if offset_kwargs is None:
            offset_kwargs = {}
        if genes_to_highlight is None:
            genes_to_highlight = []
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        if label_kwargs is None:
            label_kwargs = dict(horizontalalignment='right',
                                verticalalignment='center', style='italic',
                                bbox=dict(facecolor='w', edgecolor='None', alpha=0.5))
        # ---------------------------------------------------------------------
        xi = xfunc(x)
        yi = yfunc(y)
        xv = np.isfinite(xi.astype(float))
        yv = np.isfinite(yi.astype(float))
        global_min = min(xi[xv].min(), yi[yv].min())
        global_max = max(xi[xv].max(), yi[yv].max())
        if marginal:
            xi[~xv] = global_min
            yi[~yv] = global_min
        # Plot everybody
        ax.scatter(xi, yi, picker=5, **general_kwargs)
        # one-to-one line, if kwargs were specified
        if one_to_one:
            ax.plot([global_min, global_max],
                    [global_min, global_max],
                    **one_to_one)
        # plot any specially-highlighted genes, and label if specified
        for ind, kwargs in genes_to_highlight:
            names = kwargs.pop('names', None)
            updated_kwargs = general_kwargs.copy()
            updated_kwargs.update(kwargs)
            ax.scatter(xi[ind], yi[ind], **updated_kwargs)
            if names:
                transOffset = matplotlib.transforms.offset_copy(ax.transData,
                    fig=ax.figure, **offset_kwargs)
                for xii, yii, name in zip(xi[ind], yi[ind], names):
                    ax.text(xii,
                            yii,
                            name,
                            transform=transOffset,
                            **label_kwargs)
        # register callback
        if callback is not None:
            ax.figure.canvas.mpl_connect('pick_event', callback)
        ax.set_xlabel(xlab)
        ax.set_ylabel(ylab)
        return ax
    def genes_in_common(self, other):
        """
        Return a list of shared IDs.
        :param other: List of gene IDs, or another similar object
        """
        these = set(self[self.id_column].tolist())
        if isinstance(other, ResultsTable):
            those = set(other[other.id_column].tolist())
        else:
            those = set(other)
        common = these.intersection(those)
        return list(common)
    def gene_ind(self, genes, idx=True):
        """
        Returns an array of indices for `genes`.
        Useful for matching up two ResultsTable instances that are not
        guaranteed to have the same gene order (though they should have the
        same total gene set)
        :param genes: An iterable of feature accessions that are in the
            accession column.
        """
        # make a dictionary mapping current gene to index.
        # NOTE(review): keys come from the literal `id` column rather than
        # self.id_column — confirm tables always have an `id` column.
        if not self._cached_lookup:
            self._cached_lookup = dict(zip(self.data.id, np.arange(len(self.data))))
        ind = []
        for gene in genes:
            try:
                ind.append(self._cached_lookup[gene])
            except KeyError:
                # Genes missing from this table are silently skipped.
                continue
        ind = np.array(ind)
        if idx:
            return ind
        return self[ind]
class DESeqResults(ResultsTable):
    def __init__(self, *args, **kwargs):
        """
        Subclass of :class:`ResultsTable` specifically for working with DESeq
        results.
        """
        super(DESeqResults, self).__init__(*args, **kwargs)
    def strip_deseq_nongenes(self):
        """
        DESeq adds "no_feature", "not_aligned", etc. features. This method
        removes them for better plotting.
        """
        to_remove = [
            'no_feature',
            'not_aligned',
            'alignment_not_unique',
            'ambiguous',
            'too_low_aQual']
        remove_ind = self.gene_ind(to_remove)
        keep_ind = []
        for i in range(len(self)):
            if i not in remove_ind:
                keep_ind.append(i)
        return self[keep_ind]
    def enriched(self, pval=0.05, column='padj', idx=True):
        """
        Enriched genes at `pval` significance.
        :param pval: Alpha to use as a cutoff
        :param column: Column to apply cutoff to
        :param idx: If True, return the index; if False, returns a subsetted
            version.
        """
        # Significant AND up-regulated (positive log2 fold change).
        ind1 = self.data[column] <= pval
        ind2 = self.log2foldchange > 0
        ind = ind1 & ind2
        if idx:
            return ind
        return self[ind]
    def disenriched(self, pval=0.05, column='padj', idx=True):
        """
        Disenriched genes at `pval` significance.
        :param pval: Alpha to use as a cutoff
        :param column: Column to apply cutoff to
        :param idx: If True, return the index; if False, returns a subsetted
            version.
        """
        # Significant AND down-regulated (negative log2 fold change).
        ind1 = self.data[column] <= pval
        ind2 = self.log2foldchange < 0
        ind = ind1 & ind2
        if idx:
            return ind
        return self[ind]
    def nonsig(self, pval=0.05, column='padj', idx=True):
        """
        Non-significant genes (that were still expressed at some level)
        :param pval: Alpha to use as a cutoff
        :param column: Column to apply cutoff to
        :param idx: If True, return the index; if False, returns a subsetted
            version.
        """
        ind = (self.data[column] > pval) & (self.basemean > 0)
        if idx:
            return ind
        return self[ind]
    def random_nonsig(self, n, pval=0.05, column='padj', idx=True):
        """
        Random subset of nonsignifcant genes at `pval` significance
        :param n: Number of genes to sample
        :param idx: If True, return a boolean index; if False, returns
            a subsetted version. (Bug fix: `idx` was referenced but never
            defined, raising NameError; it is now a keyword argument whose
            True default matches the other selection methods.)
        """
        # get inds of nonsig as bool
        ind1 = self.nonsig(pval=pval, column=column)
        # indices as integers
        ind2 = np.nonzero(ind1)[0]
        # indices to keep
        keepers = random.sample(ind2, n)
        # array of all False
        final_ind = np.ones_like(ind1) == 0
        # Only set the keepers to True
        final_ind[keepers] = True
        if idx:
            return final_ind
        return self[final_ind]
    def colormapped_bedfile(self, genome, cmap=None):
        """
        Create a BED file with features colored according to adjusted pval
        (phred transformed). Downregulated features have the sign flipped.
        `cmap` is a matplotlib colormap; default is matplotlib.cm.RdBu_r.
        Requires a FeatureDB to be attached.
        """
        if self.dbfn is None:
            raise ValueError("FeatureDB required")
        db = gffutils.FeatureDB(self.dbfn)
        def scored_feature_generator(d):
            # Yield one 14-field BED interval per row, scored by phred(padj)
            # with the sign of the fold change.
            for i in range(len(d)):
                try:
                    feature = db[d.id[i]]
                except gffutils.FeatureNotFoundError:
                    raise gffutils.FeatureNotFoundError(d.id[i])
                score = -10 * np.log10(d.padj[i])
                lfc = d.log2foldchange[i]
                if np.isnan(lfc):
                    score = 0
                if lfc < 0:
                    score *= -1
                feature.score = str(score)
                feature = extend_fields(gff2bed(asinterval(feature)), 9)
                fields = feature.fields[:]
                fields[6] = fields[1]
                fields[7] = fields[2]
                fields.append(str(d.padj[i]))
                fields.append(str(d.pval[i]))
                fields.append('%.3f' % d.log2foldchange[i])
                fields.append('%.3f' % d.basemeana[i])
                fields.append('%.3f' % d.basemeanb[i])
                yield pybedtools.create_interval_from_list(fields)
        x = pybedtools.BedTool(scored_feature_generator(self)).saveas()
        norm = x.colormap_normalize()
        if cmap is None:
            cmap = cm.RdBu_r
        cmap = colormap_adjust.cmap_center_point_adjust(cmap, [norm.vmin, norm.vmax], 0)
        def score_zeroer(f):
            # Browser "score" shading is replaced by explicit itemRgb colors.
            f.score = '0'
            return f
        return x.each(add_color, cmap=cmap, norm=norm)\
                .sort()\
                .each(score_zeroer)\
                .truncate_to_chrom(genome)\
                .saveas()
    def autosql_file(self):
        """
        Returns a temp filename containing the autosql defining the extra fields.
        This for creating bigBed files from BED files created by
        colormapped_bed. When a user clicks on a feature, the DESeq results
        will be reported.
        """
        fn = pybedtools.BedTool._tmp()
        AUTOSQL = """
table example
"output from DESeq"
(
string  chrom;  "chromosome"
uint chromStart; "start coord"
uint chromEnd; "stop coord"
string name; "name of feature"
uint score; "always zero"
char[1] strand; "+ or - for strand"
uint thickStart; "Coding region start"
uint thickEnd; "Coding region end"
uint reserved; "color according to score"
string padj; "DESeq adjusted p value"
string pval; "DESeq raw p value"
string logfoldchange; "DESeq log2 fold change"
string basemeana; "DESeq baseMeanA"
string basemeanb; "DESeq baseMeanB"
)
"""
        fout = open(fn, 'w')
        fout.write(AUTOSQL)
        fout.close()
        return fn
def hypergeom(m, n, n1, n2):
    """
    Probability of observing at least `m` shared genes, via R's phyper.
    m = overlapping genes
    n = total genes that could be sampled
    n1 = number of genes in set 1
    n2 = number of genes in set 2
    """
    if m == 0:
        return 1.0
    # P(X >= m) = P(X <= min(n1, n2)) - P(X <= m - 1)
    upper = r['phyper'](min(n1, n2), n1, n - n1, n2)[0]
    lower = r['phyper'](m - 1, n1, n - n1, n2)[0]
    return upper - lower
def hypergeom_scipy(m, n, n1, n2, p=False):
    """
    Given gene counts `n1` and `n2`, each drawn from `n` total genes, return
    the probability that `m` genes would be shared due random chance alone.
    `p` is accepted for interface compatibility but unused.
    e.g.,
    n1 = 100 # significantly enriched genes from sample 1
    n2 = 50 # significantly enriched genes from sample 2
    n = 15000 # total number of genes that could be sampled
    m = 10 # number of genes that overlap in the two lists
    See: http://www.nslij-genetics.org/wli/pub/ieee-embs06.pdf
    Thanks to Brent Pedersen (https://github.com/brentp/bio-playground) for
    implementation.
    >>> hypergeom_scipy(1, 1000, 1000, 1000) # has to be shared.
    1.0
    >>> all(hypergeom_scipy(i, 1000, 1000, 1000) == 1.0 for i in range(100))
    True
    >>> hypergeom_scipy(1, 30000, 20, 20)
    0.013253396616299651
    >>> hypergeom_scipy(2, 30000, 20, 20)
    7.9649366037104485e-05
    """
    # (The original doctests called `hypergeom`, the rpy2-backed sibling,
    # instead of this function; they are fixed above.)
    if m <= 0:
        return 1.0
    # P(X >= m) = P(X <= min(n1, n2)) - P(X <= m - 1)
    mmin = m - 1
    mmax = min(n1, n2)
    return stats.hypergeom.cdf(mmax, n, n1, n2) \
        - stats.hypergeom.cdf(mmin, n, n1, n2)
|
# Sorting a list with a key function, indexing into each tuple for the value.
planets = [
    ("Mercury", 2440, 5.43, 0.395),
    ("Venus", 6052, 5.24, 0.723),
    ("Earth", 6378, 5.52, 1.000),
    ("Mars", 3396, 3.93, 1.530),
    ("Jupiter", 71492, 1.33, 5.210),
    ("Saturn", 60268, 0.59, 9.551),
    ("Uranus", 25559, 1.27, 19.213),
    ("Neptune", 24764, 1.64, 30.070)
]
# Named key functions (PEP 8 prefers `def` over assigning a lambda to a name).
def size(planet):
    """Radius in km (tuple position 1)."""
    return planet[1]
def density(planet):
    """Density in g/cm^3 (tuple position 2)."""
    return planet[2]
def aus_from_sun(planet):
    """Mean distance from the sun in AU (tuple position 3)."""
    return planet[3]
# list.sort() reorders the list in place; the tuples inside are immutable
# and are merely rearranged, not changed.
planets.sort(key=aus_from_sun, reverse=True)
for i in planets:
    print("{}\n".format(i))
# For a sorted COPY, use the builtin sorted(), which accepts any iterable
# (including tuples) and returns a new list, leaving the original untouched.
# (The original comments described a non-existent `tuple.sorted()` method.)
pokemon = ["Bulbasaur", "Ivysaur", "Venusaur", "Charmander", "Charmeleon", "Charizard", "Squirtle", "Wartortle", "Blastoise"]
pokesort = sorted(pokemon)
print(pokesort)
pokedex_entry_num = list(range(1, len(pokemon)+1))
pokemondict = dict(list(zip(pokedex_entry_num, pokemon)))
print(pokemondict)
|
# Minimizing-coins DP: read n coin denominations and a target sum x from
# stdin; print the fewest coins summing to x, or -1 if impossible.
n, x = map(int, input().split())
coins = list(map(int, input().strip().split()))[:n]
INF = 10 ** 9  # sentinel for "unreachable"
dp = [INF] * (x + 1)
dp[0] = 0
for total in range(x + 1):
    for coin in coins:
        if total >= coin:
            dp[total] = min(dp[total], dp[total - coin] + 1)
if dp[x] == INF:
    dp[x] = -1
ans = dp[x]
print(ans)
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plot
from matplotlib.backends.backend_pdf import PdfPages
import datetime as dt
from tkinter import filedialog
from os import path
def main():
def createPDFlabels(labelData, savepath, name='defaultName.pdf', customField=True):
    """
    Create a multipage PDF of shipping labels from label data.

    labelData: pandas DataFrame, one row per shipment; expects the
        'ship to - *', 'custom - field 1' and 'notes - from buyer' columns.
    savepath: directory the PDF is written into.
    name: output file name (also used in the ValueError messages below).
    customField: when True, print 'custom - field 1' on the label
        (used for manual orders).
    Raises ValueError when a row is missing name / address 1 / city / country.
    """
    # The PDF document
    saveOutput = path.join(savepath, name)
    pdf_pages = PdfPages(saveOutput)
    # Canned CN22 contents description per subscription month.
    descriptions = {'month 1':'Watercolor paper(3), Watercolor pencils(12), Pencil(1), Fan brush(1), Eraser(1), Table cover(1), Stickers(2), Notebook(2), Pencil sharpener(1)',
        'month 2':'Tote bag(1), Acrylic paint(3), Acrylic brush(2), Marker(1), Paint palette(1), Stickers(2), Coloring sheets(3), Sponges(3), Manila Paper(2), Watercolor paper(2)',
        'month 3':'Oil pastel set(1), Chalk pastel set(1), Pencil crayon set(1), Multimedia paper(2), Manila paper(2), Washi tape(1), Pencil(1), Eraser(1), Microfiber towel(1)',
        'month 4':'Construction Paper(1), Pipecleaners(3), Glitter Glue(1), Glue(1), Cardstock Paper(1), Neon Acrylic Paint(1), Eye Dropper(1), Paint Pallete(1), Watercolor Paper(1), Wide Brush(1), Liquid Watercolor(1)'}
    for i in range(labelData.shape[0]):
        # This is what is going on each page
        toText = 'To:'
        addressText = ''
        toCompany = labelData['ship to - company'][i]
        toName = labelData['ship to - name'][i]
        toAddress1 = labelData['ship to - address 1'][i]
        toAddress2 = labelData['ship to - address 2'][i]
        toAddress3 = labelData['ship to - address 3'][i]
        toCity = labelData['ship to - city'][i]
        toProvince = labelData['ship to - state'][i]
        toPostalCode = labelData['ship to - postal code'][i]
        toCountry = labelData['ship to - country'][i]
        # Choose the CN22 description / weight / value from custom field 1.
        if pd.isnull(labelData['custom - field 1'][i]):
            description = 'No description Information'
            weight = 'unknown'
            value = 'unknown'
        elif (labelData['custom - field 1'][i]).lower() in descriptions:
            description = descriptions[(labelData['custom - field 1'][i]).lower()]
            weight = '0.8'
            value = '20.00'
        else:
            description = labelData['custom - field 1'][i]
            weight = '0.8'
            value = '20.00'
        # Make sure that all the necessary information is there (Name)
        if pd.isnull(toName):
            raise ValueError('Customer {} of {} is missing name'.format(i, name))
        if pd.isnull(toAddress1):
            raise ValueError('Customer {} of {} is missing Address 1'.format(toName, name))
        if pd.isnull(toCity):
            raise ValueError('Customer {} of {} is missing City'.format(toName, name))
        if pd.isnull(toCountry):
            raise ValueError('Customer {} of {} is missing Country'.format(toName, name))
        # isnull/notnull are mutually exclusive, so addressTextEnd is always set.
        if pd.isnull(toProvince):
            addressTextEnd = str(toCity) + ', ' + str(toCountry) + '\n' + str(toPostalCode)
        if pd.notnull(toProvince):
            addressTextEnd = str(toCity) + ', ' + str(toProvince) + ', ' + str(toCountry) + '\n' + str(toPostalCode)
        if pd.notnull(toCompany):
            addressText = addressText + '\n' + str(toCompany)
        if pd.notnull(toName):
            addressText = addressText + '\n' + str(toName)
        if pd.notnull(toAddress1):
            addressText = addressText + '\n' + str(toAddress1)
        if pd.notnull(toAddress2):
            addressText = addressText + '\n' + str(toAddress2)
        if pd.notnull(toAddress3):
            addressText = addressText + '\n' + str(toAddress3)
        fullText = toText + '\n' + addressText + '\n' + addressTextEnd
        # Page layout: top axes (b) carries the addresses, bottom axes (c)
        # carries the CN22 customs form.
        a, (b, c) = plot.subplots(2,1, figsize=(4, 6))
        a.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
        b.axis('off')
        c.axis('off')
        b.text(.1, 0.35, 'From:\nShip Ninja \nc/o Think With Art \n113 Lampton Crescent \nMarkham, ON, Canada \nL6E 1N2')
        b.text(.1, -.15, fullText)
        # Include Custon Field 1 for manual orders
        if customField:
            if pd.notnull(labelData['custom - field 1'][i]):
                manualText = labelData['custom - field 1'][i]
            else:
                manualText = 'No description Information'
            b.text(0.1, .75, 'Order: \n'+ manualText, fontsize=8)
        # elif pd.notnull(labelData)
        # Check to see if there's a gift message
        if 'GIFT' in str(labelData['notes - from buyer'][i]):
            giftMsg = str(labelData['notes - from buyer'][i]).split(': True\n\n', 1)[-1]
            #force the gift message to have less that 50 Chars per line
            tempText = giftMsg.split(' ', )
            tmpText=''
            characterLimit = 50
            # NOTE(review): this loop reuses `i`, clobbering the row index
            # for the rest of this iteration — harmless today because nothing
            # after it reads the row index again, but fragile.
            for i in range(len(tempText)):
                if len(tmpText.split('\n')[-1] + tempText[i]) > characterLimit:
                    tmpText = tmpText + '\n'
                tmpText = tmpText + tempText[i] + ' '
            b.text(.065, -0.25, tmpText)
        # Insert CN22 Declaration for international packages
        if toCountry != 'CA':
            img = plot.imread("CN22.png")
            c.imshow(img,zorder=0, aspect='equal')
            # Word-wrap the contents description to ~40 chars per line.
            descriptionSplit = description.split(' ')
            tempDesc = ''
            descLim = 40
            for i in range(len(descriptionSplit)):
                if len(tempDesc.split('\n')[-1] + descriptionSplit[i]) > descLim:
                    tempDesc = tempDesc + '\n'
                tempDesc = tempDesc + descriptionSplit[i] + ' '
            c.text(340, 0, tempDesc, fontsize=6)
            c.text(380, 160, weight, fontsize=8)
            c.text(500, 160, value, fontsize=8)
            c.text(450, 280, dt.datetime.today().strftime("%b-%d-%Y"), fontsize=8)
        # Done with the page
        pdf_pages.savefig(a, dpi=150)
        plot.close(a)
    # Write the PDF document to the disk
    pdf_pages.close()
def boxCycles(boxData, savepath, name='Box Data', numCycles=4):
    """
    Create one PDF label package per subscription cycle.

    Input:
        boxData: pandas DataFrame of customer box data covering all cycles
        savepath: directory where each cycle's PDF is written
        name: prefix for the per-cycle PDF file names
        numCycles: how many cycles to process
    Output:
        One PDF of labels for each cycle.
    """
    for cycle in range(1, numCycles + 1):
        # Rows for this cycle only, reindexed so createPDFlabels can use
        # positional row access.
        subset = boxData[boxData['custom - field 1'] == 'Month {}'.format(cycle)]
        subset = subset.reset_index(drop=True)
        createPDFlabels(subset, savepath=savepath, name='{} Cycle {}.pdf'.format(name, cycle))
def createAllPDFs():
    """
    Split the order export into manual / non-manual and US / non-US
    subsets, then run each subset through the label-making program
    (createPDFlabels) or through the boxCycle program for the box data.

    Reads the CSV chosen via a file dialog and writes PDFs into the
    chosen directory. Returns nothing.
    """
    # Which file should we use?
    filepath = filedialog.askopenfile()
    savepath = filedialog.askdirectory()
    data = pd.read_csv(filepath.name)
    # Lowercase all the columns to avoid mistakes
    data.columns = data.columns.str.lower()
    # Eliminate any rows that have the Tag: 'Dont Ship'.
    # BUG FIX: `'Dont Ship' in data['tags']` tests membership in the Series
    # *index*, not its values, so the filter never fired; test the values.
    if data['tags'].eq('Dont Ship').any():
        data = data[data['tags'] != 'Dont Ship'].reset_index(drop=True)
    # Fix UM --> US Country Code error.
    # BUG FIX: the old per-row chained assignment (data[col][i] = ...) can
    # silently write to a copy; use a single vectorized .loc write instead.
    data.loc[data['ship to - country'] == 'UM', 'ship to - country'] = 'US'
    manual = data[data['market - store name'] == 'Manual Orders'].reset_index(drop=True)
    nonManual = data[data['market - store name'] != 'Manual Orders'].reset_index(drop=True)
    # USdata = nonManual[nonManual['ship to - country'] == 'US'].reset_index(drop=True)
    NonUSdata = nonManual[nonManual['ship to - country'] != 'US'].reset_index(drop=True)
    ## Uncomment the following code to produce the US labels
    # USmanual = manual[manual['ship to - country'] == 'US'].reset_index(drop=True)
    # createPDFlabels(USmanual, savepath=savepath, name='US Manual.pdf')
    # USboxes = USdata[USdata['amount - order shipping'] == 0].reset_index(drop=True)
    # boxCycles(USboxes, savepath=savepath, name='US Boxes')
    # USadapters = USdata[USdata['amount - order shipping'] > 0].reset_index(drop=True)
    # createPDFlabels(USadapters, savepath=savepath, name='US Adapters.pdf')
    NonUSmanual = manual[manual['ship to - country'] != 'US'].reset_index(drop=True)
    print(NonUSmanual.shape)
    createPDFlabels(NonUSmanual, savepath=savepath, name='Non-US Manual.pdf')
    NonUSboxes = NonUSdata.reset_index(drop=True)
    boxCycles(NonUSboxes, savepath=savepath, name='Non-US Boxes')
if __name__ == '__main__':
main() |
from .graphsage import GraphSAGE
from .gcn import GCN
from .gat import GAT
from .VAE import VAE
from .focalloss import FocalLoss
from .mmd import mix_rbf_mmd2 |
# CTF-style CBC bit-flip: rewrite blocks of a known-plaintext cookie so the
# decrypted text contains a SQL-injection payload. Flipping bits in cipher
# block k XORs the same bits into plaintext block k+1, so blocks 0 and 2 of
# the ciphertext are modified to control plaintext blocks 1 and 3.
# BUG FIX: base64/urllib were used but never imported; also ported from
# Python 2 (print statement, xrange, urllib.unquote) to Python 3.
import base64
import urllib.parse

org_plaintext = 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy|thisisareallyreallylongstringasfalsfassfasfaasff_'
admin_plaintext = "yyyyyyyyyyyyyyyy' union select 'hisisareallyreal',1-- -tringasfalsfassfasfaasff_"
org_cipher = base64.b64decode(urllib.parse.unquote('NGavsbCl2edw1Do6YfQS729nAN4G%2B2ylXChxfV7PhqdWQDPLQDOAW3gWmYm7LXHz7tNZ7gFRjkVvonxtMRpDALvYPXMBeCu%2BZ9332%2BcNY3M%3D'))

admin_cipher = bytearray(org_cipher)
# Flip cipher block 0 so that plaintext block 1 becomes the injection text.
for i in range(0, 16):
    admin_cipher[i] = org_cipher[i] ^ ord(org_plaintext[i + 16]) ^ ord(admin_plaintext[i + 16])
# Flip cipher block 2 so that plaintext block 3 becomes the payload tail.
for i in range(32, 48):
    admin_cipher[i] = org_cipher[i] ^ ord(org_plaintext[i + 16]) ^ ord(admin_plaintext[i + 16])
admin_cipher = bytes(admin_cipher)
print('Admin Cookie:', urllib.parse.quote(base64.b64encode(admin_cipher).decode('ascii')))
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: mashutian
@time: 2019-03-04 19:30
@desc: mlp by keras
"""
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
def changeto2D(array_1D):
    """Turn a 1-D sequence of values p into an Nx2 array of (p, 1-p) rows.

    Used to expand single-probability labels into two-class form.
    """
    complements = [1 - value for value in array_1D]
    paired = list(zip(array_1D, complements))
    return np.array(paired)
# Training configuration; previously tried losses are listed below.
loss="mean_squared_error"
#mean_squared_error
# categorical_crossentropy
#sparse_categorical_crossentropy [0,1)
# binary_crossentropy
optimizer="adam" #sgd adam RMSprop
# Output path for predicted probabilities, keyed by optimizer and loss.
pred_probility_path=r"E:\\data\\subscribe\\vec\\subscribe\\w2v_ab-"+optimizer+"-"+loss+".txt"
x_test = np.loadtxt(r'E:\data\subscribe\vec\test\w2v_abstract_test.txt')
#y_test=np.loadtxt(r'E:\data\cv\test_label.txt')
#y_test = np.loadtxt(r'E:\data\cv\test_label.txt')
# y_test_1D = np.loadtxt(r'E:\data\cv\test_label.txt')
# y_test=changeto2D(y_test_1D)
x_train = np.loadtxt(r'E:\data\subscribe\vec\train\w2v_abstract_train.txt')
print(len(x_train))
y_train = np.loadtxt(r'E:\data\subscribe\vec\train\label1.txt')
# y_train_1D = np.loadtxt(r'E:\data\subscribe\vec\train\label1.txt')
# y_train=changeto2D(y_train_1D)
# Model: 400-d input -> two 64-unit ReLU layers -> single sigmoid output.
model = Sequential()
model.add(Dense(64, input_dim=400, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(1, activation='sigmoid'))#softmax sigmoid
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
model.fit(x_train, y_train, epochs=20, batch_size=500)#d2v 32
# Evaluation: write one predicted probability per line.
# NOTE(review): `f` is never closed — a `with open(...)` block would be safer.
y_predict = model.predict(x_test)
list_y_predict=y_predict.tolist()
# pred_true=0
f = open(pred_probility_path, "w", encoding="utf8")
for j in range(len(y_predict)):
    f.write(str(list_y_predict[j])+"\n")
    # if y_predict[j][0]>0.5:
    #     y_predict[j][0]=1
    # else:
    #     y_predict[j][0]=0
    # if y_predict[j][0]==y_test[j][0]:
    #     pred_true+=1
# zql=pred_true/len(y_predict)
# print(zql)
# keras parameter settings?
|
# coding: utf-8
"""
Created on 31.01.2013 12:17:07
@author: Oleksandr Poliatykin
"""
from pylab import *
from matplotlib.patches import Polygon
def func(f_x):
    """Cubic with roots at 3, 5, 7, shifted up by 85 (works elementwise on arrays)."""
    cubic = (f_x - 3) * (f_x - 5) * (f_x - 7)
    return cubic + 85
# Plot func over [0, 10] and shade the area of the integral from a to b.
ax = subplot(111)
a, b = 2, 9 # integral area
x = arange(0, 10, 0.01)
y = func(x)
plot(x, y, linewidth=2)
# make the shaded region
ix = arange(a, b, 0.01)
iy = func(ix)
# Polygon vertices: down to the x-axis at a, along the curve, back down at b.
verts = [(a, 0)] + list(zip(ix, iy)) + [(b, 0)]
poly = Polygon(verts, facecolor='0.8', edgecolor='k')
ax.add_patch(poly)
# Integral symbol centered over the shaded region.
text(0.5 * (a + b), 30,
     r"$\int_a^b f(x)\mathrm{d}x$", horizontalalignment='center',
     fontsize=20)
axis([0, 10, 0, 180])
figtext(0.9, 0.05, 'x')
figtext(0.1, 0.9, 'y')
# Mark only a and b on the x-axis; hide the y ticks entirely.
ax.set_xticks((a, b))
ax.set_xticklabels(('a', 'b'))
ax.set_yticks([])
show()
|
#!/usr/bin/env python3
# Simple script to figure out how many SGs we have that aren't being used.
# If it proves fruitful, I might make it more intelligent and see if we can
# safely delete some of them.
import boto3
# Collect every security group in the account; paginate() walks all pages.
ec2_client = boto3.client('ec2')
sg_paginator = ec2_client.get_paginator('describe_security_groups')
sg_page_iterator = sg_paginator.paginate()

# GroupId -> full SG description, and GroupId -> list of attachments.
security_groups = {}
security_group_attachments = {}
for page in sg_page_iterator:
    for sg in page['SecurityGroups']:
        security_groups[sg['GroupId']] = sg
        security_group_attachments[sg['GroupId']] = []
print("Found {:d} security groups.".format(len(security_groups)))
# Print results
# NOTE(review): nothing ever appends to security_group_attachments, so every
# SG is reported as having no attachments — presumably a pass over
# describe_network_interfaces (or similar) is missing. Confirm before
# trusting this output.
for sg_id, attachments in security_group_attachments.items():
    if not attachments:
        print("SG {} has no attachments! (Name: {})".format(sg_id, security_groups[sg_id]['GroupName']))
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from rest_framework.decorators import api_view
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from .models import User, Group, Subject, Tendency
from .models import User_Group, User_Subject, User_Tendency
from .models import Wait
from .serializers import UserSerializer, UserSubjectSerializer, UserTendencySerializer
from. serializers import WaitSerializer, GroupSerializer, UserGroupSerializer
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.http import Http404
# Create your views here.
def index(request):
    """Render the rest app's landing page template."""
    empty_context = {}
    return render(request, 'rest/index.html', empty_context)
@api_view(['GET', 'POST'])
def user_list(request):
    """List every User (GET) or create a new one from the JSON body (POST)."""
    if request.method == 'POST':
        payload = JSONParser().parse(request)
        serializer = UserSerializer(data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=400)
        serializer.save()
        return JsonResponse(serializer.data, status=201)
    # GET: serialize the full queryset (safe=False allows a list payload).
    serializer = UserSerializer(User.objects.all(), many=True)
    return JsonResponse(serializer.data, safe=False)
@api_view(['GET', 'PUT', 'DELETE'])
def user_detail(request, pk):
    """Retrieve (GET), update (PUT) or remove (DELETE) a single User."""
    try:
        user = User.objects.get(pk=pk)
    except User.DoesNotExist:
        return HttpResponse(status=404)
    if request.method == 'GET':
        serializer = UserSerializer(user)
        return JsonResponse(serializer.data)
    if request.method == 'PUT':
        payload = JSONParser().parse(request)
        serializer = UserSerializer(user, data=payload)
        if not serializer.is_valid():
            # Validation failures are reported with 404 (kept for
            # compatibility with existing callers).
            return JsonResponse(serializer.errors, status=404)
        serializer.save()
        return JsonResponse(serializer.data)
    if request.method == 'DELETE':
        user.delete()
        return HttpResponse(status=204)
@api_view(['POST'])
def user_login(request):
    """
    Authenticate by auth_id / auth_pw from the JSON body.

    Returns the serialized user with 200 on success, 404 when the id is
    unknown or the password does not match, 400 for non-POST requests.
    """
    if request.method != 'POST':
        return Response(status=status.HTTP_400_BAD_REQUEST)
    data = JSONParser().parse(request)
    # BUG FIX: filter() never raises User.DoesNotExist, so an unknown
    # auth_id used to crash with an uncaught IndexError on user[0];
    # first() returns None instead, which we map to 404.
    user = User.objects.filter(auth_id=data.get('auth_id')).first()
    if user is None or user.auth_pw != data.get('auth_pw'):
        # NOTE(review): passwords appear to be compared in plain text —
        # confirm whether the model stores them unhashed.
        return Response(status=status.HTTP_404_NOT_FOUND)
    serializer = UserSerializer(user)
    return Response(data=serializer.data, status=status.HTTP_200_OK)
@api_view(['GET', 'POST'])
def choice_subject(request):
    """
    POST: toggle User_Subject rows for one user. The JSON body maps subject
    names to 1 (enroll) or anything else (withdraw) and carries the user's
    pk under 'id'. Unknown subject names are skipped silently.
    GET: list every User_Subject row.
    """
    if request.method == 'POST':
        data = JSONParser().parse(request)
        user_id = data['id']
        created_rows = []  # renamed from `list`, which shadowed the builtin
        for key in data:
            if key == 'id':
                continue
            try:
                # One fetch each instead of the previous double Subject lookup.
                subject = Subject.objects.get(name=key)
                user = User.objects.get(pk=user_id)
                if data[key] == 1:
                    row = User_Subject.objects.create(user_id=user, subject_id=subject)
                    created_rows.append(row)
                else:
                    User_Subject.objects.filter(user_id=user, subject_id=subject).delete()
            except Exception:
                # Unknown subject name / missing user: skip this key.
                # (Was a bare except, which also swallowed KeyboardInterrupt.)
                continue
        return Response(status=status.HTTP_200_OK)
    elif request.method == 'GET':
        user_subject = User_Subject.objects.all()
        serializer = UserSubjectSerializer(user_subject, many=True)
        return Response(data=serializer.data, status=status.HTTP_200_OK)
    else:
        return Response(status=status.HTTP_404_NOT_FOUND)
@api_view(['POST', 'GET'])
def choice_tendency(request):
    """
    POST: replace the stored study-preference profile of user data['id']
    with the six values supplied in the body (Korean keys). GET: list
    every stored profile.
    """
    if (request.method=='POST'):
        data = JSONParser().parse(request)
        user_id = data['id']
        user = User.objects.get(pk=user_id)
        try:
            # Drop any previous profile so the create below acts as a replace.
            queryset = User_Tendency.objects.filter(user_id=user)
            queryset.delete()
        except:
            # NOTE(review): filter()/delete() do not raise when nothing
            # matches, so this branch is effectively dead code.
            print('user(',user_id,') choose tendencies.')
        try:
            # Missing keys or invalid values land in the except below.
            insert = User_Tendency.objects.create(user_id=user, rule=data['규칙'], learning=data['학습량'], \
                numberPeople=data['인원'], friendship=data['친목'], environment=data['환경'], style=data['스타일'])
        except:
            return Response(status=status.HTTP_406_NOT_ACCEPTABLE)
        return Response(status=status.HTTP_200_OK)
    elif (request.method=='GET'):
        user_tendency = User_Tendency.objects.all()
        serializer = UserTendencySerializer(user_tendency, many=True)
        return Response(data=serializer.data, status=status.HTTP_200_OK)
    else:
        return Response(status=status.HTTP_404_NOT_FOUND)
# View for the matching feature (still undecided whether it will be used)
class FindGroup(APIView):
    """Waiting-list endpoint used by the (tentative) matching feature."""

    def get(self, request):
        """Return every waiting user."""
        waiter = Wait.objects.all()
        serializer = WaitSerializer(waiter, many=True)
        return Response(serializer.data)

    def post(self, request):
        """(Re-)register the user given by body['id'] on the waiting list."""
        data = JSONParser().parse(request)
        try:
            user = User.objects.get(pk=data['id'])
        except User.DoesNotExist:
            # BUG FIX: this used to `return Http404` (the exception class
            # object) instead of raising it, which produced a broken
            # response; raise so DRF renders a real 404.
            raise Http404
        # Drop any stale entry before creating a fresh one.
        Wait.objects.filter(user=user).delete()
        Wait.objects.create(user=user)
        return Response(status=status.HTTP_201_CREATED)
# View for the matching feature (still undecided whether it will be used)
class FindGroupDetail(APIView):
    """Detail endpoint for a single waiting-list entry."""

    def get_object(self, pk):
        """Fetch the Wait row or abort with 404."""
        try:
            return Wait.objects.get(pk=pk)
        except Wait.DoesNotExist:
            # BUG FIX: previously `return Http404` handed the exception
            # class to the callers, which then treated it as a Wait
            # instance; raise it instead.
            raise Http404

    def get(self, request, pk, format=None):
        waiter = self.get_object(pk)
        serializer = WaitSerializer(waiter)
        return Response(serializer.data)

    def delete(self, request, pk, format=None):
        waiter = self.get_object(pk)
        waiter.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# Group list / creation class
class group_list(APIView):
    """List public study groups (newest first) or create a new one."""

    def get(self, request):
        groups = Group.objects.filter(public=True).order_by("-created_date")
        serializer = GroupSerializer(groups, many=True)
        return Response(serializer.data)

    def post(self, request):
        data = JSONParser().parse(request)
        serializer = GroupSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(data=serializer.data, status=status.HTTP_201_CREATED)
        # BUG FIX: the status code was previously passed positionally as the
        # response *body* — Response(status.HTTP_406_NOT_ACCEPTABLE) — which
        # produced a 200 response whose payload was the number 406.
        return Response(status=status.HTTP_406_NOT_ACCEPTABLE)
# Group retrieval / deletion class
class group_detail(APIView):
    """Retrieve or delete one study group."""

    def get_object(self, pk):
        """Fetch the Group or abort with 404."""
        try:
            return Group.objects.get(pk=pk)
        except Group.DoesNotExist:
            # BUG FIX: returning a Response object here made get()/delete()
            # operate on a Response as if it were a Group (crashing on
            # serialization / .delete()); raise Http404 instead.
            raise Http404

    def get(self, request, pk):
        group = self.get_object(pk)
        serializer = GroupSerializer(group)
        return Response(data = serializer.data, status = status.HTTP_200_OK)

    def delete(self, request, pk):
        obj = self.get_object(pk)
        obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# Study-group join function.
@api_view(['POST'])
def join_group(request):
    """
    Add a user to a group (POST body: user_id, group_id).

    Returns 404 when either object is missing, 500 on unexpected errors,
    otherwise 201.
    """
    if (request.method != 'POST'):
        return Response(status=status.HTTP_400_BAD_REQUEST)
    data = JSONParser().parse(request)
    user_id = data['user_id']
    group_id = data['group_id']
    try:
        user = User.objects.get(pk=user_id)
        group = Group.objects.get(pk=group_id)
    except:
        # NOTE(review): bare except also swallows unrelated errors;
        # narrowing to (User.DoesNotExist, Group.DoesNotExist) would be safer.
        return Response(status=status.HTTP_404_NOT_FOUND)
    try:
        num_of_people = group.num_people
        max_of_people = group.max_num_people
        # Only add the membership (and bump the counter) when there is room
        # and the pair did not already exist.
        if(num_of_people<max_of_people):
            obj, created = User_Group.objects.update_or_create(user=user, group=group)
            if(created):
                group.num_people += 1
                group.save()
    except:
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    # NOTE(review): a full group silently falls through to 201 CREATED even
    # though no membership was added — confirm whether callers expect a
    # conflict status here instead.
    return Response(status=status.HTTP_201_CREATED)
class UserGroupList(APIView):
    """Membership check / removal for one (user, group) pair."""

    def get_object(self, user_pk, group_pk):
        """Fetch (user, group) or abort with 404."""
        try:
            print(user_pk, group_pk)
            user = User.objects.get(pk= user_pk)
            group = Group.objects.get(pk= group_pk)
        except (User.DoesNotExist, Group.DoesNotExist):
            # BUG FIX: returning a Response object here broke the callers'
            # `user, group = self.get_object(...)` unpacking; raise Http404
            # so DRF renders a proper 404 instead.
            raise Http404
        return user, group

    def get(self, request, user_pk, group_pk):
        user, group = self.get_object(user_pk, group_pk)
        try:
            user_group = User_Group.objects.filter(user = user).get(group = group)
            return Response(data=200, status=status.HTTP_200_OK)
        except User_Group.DoesNotExist:
            return Response(data=404, status=status.HTTP_404_NOT_FOUND)

    def delete(self, request, user_pk, group_pk):
        user, group = self.get_object(user_pk, group_pk)
        try:
            user_group = User_Group.objects.filter(user = user).get(group = group)
        except User_Group.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        try:
            # Remove the membership and keep the group's head count in sync.
            group = user_group.group
            user_group.delete()
            group.num_people -= 1
            group.save()
            return Response(status=status.HTTP_204_NO_CONTENT)
        except Exception:
            return Response(status=status.HTTP_400_BAD_REQUEST)
class UserGroupListUser(APIView):
    """List every group a given user belongs to, newest first."""

    def get_object(self, pk):
        """Fetch the User or abort with 404."""
        try:
            return User.objects.get(pk = pk)
        except User.DoesNotExist:
            # BUG FIX: returning a Response object here made get() treat a
            # Response as a User; raise Http404 so DRF renders a real 404.
            raise Http404

    def get(self, request, pk):
        user = self.get_object(pk)
        try:
            memberships = User_Group.objects.filter(user = user)
            group_ids = [membership.group.id for membership in memberships]
            groups = Group.objects.filter(pk__in = group_ids).order_by("-created_date")
            serializer = GroupSerializer(groups, many=True)
            return Response(data=serializer.data, status=status.HTTP_200_OK)
        except User_Group.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
import glob
import shutil
import os
def timer(func):
    """Decorator that prints how long the wrapped function takes to run.

    The wrapped function's return value is passed through unchanged, and
    functools.wraps preserves its __name__/__doc__.
    """
    # BUG FIX: this module only imports glob/shutil/os, so `wraps` and
    # `time` were undefined names; import them locally here.
    import time
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        t_total = time.time() - start
        print(f"{func.__name__} took {round(t_total,2)}s")
        return result
    return wrapper
def data_count(classtype, filenames):
    """Count filenames of the given class: 0 -> "NORMAL", 1 -> "PNEUMONIA".

    Returns None for any other classtype, matching the original fall-through.
    """
    if classtype == 0:
        keyword = "NORMAL"
    elif classtype == 1:
        keyword = "PNEUMONIA"
    else:
        return None
    return sum(keyword in name for name in filenames)
@timer
def move_files(filelist, dest):
    """
    Rebuild `dest` from `filelist`: wipe everything under dest, then copy
    each source file into a subfolder named after its parent directory.

    filelist: iterable of source paths shaped like .../<part>/<file>.
    dest: destination prefix; expected to end with a path separator so the
        glob and f-string concatenations below work.
    """
    # NOTE(review): rmtree only removes directories — if dest contains plain
    # files this raises NotADirectoryError; confirm dest holds only subdirs.
    files = glob.glob(f"{dest}*")
    for f in files:
        shutil.rmtree(f)
    for f in filelist:
        # The parent directory name (e.g. the class label) picks the subfolder.
        part = f.split(os.path.sep)[-2]
        if not os.path.exists(f"{dest}{part}/"):
            os.makedirs(f"{dest}{part}/")
        shutil.copy(f, f"{dest}{part}/")
|
#import csv
import cv2
import time
import random
import imutils
import numpy as np
from PIL import ImageGrab
import pydirectinput as pdi
from PIL import Image, ImageOps
# Frame counter; steering only starts once `say` exceeds 100 (warm-up).
say = 0
# NOTE: `global` at module scope is a no-op; kept for parity with tara().
global soltara
soltara = 63  # current left lane-boundary column in the 128x128 crop
global sagtara
sagtara = 64  # current right lane-boundary column
def tara():
    """
    Scan the processed edge image for the lane boundaries around row 50,
    updating module globals `soltara` (left) and `sagtara` (right).

    Operates on the module-global `img` (128x128 dilated Canny image set by
    the main loop). First bridges nearby Hough line-segment endpoints with
    white lines to close gaps, then walks outward from column 64 until a
    non-black pixel is found on each side.
    """
    global sagtara
    sagtara = 64
    global soltara
    soltara = 64
    pixel = img[30, sagtara]  # NOTE(review): read but unused — looks like leftover
    minLineLength = 5
    maxLineGap = 1
    lines = cv2.HoughLinesP(img,1,np.pi/180,100,minLineLength,maxLineGap)
    if lines is not None:
        # Collect start and end points of every detected segment.
        list1 = []
        list2 = []
        for x in range(0, len(lines)):
            for x1,y1,x2,y2 in lines[x]:
                list1.append((x1,y1))
                list2.append((x2,y2))
        # Randomly pair endpoints and draw a bridge when they are within
        # 20 columns of each other, closing small gaps in the lane edges.
        for i in range(0, int(len(list1) / 2)):
            sequence = [i for i in range(0, len(list2))]
            q = random.choice(sequence)
            mesafe = list1[i][0] - list2[q][0]  # horizontal distance
            if mesafe < 0:
                mesafe = mesafe * -1
            if mesafe < 20:
                cv2.line(img,list1[i],list2[q],(255,255,255),2)
    # Walk right from the center until a non-black (edge) pixel stops us.
    while sagtara < 127:
        pixel = img[50, sagtara]
        if pixel == 0:
            sagtara = sagtara + 1
        else:
            break
    # Walk left from the center the same way.
    while soltara > 1:
        pixel = img[50, soltara]
        if pixel == 0:
            soltara = soltara - 1
        else:
            break
# Main capture/steer loop: grab the screen, isolate road edges in a
# 128x128 crop, then press 'a'/'d' based on where the lane center sits.
while(True):
    global img
    global gray
    # Capture frame-by-frame
    img = ImageGrab.grab(bbox=(0,0,1280,720)) # ETS full screen
    img = np.array(img)
    height, width, channels = img.shape
    # Grayscale -> denoise -> Canny edges.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 5)
    img = cv2.Canny(img, 100, 200)
    # Crop to the middle third vertically and the central half horizontally,
    # then keep only the lowest quarter of that (the road just ahead).
    img = img[int(height / 3):int((height / 3) * 2), int((width / 8) * 2):int((width / 8) * 6)]
    height, width = img.shape
    img = img[int((height / 8) * 6):height, 0:width]
    img = cv2.resize(img, (128, 128))
    # Thicken the edges so the column scans in tara() hit them reliably.
    kernel = np.ones((5,2),np.uint8)
    img = cv2.dilate(img,kernel,iterations = 3)
    if say > 100:
        tara()
    if say == 200:
        print('\a')  # terminal bell once warmed up
    say = say + 1
    # Visualize the detected lane span at row 50.
    cikti = cv2.line(img,(soltara, 50), (sagtara, 50), (255, 255, 255), 1)
    orta = int((soltara + sagtara) / 2)  # lane center column
    #sure = int((64 - orta) / 100)
    #if sure < 0:
    #    sure = sure * -1
    if say > 100:
        if soltara < 5 or sagtara > 123:
            # Boundaries hit the frame edge: detection unreliable, do nothing.
            print("x")
        else:
            # Steer toward re-centering the lane (dead zone 60..68).
            if orta < 60:
                pdi.press("a")
            elif orta > 68:
                pdi.press("d")
    # Display the resulting frame
    cikti = cv2.resize(cikti, (512, 512))
    cv2.imshow('wow',cikti)
    #cv2.imshow('wow2',araba)
    # Reset boundaries that ran off the frame entirely.
    if sagtara == 128:
        sagtara = 64
    if soltara == 0:
        soltara = 64
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
# NOTE(review): `cap` is never defined in this script (frames come from
# ImageGrab, not cv2.VideoCapture), so this line raises NameError on exit.
cap.release()
cv2.destroyAllWindows()
|
import tweepy
import csv
# Output file for raw user dumps.
# NOTE(review): opened but never written to or closed in the active code
# below — confirm it is still needed.
data=open('az21.txt','w',encoding='utf-8')
# SECURITY: Twitter API credentials are hard-coded here. They should be
# revoked/rotated and loaded from environment variables or a secrets store.
consumer_key = "nvEV4sEBSWM3HjwkcPu9ug6VR"
consumer_secret = "3I6VFDNLbRGGkq7um1RqouLFs7EArViu3KoKMdN72QzN2i7Mwm"
access_key = "1086269917295390720-fc1DqWryLNZIIibNiHsJ0yVdAqJiYn"
access_secret = "ITWTam10QS07n5dQ4JNNpIfJedAiHtrhiRY2xw0k0gZsh"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
# For each name in wiki_name.csv, search Twitter and print the screen names
# of verified matches (NOTE(review): `writer` is created but never used, so
# tweeter_screen_names.csv stays empty — presumably temp should be written out).
with open('wiki_name.csv','r',encoding='UTF-8') as inp, open('tweeter_screen_names.csv','w') as out:
    writer=csv.writer(out)
    for row in csv.reader(inp):
        # print(row[0])
        if row:
            try:
                user = api.search_users(row[0])
                temp=[]
                flag=1
                for i in user:
                    if i.verified== True:
                        temp.append(i.screen_name)
                        flag=0
                if flag:
                    print(row[0],"not_varified")
                print(temp)
            except Exception as e:
                # Rate limits / API errors: log and continue with the next name.
                print(e)
# public_tweets = api.home_timeline()
# for tweet in public_tweets:
# print(tweet.text)
# data.write(str(user))
# print(str(user))
# print(i.screen_name)
# print(i.screen_name)
# user = api.get_user('narendramodi')
# was=str(user)
# # print(user)
# data.write(was)
# # data.close
# print('done')
# # print(user.screen_name)
# # print(user.followers_count)
# # for friend in user.friends():
# # print(friend.screen_name) |
import tempfile, os
from subprocess import check_output, PIPE, Popen, STDOUT
from printSubprocessStdout import printSubprocessStdout
from shlex import quote
def add(newEntry):
    """
    Append `newEntry` as a new crontab line and install the updated crontab.

    Raises Exception when the existing crontab cannot be read (crontab -l
    exit codes other than 0/1; 1 just means "no crontab yet") or when
    installing the new table fails.
    """
    print('Trying to save entry {} to crontab.'.format(newEntry))
    fd, path = tempfile.mkstemp()
    # BUG FIX: mkstemp returns an *open* OS-level descriptor which was never
    # closed before, leaking one fd per call.
    os.close(fd)
    try:
        process = Popen('crontab -l > ' + quote(path), stdout=PIPE, stderr=PIPE, shell=True)
        out, err = process.communicate()
        printSubprocessStdout(out)
        printSubprocessStdout(err)
        code = process.returncode
        if code != 0 and code != 1:
            raise Exception(
                'Unrecognized issue when reading existing crontab entries (code {}).'.format(code))
        with open(path, 'a') as f:
            f.write(newEntry)
            f.write('\n')
        process = Popen('crontab ' + quote(path), stdout=PIPE, stderr=PIPE, shell=True) # Array form of Popen cannot be used here, a direct call to load new crontab was needed.
        out, err = process.communicate()
        printSubprocessStdout(out)
        printSubprocessStdout(err)
        code = process.returncode
        if code != 0:
            raise Exception('Cannot save new cron job.')
        print('New cron entry saved.')
    finally:
        # Remove the temp file even when an error above aborts early
        # (previously it leaked on every failure path).
        os.remove(path)
# Import-time environment checks: make sure rsyslog (used for cron logging)
# and the cron daemon are running, starting them if needed. check_output
# raises CalledProcessError only if both `status` and `start` fail.
print('Making sure rsyslog is running for logging. If not, it will be started.')
printSubprocessStdout(
    check_output('service rsyslog status || service rsyslog start', shell=True))
print('Making sure cron service is running. If not, it will be started.')
printSubprocessStdout(
    check_output('service cron status || service cron start', shell=True))
|
#!/usr/bin/env python
# coding: utf-8
# # Point Operation
# ## Modifying Image Intensity
# In[1]:
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load the test image and precompute RGB / grayscale and int16 variants
# (the int16 copies give headroom for arithmetic before clipping back to 0..255).
imBGR = cv2.imread('p1.jpg')
imRGB = cv2.cvtColor(imBGR, cv2.COLOR_BGR2RGB) # change image pattern into RGB
imGRAY = cv2.cvtColor(imRGB, cv2.COLOR_RGB2GRAY) # change image pattern into Gray scale
imRGB_int16 = np.int16(imRGB) # extend values range by changing a data type into int-16-bit for further operation
imGRAY_int16 = np.int16(imGRAY)
plt.imshow(imRGB),plt.title("Original Image")
#plt.imshow(imGRAY,'gray')
plt.show()
# ### Contrast Adjustment (with limiting the result by clamping)
# In[2]:
contr_im = imRGB_int16 * 1.5 # increase contrast by factor = 1.5
contr_im_clip = np.clip(contr_im, 0, 255) # limit values in range (0,255)
contr_im_uint8 = np.uint8(contr_im_clip) # change a data type into unsigned-int-8-bit for image show
#p1_rgb_contr = plt.imshow(contr_im_uint8)
plt.subplot(2,2,1),plt.imshow(imRGB)
plt.title("Original Image"), plt.xticks([]),plt.yticks([])
plt.subplot(2,2,2),plt.imshow(contr_im_uint8)
plt.title("Contrast Image"), plt.xticks([]),plt.yticks([])
plt.show()
# ### Contrast Image Histogram
# In[3]:
plt.subplot(3,1,1),plt.hist(imRGB.ravel(),256,[0,256])
plt.title("Original Image")#, plt.xticks([]),plt.yticks([])
plt.subplot(3,1,3),plt.hist(contr_im_uint8.ravel(),256,[0,256])
plt.title("Contrast Image")#, plt.xticks([]),plt.yticks([])
plt.show()
# ### Brightness Adjustment (with limiting the result by clamping)
# In[4]:
bright_im = imRGB_int16 + 20 # increase brightness by adding a value
bright_im_clip = np.clip(bright_im, 0, 255) # limit values in range (0,255)
bright_im_uint8 = np.uint8(bright_im_clip)
#p1_rgb_bright = plt.imshow(bright_im_uint8)
plt.subplot(2,2,1),plt.imshow(imRGB)
plt.title("Original Image"), plt.xticks([]),plt.yticks([])
plt.subplot(2,2,2),plt.imshow(bright_im_uint8)
plt.title("Brightness Image"), plt.xticks([]),plt.yticks([])
plt.show()
# ### Brightness Image Histogram
# In[5]:
plt.subplot(3,1,1),plt.hist(imRGB.ravel(),256,[0,256])
plt.title("Original Image")#, plt.xticks([]),plt.yticks([])
plt.subplot(3,1,3),plt.hist(bright_im_uint8.ravel(),256,[0,256])
plt.title("Brightness Image")#, plt.xticks([]),plt.yticks([])
plt.show()
# ### Inverting Image
# In[6]:
inv_im = -imRGB + 255 # reverse the order of pixel values
#p1_rgb_inv = plt.imshow(inv_im)
plt.subplot(2,2,1),plt.imshow(imRGB)
plt.title("Original Image"), plt.xticks([]),plt.yticks([])
plt.subplot(2,2,2),plt.imshow(inv_im)
plt.title("Inverting Image"), plt.xticks([]),plt.yticks([])
plt.show()
# ### Inverting Image Histogram
# In[7]:
plt.subplot(3,1,1),plt.hist(imRGB.ravel(),256,[0,256])
plt.title("Original Image")#, plt.xticks([]),plt.yticks([])
plt.subplot(3,1,3),plt.hist(inv_im.ravel(),256,[0,256])
plt.title("Inverting Image")#, plt.xticks([]),plt.yticks([])
plt.show()
# ### Threshold Operation
# In[8]:
ret, th_im = cv2.threshold(imGRAY,60,255,cv2.THRESH_BINARY) # modify image with threshold value = 60
#p1_gray_th = plt.imshow(th_im, 'gray')
plt.subplot(2,2,1),plt.imshow(imGRAY, 'gray')
plt.title("Original Image"), plt.xticks([]),plt.yticks([])
plt.subplot(2,2,2),plt.imshow(th_im, 'gray')
plt.title("Threshold Image"), plt.xticks([]),plt.yticks([])
plt.show()
# ### Threshold Image Histogram
# In[9]:
plt.subplot(3,1,1),plt.hist(imGRAY.ravel(),256,[0,256])
plt.title("Original Image"), #plt.xticks([]),plt.yticks([])
plt.subplot(3,1,3),plt.hist(th_im.ravel(),256,[0,256])
plt.title("Threshold Image"), #plt.xticks([]),plt.yticks([])
plt.show()
# ### Auto-contrast
# In[10]:
# Scan per-row maxima/minima to find the global intensity range of the image.
max_val = 0
min_val = 255
for i in range(0, imGRAY.shape[0]):
    if max(imGRAY[i]) > max_val:
        max_val = max(imGRAY[i])
print(max_val)
for j in range(0, imGRAY.shape[0]):
    if min(imGRAY[j]) < min_val:
        min_val = min(imGRAY[j])
print(min_val)
# In[11]:
# plot histogram of the original gray scale image
plt.hist(imGRAY.ravel(),256,[0,256]), plt.title("Original Image")
plt.show()
# In[12]:
# from the image histogram of 8-bit gray image(amin = 0, amax = 255), define alow =0 and ahigh = 220
alow = 0; ahigh = 220; amin = 0; amax = 255
# Linear remap of [alow, ahigh] onto the full [amin, amax] range, clamped.
auto_contr_factor = (amax-amin)/(ahigh-alow)
auto_contr_im = amin + ((imGRAY_int16 - alow) * auto_contr_factor)
auto_contr_im_clip = np.clip(auto_contr_im, 0, 255)
auto_contr_im_uint8 = np.uint8(auto_contr_im_clip)
#p1_gray_auto_contr = plt.imshow(auto_contr_im_uint8)
plt.subplot(2,2,1),plt.imshow(imGRAY, 'gray')
plt.title("Original Image"), plt.xticks([]),plt.yticks([])
plt.subplot(2,2,2),plt.imshow(auto_contr_im_uint8, 'gray')
plt.title("Auto-Contrast Image"), plt.xticks([]),plt.yticks([])
plt.show()
# ### Auto-Contrast Histogram
# In[13]:
plt.subplot(3,1,1),plt.hist(imGRAY.ravel(),256,[0,256])
plt.title("Original Image"), #plt.xticks([]),plt.yticks([])
plt.subplot(3,1,3),plt.hist(auto_contr_im_uint8.ravel(),256,[0,256])
plt.title("Auto-Contrast Image"), #plt.xticks([]),plt.yticks([])
plt.show()
# ### Modified Auto-contrast
# In[14]:
# find a histogram of the gray scale image
hist,bins = np.histogram(imGRAY.ravel(),256,[0,256])
# In[15]:
#find new alow and ahigh values by saturating both end of image histogram 0.5%
def lowhigh_vals():
    """Return (alow, ahigh): the histogram bins where the cumulative count
    passes 0.5% from the bottom and from the top of the module-global `hist`."""
    saturation = 0.005 * sum(hist)
    # Accumulate from the dark end until 0.5% of the pixels are covered.
    running = 0
    for low_idx in range(len(hist)):
        running += hist[low_idx]
        if running >= saturation:
            break
    # Strip bins from the dark end until only 0.5% of the pixels remain.
    remaining = sum(hist)
    for high_idx in range(len(hist)):
        remaining -= hist[high_idx]
        if remaining <= saturation:
            break
    return low_idx, high_idx
# Report the saturated (alow, ahigh) pair computed above.
print("(alown, ahighn) = ",lowhigh_vals())
# In[16]:
# using alown and ahighn from a function: lowhigh_vals()
alown = 4; ahighn = 223; amin = 0; amax = 255
Mod_auto_contr_factor = (amax-amin)/(ahighn-alown)
Mod_auto_contr_im = amin + ((imGRAY_int16 - alown) * auto_contr_factor)
Mod_auto_contr_im_clip = np.clip(Mod_auto_contr_im, 0, 255)
Mod_auto_contr_im_uint8 = np.uint8(Mod_auto_contr_im_clip)
#p1_gray_Mod_auto_contr = plt.imshow(Mod_auto_contr_im_uint8)
plt.subplot(2,2,1),plt.imshow(imGRAY, 'gray')
plt.title("Original Image"), plt.xticks([]),plt.yticks([])
plt.subplot(2,2,2),plt.imshow(Mod_auto_contr_im_uint8, 'gray')
plt.title("Modified Auto-Contrast Image"), plt.xticks([]),plt.yticks([])
plt.show()
# ### Modified Auto-Contrast Histogram
# In[17]:
plt.subplot(3,1,1),plt.hist(imGRAY.ravel(),256,[0,256])
plt.title("Original Image"), #plt.xticks([]),plt.yticks([])
plt.subplot(3,1,3),plt.hist(Mod_auto_contr_im_uint8.ravel(),256,[0,256])
plt.title("Modified Auto-Contrast Image"), #plt.xticks([]),plt.yticks([])
plt.show()
# ### Histogram Equalization
# In[18]:
hist_imGRAY,bins = np.histogram(imGRAY.flatten(),256,[0,256])
cum_hist = hist_imGRAY.cumsum() # cumulative histogram
# Scale the CDF to the histogram's peak so both fit on one plot.
cum_hist_normalized = cum_hist * hist_imGRAY.max()/ cum_hist.max() # cumulative histogram normalization
plt.hist(imGRAY.flatten(),256,[0,256])
plt.plot(cum_hist_normalized, color = 'g')
plt.xlim([0,256])
plt.legend(('image histogram','cdf'), loc = 'upper left')
plt.show()
# In[19]:
# Histogram Equalization Function(hist_eq)
# Standard equalization transfer function: T(v) = 255 * CDF(v).
hist_eq = cum_hist*255/cum_hist.max() # cum_hist.max() is an image size (M*N)
hist_eq_uint8 = np.uint8(hist_eq)
# In[20]:
# Apply the transfer function as a lookup table indexed by pixel value.
imGRAY_eq = hist_eq_uint8[imGRAY]
hist_imGRAY_eq,bins = np.histogram(imGRAY_eq.flatten(),256,[0,256])
cum_hist_eq = hist_imGRAY_eq.cumsum()
cum_hist_eq_normalized = cum_hist_eq * hist_imGRAY_eq.max()/ cum_hist_eq.max()
plt.plot(cum_hist_eq_normalized, color = 'g')
plt.hist(imGRAY_eq.flatten(),256,[0,256])
plt.xlim([0,256])
plt.legend(('equalized histogram','cdf'), loc = 'upper left')
plt.show()
# In[21]:
plt.subplot(2,2,1),plt.imshow(imGRAY, 'gray')
plt.title("Original Image"), plt.xticks([]),plt.yticks([])
plt.subplot(2,2,2),plt.imshow(imGRAY_eq, 'gray')
plt.title("Equalized Image"), plt.xticks([]),plt.yticks([])
plt.show()
# ### Histogram Specification
# In[22]:
hist_imGRAY,bins = np.histogram(imGRAY.flatten(),256,[0,256])
cdf = hist_imGRAY.cumsum()
cdf_normalized = cdf / cdf.max()
plt.plot(cdf_normalized, color = 'r')
plt.xlim([0,256])
plt.legend(('cdf',), loc = 'upper left')
plt.show()
# In[23]:
# Piecewise Linear Distribution
# Build a target CDF that linearly interpolates the real CDF between the
# knot values in `a`.
a = [0,51,102,153,204,255]
# NOTE(review): PL aliases cdf_normalized (no copy), so the loop below
# mutates cdf_normalized in place — the red "cdf" curve plotted in In[24]
# is therefore the *modified* array, not the original CDF. Use
# cdf_normalized.copy() if the original curve is wanted.
PL = cdf_normalized
for i in range(0,256):
    if a[0] <= i < a[1]:
        PL[i] = PL[a[0]] + ((i-a[0])*(PL[a[1]] - PL[a[0]])/(a[1]-a[0]))
    elif a[1] <= i < a[2]:
        PL[i] = PL[a[1]] + ((i-a[1])*(PL[a[2]] - PL[a[1]])/(a[2]-a[1]))
    elif a[2] <= i < a[3]:
        PL[i] = PL[a[2]] + ((i-a[2])*(PL[a[3]] - PL[a[2]])/(a[3]-a[2]))
    elif a[3] <= i < a[4]:
        PL[i] = PL[a[3]] + ((i-a[3])*(PL[a[4]] - PL[a[3]])/(a[4]-a[3]))
    elif a[4] <= i < a[5]:
        PL[i] = PL[a[4]] + ((i-a[4])*(PL[a[5]] - PL[a[4]])/(a[5]-a[4]))
    else:
        PL[i] = 1
# In[24]:
plt.plot(PL, color = 'b')
plt.plot(cdf_normalized, color = 'r')
plt.xlim([0,256])
plt.legend(('Piecewise Linear Distribution','cdf'), loc = 'upper left')
plt.show()
# In[25]:
# Map pixel values through the specified distribution (float output).
imGRAY_sp = PL[imGRAY]
# In[26]:
plt.subplot(2,2,1),plt.imshow(imGRAY, 'gray')
plt.title("Original Image"), plt.xticks([]),plt.yticks([])
plt.subplot(2,2,2),plt.imshow(imGRAY_sp, 'gray')
plt.title("Specified Image"), plt.xticks([]),plt.yticks([])
plt.show()
|
def bubblesort(arr):
    """Sort *arr* in place in ascending order and return it.

    Classic bubble sort, O(n^2) worst case. After pass i the largest
    i+1 elements are in their final positions; a pass with no swaps
    means the list is sorted and we stop early.
    """
    n = len(arr)
    # BUG FIX: the outer loop was `range(1)` — a single pass — which
    # leaves most inputs unsorted. It must make up to n-1 passes.
    for i in range(n - 1):
        swapped = False
        for j in range(0, n - i - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break  # early exit on an already-sorted suffix run
    return arr

arr = [2,15,1,11,7,-2,15,12,-8,0]
print('beforesort{}'.format(arr))
print('aftersort{}'.format(bubblesort(arr)))
def compute_bmi(height, weight):
    """Return the BMI for *height* in inches and *weight* in pounds.

    Uses the imperial formula BMI = 720 * weight / height**2.

    :raises ValueError: if height is not positive (original divided by
        zero / accepted negative heights).
    """
    if height <= 0:
        raise ValueError("height must be positive")
    return (720 * weight) / (height ** 2)

def bmi_verdict(bmi):
    """Map a BMI value to the script's health message.

    Note: boundary values 19 and 25 fall through to the "below" branch,
    preserving the original script's (odd but intentional-looking)
    comparison `19 < BMI < 25`.
    """
    if 19 < bmi < 25:
        return "Your BMI is within the healthy range"
    elif bmi > 25:
        return "You have a BMI above the healthy range"
    else:
        return "You have a BMI below the healthy range"

if __name__ == "__main__":
    # I/O kept out of the pure functions so they are testable; the guard
    # stops the prompts from firing when the module is imported.
    height = float(input("Enter your height in inches: "))
    weight = float(input("Enter your weight in pounds: "))
    print(bmi_verdict(compute_bmi(height, weight)))
|
#!/usr/bin/env python3
# Advent of code Year 2019 Day 13 solution
# Author = seven
# Date = December 2019
import sys
from os import path
sys.path.insert(0, path.dirname(path.dirname(path.abspath(__file__))))
from shared import vm
# Load the Intcode program from input.txt next to this script.
# NOTE(review): rstrip("code.py") strips a *character set*, not the suffix
# (works for names ending in "code.py" but is fragile), and `input` shadows
# the builtin — both worth cleaning up.
with open((__file__.rstrip("code.py") + "input.txt"), 'r') as input_file:
    input = input_file.read()
class AnalyzePoint(vm.VM):
    """Intcode VM that feeds the drone its (x, y) position on demand.

    Each input request alternates between the x and y coordinate of
    ``drone_pos``. The driver below treats a program output of 0 as
    "not in the tractor beam" — presumably the AoC day-19 puzzle, even
    though the file header says day 13; verify.
    """
    def __init__(self, program: str, drone_pos: tuple):
        self.drone_pos = drone_pos
        self.input_mode = 0  # 0 -> serve x next, 1 -> serve y next
        super().__init__(program=program, input=vm.IO(), output=vm.IO())
    def load_from_input(self, a: vm.Param):
        # Supply the next coordinate, then flip between x and y.
        self.input.value = self.drone_pos[self.input_mode]
        self.input_mode = (self.input_mode+1) % 2
        super().load_from_input(a)
def paint_state(tractor_beam, min_x, max_x, min_y, max_y, square_pos, square_size):
    """Print an ASCII map of the scanned area, one row per y value.

    '0' marks cells inside the square anchored at square_pos
    (square_size cells wide), '#' marks tractor-beam cells, and '.'
    everything else.
    """
    def cell(x, y):
        # The candidate square takes precedence over the beam marker.
        inside_square = (square_pos[0] <= x < square_pos[0] + square_size
                         and square_pos[1] <= y < square_pos[1] + square_size)
        if inside_square:
            return '0'
        return '#' if (x, y) in tractor_beam else '.'
    for y in range(min_y, max_y + 1):
        print(''.join(cell(x, y) for x in range(min_x, max_x + 1)))
# Scan the tractor beam row by row, keeping a sliding [x_low_bound,
# x_high_bound] window around the beam, until a square_size x square_size
# square fits entirely inside the beam.
tractor_beam = set()
max_x = 49
max_y = 15000
square_size = 100
x_low_bound = 0
x_high_bound = max_x
x_high_bound_calibrated = False
highest_x = -1
highest_y = -1
square_pos = None
tracted_in_50_50 = None
# These high / low bounds could be kept tighter to speed up, but kept it loose...
for y in range(max_y + 1):
    highest_y = y
    if square_pos is not None:
        break
    # Part One answer: beam cells counted inside the first 50x50 area.
    if tracted_in_50_50 is None and y >= 50:
        tracted_in_50_50 = len(tractor_beam)
    print('Check range {} to {} for y: {}'.format(x_low_bound, x_high_bound, y))
    is_first_in_line_found = False
    for x in range(x_low_bound, x_high_bound + 1):
        # Rows are contiguous: once the left edge is found, everything up
        # to the previous row's right edge is in the beam without probing.
        if is_first_in_line_found and x <= highest_x:
            tractor_beam.add((x, y))
        else:
            # Probe this point with a fresh VM run (one output per run).
            program = AnalyzePoint(program=input, drone_pos=(x, y))
            program.run()
            if program.output.value == 0:
                continue
            is_first_in_line_found = True
            # First non-origin hit calibrates the scan window width.
            if not x_high_bound_calibrated and not (0, 0) == (x, y):
                highest_x = x
                x_high_bound = x + 2
                x_high_bound_calibrated = True
            if x > 0 and (x-1, y) not in tractor_beam:
                x_low_bound = x
            if x >= x_high_bound:
                x_high_bound += 2
            if x > highest_x:
                highest_x = x
            tractor_beam.add((x, y))
        # At this point we've added a tracted position, check if square fits
        if x - x_low_bound >= square_size - 1:
            top_right = (x, y - square_size + 1)
            if top_right in tractor_beam:
                square_pos = (x_low_bound, y - square_size + 1)
                break
print('Part One: {0}'.format(tracted_in_50_50))
x, y = square_pos
print('Part Two: {}'.format(x * 10**4 + y))
print(square_pos)
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class SummaryDriveDriveItem(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Per-drive statistics item of a drive summary response. All metric
    attributes default to None until set.
    """
    def __init__(self):
        """
        SummaryDriveDriveItem - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'access_latency': 'float',
            'access_slow': 'float',
            'busy': 'float',
            'bytes_in': 'float',
            'bytes_out': 'float',
            'drive_id': 'str',
            'iosched_latency': 'float',
            'iosched_queue': 'float',
            'time': 'int',
            'type': 'str',
            'used_bytes_percent': 'float',
            'used_inodes': 'float',
            'xfer_size_in': 'float',
            'xfer_size_out': 'float',
            'xfers_in': 'float',
            'xfers_out': 'float'
        }
        self.attribute_map = {
            'access_latency': 'access_latency',
            'access_slow': 'access_slow',
            'busy': 'busy',
            'bytes_in': 'bytes_in',
            'bytes_out': 'bytes_out',
            'drive_id': 'drive_id',
            'iosched_latency': 'iosched_latency',
            'iosched_queue': 'iosched_queue',
            'time': 'time',
            'type': 'type',
            'used_bytes_percent': 'used_bytes_percent',
            'used_inodes': 'used_inodes',
            'xfer_size_in': 'xfer_size_in',
            'xfer_size_out': 'xfer_size_out',
            'xfers_in': 'xfers_in',
            'xfers_out': 'xfers_out'
        }
        self._access_latency = None
        self._access_slow = None
        self._busy = None
        self._bytes_in = None
        self._bytes_out = None
        self._drive_id = None
        self._iosched_latency = None
        self._iosched_queue = None
        self._time = None
        self._type = None
        self._used_bytes_percent = None
        self._used_inodes = None
        self._xfer_size_in = None
        self._xfer_size_out = None
        self._xfers_in = None
        self._xfers_out = None
    @property
    def access_latency(self):
        """float: The average operation latency."""
        return self._access_latency
    @access_latency.setter
    def access_latency(self, access_latency):
        self._access_latency = access_latency
    @property
    def access_slow(self):
        """float: The rate of slow (long-latency) operations."""
        return self._access_slow
    @access_slow.setter
    def access_slow(self, access_slow):
        self._access_slow = access_slow
    @property
    def busy(self):
        """float: The percentage of time the drive was busy."""
        return self._busy
    @busy.setter
    def busy(self, busy):
        self._busy = busy
    @property
    def bytes_in(self):
        """float: The rate of bytes written."""
        return self._bytes_in
    @bytes_in.setter
    def bytes_in(self, bytes_in):
        self._bytes_in = bytes_in
    @property
    def bytes_out(self):
        """float: The rate of bytes read."""
        return self._bytes_out
    @bytes_out.setter
    def bytes_out(self, bytes_out):
        self._bytes_out = bytes_out
    @property
    def drive_id(self):
        """str: Drive ID LNN:bay."""
        return self._drive_id
    @drive_id.setter
    def drive_id(self, drive_id):
        self._drive_id = drive_id
    @property
    def iosched_latency(self):
        """float: The average time spent in the I/O scheduler."""
        return self._iosched_latency
    @iosched_latency.setter
    def iosched_latency(self, iosched_latency):
        self._iosched_latency = iosched_latency
    @property
    def iosched_queue(self):
        """float: The average length of the I/O scheduler queue."""
        return self._iosched_queue
    @iosched_queue.setter
    def iosched_queue(self, iosched_queue):
        self._iosched_queue = iosched_queue
    @property
    def time(self):
        """int: Unix Epoch time in seconds of the request."""
        return self._time
    @time.setter
    def time(self, time):
        self._time = time
    @property
    def type(self):
        """str: The type of the drive."""
        return self._type
    @type.setter
    def type(self, type):
        self._type = type
    @property
    def used_bytes_percent(self):
        """float: The percent of blocks used on the drive."""
        return self._used_bytes_percent
    @used_bytes_percent.setter
    def used_bytes_percent(self, used_bytes_percent):
        self._used_bytes_percent = used_bytes_percent
    @property
    def used_inodes(self):
        """float: The number of inodes allocated on the drive."""
        return self._used_inodes
    @used_inodes.setter
    def used_inodes(self, used_inodes):
        self._used_inodes = used_inodes
    @property
    def xfer_size_in(self):
        """float: The average size of write operations."""
        return self._xfer_size_in
    @xfer_size_in.setter
    def xfer_size_in(self, xfer_size_in):
        self._xfer_size_in = xfer_size_in
    @property
    def xfer_size_out(self):
        """float: The average size of read operations."""
        return self._xfer_size_out
    @xfer_size_out.setter
    def xfer_size_out(self, xfer_size_out):
        self._xfer_size_out = xfer_size_out
    @property
    def xfers_in(self):
        """float: The rate of write operations."""
        return self._xfers_in
    @xfers_in.setter
    def xfers_in(self, xfers_in):
        self._xfers_in = xfers_in
    @property
    def xfers_out(self):
        """float: The rate of read operations."""
        return self._xfers_out
    @xfers_out.setter
    def xfers_out(self, xfers_out):
        self._xfers_out = xfers_out
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict iteration replaces six.iteritems so the model has no
        # third-party dependency; behaviour is identical on Py2/Py3.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        BUG FIX: the generated version accessed other.__dict__ directly,
        raising AttributeError when compared against objects without a
        __dict__ (e.g. int, None). Non-instances now compare unequal.
        """
        if not isinstance(other, SummaryDriveDriveItem):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# thirdorder, help compute anharmonic IFCs from minimal sets of displacements
# Copyright (C) 2012-2014 Wu Li <wu.li.phys2011@gmail.com>
# Copyright (C) 2012-2014 Jesús Carrete Montaña <jcarrete@gmail.com>
# Copyright (C) 2012-2014 Natalio Mingo Bisquert <natalio.mingo@cea.fr>
# Copyright (C) 2014 Antti J. Karttunen <antti.j.karttunen@iki.fi>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import os.path
import copy
import itertools
import contextlib
try:
import cStringIO as StringIO
except ImportError:
import StringIO
try:
import hashlib
hashes=True
except ImportError:
hashes=False
import numpy as np
import scipy as sp
import scipy.linalg
import scipy.spatial
import scipy.spatial.distance
# Numerical parameters of the finite-difference IFC calculation.
H=1e-3 # Magnitude of the finite displacements, in nm.
SYMPREC=1e-5 # Tolerance for symmetry search
sowblock="""
.d88888b .88888. dP dP dP
88. "' d8' `8b 88 88 88
`Y88888b. 88 88 88 .8P .8P
`8b 88 88 88 d8' d8'
d8' .8P Y8. .8P 88.d8P8.d8P
Y88888P `8888P' 8888' Y88'
ooooooooooooooooooooooooooooooooo
"""
reapblock="""
888888ba 88888888b .d888888 888888ba
88 `8b 88 d8' 88 88 `8b
a88aaaa8P' a88aaaa 88aaaaa88a a88aaaa8P'
88 `8b. 88 88 88 88
88 88 88 88 88 88
dP dP 88888888P 88 88 dP
oooooooooooooooooooooooooooooooooooooooooooo
"""
doneblock="""
888888ba .88888. 888888ba 88888888b
88 `8b d8' `8b 88 `8b 88
88 88 88 88 88 88 a88aaaa
88 88 88 88 88 88 88
88 .8P Y8. .8P 88 88 88
8888888P `8888P' dP dP 88888888P
ooooooooooooooooooooooooooooooooooooooooo
"""
@contextlib.contextmanager
def dir_context(directory):
    """
    Context manager used to run code in another directory.

    Temporarily chdirs into *directory*, yielding it, and restores the
    previous working directory on exit even if the body raises.
    """
    previous=os.getcwd()
    os.chdir(directory)
    try:
        yield directory
    finally:
        os.chdir(previous)
def gen_SPOSCAR(poscar,na,nb,nc):
    """
    Create a dictionary similar to the first argument but describing a
    na x nb x nc supercell.

    Atom ordering runs fastest over the original atoms, then over the
    a, b and c images; positions stay fractional (rescaled to the
    supercell lattice).
    """
    nruter=dict()
    nruter["na"]=na
    nruter["nb"]=nb
    nruter["nc"]=nc
    # Lattice vectors are stored column-wise; scale each by its repetition.
    nruter["lattvec"]=np.array(poscar["lattvec"])
    nruter["lattvec"][:,0]*=na
    nruter["lattvec"][:,1]*=nb
    nruter["lattvec"][:,2]*=nc
    nruter["elements"]=copy.copy(poscar["elements"])
    nruter["numbers"]=na*nb*nc*poscar["numbers"]
    nruter["positions"]=np.empty((3,poscar["positions"].shape[1]*na*nb*nc))
    # range() replaces the Py2-only xrange() so this also runs under Py3;
    # the dead `pos=0` preamble was removed (enumerate supplies pos).
    for pos,(k,j,i,iat) in enumerate(itertools.product(range(nc),
                                                       range(nb),
                                                       range(na),
                                                       range(
                                                           poscar["positions"].shape[1]))):
        nruter["positions"][:,pos]=(poscar["positions"][:,iat]+[i,j,k])/[
            na,nb,nc]
    nruter["types"]=[]
    for i in range(na*nb*nc):
        nruter["types"].extend(poscar["types"])
    return nruter
def calc_dists(sposcar):
    """
    Return the distances between atoms in the supercells, their
    degeneracies and the associated supercell vectors.

    :return: (dmin, nequi, shifts) — dmin[i,j] is the minimum-image
        distance between atoms i and j, nequi[i,j] counts how many of
        the 27 neighbouring images realise it (within 1e-4 in squared
        distance), and shifts[i,j,:] holds the indices of those images
        in the (-1,0,1)^3 product ordering.
    """
    ntot=sposcar["positions"].shape[1]
    posi=np.dot(sposcar["lattvec"],sposcar["positions"])
    d2s=np.empty((27,ntot,ntot))
    # range() replaces the Py2-only xrange() so this also runs under Py3.
    for j,(ja,jb,jc) in enumerate(itertools.product(range(-1,2),
                                                    range(-1,2),
                                                    range(-1,2))):
        posj=np.dot(sposcar["lattvec"],(sposcar["positions"].T+[ja,jb,jc]).T)
        d2s[j,:,:]=scipy.spatial.distance.cdist(posi.T,posj.T,"sqeuclidean")
    d2min=d2s.min(axis=0)
    dmin=np.sqrt(d2min)
    degenerate=(np.abs(d2s-d2min)<1e-4)
    nequi=degenerate.sum(axis=0,dtype=np.intc)
    maxequi=nequi.max()
    # argsort on the negated mask puts the degenerate image indices first.
    # (The dead `shifts=np.empty(...)` preallocation, which was overwritten
    # immediately, has been removed.)
    sorting=np.argsort(np.logical_not(degenerate),axis=0)
    shifts=np.transpose(sorting[:maxequi,:,:],(1,2,0)).astype(np.intc)
    return (dmin,nequi,shifts)
def calc_frange(poscar,sposcar,n,dmin):
    """
    Return the maximum distance between n-th neighbors in the structure.

    For each atom the unique neighbour distances (shells) are collected
    and the cutoff is placed halfway between the n-th and (n+1)-th
    shells; the maximum over atoms is returned. When the supercell is
    too small to contain n+1 shells, falls back to 1.1*max(distance)
    and emits a warning once.
    """
    natoms=len(poscar["types"])
    tonth=[]
    warned=False
    # range() replaces the Py2-only xrange() so this also runs under Py3.
    for i in range(natoms):
        ds=dmin[i,:].tolist()
        ds.sort()
        u=[]
        # Deduplicate shell distances up to floating-point tolerance.
        for j in ds:
            for k in u:
                if np.allclose(k,j):
                    break
            else:
                u.append(j)
        try:
            tonth.append(.5*(u[n]+u[n+1]))
        except IndexError:
            if not warned:
                sys.stderr.write(
                    "Warning: supercell too small to find n-th neighbours\n")
                warned=True
            tonth.append(1.1*max(u))
    return max(tonth)
def move_two_atoms(poscar,iat,icoord,ih,jat,jcoord,jh):
    """
    Return a copy of poscar with atom iat displaced by ih nm along
    its icoord-th Cartesian coordinate and atom jat displaced by
    jh nm along its jcoord-th Cartesian coordinate.
    """
    nruter=copy.deepcopy(poscar)
    # Each Cartesian displacement is converted to fractional coordinates
    # by solving lattvec @ frac = disp, then added to the atom's position.
    for atom,coord,amount in ((iat,icoord,ih),(jat,jcoord,jh)):
        cart=np.zeros(3)
        cart[coord]=amount
        nruter["positions"][:,atom]+=scipy.linalg.solve(nruter["lattvec"],
                                                        cart)
    return nruter
def write_ifcs(phifull,poscar,sposcar,dmin,nequi,shifts,frange,filename):
    """
    Write out the full anharmonic interatomic force constant matrix,
    taking the force cutoff into account.

    Triplets (i, j, k) where either the i-j or the i-k minimum-image
    distance exceeds frange are dropped. The block count is only known
    at the end, so the body is buffered in a StringIO and prepended
    with the count when the file is finally written.
    """
    natoms=len(poscar["types"])
    ntot=len(sposcar["types"])
    shifts27=list(itertools.product(xrange(-1,2),
                                    xrange(-1,2),
                                    xrange(-1,2)))
    frange2=frange*frange
    nblocks=0
    f=StringIO.StringIO()
    for ii,jj in itertools.product(xrange(natoms),
                                   xrange(ntot)):
        if dmin[ii,jj]>=frange:
            continue
        jatom=jj%natoms
        # Candidate periodic images realising the minimum i-j distance.
        shiftsij=[shifts27[i] for i in shifts[ii,jj,:nequi[ii,jj]]]
        for kk in xrange(ntot):
            if dmin[ii,kk]>=frange:
                continue
            katom=kk%natoms
            shiftsik=[shifts27[i] for i in shifts[ii,kk,:nequi[ii,kk]]]
            # Pick the image pair that minimises the j-k distance too.
            d2min=np.inf
            for shift2 in shiftsij:
                carj=np.dot(sposcar["lattvec"],shift2+sposcar["positions"][:,jj])
                for shift3 in shiftsik:
                    cark=np.dot(sposcar["lattvec"],shift3+sposcar["positions"][:,kk])
                    d2=((carj-cark)**2).sum()
                    if d2<d2min:
                        best2=shift2
                        best3=shift3
                        d2min=d2
            if d2min>=frange2:
                continue
            nblocks+=1
            # Rj/Rk: Cartesian vectors from the unit-cell atom to its image.
            Rj=np.dot(sposcar["lattvec"],
                      best2+sposcar["positions"][:,jj]-sposcar["positions"][:,jatom])
            Rk=np.dot(sposcar["lattvec"],
                      best3+sposcar["positions"][:,kk]-sposcar["positions"][:,katom])
            f.write("\n")
            f.write("{:>5}\n".format(nblocks))
            # Factor 10: nm -> Angstrom for the output file.
            f.write("{0[0]:>15.10e} {0[1]:>15.10e} {0[2]:>15.10e}\n".
                    format(list(10.*Rj)))
            f.write("{0[0]:>15.10e} {0[1]:>15.10e} {0[2]:>15.10e}\n".
                    format(list(10.*Rk)))
            f.write("{:>6d} {:>6d} {:>6d}\n".format(ii+1,jatom+1,katom+1))
            for ll,mm,nn in itertools.product(xrange(3),
                                              xrange(3),
                                              xrange(3)):
                f.write("{:>2d} {:>2d} {:>2d} {:>20.10e}\n".
                        format(ll+1,mm+1,nn+1,phifull[ll,mm,nn,ii,jj,kk]))
    ffinal=open(filename,"w")
    ffinal.write("{:>5}\n".format(nblocks))
    ffinal.write(f.getvalue())
    f.close()
    ffinal.close()
|
from common import base
from time import sleep
import allure
class LoginPage(base.Base):
    """Page object for the app's splash/login screens (locators + actions)."""
    # XPath locators. The literal Chinese text/IDs are what the app under
    # test renders and must stay as-is.
    cancel_update_hint = '//*[contains(@text, "取消")]'
    banner_indicators = '//*[@resource-id="com.stage.mpsy.stg:id/layoutIndicators"]'
    AE_logo = '//*[@resource-id="com.stage.mpsy.stg:id/imgLogo"]'
    login_form = '//*[@content-desc="登录"]'
    register_form = '//*[@content-desc="注册"]'
    user = '//*[@resource-id="com.stage.mpsy.stg:id/iptEdtAccount"]'
    pwd = '//*[@resource-id="com.stage.mpsy.stg:id/iptEdtPassword"]'
    remember_me = '//*[@resource-id="com.stage.mpsy.stg:id/chkSaveAccount"]'
    forgot_pwd = '//*[@resource-id="com.stage.mpsy.stg:id/txtForgotPassword"]'
    login_button = '//*[@resource-id="com.stage.mpsy.stg:id/btnLogin"]'
    go_login_page_button = "//*[@resource-id='com.stage.mpsy.stg:id/txtExperienceNow']"
    go_browse = '//*[@resource-id="com.stage.mpsy.stg:id/txtGoBrowse"]'
    def login(self, account, pwd):
        """Full login flow: dismiss update hint, skip ads, fill form, submit."""
        self.cancel_the_hint_with_new_version()
        self.skip_ad_page()
        self.input_username(account)
        self.input_pwd(pwd)
        self.click_login_button()
    @allure.step('輸入帳號')
    def input_username(self, account):
        """Type the account name into the username field."""
        self.find_element(self.user).send_keys(account)
    @allure.step('輸入密碼')
    def input_pwd(self, pwd):
        """Type the password into the password field."""
        self.find_element(self.pwd).send_keys(pwd)
    @allure.step('檢查錯誤訊息')
    def check_error_msg(self, err_msg):
        """Assert that an element containing *err_msg* text is present."""
        assert self.find_element(f'//*[contains(@text, "{err_msg}")]') is not None
    def close_keyboard(self):
        """Hide the on-screen keyboard."""
        self.driver.hide_keyboard()
    @allure.step('點擊記住我')
    def click_remember_me(self):
        """Toggle the 'remember me' checkbox."""
        # BUG FIX: this referenced self.remeber_me (typo); the locator
        # defined above is remember_me, so the call always raised
        # AttributeError.
        self.find_element(self.remember_me).click()
    @allure.step('點擊忘記密碼')
    def click_forgot_pwd(self):
        """Open the forgot-password flow."""
        self.find_element(self.forgot_pwd).click()
    @allure.step('點擊登入, 進入首頁')
    def click_login_button(self):
        """Submit the login form."""
        self.find_element(self.login_button).click()
    @allure.step('若有新版本提示, 取消他')
    def cancel_the_hint_with_new_version(self):
        """Dismiss the new-version dialog if it is showing."""
        if self.find_element(self.cancel_update_hint):
            self.find_element(self.cancel_update_hint).click()
    @allure.step('跳過滑動廣告, 進入登入頁面')
    def skip_ad_page(self):
        """Swipe through the ad banners and open the login page."""
        assert self.find_element(self.banner_indicators) is not False
        [self.slide(direction='swipe left') for i in range(4)]
        self.find_element(self.go_login_page_button).click()
    @allure.step('點擊註冊表單')
    def click_register_form(self):
        """Switch to the registration form."""
        self.find_element(self.register_form).click()
    @allure.step('點擊登入表單')
    def click_login_form(self):
        """Switch to the login form."""
        self.find_element(self.login_form).click()
    @allure.step('點擊先去逛逛, 進入首頁')
    def click_go_browse(self):
        """Skip login and browse as a guest."""
        self.find_element(self.go_browse).click()
|
import logging
import fmcapi
def test__staticroutes(fmc):
    """Smoke-test fmcapi.StaticRoutes against a registered device."""
    logging.info("Testing StaticRoutes class. Requires a registered device")
    static_routes = fmcapi.StaticRoutes(fmc=fmc)
    static_routes.device(device_name="device_name")
    logging.info("All StaticRoutes -- >")
    response = static_routes.get()
    logging.info(response)
    logging.info(f"Total items: {len(response['items'])}")
    del static_routes
    logging.info("Testing StaticRoutes class done.\n")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
last mod 8/21/19
useful functions involving rectangles
adapted from previous rectangle.py, molds.py, and segment.py
xyalw rectangle (or xy for short)
uv rectangle
bounded uv rectangle
"""
from math import atan2, hypot, cos, sin
import numpy as np
" convert uv format to xyalw format"
def uv2xy(rec):
    """Convert a uv rectangle (u,v,ulo,uhi,vlo,vhi) to xyalw format.

    Returns (x, y, angle, half_length, half_width): the rectangle's
    centre rotated back into world coordinates, its heading angle, and
    its half extents.
    """
    u,v,ulo,uhi,vlo,vhi = rec[:6]
    centre_u = (ulo + uhi) / 2.
    centre_v = (vlo + vhi) / 2.
    x = u * centre_u + v * centre_v
    y = v * centre_u - u * centre_v
    return (x, y, atan2(v, u), (uhi - ulo) / 2., (vhi - vlo) / 2.)
"""
set up rectangle s.t. vlo > 0 (and if possible ulo > 0)
means that ulo,vlo is the visible corner, and 3 corners are visible if ulo>0
"""
def standardize(rec):
    """Rotate/reflect a uv rectangle so that vlo > 0 (and ulo > 0 when
    possible).

    This makes (ulo, vlo) the visible corner; three corners are visible
    if ulo > 0. Raises ValueError when the rectangle covers the origin.
    """
    u,v,ulo,uhi,vlo,vhi = rec
    assert uhi > ulo and vhi > vlo
    # The three symmetry-equivalent reparameterisations of rec.
    quarter_cw  = (-v, u, -vhi, -vlo, ulo, uhi)
    quarter_ccw = (v, -u, vlo, vhi, -uhi, -ulo)
    half_turn   = (-u, -v, -uhi, -ulo, -vhi, -vlo)
    if vlo > 0:
        return rec if uhi > 0 else quarter_ccw
    if vhi < 0:
        return half_turn if ulo < 0 else quarter_cw
    if ulo > 0:
        return quarter_cw
    if uhi < 0:
        return quarter_ccw
    raise ValueError("rectangle covers origin")
" convert xyalw format to uv format "
def xy2uv(rec):
    """Convert an xyalw rectangle (x, y, angle, l, w) to standardized uv
    format (see standardize)."""
    x,y,a,l,w = rec
    u = cos(a)
    v = sin(a)
    # Project the centre onto the rectangle's own (u, v) axes.
    cu = x * u + y * v
    cv = x * v - y * u
    return standardize((u, v, cu - l, cu + l, cv - w, cv + w))
"""
transform uniform distribution over rectangle bounds to normal distribution
on xyalw rectangle format
angle's variance is set to 0
TODO check that you are dividing by 2 correctly
"""
def uvUniform2xyNormal(msmt):
u,v,maxulo,minuhi,maxvlo,minvhi,minulo,maxuhi,minvlo,maxvhi = msmt
meanuv = np.array((minulo+maxulo,minuhi+maxuhi,minvlo+maxvlo,minvhi+maxvhi))/2
varuv = np.array((maxulo-minulo,maxuhi-minuhi,maxvlo-minvlo,maxvhi-minvhi))
varuv = varuv**2 / 12.
uv2xyTM = np.array(((u,u,v,v),(v,v,-u,-u),(0,0,0,0),(-1,1,0,0),(0,0,-1,1)))/2
xy_mean = uv2xyTM.dot(meanuv)
xy_mean[2] = atan2(v,u)
xy_cov = (uv2xyTM * varuv).dot(uv2xyTM.T)
return xy_mean, xy_cov
"""
determines whether bounded uv rectangle could be a car based on size
if possible, returns tighter bounds fitting car shape
if not returns None
"""
# (length, width, height) bounds — presumably in meters (KITTI convention);
# the max length/width are padded by 20% before use below.
car_dims = ((2.95,4.9),(1.35,1.9),(1.25,2.)) # 99th percentile for kitti scored cars
car_dim_minlen = car_dims[0][0]
car_dim_maxlen = car_dims[0][1]*1.2
car_dim_minwid = car_dims[1][0]
car_dim_maxwid = car_dims[1][1]*1.2
def fitRecToMold(rec):
    """Try to fit car-sized bounds to a bounded uv rectangle.

    Returns (standardcar, tcar): the tightened rectangle assuming the
    heading is along (u, v), and along (v, -u), respectively; either is
    None when the car-size constraints cannot be met.

    NOTE(review): this unpacks three values from _fitInterval, but every
    return statement in _fitInterval below yields a 2-tuple, so calling
    this as written raises. The helper is documented as abandoned —
    confirm which version of _fitInterval this expects.
    """
    u,v,maxulo,minuhi,maxvlo,minvhi,minulo,maxuhi,minvlo,maxvhi = rec
    # car where u,v is heading
    erru, ulo, uhi = _fitInterval(minulo, maxulo, minuhi, maxuhi,
                                  car_dim_minlen, car_dim_maxlen)
    errv, vlo, vhi = _fitInterval(minvlo, maxvlo, minvhi, maxvhi,
                                  car_dim_minwid, car_dim_maxwid)
    if erru < 0 and errv < 0:
        standardcar = (u,v,ulo,uhi,vlo,vhi)
    else:
        standardcar = None
    # car where v,-u is heading
    erru, ulo, uhi = _fitInterval(minulo, maxulo, minuhi, maxuhi,
                                  car_dim_minwid, car_dim_maxwid)
    errv, vlo, vhi = _fitInterval(minvlo, maxvlo, minvhi, maxvhi,
                                  car_dim_minlen, car_dim_maxlen)
    if erru < 0 and errv < 0:
        tcar = (u,v,ulo,uhi,vlo,vhi)
    else:
        tcar = None
    return standardcar, tcar
"""
a crazy function that takes rectangle bounds on lo and hi parameters,
and length bounds (hi-lo), and finds the polygon of possible lo-hi values
return polygon (lo,hi) corners in cc order
ultimately abandoned -- getting covariance of poygon would be a huge pain
"""
def _fitInterval(minlo,maxlo,minhi,maxhi, minlen,maxlen):
    # helper function bounds rectangle in one direction (interval)
    #
    # Given bounds on the interval's lo and hi endpoints plus bounds on
    # its length (hi - lo), clips the (lo, hi) polygon accordingly.
    # Documented above as abandoned.
    #
    # NOTE(review): the early returns yield (crop, (lo, hi)) — a 2-tuple —
    # while the final return yields (0., corners); callers in
    # fitRecToMold unpack THREE values, so this cannot currently be used.
    # NOTE(review): lowercorners/uppercorners are tuples while
    # currentcorners is a list; `lowercorners + currentcorners[...]`
    # would raise TypeError when the tuple branches are taken.
    assert maxhi-minlo > minhi-maxlo and maxlen > minlen
    # corners sorted by length
    if minhi-minlo > maxhi-maxlo:
        currentcorners = [(minhi,maxlo),(maxhi,maxlo),(minhi,minlo),(maxhi,minlo)]
    else:
        currentcorners = [(minhi,maxlo),(minhi,minlo),(maxhi,maxlo),(maxhi,minlo)]
    # Minimum-length constraint: clip away corners shorter than minlen.
    if minlen > maxhi-minlo:
        crop = minlen - (maxhi-minlo)
        return crop, (minlo-crop/2,maxhi+crop/2)
    elif minlen > maxhi-maxlo and minlen > minhi-minlo:
        locrop = 3
        lowercorners = ((minlo+minlen,minlo), (maxhi,maxhi-minlen))
    elif minlen > maxhi-maxlo:
        locrop = 2
        lowercorners = ((minlo+minlen,minlo), (maxlo+minlen,maxlo))
    elif minlen > minhi-minlo:
        locrop = 2
        lowercorners = ((minhi,minhi-minlen), (maxhi,maxhi-minlen))
    elif minlen > minhi-maxlo:
        locrop = 1
        lowercorners = ((minhi,minhi-minlen), (maxlo+minlen,maxlo))
    else:
        locrop = 0
        lowercorners = []
    # Maximum-length constraint: clip away corners longer than maxlen.
    if maxlen < minhi-maxlo:
        crop = minhi-maxlo - maxlen
        return crop, (maxlo+crop/2,minhi-crop/2)
    elif maxlen < maxhi-maxlo and maxlen < minhi-minlo:
        hicrop = 1
        uppercorners = ((maxlo+maxlen,maxlo), (minhi,minhi-maxlen))
    elif maxlen < maxhi-maxlo:
        hicrop = 2
        uppercorners = ((maxhi,maxhi-maxlen), (minhi,minhi-maxlen))
    elif maxlen < minhi-minlo:
        hicrop = 2
        uppercorners = ((maxlo+maxlen,maxlo), (minlo+maxlen,minlo))
    elif maxlen < minhi-maxlo:
        hicrop = 3
        uppercorners = ((maxhi,maxhi-maxlen), (minlo+maxlen,minlo))
    else:
        hicrop = 4
        uppercorners = []
    corners = lowercorners + currentcorners[locrop:hicrop] + uppercorners
    corners = np.array([(lo,hi) for hi,lo in corners])
    # sort again
    return 0., corners
"""
converting lo-hi bounds to normal dist first, then soft update with size bounds
"""
def uvBound2xyNormal(minlo,maxlo,minhi,maxhi, minlen,maxlen):
varlo = (maxlo-minlo)**2 / 12
varhi = (maxhi-minhi)**2 / 12
meanmid = (minlo+maxlo+minhi+maxhi)/4
varmid = varlo/4+varhi/4
meanlen = (minhi+maxhi-minlo-maxlo)/4
varlen = varhi/4+varlo/4
cov = varhi/4-varlo/4
meanmsmt = (minlen+maxlen)/2
varmsmt = (maxlen-minlen)**2 / 12
prec = 1./(varlen+varmsmt)
postmeanmid = meanmid + cov*prec*(meanmsmt - meanlen)
postmeanlen = meanlen + varlen*prec*(meanmsmt - meanlen)
postvarlen = varlen * (1-varlen*prec)
postcov = cov * (1-varlen*prec)
postvarmid = varmid - cov*cov*prec
return np.array((postmeanmid, postmeanlen)), np.array(((postvarmid,postcov),
(postcov,postvarlen)))
def _eigs2x2(mtx):
if abs(mtx[0,1]) < 1e-10:
return mtx[[0,1],[0,1]], np.eye(2)
vardiff = (mtx[0,0]-mtx[1,1])/2
varmean = (mtx[0,0]+mtx[1,1])/2
dterm = hypot(vardiff, mtx[0,1])
eigs = np.array((varmean + dterm, varmean - dterm))
vecs = np.array(((vardiff + dterm, mtx[0,1]),(vardiff - dterm, mtx[0,1])))
vecs /= np.hypot(vecs[:,0], vecs[:,1])[:,None]
assert all(eigs > 0)
if eigs[1] > eigs[0]:
vecs = vecs[::-1].copy()
eigs = eigs[::-1].copy()
return eigs, vecs
if __name__ == '__main__':
    # Visual sanity check: draw the raw lo/hi bound region (red), the length
    # constraint band (blue), and the fused posterior box (green).
    #from matplotlib.axes import add_line
    import matplotlib.pyplot as plt
    # Alternative test fixture, kept for quick switching:
    # minlo, maxlo = 1., 5.
    # minhi, maxhi = 5., 7.
    # minlen, maxlen = .5, 2.5
    minlo, maxlo = 3., 5.
    minhi, maxhi = 5., 7.
    minlen, maxlen = 1.5, 3.5
    # Prior midpoint and half-length of the bounded interval.
    center = (minhi+minlo+maxhi+maxlo)/4
    meanlen = (maxhi+minhi-maxlo-minlo)/4
    fig, ax = plt.subplots()
    # Red: the lo/hi bound rectangle, rotated 45 deg into (mid, len) space.
    rec = plt.Rectangle(((maxlo+minhi)/2,(minhi-maxlo)/2), (maxhi-minhi)/2**.5,
                        (maxlo-minlo)/2**.5, angle=45., fill=False, edgecolor='r')
    ax.add_patch(rec)
    #plt.show()
    # Blue: the length-measurement band (wide in mid, bounded in len).
    rec = plt.Rectangle((center-5, minlen), 10, maxlen-minlen, angle=0.,
                        fill=False, edgecolor='b')
    ax.add_patch(rec)
    # Green: posterior box from the fused Gaussian, axes along eigenvectors.
    pmean, pcov = uvBound2xyNormal(minlo,maxlo,minhi,maxhi,minlen,maxlen)
    pcoveigs, pcovvecs = _eigs2x2(pcov)
    # NOTE(review): the angle is computed BEFORE the sign flips below, so a
    # flipped eigenvector could leave angle 180 deg off — confirm intended.
    angle = np.arctan2(pcovvecs[0,1],pcovvecs[0,0]) * 180/3.14159
    if pcovvecs[0,0] < 0: pcovvecs[0] *= -1
    if pcovvecs[1,1] < 0: pcovvecs[1] *= -1
    # Box side lengths: a uniform with variance v spans sqrt(12*v).
    pcc = (pcoveigs*12)**.5
    lowerleftx = pmean[0]-pcovvecs[0,0]*pcc[0]/2-pcovvecs[1,0]*pcc[1]/2
    lowerlefty = pmean[1]-pcovvecs[0,1]*pcc[0]/2-pcovvecs[1,1]*pcc[1]/2
    rec = plt.Rectangle((lowerleftx, lowerlefty), pcc[0], pcc[1],
                        angle=angle, fill=False, edgecolor='g')
    ax.add_patch(rec)
    plt.axis('equal')
    #plt.show()
from django.contrib import admin
from .models import SendEmail
# Register your models here.
# Expose SendEmail in the Django admin with the default ModelAdmin options.
admin.site.register(SendEmail)
|
class Calc:
    """A minimal accumulator ("calculator") example class.

    Fields are variables stored on the class/instance; methods are functions
    defined inside the class, whose first parameter ``self`` receives the
    current object.
    """

    # Class-level default; shadowed by the instance attribute set in __init__.
    result = 0

    def __init__(self):
        """Constructor: runs when an object is created from the class."""
        self.result = 0

    def adder(self, num):
        """Add ``num`` to this instance's running total and return it."""
        self.result += num
        return self.result
# Objects: variables created from the class; instantiation invokes
# Calc.__init__ on the fresh object.
calc1 = Calc()
calc2 = Calc()
calc3 = Calc()

# Each instance keeps its own independent running total.
for amount in (3, 5, 7):
    print(calc1.adder(amount))
print()
for amount in (2, 4, 6):
    print(calc2.adder(amount))
|
# Print every character of the phrase on its own line, lower-cased.
# (Renamed the loop variable: `string` shadowed the stdlib module name.)
phrase = "the world is not enough"
for ch in phrase:
    print(ch.lower())
import argparse
import json
import logging
import os
import pathlib
import time
import requests
# RIPE Atlas v2 measurements endpoint; the commented-out URL is a
# webhook.site address that was used for request debugging.
BASE_URL = "https://atlas.ripe.net/api/v2/measurements/" # "https://webhook.site/8d482d35-ee70-4d12-a4d2-1428fa32813d/"
# The API key is mandatory for every call below: abort at import time
# when the environment variable is missing.
API_KEY = os.getenv("RIPE_KEY")
if API_KEY is None:
    logging.error("No API key found at env variable 'RIPE_KEY'")
    exit(1)
def start_definition(definition):
    """POST a measurement definition to RIPE Atlas.

    Returns the decoded JSON reply on success; logs the error body and
    returns None on failure.
    """
    logging.info("Start definition")
    url = BASE_URL + "?key=" + API_KEY
    response = requests.post(url, json=definition)
    if not response.ok:
        logging.error(response.content)
        return None
    logging.info(" response okay")
    return response.json()
# Seconds to sleep between polling rounds while waiting for quota.
WAIT_TIME_SECONDS = 40
# Concurrent-measurement budget we stay under on this API key.
MAX_MEASUREMENTS = 100
def get_measurements_running():
    """Fetch all measurements on this API key in status 0/1/2
    (Specified / Scheduled / Ongoing) and return the decoded JSON.

    Raises RuntimeError when the API call fails.
    """
    logging.debug("GET " + BASE_URL + "my?status=0,1,2")
    response = requests.get(BASE_URL + "my?key=%s&status=0,1,2" % API_KEY)
    if not response.ok:
        logging.error(response.content)
        raise RuntimeError
    payload = response.json()
    logging.debug(payload)
    return payload
def any_measurement_running():
    """Return True when at least one measurement is in status 0/1/2."""
    # Idiom: return the comparison directly instead of if/else True/False.
    return get_measurements_running()["count"] > 0
def get_measurement_status(measurement_id, update=False):
    """Return the measurement's status JSON, or None on any non-200 reply.

    When ``update`` is True, a PATCH is sent first to ask RIPE Atlas to
    refresh the measurement status; a failed PATCH also yields None.
    """
    api_url = f'{BASE_URL}{str(measurement_id).strip()}'
    if update:
        patch_response = requests.patch(f'{api_url}?key={API_KEY}')
        if patch_response.status_code != 200:
            return None
    response = requests.get(api_url)
    return response.json() if response.status_code == 200 else None
def measurement_not_running(measurement_id):
    """Return True when the measurement has left the active states.

    Status ids: 0 Specified, 1 Scheduled, 2 Ongoing are active;
    4 Stopped, 5 Forced to stop, 6 No suitable probes, 7 Failed,
    8 Archived are terminal. Falls through to None (falsy) when the
    status request itself fails — preserved from the original.
    """
    response = requests.get(BASE_URL + str(measurement_id))
    if response.ok:
        # Idiom: return the membership test directly instead of
        # if/else True/False.
        return response.json()["status"]["id"] not in (0, 1, 2)
def retrieve_measurement(measurement_id):
    """Download the results list for one measurement; None on HTTP error."""
    logging.debug("GET " + BASE_URL + str(measurement_id) + "/results/")
    response = requests.get(BASE_URL + str(measurement_id) + "/results/")
    if not response.ok:
        logging.error(f"{measurement_id} - HTTP Error {response.status_code}")
        logging.error(response.content)
        return None
    return response.json()
def stop_measurement(measurement_id):
    """Ask RIPE Atlas to stop a measurement via DELETE.

    Failures are logged as warnings only — some measurements simply
    cannot be deleted.
    """
    logging.debug("DELETE " + BASE_URL + str(measurement_id))
    response = requests.delete(BASE_URL + str(measurement_id) + "?key=%s" % API_KEY)
    if not response.ok:
        logging.warning(f"{measurement_id} measurement delete error, maybe cannot be deleted")
        logging.warning(response.content)
        return
    logging.debug("measurement %d deleted", measurement_id)
    logging.debug(response.content)
def update_measurement(measurement_id):
    """Do this because RIPE Atlas sometimes does not update the measurement status until PATCH"""
    logging.debug("PATCH " + BASE_URL + str(measurement_id))
    url = BASE_URL + str(measurement_id) + "?key=%s" % API_KEY
    response = requests.patch(url, json={"is_public": True})
    if response.ok:
        logging.debug(f"Measurement {measurement_id} patched")
        return
    logging.error(f'Could not PATCH existing measurement:')
    logging.error(response.content)
def update_downloaded_finished(measurement_responses):
    """Promote downloaded measurements that have stopped to 'finished'.

    For each id in the 'downloaded' helper list: if the measurement has
    left the active states, move it to 'finished'; otherwise send DELETE
    again and PATCH it (RIPE does not keep the status continuously
    updated). Mutates ``measurement_responses`` in place.
    """
    finished = measurement_responses["finished"]
    for m_id in measurement_responses["downloaded"]:
        if measurement_not_running(m_id):
            finished.append(m_id)
        else:
            stop_measurement(m_id)
            update_measurement(m_id)
    # Drop everything that just moved over to 'finished'.
    measurement_responses["downloaded"] = [
        m for m in measurement_responses["downloaded"] if m not in finished
    ]
def download_everything(base_dir, measurement_responses):
    """
    Will try to download all running measurements exactly one time.
    This method will NOT retry to download, call method multiple times with some delay.
    Will do some housekeeping to keep track of running, downloaded and finished measurements.

    base_dir: directory under which one sub-directory per case is created.
    measurement_responses: dict mapping case name -> list of measurement
        ids, plus the helper keys "downloaded" and "finished". Mutated in
        place: finished ids leave the per-case lists.
    """
    base_path = pathlib.Path(base_dir)
    downloaded_files = 0
    # Check all measurements that have already been downloaded
    update_downloaded_finished(measurement_responses)
    for case, m_id_list in measurement_responses.items():
        # ignore helper categories
        if case in ["downloaded", "finished"]:
            continue
        # create our case directory from the base path
        case_dir = base_path.joinpath(case)
        case_dir.mkdir(parents=True, exist_ok=True)
        for m_id in m_id_list:
            # Replaced: fn = base_dir + "/" + case + "/" + str(m_id) + ".json"
            # Build filepath from case path
            file_name = str(m_id).strip()
            fn = case_dir.joinpath(f'{file_name}.json')
            # Grab the results for this measurement
            m_json = retrieve_measurement(m_id)
            # check if we actually got a result
            if m_json is None:
                logging.warning(f'Could not download measurement result {m_id}')
                continue
            elif isinstance(m_json, list) and len(m_json) == 0:
                # Removing logging for empty result, this just spams the console
                # apparently this case happens more often than thought
                # logging.warning(f'Got empty result for {m_id}')
                continue
            # If we already have downloaded some results for a measurement,
            # open the existing file and compare the results
            # if there are more responses in the new file than in the old one, keep the measurement as active
            # i.e. do not add the measurement into the downloaded helper category
            # if however we have the same number of results, we add the measurement to the downloaded category
            # thus sending a stop measurement to ripe
            if fn.is_file():
                # Should not happen anymore except for case2 and case4
                # if case in ("case1", "case3"):
                #     logging.warning("... check measurement for %d ... exists" % m_id)
                #     measurement_responses["downloaded"].append(m_id)
                # else:
                logging.info(f'{m_id}: Updating existing result for {case}')
                with open(fn, "r+") as fp:
                    res_old = json.load(fp)
                    logging.debug(f'{m_id}: res old - new: {len(res_old)} - {len(m_json)}')
                    if isinstance(m_json, list) and len(m_json) > len(res_old):
                        # New result is larger than old one, keep the measurement as active
                        # Do NOT mark the measurement as downloaded or something
                        # Rewrite the file in place with the larger result.
                        fp.seek(0)
                        json.dump(m_json, fp, indent=2)
                        fp.truncate()
                    elif isinstance(m_json, list) and len(m_json) == len(res_old):
                        # Result did not change, add to downloaded list
                        stop_measurement(m_id)
                        measurement_responses["downloaded"].append(m_id)
            else:
                # Found result for measurement, download it
                logging.debug("write result %s %d" % (case, m_id))
                with open(fn, "w") as f:
                    json.dump(m_json, f, indent=2)
                downloaded_files += 1
                # logging.info(f'{m_id} ({case}) Stopping Measurement')
                # Quick Hack, I want case 2 and case4 to be checked more often
                # TODO If CAS OR DAS > 1
                # if case in ("case1", "case3"):
                #     stop_measurement(m_id)
                #     measurement_responses["downloaded"].append(m_id)
        # Update List of still running cases within each case
        measurement_responses[case] = [m for m in m_id_list if m not in measurement_responses["downloaded"]]
    total_responses = sum(map(len, measurement_responses.values()))
    logging.info(f"Downloaded {downloaded_files} new files (from {total_responses} responses at this time)")
def wait_and_download(result_dir, measurement_responses, nr_measurement=0):
    """Block until there is quota to start ``nr_measurement`` measurements.

    While the running count plus the planned count exceeds MAX_MEASUREMENTS,
    sleep, download currently available results (which also stops completed
    measurements) and re-check. ``nr_measurement`` of 0 returns immediately.
    """
    if nr_measurement:
        # MM: get ALL running measurements on the given API key
        # this does also include non-involved measurements
        running = get_measurements_running()
        while running["count"] + nr_measurement > MAX_MEASUREMENTS:
            logging.info(
                f'Waiting {WAIT_TIME_SECONDS} seconds; {running["count"]} running and {nr_measurement} to start'
            )
            time.sleep(WAIT_TIME_SECONDS)
            # MM: Try to download all currently running measurements
            download_everything(result_dir, measurement_responses)
            running = get_measurements_running()
        logging.info(" ... finished waiting")
def kill_all_running_measurements():
    """
    Stop all RIPE Atlas measurements with status 0,1,2
    Since measurements sometimes do not stop immediately after sending the DELETE via API,
    repeat DELETE and update via PATCH inbetween
    """
    running = get_measurements_running()
    while running["count"] > 0:
        logging.info(f"Running measurements: {running['count']}")
        for m in running["results"]:
            logging.info(f"... stop {m['id']}")
            stop_measurement(m["id"])
        logging.info("Wait 5 seconds")
        time.sleep(5)
        for m in running["results"]:
            logging.info(f"... patch {m['id']}")
            update_measurement(m["id"])
        logging.info("Wait 10 seconds")
        time.sleep(10)
        # BUG FIX: this used to store get_measurements_running()["count"]
        # (an int), so the loop condition `running["count"]` raised
        # TypeError on the second iteration. Keep the full response dict.
        running = get_measurements_running()
    logging.info("No measurement running")
def main():
    """Run the sub-command given on the command line.

    check ... retrieve and print the number of running measurements
    kill  ... stop everything, repeating until no measurement is running
    """
    valid_commands = "check, kill"
    logging.basicConfig(format="%(message)s")
    logging.getLogger('').setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument("command", type=str, help=valid_commands)
    command = parser.parse_args().command
    if command == "check":
        logging.info("Check number of running processes")
        running = get_measurements_running()["count"]
        logging.info(f"{running} measurements running in Status (0,1,2)")
    elif command == "kill":
        logging.info("Killing all running measurements")
        kill_all_running_measurements()
    else:
        parser.error(f"Please provide a correct command ({valid_commands})")


if __name__ == '__main__':
    main()
|
def find_word(word): #word = word_dict
    """Prompt for a word once per dictionary entry and print its meaning.

    word: dict mapping English words to Korean translations; its size only
    bounds how many prompts are issued.
    """
    # Loop index is unused: the range just limits the number of lookups.
    for i in range(len(word)):
        search = input("찾는 단어 입력 : ")
        if search in word:
            print(search, " : ", word[search])
        else:
            print("없는 단어입니다.")
word_dict = {"apple":"사과", "banana":"바나나", "cat":"고양이"}
find_word(word_dict)# pass the dictionary to the lookup loop
|
from db import db
class NoteModel(db.Model):
    '''
    Model for creating notes
    '''
    # Surrogate primary key.
    id = db.Column(db.Integer,primary_key= True)
    # Creation timestamp stored as a short string (max 10 chars) —
    # presumably a date like "YYYY-MM-DD"; TODO confirm against callers.
    createdAt = db.Column(db.String(10),nullable = False)
    # Free-form note body, up to 300 characters.
    note = db.Column(db.String(300))
    # Owning user: foreign key into the 'users' table.
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
    user = db.relationship('UserModel')
    def __init__(self,user_id,createdAt,note):
        self.createdAt = createdAt
        self.note = note
        self.user_id = user_id
    def json(self):
        """Serialize the note to a plain dict for API responses."""
        return {"createdAt":self.createdAt,"note":self.note,"user_id":self.user_id,"notes_id":self.id}
    @classmethod
    def find_by_user(cls,id):
        """Return a query over all notes owned by the given user id."""
        return cls.query.filter_by(user_id = id)
    @classmethod
    def find_by_id(cls,id):
        """Return a query filtering on the note's primary key."""
        return cls.query.filter_by(id = id)
    def save_to_db(self):
        """Add this note to the current session and commit."""
        db.session.add(self)
        db.session.commit()
    def delete_from_db(self):
        """Delete this note and commit."""
        db.session.delete(self)
        db.session.commit()
#!/usr/bin/python
""" Test file of the Mirror twisted client.
"""
from twisted.internet import reactor
from mirror import MirrorClient
def on_mirror(tag, state):
    """Print a message when a tag is installed on / removed from the mirror.

    tag:   identifier of the tag reported by the mirror client.
    state: only the exact boolean True counts as installed ('On');
           anything else reads as removed ('Off').
    """
    # Strict identity check preserved from the original.
    state_text = 'On' if state is True else 'Off'
    # BUG FIX: the Python-2-only print statement is a syntax error under
    # Python 3; the parenthesized single-argument form behaves identically
    # on both interpreter versions.
    print('[RFID] [Mirror] {0} is now {1}'.format(tag, state_text))
if __name__ == '__main__':
    # pylint: disable=C0103
    # Wire the mirror client into the Twisted reactor: subscribe the print
    # callback, start the client once the reactor runs, then block forever.
    mirror = MirrorClient('/dev/mirror')
    mirror.subscribe(on_mirror)
    reactor.callWhenRunning(mirror.start)
    reactor.run()
|
import csv
import glob
import os
import re
import shutil
import sys
from copy import copy
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from mylib import general_method as gm
from mylib.psiquic_class import PQdata, PQdataList
def get_target_id(column_data: str, db_name: str):
    """Extract the identifier following ``db_name + ':'`` from a PSICQUIC
    column, stripped of any trailing "(...)" qualifier, "|" alternatives
    and surrounding double quotes.

    Returns False when the column does not mention ``db_name``.
    """
    if db_name in column_data:
        return (
            column_data.split(db_name + ":")[1]
            .split("(")[0]
            .split("|")[0]
            .replace('"', "")
        )
    # BUG FIX: the original had a bare `False` expression without `return`,
    # so the miss case silently returned None instead of False.
    return False
def has_bar_in_row(row):
    """True when any column of the row contains a '|' separator."""
    return any("|" in column for column in row)
def has_bar_in_row_list(row_list: list):
    """True when any row in the list still contains a '|' column."""
    return any(has_bar_in_row(row) for row in row_list)
def expansion_tsv_row(row_list: list):
    """Recursively expand rows whose columns contain '|' alternatives.

    For the first bar-bearing column of each row, the row is duplicated:
    one copy keeps the text before the first '|', the other keeps the rest
    (which may still contain bars and is handled by the recursion).
    """
    result_list = []
    for row in row_list:
        if has_bar_in_row(row):
            for i in range(len(row)):
                if "|" in row[i]:
                    # BUG FIX: the original used re.split("|", ...), where
                    # '|' is the (empty) regex alternation operator — it
                    # split between every character instead of on the
                    # literal bar, and the second copy dropped everything
                    # after the second alternative. str.split on the
                    # literal bar keeps the head in one copy and the full
                    # remainder in the other.
                    head, tail = row[i].split("|", 1)
                    row_a = copy(row)
                    row_a[i] = head
                    row_b = copy(row)
                    row_b[i] = tail
                    result_list.append(row_a)
                    result_list.append(row_b)
                    break
        else:
            result_list.append(row)
    if has_bar_in_row_list(result_list):
        result_list = expansion_tsv_row(result_list)
    return result_list
def list2tsv(data: list):
    """Join the columns with tabs, normalising each cell on the way.

    'psi-mi' cells keep only the id part of the quoted "MI:xxxx" token;
    other 'db:id(...)' cells keep only the bare id; plain cells pass
    through unchanged.
    """
    def _normalise(cell):
        # Reduce a PSICQUIC cell to its bare identifier.
        if "psi-mi" in cell:
            return cell.split('"')[1].split(":")[1]
        if ":" in cell:
            return cell.split(":")[1].split("(")[0]
        return cell

    return "\t".join(_normalise(cell) for cell in data)
def except_columns(row):
    """Blank out the columns excluded from comparison, in place.

    Indices 2, 3, 4, 5, 7 and 14 are cleared when present; the mutated
    row is also returned for convenience.
    """
    for index in (2, 3, 4, 5, 7, 14):
        if index < len(row):
            row[index] = ""
    return row
# Save the bar-expanded rows to files (one output file per input file).
def expansion_tsv(dirname):
    """Expand every data/<service>/*.tsv into expdata/<service>/.

    Each row is stripped of ignored columns, expanded on '|' alternatives,
    and written tab-joined, one expanded row per line. The expdata
    sub-directory for each service is recreated from scratch.
    """
    services_list = gm.list_serveice()
    in_dir = "data/"
    out_dir = "expdata/"
    for service in services_list:
        # NOTE(review): bare excepts here silence *all* errors from the
        # delete/create pair, not just "does not exist" — confirm intended.
        try:
            shutil.rmtree(dirname + out_dir + service)
        except:
            pass
        try:
            os.mkdir(dirname + out_dir + service)
        except:
            pass
        dir_list = glob.glob(dirname + in_dir + service + "/*.tsv", recursive=True)
        for i in range(len(dir_list)):
            print("... expantion tsv :", dir_list[i], "\t", i + 1, "/", len(dir_list))
            with open(dir_list[i]) as f1:
                reader = csv.reader(f1, delimiter="\t")
                # Truncate the output file before appending row batches.
                with open(
                    dirname + out_dir + service + "/" + dir_list[i].split("/")[-1],
                    mode="w",
                ) as f:
                    f.write("")
                for row_bef in reader:
                    row_bef = except_columns(row_bef)
                    row_list = expansion_tsv_row([row_bef])
                    # print("------------------------------\n")
                    # Re-opened in append mode for every input row.
                    with open(
                        dirname + out_dir + service + "/" + dir_list[i].split("/")[-1],
                        mode="a",
                    ) as f:
                        for row in row_list:
                            f.write("\t".join(row) + "\n")
# Script entry point: expand everything below the current directory.
if __name__ == "__main__":
    dirname = "./"
    expansion_tsv(dirname)
|
'''
https://www.hackerrank.com/contests/blackrock-codesprint/challenges/suggest-better-spending-rates
'''
def total_income_calc(p, r, S):
    """Total income for principal ``p``, growth rate ``r`` (percent) and
    per-year spending rates ``S`` (percent values).

    Year t spends S[t-1] percent of what remains after the earlier years,
    with the growth factor (1 + r/100) compounded t times.
    """
    total = 0
    for year, rate in enumerate(S, start=1):
        income = p * rate * ((1 + r / 100) ** year) / (100 ** year)
        # Scale by the percentage kept in each of the preceding years.
        for earlier_rate in S[:year - 1]:
            income = income * (100 - earlier_rate)
        total += income
    return total
def get_and_test_input():
    """Placeholder for reading and validating the HackerRank input.

    TODO: implement stdin parsing (p, r, S and the threshold).
    """
    pass
def spending_rates_combinations(S, threshold, adjust_budget=0):
    """Yield every rate vector within +-threshold of each S[i] whose total
    adjustment nets to zero (carried along as ``adjust_budget``).

    Vectors come out in the same order as nested -threshold..+threshold
    loops; branches whose leftover budget can no longer be spread legally
    over the remaining elements are pruned.
    """
    if len(S) == 1:
        # The last element must absorb whatever budget is outstanding.
        yield [S[0] + adjust_budget]
        return
    remaining = len(S) - 1
    for delta in range(-threshold, threshold + 1):
        # Prune: the average leftover adjustment would exceed the threshold.
        if abs(adjust_budget - delta) / remaining > threshold:
            continue
        head = S[0] + delta
        for tail in spending_rates_combinations(S[1:], threshold,
                                                adjust_budget - delta):
            yield [head] + tail
'''
print(list(spending_rates_combinations([37], 1, 0)))
print(list(spending_rates_combinations([42, 37], 1, 0)))
print(list(spending_rates_combinations([29, 42, 37, 100], 1, 0)))
print(list(spending_rates_combinations([29, 42, 37, 10], 3, 0)), )
'''
|
#!/usr/bin/env python3
"""Benchmark SortsRunner on growing input lists.

For each list size, regenerate list.txt via longList.py, run the Java
sorter, and append the elapsed wall-clock seconds to selection.txt.
"""
import os
import time

for size in range(5000, 10000, 10):
    # Regenerate the input file at the requested size.
    os.system("python3 longList.py " + str(size))
    # BUG FIX: os.system returns the child's exit status (almost always 0),
    # not its runtime, and the TIMEFORMAT export ran in a separate shell so
    # it had no effect. Measure wall-clock time around the run instead.
    start = time.perf_counter()
    os.system("java SortsRunner list.txt")
    elapsed = time.perf_counter() - start
    # BUG FIX: the file handle was never closed; use a context manager and
    # write one reading per line so the log is parseable.
    with open("selection.txt", "a+") as f:
        f.write(str(elapsed) + "\n")
    # Early exit kept from the original (only sizes 5000-5020 are run).
    if size == 5020:
        break
|
#questions for M7m7 and Karim
#question1 who makes the reset of the simulation
#question2 we need to normalize the observations
#question3 how we break?
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
import copy
import time
import collections as col
import sys
import random
from math import cos, sin, atan, pow
import numpy as np
import csv
import pickle
PI = 3.14159265359
class ddpglearner:
    """ROS node exposing a gym-like step/reset interface for DDPG training
    on the catvehicle simulator (laser + velocity in, cmd_vel out).

    NOTE(review): the slice arithmetic in laserCallback relies on integer
    division of `self.noOfsamples/lengthLas` — this is a Python 2 (ROS)
    script; under Python 3 those indices become floats. Confirm the target
    interpreter before porting.
    """
    def __init__(self):
        # Namespace of the simulated vehicle's topics.
        self.ns = rospy.get_param("~namespace", "/catvehicle")
        rospy.init_node('DDPGlearner', anonymous=True)
        rospy.Subscriber('{0}/front_laser_points'.format(self.ns), LaserScan, self.laserCallback)
        rospy.Subscriber('{0}/cmd_vel'.format(self.ns), Twist, self.velCallback)
        rospy.Subscriber('/reset_alert', Bool, self.resetCallback)
        rospy.Subscriber('{0}/vel'.format(self.ns), Twist, self.realVelCallback)
        self.pub_cmdvel = rospy.Publisher('{0}/cmd_vel'.format(self.ns), Twist, queue_size=50)
        self.pub_reset = rospy.Publisher('/reset_alert',Bool,queue_size=50)
        # 1 when an external reset alert has been received, else 0.
        self.resetter = 0
        # Last commanded linear/angular velocity (from cmd_vel echoes).
        self.vel = 0
        self.ang = 0
        # Last measured forward velocity; 100 marks "not yet received".
        self.realVelX = 100
        self.laserMsg = LaserScan()
        # Five averaged laser sectors, left to right.
        self.laserReading = [0,0,0,0,0]
        self.noOfsamples = 50
        self.msgTwist = Twist()
        self.msgBool =Bool()
        self.resetflag=False
        #code from Torcs
        self.initial_run = True
        self.stuck_step = 0
    #subscribers callbacks
    def laserCallback(self, lasermsg):
        """Average the scan into 5 equal angular sectors."""
        self.laserMsg = lasermsg
        self.noOfsamples = len(self.laserMsg.ranges)
        lengthLas = 5
        for r in range(lengthLas):
            self.laserReading[r] = sum(self.laserMsg.ranges[r*(self.noOfsamples/lengthLas):(r+1)*(self.noOfsamples/lengthLas)])/(self.noOfsamples/lengthLas)
    def velCallback(self, twistmsg):
        """Record the last commanded linear and angular velocity."""
        self.vel = twistmsg.linear.x
        self.ang = twistmsg.angular.z
    # velocity and laser readings as state
    def realVelCallback(self, twistmsg):
        """Record the measured forward velocity."""
        self.realVelX = twistmsg.linear.x
    def resetCallback(self, resetbool):
        """Latch the external reset alert flag."""
        if resetbool.data:
            self.resetter = 1
    def publish(self, action):
        """Publish the action: fixed forward speed, steered by action[0]."""
        #self.msgTwist.linear.x = max(state[0] + action[0], 0)
        self.msgTwist.linear.x = 25
        self.msgTwist.angular.z = action[0]/2
        self.pub_cmdvel.publish(self.msgTwist)
        # if self.resetflag:
        #     self.msgBool.data=True
        #     self.pub_reset.publish(self.msgBool)
        #     print("-------------------reset here------------------")
        #     self.resetflag=False
    # Calculate the reward
    def calcReward(self):
        """Reward: keep the center sector far, stay laterally centered,
        and penalise near-standstill (slowMove)."""
        c1 = 0.2
        c2 = 0.7
        c3 = -0.5
        c4 = -5
        #return (c1*self.vel + c2*(self.laserReading[1]-20) + c3*abs(self.laserReading[0]-self.laserReading[2]) + c4*self.resetter)
        if self.realVelX < 0.5:
            slowMove = 1
        else:
            slowMove = 0
        return (c2 * (self.laserReading[2] - 15) + c3 * abs(self.laserReading[0] - self.laserReading[4]) + c4 * slowMove)
    def resetStuff(self):
        """Stop the vehicle, clear the reset flag, and return a fresh
        observation after letting the simulation settle."""
        print("Reset")
        self.time_step = 0
        self.msgTwist.linear.x = 0
        self.msgTwist.angular.z = 0
        self.pub_cmdvel.publish(self.msgTwist)
        self.resetter = 0
        time.sleep(2)
        return self.make_observaton()
    def make_observaton(self):
        """Build the normalised named-tuple observation (focus, speedX,
        angle); divisors are hand-tuned scaling constants."""
        names = ['focus',
                 'speedX', 'angle',
                 ]
        Observation = col.namedtuple('Observaion', names)
        return Observation(focus=np.array(self.laserReading,dtype=np.float32)/30,
                           speedX=self.realVelX/8,
                           angle=self.ang/0.5)
    def step(self, u):
        """Gym-style step: apply action ``u``, return (obs, reward,
        done-ish resetter flag, info dict)."""
        # convert thisAction to the actual torcs actionstr
        #client = self.client
        # One-Step Dynamics Update #################################
        # Apply the Agent's action into torcs
        this_action = self.publish(u)
        # Get the current full-observation from torcs
        #obs =
        # Make an obsevation from a raw observation vector from TORCS
        self.observation = self.make_observaton()
        #print(self.laserReading,"len = ",self.noOfsamples)
        # Reward setting Here #######################################
        reward = self.calcReward()
        # if self.time_step>30:
        #     self.resetflag=True
        self.time_step += 1
        return self.observation, reward, self.resetter, {}
#!/usr/bin/env python
"""
Program: ssat_vocab.py
Author: C. McKnight
Description:
This program takes a JSON formatted data file and
generates a GIFT formatted file for import into the
Moodle 2 learning system. The contents of the GIFT
file are determined by the desired type of quiz.
Currently the program supports the following types
of quizzes:
Word Match - A quiz that requires the student
to select the definition that
matches the word.
Fill In The Blank - A quiz that requires the student
to select the correct word to
complete the sentence.
"""
#####################################################################
# imports
#####################################################################
import json, argparse, sys
from giftgens import *
from pprint import pprint
#####################################################################
# methods
#####################################################################
#====================================================================
def parse_args():
    """Parse the command line arguments.

    Returns:
        dict with 'quiztype' ('wordmatch' or 'fillblank', defaulting to
        'wordmatch') and 'infile' (data file name, or None when omitted).
    """
    parser = argparse.ArgumentParser(
        description='Generate GIFT format Moodle files.')
    parser.add_argument('-qt', '--quiztype', nargs='?', default='wordmatch',
                        help='Type of quiz: [ wordmatch | fillblank ] Default is wordmatch')
    parser.add_argument('infile', metavar='filename', nargs='?')
    return vars(parser.parse_args())
#====================================================================
def load_file(filename):
    """Unserializes JSON data into a dictionary

    Retrieves the serialized data structure into a dictionary.
    Args:
       filename - Name of the input file
    Returns:
       A dict mapping the data to its original structure. For example:
       {
            'Moodle GIFT Header' : {
                  'course' : 'SSAT Vocabulary',
                  'category' : 'Vocabulary Builder',
                  'lesson' : 'Lesson 5A' },
            'wordbank' : {
                  'brash' : {
                        'definition' : "bold; hasty or lacking in sensitivity',
                        'sentence' : 'The commander made a (bold) {} maneuver to \
 keep his opponent off balance.'
                  },
                  'benevolent' : {
                        'definition' : "kind, good, caring',
                        'sentence' : 'Although everyone originally though the new \
 teacher was too strict, they began to see him as more of \
 a (caring) {} person over time.'
                  }
            }
       }
    Note:
       The curly braces embedded in the 'sentence' value are used to
       mark the point where the list of words should appear.
    """
    # Guard clause: no filename at all.
    if filename is None:
        print("You must provide a valid file name!")
        exit()
    try:
        # The context manager closes the file; the explicit close() that
        # used to follow json.load was redundant.
        with open(filename) as json_data:
            data = json.load(json_data)
    except IOError:
        # BUG FIX: the original bare `except` also swallowed JSON syntax
        # errors and misreported them as a missing file.
        print("**Error** Can't find " + filename + "!")
        exit()
    except ValueError:
        # json raises ValueError (JSONDecodeError) on malformed input.
        print("**Error** " + filename + " is not valid JSON!")
        exit()
    return data
#====================================================================
def main():
    """Entry point: parse args, load the data bank, generate the quiz."""
    # get the command line arguments
    myopts = parse_args()
    # import the data file
    databank = load_file(myopts['infile'])
    # call the requested quiz generator
    # BUG FIX: the Python-2-only print statement is a syntax error on
    # Python 3; the parenthesized form behaves the same on both.
    print("Generating " + myopts['quiztype'])
    generate_quiz(databank, myopts['quiztype'])
#====================================================================
#####################################################################
# main program logic
#####################################################################
# Script entry point.
if __name__=='__main__':
    main()
import numpy
from storage import storage
from uneven_sine_gen import uneven_sine_gen
from double_ind_gen import double_ind_gen
class double_uneven_gen(uneven_sine_gen,double_ind_gen):
    # Mixin of the unevenly-sampled sine generator and the double-signal
    # generator; only the human-readable display name is defined here.
    name = 'unevenly sampled, double signal (two sine waves), uneven noise'
from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse,HttpResponseRedirect
from .models import Employee
from django.utils import timezone
from django.contrib import messages
from random import randint
def index(request):
    """Render the registration landing page (no context)."""
    return render(request,'register/index.html',None)
def check_value(num,val):
    # Returns True if Number not found in Db
    """Return True when ``val`` is NOT already present in Employee.

    num selects the column to check: 1 -> mobile, 2 -> uId (Aadhaar),
    anything else -> empId.
    """
    try:
        if num == 1:
            Employee.objects.get(mobile=val)
        elif num == 2:
            Employee.objects.get(uId=val)
        else:
            Employee.objects.get(empId=val)
        return False
    except Employee.DoesNotExist:
        # BUG FIX: the bare `except` also returned True ("available") on
        # database errors or MultipleObjectsReturned, which could let a
        # duplicate registration through. Only "not found" means available.
        return True
def getId(name,city):
    """Generate a unique employee id.

    Format: first 3 letters of the name + 4 random digits + first 3
    letters of the city; regenerated until unused in the database.
    """
    def _candidate():
        return name[:3] + str(randint(1000, 9999)) + city[:3]

    emp_id = _candidate()
    # check_value(3, ...) is False while the candidate already exists.
    while check_value(3, emp_id) is False:
        emp_id = _candidate()
    return emp_id
def submitDetails(request):
    """Handle the registration POST.

    Validates that mobile number and Aadhaar are unused, creates the
    Employee with a generated id and renders the success page; otherwise
    re-renders the form with an error message.
    """
    if request.method != 'POST':
        return render(request, 'register/index.html',
                      {"message": "'Internal Error! Please Try Again'"})
    name = request.POST.get('fname')
    number = request.POST.get('mobile')
    city = request.POST.get('city')
    state = request.POST.get('state')
    desig = request.POST.get('designation')
    aadhar = request.POST.get('uId')
    email = request.POST.get('mail')
    pwd = request.POST.get('pwd')
    time = timezone.now()
    # Both the mobile number and the Aadhaar id must be unused.
    if check_value(1, number) and check_value(2, aadhar):
        empid = getId(name, city)
        employee = Employee(fname=name, mobile=number, city=city,
                            state=state, designation=desig, time=time,
                            uId=aadhar, email=email, pwd=pwd, empId=empid)
        employee.save()
        context = {"Name": name, "message": "Registration Successful!",
                   "empid": empid}
        return render(request, 'register/registered.html', context)
    return render(request, 'register/index.html',
                  {"message": "User Already Registered"})
|
# -*- coding: UTF-8 -*-
import requests
from xml.dom import minidom
# SOAP request body for the Correios SIGEP buscaCliente operation.
xml_data = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:cli="http://cliente.bean.master.sigep.bsb.correios.com.br/">
<soapenv:Header/>
<soapenv:Body>
<cli:buscaCliente>
<idContrato>9912331062</idContrato>
<idCartaoPostagem>0067511082</idCartaoPostagem>
<usuario>14172907</usuario>
<senha>jq5y6</senha>
</cli:buscaCliente>
</soapenv:Body>
</soapenv:Envelope>"""
headers = {'Content-Type': 'application/xml'}
# NOTE(review): verify=False disables TLS certificate checking, and the
# credentials are embedded in the payload — consider moving both to
# configuration.
result = requests.post('https://apps.correios.com.br/SigepMasterJPA/AtendeClienteService/AtendeCliente?wsdl',
                       data=xml_data, headers=headers, verify=False).text
# print result.encode('utf-8').strip()
d = result.strip()
# BUG FIX: minidom.parse expects a filename or file object and fails when
# handed the response text; parseString is the in-memory equivalent.
dom = minidom.parseString(d)
dados = dom.getElementsByTagName('servicos')
for c in dados:
    # Placeholder: <servicos> elements are located but not yet processed.
    pass
|
'''
Created on Jul 30, 2012
@author: Michele Sama (m.sama@puzzledev.com)
'''
from django.test.testcases import TestCase
from django.core.management import call_command
from jom.factory import JomFactory
from jomtest.foo.models import SimpleModel
from jomtest.foo.joms import SimpleModelJomDescriptor,\
ModelWithAllFieldsJomDescriptor
from jom.fields import StringJomField, NumeralJomField, BooleanJomField,\
DateJomField, UrlJomField
from django.core.urlresolvers import reverse
import json
class ExportTestCase(TestCase):
    # Exercises the export_jom / clear_jom management commands.
    def setUp(self):
        call_command('export_jom')
    def tearDown(self):
        call_command('clear_jom')
    def testAllJomCreated(self):
        # TODO: assert on the exported artifacts; deliberately unimplemented.
        raise NotImplementedError()
class JomFactoryTestCase(TestCase):
    # Covers JomFactory registration and the various lookup accessors.
    def setUp(self):
        self.factory = JomFactory()
    def tearDown(self):
        self.factory = None
    def testRegisterJomDescriptor(self):
        # register() should hand back an instance of the descriptor class.
        descriptor = self.factory.register(SimpleModelJomDescriptor)
        self.assertTrue(isinstance(descriptor, SimpleModelJomDescriptor))
    def testGetForName(self):
        # Lookup by the model's class name.
        name = SimpleModel.__name__
        descriptor = self.factory.register(SimpleModelJomDescriptor)
        self.assertEqual(descriptor, self.factory.getForName(name))
    def testGetForModel(self):
        # Lookup by the model class itself.
        descriptor = self.factory.register(SimpleModelJomDescriptor)
        self.assertEqual(descriptor, self.factory.getForModel(SimpleModel))
    def testGetJomInstance(self):
        # Wrapping a model instance should carry the registered descriptor.
        instance = SimpleModel.objects.create(name = "foo")
        descriptor = self.factory.register(SimpleModelJomDescriptor)
        self.assertEqual(descriptor,
                self.factory.getJomInstance(instance).descriptor)
        instance.delete()
    def testGetJomClass(self):
        # Wrapping the model class should carry the registered descriptor.
        descriptor = self.factory.register(SimpleModelJomDescriptor)
        self.assertEqual(descriptor,
                self.factory.getJomClass(SimpleModel).descriptor)
class JomDescriptorTestCase(TestCase):
    # Verifies Django model fields map to the expected Jom field classes.
    def setUp(self):
        self.factory = JomFactory()
        self.descriptor = self.factory.register(
                ModelWithAllFieldsJomDescriptor)
    def tearDown(self):
        self.factory = None
        self.descriptor = None
    def testJomFieldsCreated(self):
        fields = self.descriptor.jom_fields
        # Text-like Django fields all map to StringJomField.
        self.assertEqual(StringJomField, fields['slug'])
        self.assertEqual(StringJomField, fields['text'])
        self.assertEqual(StringJomField, fields['char'])
        self.assertEqual(StringJomField, fields['email'])
        self.assertEqual(StringJomField, fields['url'])
        self.assertEqual(StringJomField, fields['comma_separated_integer'])
        # Numeric fields map to NumeralJomField.
        self.assertEqual(NumeralJomField, fields['integer'])
        self.assertEqual(NumeralJomField, fields['positive_integer'])
        self.assertEqual(NumeralJomField, fields['small_integer'])
        self.assertEqual(NumeralJomField, fields['small_positive_integer'])
        self.assertEqual(NumeralJomField, fields['big_integer'])
        self.assertEqual(NumeralJomField, fields['float'])
        self.assertEqual(BooleanJomField, fields['boolean'])
        # Date/time fields share DateJomField.
        self.assertEqual(DateJomField, fields['date'])
        self.assertEqual(DateJomField, fields['time'])
        self.assertEqual(DateJomField, fields['datetime'])
        # File-backed fields are exposed by URL.
        self.assertEqual(UrlJomField, fields['file'])
        self.assertEqual(UrlJomField, fields['image'])
class BackEndTestCase(TestCase):
    # End-to-end check of the async save endpoint.
    def testSave(self):
        instance = SimpleModel.objects.create(name = "foo")
        # POST an updated name for the existing instance to the ajax view.
        response = self.client.post(
                reverse("jom_async_save_ajax"),
                data = {'model': instance.__class__.__name__,
                        'id': instance.id,
                        'name': "bar"},
                content_type = "application/json"
                )
        print(response.content)
        # Reload from the DB and confirm the field was persisted.
        instance = SimpleModel.objects.get(id = instance.id)
        self.assertEqual("bar", instance.name)
        instance.delete()
"""
Author: Nemanja Rakicevic
Date : January 2018
Description:
Plotting functions:
- evaluation heatmaps
- model and exploration components
- model and exploration components separate files
"""
import os
import logging
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams.update({'font.size': 14})
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
logger = logging.getLogger(__name__)
class MidpointNormalize(mpl.colors.Normalize):
    """Colormap normalisation pinning ``midpoint`` to the colormap centre.

    Values are mapped piecewise-linearly: vmin -> 0, midpoint -> 0.5,
    vmax -> 1, so diverging colormaps stay centred on the chosen value.
    """
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        mpl.colors.Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # Linear interpolation through the three anchor points.
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
def plot_evals(euclid_plot,
               polar_plot,
               errors_mean,
               test_dict,
               savepath=None,
               num_trial=None,
               show_plots=False,
               img_format='png',
               dpi=150):
    """Plot task model evaluation heatmap over test targets.

    euclid_plot / polar_plot: 2D error grids (failed cells marked -1).
    errors_mean: (euclid_mean, polar_mean) pair used in titles/colorbars.
    test_dict: must provide 'angles' and 'dist' arrays for the tick labels.
    savepath: directory to save under (a 'plots_eval' subdir is created);
        when None nothing is written.
    num_trial: int gives a zero-padded trial filename, otherwise it is
        used verbatim as the file name.
    """
    # Set axis ticks
    xticks = np.arange(1, len(test_dict['angles']), 2)
    yticks = np.arange(0, len(test_dict['dist']), 2)
    norm1 = MidpointNormalize(midpoint=0., vmin=-1, vmax=euclid_plot.max())
    norm2 = MidpointNormalize(midpoint=0., vmin=-1, vmax=polar_plot.max())
    fig = plt.figure(figsize=(15, 5), dpi=100)
    fig.suptitle("Performance Error Plots (failed = -1)", fontsize=16)
    # Eudlidean error plot
    ax = plt.subplot("121")
    ax.set_ylabel('distances')
    ax.set_xlabel('angles')
    ax.set_title("Euclidean error: {}".format(errors_mean[0].round(2)))
    euc = ax.imshow(euclid_plot, origin='upper', cmap=cm.seismic, norm=norm1)
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.set_xticklabels([str(x) for x in test_dict['angles'][xticks - 1][::-1]])
    ax.set_yticklabels([str(y) for y in test_dict['dist'][yticks][::-1]])
    cbar = plt.colorbar(
        euc, shrink=0.7, aspect=20, pad=0.15, orientation='horizontal',
        ticks=[-1, errors_mean[0].round(2), euclid_plot.max().round(2)])
    cbar.ax.set_xticklabels(['-1', 'mean', 'max'])
    euc.set_clim(-1.001, euclid_plot.max() + .005)
    # Polar error plot
    ax = plt.subplot("122")
    ax.set_title("Polar coordinate error: {}".format(errors_mean[1].round(2)))
    ax.set_xlabel('angles')
    sidf = ax.imshow(polar_plot, origin='upper', cmap=cm.seismic, norm=norm2)
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.set_xticklabels([str(x) for x in test_dict['angles'][xticks - 1][::-1]])
    ax.set_yticklabels([str(y) for y in test_dict['dist'][yticks][::-1]])
    cbar = plt.colorbar(
        sidf, shrink=0.7, aspect=20, pad=0.15, orientation='horizontal',
        ticks=[-1, errors_mean[1].round(2), polar_plot.max().round(2)])
    sidf.set_clim(-1.001, polar_plot.max() + .005)
    if savepath is not None:
        # BUG FIX: the os.path.join below used to run unconditionally,
        # so savepath=None raised TypeError before the None check could
        # take effect.
        savepath = os.path.join(savepath, "plots_eval")
        if not os.path.isdir(savepath):
            os.makedirs(savepath)
        if type(num_trial) == int:
            fig_name = 'test_plots_trial_{:05d}.{}'.format(
                num_trial, img_format)
            plt.savefig('{}/{}'.format(savepath, fig_name),
                        format=img_format, dpi=dpi)
            logger.info("Figure saved: '{}'".format(fig_name))
        else:
            plt.savefig('{}/{}.{}'.format(
                savepath, num_trial, img_format),
                format=img_format, dpi=dpi)
    if show_plots:
        plt.show()
    else:
        plt.cla()
def plot_model(model_object,
               dimensions=(0, 1),
               savepath=None,
               num_trial=None,
               show_points=False,
               show_plots=False,
               ds_plots=True,
               img_format='png',
               dpi=150):
    """
    Plot task model components and exploration components on one 2x3
    figure, if multidimensions then along custom dims.

    Layout: angle model, distance model, selection IDF (top view) on the
    top row; penalisation IDF, uncertainty IDF, selection IDF (3D view)
    on the bottom row.

    Args:
        model_object: model exposing mu_alpha, mu_L, pidf, uidf, sidf,
            param_list, param_dims, name, coord_explored, coord_failed
            and (for the uncertainty plot) an `uncertainty` scalar.
        dimensions: pair of parameter indices to slice/plot along.
        savepath: directory to save under (a "plots_model" subdirectory
            is created); if None the figure is not saved.
        num_trial: trial number used in the figure title and file name.
        show_points: overlay explored trial points on the 3D selection
            plot (red = failed, cyan = succeeded).
        show_plots: show the figure interactively instead of clearing it.
        ds_plots: downsample grids to at most ~50 cells per axis (memory).
        img_format: image format used when saving.
        dpi: resolution of the saved figure.
    """
    param_names = ['joint_{}'.format(d) for d in dimensions]
    if len(model_object.mu_alpha):
        fig = plt.figure(
            "DISTRIBUTIONs at step: {}".format(num_trial),
            figsize=None)
        fig.set_size_inches(
            fig.get_size_inches()[0] * 3,
            fig.get_size_inches()[1] * 2)
        dim1 = model_object.param_list[dimensions[0]]
        dim2 = model_object.param_list[dimensions[1]]
        # Extract values to plot: slice the high-dimensional grids down to
        # a 2D map over (dim1, dim2).
        # NOTE(review): the fixed slice indices (3, 3, 4 / zeros) assume a
        # particular parameter-grid layout -- confirm against the model.
        if len(model_object.param_dims) > 2:
            if model_object.param_dims[0] > 1:
                model_alpha = model_object.mu_alpha[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
                model_L = model_object.mu_L[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
                model_pidf = model_object.pidf[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
                model_uidf = model_object.uidf[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
                model_sidf = model_object.sidf[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
            else:
                model_alpha = model_object.mu_alpha[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
                model_L = model_object.mu_L[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
                model_pidf = model_object.pidf[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
                model_uidf = model_object.uidf[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
                model_sidf = model_object.sidf[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
        else:
            model_alpha = model_object.mu_alpha
            model_L = model_object.mu_L
            model_pidf = model_object.pidf
            model_uidf = model_object.uidf
            model_sidf = model_object.sidf
        # Create 3D plot meshgrid
        X, Y = np.meshgrid(dim2, dim1)
        # Downsample for memory constraints
        ds1 = max(1, len(dim1) // 50) if ds_plots else 1
        ds2 = max(1, len(dim2) // 50) if ds_plots else 1
        dim1 = dim1[::ds1]
        dim2 = dim2[::ds2]
        model_alpha = model_alpha[::ds1, ::ds2]
        model_L = model_L[::ds1, ::ds2]
        model_pidf = model_pidf[::ds1, ::ds2]
        model_uidf = model_uidf[::ds1, ::ds2]
        model_sidf = model_sidf[::ds1, ::ds2]
        X = X[::ds1, ::ds2]
        Y = Y[::ds1, ::ds2]
        # Set ticks (unused xticks1/zticks_pidf/zticks_uidf removed)
        xticks = np.linspace(
            min(dim2[0], dim2[-1]), max(dim2[0], dim2[-1]), 5).round(1)
        yticks = np.linspace(
            min(dim1[0], dim1[-1]), max(dim1[0], dim1[-1]), 4).round(1)
        yticks1 = np.linspace(
            min(dim1[0], dim1[-1]), max(dim1[0], dim1[-1]), 5).round(1)
        zticks_alpha = np.linspace(
            model_alpha.min(), model_alpha.max(), 5).round(2)
        zticks_L = np.linspace(
            model_L.min(), model_L.max(), 5).round(2)
        # Shared z-limits so the three exploration surfaces are comparable.
        search_lim = (
            min((1 - model_pidf).min(), model_uidf.min(), model_sidf.min()),
            max((1 - model_pidf).max(), model_uidf.max(), model_sidf.max()))
        # Task models
        # Angle task model
        ax = fig.add_subplot(2, 3, 1, projection='3d')
        ax.set_title('ANGLE MODEL')
        ax.plot_surface(X, Y, model_alpha, rstride=1, cstride=1,
                        cmap=cm.coolwarm, linewidth=0, antialiased=False)
        ax.set_ylabel(param_names[1], labelpad=5)
        ax.set_xlabel(param_names[0], labelpad=5)
        ax.set_zlabel('[degrees] ', rotation='vertical', labelpad=10)
        ax.set_xticks(xticks)
        ax.set_yticks(yticks)
        # Nearly-flat surfaces get scientific z-tick formatting instead.
        if abs(model_alpha.max() - model_alpha.min()) >= 1:
            ax.set_zticks(zticks_alpha)
        else:
            ax.ticklabel_format(style='sci', axis='z', scilimits=(0, 0))
        ax.set_xticklabels([str(x) for x in xticks], rotation=41)
        ax.set_yticklabels([str(x) for x in yticks], rotation=-15)
        ax.tick_params(axis='x', direction='out', pad=-5)
        ax.tick_params(axis='y', direction='out', pad=-3)
        ax.tick_params(axis='z', direction='out', pad=5)
        # Distance task model
        ax = fig.add_subplot(2, 3, 2, projection='3d')
        ax.set_title('DISTANCE MODEL')
        ax.plot_surface(X, Y, model_L, rstride=1, cstride=1,
                        cmap=cm.coolwarm, linewidth=0, antialiased=False)
        ax.set_ylabel(param_names[1], labelpad=5)
        ax.set_xlabel(param_names[0], labelpad=5)
        ax.set_zlabel('[cm]', rotation='vertical', labelpad=10)
        ax.set_xticks(xticks)
        ax.set_yticks(yticks)
        if abs(model_L.max() - model_L.min()) >= 1:
            ax.set_zticks(zticks_L)
        else:
            ax.ticklabel_format(style='sci', axis='z', scilimits=(0, 0))
        ax.set_xticklabels([str(x) for x in xticks], rotation=41)
        ax.set_yticklabels([str(x) for x in yticks], rotation=-15)
        ax.tick_params(axis='x', direction='out', pad=-5)
        ax.tick_params(axis='y', direction='out', pad=-3)
        ax.tick_params(axis='z', direction='out', pad=5)
        # Exploration components
        # Selection IDF (top view)
        ax = fig.add_subplot(2, 3, 3)
        ax.set_title('Selection function')
        ax.set_xlabel(param_names[0])
        ax.set_ylabel(param_names[1])
        ax.set_xlim(0, len(dim1))
        ax.set_ylim(0, len(dim2))
        ax.set_xticks(np.linspace(-1, len(dim1), 5))
        ax.set_yticks(np.linspace(-1, len(dim2), 5))
        ax.set_xticklabels([str(x) for x in xticks])
        ax.set_yticklabels([str(y) for y in yticks1])
        ax.yaxis.tick_right()
        ax.yaxis.set_label_position("right")
        sidf = ax.imshow(model_sidf, cmap=cm.summer, origin='lower')
        for spine in ax.spines.values():
            spine.set_visible(False)
        # add also the trial points (red = failed, cyan = succeeded).
        # tr[0] indexes dim1 (downsampled by ds1) and tr[1] indexes dim2
        # (downsampled by ds2); the original divided by the wrong factors.
        for tr in model_object.coord_explored:
            if list(tr) in [list(x) for x in model_object.coord_failed]:
                ax.scatter(x=tr[1] // ds2, y=tr[0] // ds1, c='r', s=15)
            else:
                ax.scatter(x=tr[1] // ds2, y=tr[0] // ds1, c='c', s=15)
        plt.colorbar(
            sidf, shrink=0.5, aspect=20, pad=0.17, orientation='horizontal',
            ticks=[0.0, 0.5, 1.0])
        sidf.set_clim(-0.001, 1.001)
        # Penalisation IDF (only meaningful for "Informed" searches)
        if 'Informed' in model_object.name:
            ax = fig.add_subplot(2, 3, 4, projection='3d')
            ax.set_title('Penalisation function: {} points'.format(
                len(model_object.coord_failed)))
            ax.plot_surface(X, Y, (1 - model_pidf), rstride=1, cstride=1,
                            cmap=cm.copper, linewidth=0, antialiased=False)
            ax.set_zlim(search_lim)
            ax.set_xlabel(param_names[0], labelpad=5)
            ax.set_ylabel(param_names[1], labelpad=5)
            ax.set_xticks(xticks)
            ax.set_yticks(yticks)
            ax.set_xticklabels([str(x) for x in xticks], rotation=41)
            ax.set_yticklabels([str(x) for x in yticks], rotation=-15)
            ax.tick_params(axis='x', direction='out', pad=-5)
            ax.tick_params(axis='y', direction='out', pad=-3)
            ax.tick_params(axis='z', direction='out', pad=2)
        # Uncertainty IDF (not defined for random search)
        if 'Random' not in model_object.name:
            ax = fig.add_subplot(2, 3, 5, projection='3d')
            ax.set_title('Model uncertainty: {:4.2f}'.format(
                model_object.uncertainty))
            ax.plot_surface(X, Y, model_uidf, rstride=1, cstride=1,
                            cmap=cm.winter, linewidth=0, antialiased=False)
            ax.set_zlim(search_lim)
            ax.set_xlabel(param_names[0], labelpad=5)
            ax.set_ylabel(param_names[1], labelpad=5)
            ax.set_xticks(xticks)
            ax.set_yticks(yticks)
            ax.set_xticklabels([str(x) for x in xticks], rotation=41)
            ax.set_yticklabels([str(x) for x in yticks], rotation=-15)
            ax.tick_params(axis='x', direction='out', pad=-5)
            ax.tick_params(axis='y', direction='out', pad=-3)
            ax.tick_params(axis='z', direction='out', pad=5)
        # Selection IDF (3D view)
        ax = fig.add_subplot(2, 3, 6, projection='3d')
        ax.set_title('Selection function')
        ax.plot_surface(X, Y, model_sidf, rstride=1, cstride=1,
                        cmap=cm.summer, linewidth=0, antialiased=False)
        ax.set_zlim(search_lim)
        ax.set_xlabel(param_names[0], labelpad=5)
        ax.set_ylabel(param_names[1], labelpad=5)
        ax.set_xticks(xticks)
        ax.set_yticks(yticks)
        ax.set_xticklabels([str(x) for x in xticks], rotation=41)
        ax.set_yticklabels([str(x) for x in yticks], rotation=-15)
        ax.tick_params(axis='x', direction='out', pad=-5)
        ax.tick_params(axis='y', direction='out', pad=-3)
        ax.tick_params(axis='z', direction='out', pad=2)
        # add also the trial points, as vertical lines through the surface
        if show_points:
            for tr in model_object.coord_explored:
                if list(tr) in [list(x) for x in model_object.coord_failed]:
                    ax.plot([dim2[tr[1] // ds2], dim2[tr[1] // ds2]],
                            [dim1[tr[0] // ds1], dim1[tr[0] // ds1]],
                            [model_sidf.min(), model_sidf.max()],
                            linewidth=1, color='r', alpha=0.7)
                else:
                    ax.plot([dim2[tr[1] // ds2], dim2[tr[1] // ds2]],
                            [dim1[tr[0] // ds1], dim1[tr[0] // ds1]],
                            [model_sidf.min(), model_sidf.max()],
                            linewidth=1, color='c', alpha=0.7)
        # Only touch the filesystem when a save location was given; the
        # original joined before the None check and would crash on None.
        if savepath is not None:
            savepath = os.path.join(savepath, "plots_model")
            if not os.path.isdir(savepath):
                os.makedirs(savepath)
            if isinstance(num_trial, int):
                fig_title = "Models and IDFs" \
                            "(num_iter: {}, resolution: {})".format(
                                num_trial, len(dim1))
                fig.suptitle(fig_title, fontsize=16)
                fig_name = 'model_plots_trial_{:05d}.{}'.format(
                    num_trial, img_format)
                plt.savefig('{}/{}'.format(savepath, fig_name),
                            format=img_format, dpi=dpi)
                logger.info("Figure saved: '{}'".format(fig_name))
            else:
                plt.savefig('{}/{}.{}'.format(
                    savepath, num_trial, img_format),
                    format=img_format, dpi=dpi)
        if show_plots:
            plt.show()
        else:
            plt.cla()
def plot_model_separate(model_object,
                        dimensions=(0, 1),
                        savepath=None,
                        num_trial=None,
                        show_points=False,
                        show_plots=False,
                        ds_plots=True,
                        img_format='png',
                        dpi=150):
    """
    Plot task model components and exploration components,
    each in a separate file under <savepath>/plots_model.

    Args:
        model_object: model exposing mu_alpha, mu_L, pidf, uidf, sidf,
            param_list, param_dims, name, coord_explored, coord_failed.
        dimensions: pair of parameter indices to slice/plot along.
        savepath: base directory; defaults to the current directory when
            None (the original crashed on the documented None default).
        num_trial: trial number used in figure titles and file names.
        show_points: overlay trial points on the 3D selection plot.
        show_plots, ds_plots, img_format, dpi: as in plot_model.
            NOTE(review): files are always written as .png here;
            img_format/dpi/show_plots are currently unused -- confirm
            before relying on them.
    """
    # `savepath or "."` keeps the old layout while surviving the default.
    savepath = os.path.join(savepath or ".", "plots_model")
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    param_names = ['joint_{}'.format(d) for d in dimensions]
    if len(model_object.mu_alpha):
        fig = plt.figure(
            "DISTRIBUTIONs at step: {}".format(num_trial),
            figsize=None)
        fig.set_size_inches(
            fig.get_size_inches()[0] * 3,
            fig.get_size_inches()[1] * 2)
        dim1 = model_object.param_list[dimensions[0]]
        dim2 = model_object.param_list[dimensions[1]]
        # Extract values to plot: slice the high-dimensional grids down to
        # a 2D map over (dim1, dim2).
        # NOTE(review): fixed slice indices assume a particular grid
        # layout -- confirm against the model.
        if len(model_object.param_dims) > 2:
            if model_object.param_dims[0] > 1:
                model_alpha = model_object.mu_alpha[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
                model_L = model_object.mu_L[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
                model_pidf = model_object.pidf[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
                model_uidf = model_object.uidf[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
                model_sidf = model_object.sidf[:, :, 3, 3, 4].reshape(
                    len(dim1), len(dim2))
            else:
                model_alpha = model_object.mu_alpha[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
                model_L = model_object.mu_L[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
                model_pidf = model_object.pidf[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
                model_uidf = model_object.uidf[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
                model_sidf = model_object.sidf[0, 0, :, 0, :, 0].reshape(
                    len(dim1), len(dim2))
        else:
            model_alpha = model_object.mu_alpha
            model_L = model_object.mu_L
            model_pidf = model_object.pidf
            model_uidf = model_object.uidf
            model_sidf = model_object.sidf
        # Create 3D plot meshgrid
        X, Y = np.meshgrid(dim2, dim1)
        # Downsample for memory constraints
        ds1 = max(1, len(dim1) // 50) if ds_plots else 1
        ds2 = max(1, len(dim2) // 50) if ds_plots else 1
        dim1 = dim1[::ds1]
        dim2 = dim2[::ds2]
        model_alpha = model_alpha[::ds1, ::ds2]
        model_L = model_L[::ds1, ::ds2]
        model_pidf = model_pidf[::ds1, ::ds2]
        model_uidf = model_uidf[::ds1, ::ds2]
        model_sidf = model_sidf[::ds1, ::ds2]
        X = X[::ds1, ::ds2]
        Y = Y[::ds1, ::ds2]
        # Set ticks (unused xticks1/zticks_pidf removed)
        xticks = np.linspace(
            min(dim2[0], dim2[-1]), max(dim2[0], dim2[-1]), 5).round(1)
        yticks = np.linspace(
            min(dim1[0], dim1[-1]), max(dim1[0], dim1[-1]), 4).round(1)
        yticks1 = np.linspace(
            min(dim1[0], dim1[-1]), max(dim1[0], dim1[-1]), 5).round(1)
        zticks_alpha = np.linspace(
            model_alpha.min(), model_alpha.max(), 5).round(2)
        zticks_L = np.linspace(
            model_L.min(), model_L.max(), 5).round(2)
        zticks_uidf = np.linspace(
            model_uidf.min(), model_uidf.max(), 7).round(2)
        # Shared z-limits so the exploration surfaces are comparable.
        search_lim = (
            min((1 - model_pidf).min(), model_uidf.min(), model_sidf.min()),
            max((1 - model_pidf).max(), model_uidf.max(), model_sidf.max()))
        # Task models
        # Angle task model
        fig = plt.figure("ANGLE MODEL #{}".format(num_trial), figsize=None)
        fig.set_size_inches(
            fig.get_size_inches()[0],
            fig.get_size_inches()[1])
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(X, Y, model_alpha, rstride=1, cstride=1,
                        cmap=cm.coolwarm, linewidth=0, antialiased=False)
        ax.set_xlabel(param_names[0], labelpad=10)
        ax.set_ylabel(param_names[1], labelpad=10)
        ax.set_zlabel('[degrees] ', rotation='vertical', labelpad=10)
        ax.set_xticks(xticks)
        ax.set_yticks(yticks)
        # Nearly-flat surfaces get scientific z-tick formatting instead.
        if abs(model_alpha.max() - model_alpha.min()) >= 1:
            ax.set_zticks(zticks_alpha)
        else:
            ax.ticklabel_format(style='sci', axis='z', scilimits=(0, 0))
        ax.set_xticklabels([str(x) for x in xticks], rotation=41)
        ax.set_yticklabels([str(x) for x in yticks], rotation=-15)
        ax.tick_params(axis='x', direction='out', pad=-5)
        ax.tick_params(axis='y', direction='out', pad=-3)
        ax.tick_params(axis='z', direction='out', pad=5)
        plt.savefig("{}/img_trial_{:05d}_model_angle.png".format(
            savepath, num_trial),
            format="png")
        # Distance task model
        fig = plt.figure("DISTANCE MODEL #{}".format(num_trial), figsize=None)
        fig.set_size_inches(
            fig.get_size_inches()[0],
            fig.get_size_inches()[1])
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(X, Y, model_L, rstride=1, cstride=1,
                        cmap=cm.coolwarm, linewidth=0, antialiased=False)
        ax.set_xlabel(param_names[0], labelpad=10)
        ax.set_ylabel(param_names[1], labelpad=10)
        ax.set_zlabel('[cm]', rotation='vertical', labelpad=10)
        ax.set_xticks(xticks)
        ax.set_yticks(yticks)
        if abs(model_L.max() - model_L.min()) >= 1:
            ax.set_zticks(zticks_L)
        else:
            ax.ticklabel_format(style='sci', axis='z', scilimits=(0, 0))
        ax.set_xticklabels([str(x) for x in xticks], rotation=41)
        ax.set_yticklabels([str(x) for x in yticks], rotation=-15)
        ax.tick_params(axis='x', direction='out', pad=-5)
        ax.tick_params(axis='y', direction='out', pad=-3)
        ax.tick_params(axis='z', direction='out', pad=5)
        plt.savefig("{}/img_trial_{:05d}_model_dist.png".format(
            savepath, num_trial),
            format="png")
        # Exploration components
        # Selection IDF (top view)
        fig = plt.figure("SELECTION IDF #{}".format(num_trial), figsize=None)
        fig.set_size_inches(
            fig.get_size_inches()[0],
            fig.get_size_inches()[1])
        ax1 = fig.add_subplot(111)
        ax1.set_xlabel(param_names[0])
        ax1.set_ylabel(param_names[1])
        ax1.set_xlim(0, len(dim1))
        ax1.set_ylim(0, len(dim2))
        ax1.set_xticks(np.linspace(-1, len(dim1), 5))
        ax1.set_yticks(np.linspace(-1, len(dim2), 5))
        ax1.set_xticklabels([str(x) for x in xticks])
        ax1.set_yticklabels([str(y) for y in yticks1])
        ax1.yaxis.tick_right()
        ax1.yaxis.set_label_position("right")
        sidf = ax1.imshow(model_sidf, cmap=cm.summer, origin='lower')
        for spine in ax1.spines.values():
            spine.set_visible(False)
        # add also the trial points (red = failed, cyan = succeeded).
        # tr[0] indexes dim1 (downsampled by ds1) and tr[1] indexes dim2
        # (downsampled by ds2); the original divided by the wrong factors.
        for tr in model_object.coord_explored:
            if list(tr) in [list(x) for x in model_object.coord_failed]:
                ax1.scatter(x=tr[1] // ds2, y=tr[0] // ds1, c='r', s=15)
            else:
                ax1.scatter(x=tr[1] // ds2, y=tr[0] // ds1, c='c', s=15)
        plt.colorbar(sidf, shrink=0.5, aspect=20, pad=0.17,
                     orientation='horizontal', ticks=[0.0, 0.5, 1.0])
        sidf.set_clim(-0.001, 1.001)
        plt.savefig("{}/img_trial_{:05d}_sidf_top.png".format(
            savepath, num_trial),
            format="png")
        # Penalisation IDF (only meaningful for "Informed" searches)
        if 'Informed' in model_object.name:
            fig = plt.figure(
                "PENALISATION IDF #{}".format(num_trial), figsize=None)
            fig.set_size_inches(
                fig.get_size_inches()[0],
                fig.get_size_inches()[1])
            ax1 = fig.add_subplot(111, projection='3d')
            ax1.plot_surface(
                X, Y, (1 - model_pidf), rstride=1, cstride=1, cmap=cm.copper,
                linewidth=0, antialiased=False)
            ax1.set_zlim(search_lim)
            ax1.set_ylabel(param_names[1], labelpad=5)
            ax1.set_xlabel(param_names[0], labelpad=5)
            ax1.set_xticks(xticks)
            ax1.set_yticks(yticks)
            ax1.set_xticklabels([str(x) for x in xticks], rotation=41)
            ax1.set_yticklabels([str(x) for x in yticks], rotation=-15)
            ax1.tick_params(axis='x', direction='out', pad=-5)
            ax1.tick_params(axis='y', direction='out', pad=-3)
            ax1.tick_params(axis='z', direction='out', pad=2)
            plt.savefig("{}/img_trial_{:05d}_pidf.png".format(
                savepath, num_trial),
                format="png")
        # Uncertainty IDF (not defined for random search)
        if 'Random' not in model_object.name:
            fig = plt.figure(
                "UNCERTAINTY IDF #{}".format(num_trial), figsize=None)
            fig.set_size_inches(
                fig.get_size_inches()[0],
                fig.get_size_inches()[1])
            ax1 = fig.add_subplot(111, projection='3d')
            ax1.plot_surface(
                X, Y, model_uidf, rstride=1, cstride=1, cmap=cm.winter,
                linewidth=0, antialiased=False)
            ax1.set_zlim(search_lim)
            ax1.set_ylabel(param_names[1], labelpad=5)
            ax1.set_xlabel(param_names[0], labelpad=5)
            ax1.set_xticks(xticks)
            ax1.set_yticks(yticks)
            ax1.set_zticks(zticks_uidf)
            ax1.set_xticklabels([str(x) for x in xticks], rotation=41)
            ax1.set_yticklabels([str(x) for x in yticks], rotation=-15)
            ax1.tick_params(axis='x', direction='out', pad=-5)
            ax1.tick_params(axis='y', direction='out', pad=-3)
            ax1.tick_params(axis='z', direction='out', pad=5)
            plt.savefig("{}/img_trial_{:05d}_uidf.png".format(
                savepath, num_trial),
                format="png")
        # Selection IDF (3D view). The window title must differ from the
        # top-view figure above: the original reused "SELECTION IDF #n",
        # so plt.figure returned the existing (already drawn-on) figure.
        fig = plt.figure("SELECTION IDF 3D #{}".format(num_trial),
                         figsize=None)
        fig.set_size_inches(
            fig.get_size_inches()[0],
            fig.get_size_inches()[1])
        ax1 = fig.add_subplot(111, projection='3d')
        ax1.plot_surface(X, Y, model_sidf, rstride=1, cstride=1,
                         cmap=cm.summer, linewidth=0, antialiased=False)
        ax1.set_zlim(search_lim)
        ax1.set_ylabel(param_names[1], labelpad=5)
        ax1.set_xlabel(param_names[0], labelpad=5)
        ax1.set_xticks(xticks)
        ax1.set_yticks(yticks)
        ax1.set_xticklabels([str(x) for x in xticks], rotation=41)
        ax1.set_yticklabels([str(x) for x in yticks], rotation=-15)
        ax1.tick_params(axis='x', direction='out', pad=-5)
        ax1.tick_params(axis='y', direction='out', pad=-3)
        ax1.tick_params(axis='z', direction='out', pad=2)
        # add also the trial points, as vertical lines through the surface
        if show_points:
            for tr in model_object.coord_explored:
                if list(tr) in [list(x) for x in model_object.coord_failed]:
                    ax1.plot([dim2[tr[1] // ds2], dim2[tr[1] // ds2]],
                             [dim1[tr[0] // ds1], dim1[tr[0] // ds1]],
                             [model_sidf.min(), model_sidf.max()],
                             linewidth=1, color='r', alpha=0.7)
                else:
                    ax1.plot([dim2[tr[1] // ds2], dim2[tr[1] // ds2]],
                             [dim1[tr[0] // ds1], dim1[tr[0] // ds1]],
                             [model_sidf.min(), model_sidf.max()],
                             linewidth=1, color='c', alpha=0.7)
        plt.savefig("{}/img_trial_{:05d}_sidf.png".format(
            savepath, num_trial),
            format="png")
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from guestbook.models import Greeting
class GreetingAdmin(admin.ModelAdmin):
    """Admin configuration for guestbook Greeting entries."""
    # Columns in the change-list view (entry repr, author, timestamp).
    list_display =('__unicode__', 'username', 'created_at')
    # Sidebar filters.
    list_filter = ('created_at', 'username')
    # Fields searched by the admin search box.
    search_fields = ('username', 'content')


admin.site.register(Greeting, GreetingAdmin)
|
class Field:
    """Base descriptor for a model column; remembers its SQL column type."""

    def __init__(self, column_type):
        self.column_type = column_type

    def __str__(self):
        # e.g. "<CharField:varchar>" -- subclass name plus column type.
        return '<{}:{}>'.format(type(self).__name__, self.column_type)
class CharField(Field):
    """Field stored as the SQL 'varchar' column type."""
    def __init__(self):
        super().__init__('varchar')
class InterField(Field):
    """Field stored as the SQL 'int' column type.

    NOTE(review): the name looks like a typo for ``IntField``; renaming
    would break existing model declarations, so it is kept as-is.
    """
    def __init__(self):
        super().__init__('int')
class MetaModel(type):
    """Metaclass that collects Field attributes into a per-class map.

    After class creation, ``cls.fields`` maps attribute name -> Field
    instance for every Field declared directly on the class body.
    """
    fields = {}

    def __init__(cls, what, bases, class_dict):
        super().__init__(what, bases, class_dict)
        cls.fields = {
            attr: value
            for attr, value in class_dict.items()
            if isinstance(value, Field)
        }
class Model(metaclass=MetaModel):
    """Minimal active-record style base class.

    Field attributes declared on subclasses are harvested into ``fields``
    by MetaModel; column values live in each instance's ``__dict__``.
    """
    # Table name; lazily defaults to the lowercased class name.
    __table__ = None

    def __init__(self, **kwargs):
        # Resolve the table name on the *instance*. The original wrote
        # ``Model.__table__ = ...``, mutating the shared base class so the
        # most recently constructed model clobbered every other model's
        # table name.
        if not self.__table__:
            self.__table__ = self.__class__.__name__.lower()
        self.__dict__.update(kwargs)

    def _insert(self):
        # Placeholder: real INSERT execution not implemented yet.
        pass

    def _update(self):
        # Placeholder: real UPDATE execution not implemented yet.
        pass

    def get_kv(self):
        """Return {field name: instance value} for every declared field.

        Raises:
            KeyError: if a declared field was never set on the instance.
        """
        data = {}
        for k in self.fields.keys():
            data[k] = self.__dict__[k]
        return data

    def __setattr__(self, key, value):
        # Store everything directly in the instance dict.
        self.__dict__[key] = value

    def select(self):
        # Placeholder: querying not implemented yet.
        pass

    def save(self):
        """Build and print an INSERT statement for this instance.

        Fixes vs. the original: take a single get_kv() snapshot instead of
        two, stringify values so non-str columns don't break str.join, use
        the standard SQL keyword VALUES (was VALUE), and read the table
        name from the instance rather than the shared Model class.
        """
        data = self.get_kv()
        sql = 'INSERT INTO {table} ({keys}) VALUES ({values})'.format(
            table=self.__table__,
            keys=','.join(data.keys()),
            values=','.join(str(v) for v in data.values()))
        print(sql)
class Database:
    """Stub database facade; all operations are unimplemented placeholders."""
    # Shared connection handle; never assigned by the visible code.
    conn = None

    @staticmethod
    def connect():
        # Placeholder: establishing a connection not implemented yet.
        pass

    @staticmethod
    def get_conn():
        # Placeholder: should return ``conn`` once connect() is implemented.
        pass

    @staticmethod
    def execute():
        # Placeholder: statement execution not implemented yet.
        pass
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Auto-register all installed apps' admin modules.
admin.autodiscover()

# NOTE(review): ``patterns()`` with dotted-string view names is the legacy
# pre-1.8 Django URLconf style (removed in Django 1.10) -- this project
# presumably pins an old Django release; confirm before upgrading.
urlpatterns = patterns('',
    url(r'^$', 'tinus.views.home'),
    url(r'^history/$', 'tinus.views.history'),
    url(r'^logout$', 'tinus.views.logout'),
    url(r'^analysis', 'tinus.views.analysis'),
    url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'tpl/login.html'}),
    url(r'^addbill$', 'tinus.views.add_bill', name='add bill'),
    url(r'^removebill$', 'tinus.views.remove_bill', name='remove bill'),
    url(r'^chooseuser$', 'tinus.views.choose_user', name='choose user'),
    url(r'^admin/', include(admin.site.urls)),
)
import numpy as np
import scipy as sp
import scipy.interpolate
import math
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import json
import simulation
import plot_figure
import temperature
# ----------------------------- parameters ------------------------------
N, max_t, Q_deploy, radius = int(1E5), 700, 0, 0.35
a_drop, v_drop = 150000, 0  # Second stage parameters
m_stage = 11
# ------------------------------ main ----------------------------------
# Uncomment to rerun the simulation and refresh the cached data:
# simulation.dump_simulation_data(N, max_t, Q_deploy, radius, m_stage, a_drop, v_drop)
alt, vel, mach, acc, drag, Q, t_list, key_points = simulation.get_simulation_data()
plot_figure.plot(alt, vel, mach, acc, drag, Q, t_list,
                 key_points['index_1000'], key_points['index_deploy'])
# Compute the power profile once and reuse it (the original recomputed
# temperature.get_power(vel, drag) inline on the next line).
power = temperature.get_power(vel, drag)
temperature.temperature_simulation(t_list, power, radius)
|
import numpy as np
import gym
import copy
import matplotlib.pyplot as plt
from matplotlib import animation
from collections import defaultdict
from dps import cfg
from dps.env.basic import game
from dps.env.env import BatchGymEnv
from dps.utils import Param, square_subplots, create_maze
from dps.train import Hook
from dps.utils.tf import FeedforwardCell, MLP, ScopedFunction
from dps.rl.policy import Policy, ProductDist, SigmoidNormal, Softmax
class CollectBase(game.ObjectGame):
    """Base class for 'collect' object games.

    The agent (entities[0]) moves around a 2D field containing
    collectable and obstacle entities: collisions with collectables
    remove them (and may yield reward), collisions with obstacles or
    walls stop the agent.
    """
    # Spec dicts describing the agent / collectables / obstacles.
    agent_spec = Param()
    collectable_specs = Param()
    obstacle_specs = Param()
    # Distance covered by a full-magnitude step.
    step_size = Param()
    # If True, reward is a per-step time penalty (see compute_reward)
    # instead of a bonus per collected item.
    time_reward = Param()
    # Discrete (direction, magnitude) actions vs. continuous Box actions.
    discrete_actions = Param()
    def __init__(self, **kwargs):
        # Deep-copy all specs so per-instance edits don't leak into the
        # shared Param defaults.
        self.agent_spec = copy.deepcopy(dict(self.agent_spec))
        self.agent_spec['collectable'] = False
        self.collectable_specs = copy.deepcopy(list(self.collectable_specs))
        self.collectable_specs = [dict(cs) for cs in self.collectable_specs]
        for spec in self.collectable_specs:
            spec['collectable'] = True
        self.obstacle_specs = copy.deepcopy(list(self.obstacle_specs))
        self.obstacle_specs = [dict(os) for os in self.obstacle_specs]
        for spec in self.obstacle_specs:
            spec['collectable'] = False
        self.entity_specs = [self.agent_spec] + self.collectable_specs + self.obstacle_specs
        # Each spec gets an index used for one-hot entity features
        # (see get_entity_features).
        for i, es in enumerate(self.entity_specs):
            es['idx'] = i
        if self.discrete_actions:
            # (direction in eighths of a turn, magnitude in {0.1, 0.5, 1.0})
            action_space = gym.spaces.MultiDiscrete([8, 3])
        else:
            # (dy, dx, magnitude), each clipped in move_entities.
            action_space = gym.spaces.Box(low=np.array([-1, -1, 0]), high=np.array([1, 1, 1]), dtype=np.float32)
        super(CollectBase, self).__init__(
            action_space=action_space,
            reward_range=(-10, 10),
            entity_feature_dim=len(self.entity_specs),
            **kwargs)
    def move_entities(self, action):
        """Translate `action` into a (y, x) step and move the agent."""
        if self.discrete_actions:
            angle_idx, magnitude_idx = action
            angle = angle_idx * 2 * np.pi / 8
            magnitude = [0.1, 0.5, 1.0][int(magnitude_idx)]
            y = self.step_size * magnitude * np.sin(angle)
            x = self.step_size * magnitude * np.cos(angle)
        else:
            y, x, magnitude = action
            y = np.clip(y, -1, 1)
            x = np.clip(x, -1, 1)
            magnitude = np.clip(magnitude, 0, 1)
            # Normalise the direction; near-zero vectors mean "stay put".
            norm = np.sqrt(x**2 + y**2)
            if norm > 1e-6:
                y = self.step_size * magnitude * y / norm
                x = self.step_size * magnitude * x / norm
            else:
                y = x = 0
        return self._move_entity(self.entities[0], y, x)
    def resolve_collision(self, mover, other):
        """ Return (kill, stop, reward) for the agent hitting `other`. """
        if isinstance(other, str):  # wall
            return (False, True, 0)
        else:
            if other.collectable:
                if self.time_reward:
                    # Reward comes from compute_reward in this mode.
                    return (True, False, 0)
                else:
                    return (True, False, 1/self.n_collectables)
            else:
                # Obstacle: the agent is blocked but nothing is removed.
                return (False, True, 0)
    def get_entity_features(self, entity):
        """One-hot encoding of the entity's spec index."""
        return [int(entity.idx == i) for i in range(len(self.entity_specs))]
    def compute_reward(self):
        """Time mode: -1 per still-alive collectable, normalised by
        (n_collectables * episode length)."""
        if self.time_reward:
            return sum([-1 for entity in self.entities if entity.collectable and entity.alive]) / (self.n_collectables * cfg.T)
        else:
            return 0.0
class CollectA(CollectBase):
    """Collectables and obstacles scattered uniformly at random."""
    n_collectables = Param()
    n_obstacles = Param()
    max_overlap = Param()
    max_entities = None

    def __init__(self, **kwargs):
        self.max_entities = 1 + self.n_collectables + self.n_obstacles
        assert self.n_collectables > 0
        super(CollectA, self).__init__(**kwargs)

    def setup_field(self):
        """Sample entity specs and non-overlapping placements for them."""
        picked_collectables = list(np.random.choice(
            self.collectable_specs, size=self.n_collectables, replace=True))
        picked_obstacles = list(np.random.choice(
            self.obstacle_specs, size=self.n_obstacles, replace=True))
        all_specs = [self.agent_spec] + picked_collectables + picked_obstacles
        placements = game.sample_entities(
            self.image_shape, [s['shape'] for s in all_specs],
            self.max_overlap)
        field = [game.Entity(**s) for s in all_specs]
        for entity, rect in zip(field, placements):
            entity.top, entity.left = rect.top, rect.left
        return field
def build_env():
    """Create the default CollectA game wrapped as a BatchGymEnv."""
    return BatchGymEnv(gym_env=CollectA())
class CollectB(CollectBase):
    """ Objects placed in a circle concentric with the image, and only one collectable. """
    # Angular offset of each object pair from its base direction.
    angle_sep = Param()
    # Number of base directions; two objects are placed per direction.
    n_dirs = Param()
    max_entities = None
    def __init__(self, **kwargs):
        # One object pair per direction, plus the agent.
        self.max_entities = 2*self.n_dirs + 1
        self.n_collectables = 1
        super(CollectB, self).__init__(**kwargs)
    def setup_field(self):
        """Place the agent at the centre and 2*n_dirs objects on a circle.

        For each base direction, two positions are generated at
        angle +- angle_sep; exactly one of the 2*n_dirs objects is the
        collectable, the rest are obstacles (randomly permuted).
        """
        # Circle geometry only works for square images.
        assert self.image_shape[0] == self.image_shape[1]
        start_angle = np.pi/4
        # Radius keeps objects fully inside the image border.
        radius = int(np.floor(self.image_shape[0] / 2 - self.agent_spec['shape'][0]/2))
        center = (self.image_shape[0]/2, self.image_shape[1]/2)
        centers = []
        for i in range(self.n_dirs):
            angle = start_angle + 2*np.pi * i / self.n_dirs
            angle1 = angle - self.angle_sep
            angle2 = angle + self.angle_sep
            for angle in [angle1, angle2]:
                y = radius * np.sin(angle) + center[0]
                x = radius * np.cos(angle) + center[1]
                centers.append((y, x))
        collectable_spec = np.random.choice(self.collectable_specs)
        obstacle_specs = list(np.random.choice(self.obstacle_specs, size=2*self.n_dirs-1, replace=True))
        # Shuffle so the collectable's position is unpredictable.
        object_specs = np.random.permutation([collectable_spec] + obstacle_specs)
        agent = game.Entity(**self.agent_spec)
        agent.center = center
        objects = [game.Entity(**spec) for spec in object_specs]
        for center, obj in zip(centers, objects):
            obj.center = center
        return [agent, *objects]
class CollectC(CollectBase):
    """Variant with collectables only -- no obstacles."""
    max_overlap = Param()
    n_collectables = Param()
    max_entities = None

    def __init__(self, **kwargs):
        self.max_entities = 1 + self.n_collectables
        super(CollectC, self).__init__(**kwargs)

    def setup_field(self):
        """Scatter the agent plus randomly chosen collectables."""
        chosen = list(np.random.choice(
            self.collectable_specs, size=self.n_collectables, replace=True))
        all_specs = [self.agent_spec] + chosen
        placements = game.sample_entities(
            self.image_shape, [s['shape'] for s in all_specs],
            self.max_overlap)
        field = [game.Entity(**s) for s in all_specs]
        for entity, rect in zip(field, placements):
            entity.top, entity.left = rect.top, rect.left
        return field
class CollectD(CollectBase):
    """ Same as CollectA, but obstacles are arranged into a maze. """
    n_collectables = Param()
    n_obstacles = Param()
    max_overlap = Param()
    max_entities = None
    def __init__(self, **kwargs):
        self.max_entities = 1 + self.n_collectables + self.n_obstacles
        assert self.n_collectables > 0
        super(CollectD, self).__init__(**kwargs)
    def setup_field(self):
        """Sample a maze at agent resolution; place agent and
        collectables in corridors and obstacles on walls."""
        agent_shape = self.agent_spec['shape']
        # Maze cells are the size of the agent.
        maze_shape = (
            int(np.ceil(self.image_shape[0] / agent_shape[0])),
            int(np.ceil(self.image_shape[1] / agent_shape[1])))
        maze = create_maze(maze_shape)
        collectable_specs = list(np.random.choice(self.collectable_specs, size=self.n_collectables, replace=True))
        obstacle_specs = list(np.random.choice(self.obstacle_specs, size=self.n_obstacles, replace=True))
        specs = [self.agent_spec] + collectable_specs + obstacle_specs
        shapes = [spec['shape'] for spec in specs]
        maze = maze[None, :, :]
        # Placement masks, one per entity: the agent and collectables are
        # restricted to free cells (1 - maze), obstacles to wall cells.
        masks = np.concatenate(
            [np.tile(1-maze, (self.n_collectables+1, 1, 1)),
             np.tile(maze, (self.n_obstacles, 1, 1))],
            axis=0)
        rectangles = game.sample_entities(self.image_shape, shapes, self.max_overlap, masks=masks)
        entities = [game.Entity(**spec) for spec in specs]
        for rect, entity in zip(rectangles, entities):
            entity.top = rect.top
            entity.left = rect.left
        return entities
class RolloutsHook(Hook):
    """Training hook that evaluates the policy on an auxiliary env.

    Periodically runs validation rollouts in `env_class(**env_kwargs)`,
    records the average reward per episode under a key derived from the
    env class and kwargs, and optionally saves an animated gif of the
    first rollout batch.
    """
    def __init__(self, env_class, plot_step=None, env_kwargs=None, **kwargs):
        self.env_class = env_class
        self.env_kwargs = env_kwargs or {}
        # Unique hook name: env class name plus its kwargs, e.g.
        # "CollectB_n_dirs=4".
        kwarg_string = "_".join("{}={}".format(k, v) for k, v in self.env_kwargs.items())
        name = env_class.__name__ + ("_" + kwarg_string if kwarg_string else "")
        self.name = name.replace(" ", "_")
        self.plot_step = plot_step
        super(RolloutsHook, self).__init__(final=True, **kwargs)
    def start_stage(self, training_loop, updater, stage_idx):
        """Build a fresh evaluation env at the start of each stage."""
        gym_env = self.env_class(**self.env_kwargs)
        self.env = BatchGymEnv(gym_env=gym_env)
    def plot(self, updater, rollouts):
        """Save the rollout frames as an animated gif (imagemagick writer)."""
        plt.ion()
        if updater.env.gym_env.image_obs:
            obs = rollouts.obs
        else:
            obs = rollouts.image
        fig, axes = square_subplots(rollouts.batch_size, figsize=(5, 5))
        plt.subplots_adjust(top=0.95, bottom=0, left=0, right=1, wspace=0.1, hspace=0.1)
        # One imshow artist per rollout in the batch, updated per frame.
        images = []
        for i, ax in enumerate(axes.flatten()):
            ax.set_aspect("equal")
            ax.set_axis_off()
            image = ax.imshow(np.zeros(obs.shape[2:]))
            images.append(image)
        def animate(t):
            for i in range(rollouts.batch_size):
                images[i].set_array(obs[t, i, :, :, :])
        anim = animation.FuncAnimation(fig, animate, frames=len(rollouts), interval=500)
        path = updater.exp_dir.path_for('plots', '{}_animation.gif'.format(self.name))
        anim.save(path, writer='imagemagick')
        plt.close(fig)
    def step(self, training_loop, updater, step_idx=None):
        """Run cfg.n_val_rollouts validation episodes in batches and
        return {"val": {<name>-reward_per_ep: mean reward}}."""
        n_rollouts = cfg.n_val_rollouts
        batch_size = cfg.batch_size
        record = defaultdict(float)
        n_iters = int(np.ceil(n_rollouts / batch_size))
        for it in range(n_iters):
            # Last batch may be smaller than batch_size.
            n_remaining = n_rollouts - it * batch_size
            _batch_size = min(batch_size, n_remaining)
            for learner in updater.learners:
                with learner:
                    rollouts = self.env.do_rollouts(policy=learner.pi, n_rollouts=_batch_size, T=cfg.T, mode='val')
                    key = "{}-reward_per_ep".format(self.name)
                    # Weight by batch size; divided by n_rollouts below.
                    record[key] += _batch_size * rollouts.rewards.sum(0).mean()
                    if it == 0 and self.plot_step and (step_idx is None or step_idx % self.plot_step == 0):
                        self.plot(updater, rollouts)
        return dict(val={k: v / n_rollouts for k, v in record.items()})
# Colour spec shared by all collectable/obstacle appearances.
colors = "red green blue"
# The agent renders on top (z=100) and is a black star.
agent_spec = dict(appearance="star", color="black", z=100, shape=(10, 10))
entity_size = (10, 10)
noise_res = getattr(cfg, 'noise_res', None)
# Collectables are "x" shapes; obstacles are the remaining glyphs.
collectable_specs = [dict(appearance="x", color=colors)]
obstacle_specs = [
    dict(appearance="circle", color=colors),
    dict(appearance="ud_triangle", color=colors),
    dict(appearance="triangle", color=colors),
    dict(appearance="plus", color=colors),
    dict(appearance="diamond", color=colors),
]
# All non-agent entities share the same size and noise resolution.
for es in collectable_specs + obstacle_specs:
    es.update(shape=entity_size, noise_res=noise_res)
# Evaluation hooks run every `hook_step` training steps (and initially).
hook_step = 1000
hook_kwargs = dict(n=hook_step, plot_step=hook_step, initial=True)
# env config: CollectA for training, plus generalisation hooks over
# harder variants (more directions/objects, larger images).
config = game.config.copy(
    env_name="collect",
    n_collectables=5,
    n_obstacles=5,
    agent_spec=agent_spec,
    collectable_specs=collectable_specs,
    obstacle_specs=obstacle_specs,
    build_env=build_env,
    image_shape=(48, 48), background_colour="white", max_overlap=0.25, step_size=14,
    hooks=[
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=4), **hook_kwargs),
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=5), **hook_kwargs),
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=6), **hook_kwargs),
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=7), **hook_kwargs),
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=8), **hook_kwargs),
        RolloutsHook(env_class=CollectC, env_kwargs=dict(n_collectables=5), **hook_kwargs),
        RolloutsHook(env_class=CollectC, env_kwargs=dict(n_collectables=10), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=6, n_obstacles=6), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=7, n_obstacles=7), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=8, n_obstacles=8), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=9, n_obstacles=9), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=10, n_obstacles=10), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=5, n_obstacles=5), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=6, n_obstacles=6), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=7, n_obstacles=7), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=8, n_obstacles=8), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=9, n_obstacles=9), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=10, n_obstacles=10), **hook_kwargs),
    ],
    angle_sep=np.pi/16,
    discrete_actions=True,
    time_reward=False,
    eval_step=1000,
    display_step=1000,
)
class Backbone(ScopedFunction):
    """Scoped function that runs its input through a backbone network and
    then a small MLP head that produces ``output_size`` outputs.

    NOTE(review): ``self.backbone = Backbone()`` instantiates this very
    class, so calling the inner backbone re-enters ``_call`` and recurses
    without bound. This almost certainly should construct a different
    backbone network (e.g. one supplied by the config) -- confirm intent.
    """
    # Lazily-built sub-networks, created on first call.
    backbone = None
    mlp = None

    def _call(self, inp, output_size, is_training):
        if self.backbone is None:
            self.backbone = Backbone()
        if self.mlp is None:
            self.mlp = MLP([100, 100])
        # The inner backbone is called with output_size=0; the MLP is what
        # produces the requested output_size.
        outp = self.backbone(inp, 0, is_training)
        outp = self.mlp(outp, output_size, is_training)
        return outp
def build_attentional_relation_network(output_size, name):
    """Wrap an AttentionalRelationNetwork in a FeedforwardCell controller."""
    from dps.utils.tf import AttentionalRelationNetwork
    network = AttentionalRelationNetwork(n_repeats=2, scope="collection_controller")
    return FeedforwardCell(network, output_size, name=name)
def build_object_network_controller(output_size, name):
    """Wrap an ObjectNetwork in a FeedforwardCell controller."""
    from dps.utils.tf import ObjectNetwork
    network = ObjectNetwork(n_repeats=1, scope="collection_controller")
    return FeedforwardCell(network, output_size, name=name)
def build_controller(output_size, name):
    """Build the controller cell selected by ``cfg.controller_type``.

    Supported types: "arn" (attentional relation network) and "obj"
    (object network). Anything else raises, mirroring the original.
    """
    builders = {
        "arn": build_attentional_relation_network,
        "obj": build_object_network_controller,
    }
    try:
        builder = builders[cfg.controller_type]
    except KeyError:
        raise Exception("Unknown controller_type: {}".format(cfg.controller_type))
    return builder(output_size, name)
def build_policy(env, **kwargs):
    """Create a Policy whose action distribution matches the action space:
    two softmaxes when actions are discrete, three sigmoid-normals otherwise.
    """
    if cfg.discrete_actions:
        dist = ProductDist(
            Softmax(8, one_hot=False), Softmax(3, one_hot=False))
    else:
        dist = ProductDist(
            SigmoidNormal(-1, 1, explore=cfg.explore),
            SigmoidNormal(-1, 1, explore=cfg.explore),
            SigmoidNormal(0, 1, explore=cfg.explore),
        )
    return Policy(dist, env.obs_shape, **kwargs)
# alg config: controller architecture, network builders and exploration.
config.update(
    build_controller=build_controller,
    controller_type="obj",  # "obj" (ObjectNetwork) or "arn" (AttentionalRelationNetwork)
    d=128,
    # d=256,
    layer_norm=True,
    symmetric_op="max",
    use_mask=True,
    # For obj (larger variants kept for reference)
    # build_on_input_network=lambda scope: MLP([128, 128], scope=scope),
    # build_on_object_network=lambda scope: MLP([128, 128], scope=scope),
    # build_on_output_network=lambda scope: MLP([128, 128, 128], scope=scope),
    build_on_input_network=lambda scope: MLP([128], scope=scope),
    build_on_object_network=lambda scope: MLP([128], scope=scope),
    build_on_output_network=lambda scope: MLP([128, 128], scope=scope),
    # For arn
    build_arn_network=lambda scope: MLP([128, 128], scope=scope),
    build_arn_object_network=lambda scope: MLP([128, 128], scope=scope),
    n_heads=1,
    exploration_schedule=1.0,
    val_exploration_schedule=1.0,
    build_policy=build_policy,
)
if __name__ == "__main__":
    # Smoke-test the environment: roll out a random agent with rendering and
    # print each (action, reward) pair as it happens.
    with config:
        env = build_env().gym_env
        agent = game.RandomAgent(env.action_space)
        game.do_rollouts(
            env, agent, render=True,
            callback=lambda action, reward, **kwargs: print("Action: {}, Reward: {}".format(action, reward)))
|
from setuptools import setup
from pycdek import __version__
setup(
    name='pycdek',
    url='http://github.com/onrik/pycdek/',
    download_url='https://github.com/onrik/pycdek/tarball/master',
    version=__version__,
    description='Client for CDEK API',
    author='Andrey',
    author_email='and@rey.im',
    license='MIT',
    packages=['pycdek'],
    # Bug fix: package_data globs are resolved relative to the package
    # directory, so 'pycdek/*.py' looked for pycdek/pycdek/*.py and matched
    # nothing. '*.py' matches the package's own modules.
    package_data={'pycdek': [
        '*.py',
    ]},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
def count_characters(words):
    """Map each word to a 26-element list of per-letter counts ('a'..'z')."""
    dic = {}
    for word in words:
        cnt = [0] * 26
        for c in word:
            cnt[ord(c) - ord('a')] += 1
        dic[word] = cnt
    return dic


# "words" and "characters" consist of lowercase letters only.
def main(words, characters):
    """Return the words that can be spelled from the multiset of letters in
    ``characters``. Echoes ``characters`` to stdout (as the original did).
    """
    print(characters)
    char_count = [characters.count(chr(ord('a') + i)) for i in range(26)]
    char_count_dict = count_characters(words)
    ans = []
    for word, counts in char_count_dict.items():
        # Bug fix: the original broke out of an index loop and then tested
        # ``i == 26-1``, which wrongly accepted a word whose 'z' (index 25)
        # count was the one that exceeded the available letters.
        if all(counts[i] <= char_count[i] for i in range(26)):
            ans.append(word)
    return ans
if __name__ == '__main__':
    # Load a dictionary file (one word per line), read the available letters
    # from stdin, and print every word spellable from them.
    # NOTE(review): the file handle is never closed; a with-block would be
    # safer. "charactors" is a (consistent) misspelling kept as-is.
    path="./dictionary.words"
    f=open(path)
    words=[line.strip().lower() for line in f.readlines()]
    charactors=input().strip().lower()
    print( main(words,charactors) )
|
from kafka import KafkaConsumer
class KafkaConsumers(object):
def __init__(self, kafka_host, kafka_port, kafka_topic, group_id):
self._host = kafka_host
self._port = kafka_port
self._topic = kafka_topic
self._group_id = group_id
self._consumer = None
try:
self._consumer = KafkaConsumer(bootstrap_servers="%s:%s" % (self._host, self._port),
group_id=self._group_id)
self._consumer.subscribe(self._topic)
except Exception as e:
print(e)
def consumer_pull_data(self):
"""
:return:
"""
try:
msg = self._consumer.poll(timeout_ms=2000, max_records=1)
message = list(msg.values())
if len(message) > 0:
message = message[0][0]
return message
except Exception as e:
print(e)
def main():
    # Poll the hard-coded broker forever, one record at a time.
    consumer = KafkaConsumers(kafka_host="10.242.111.211", kafka_port=9092, kafka_topic="test", group_id="mytest")
    while True:
        consumer.consumer_pull_data()


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
from useless import interval
import time
__author__ = 'Ronie Martinez'


# Bug fix: the Python-2-only ``print "..."`` statement is replaced with the
# parenthesized call, which behaves identically on Python 2 and works on 3.
@interval(500)
def my_function():
    """Print a greeting; scheduled every 500 ms by the @interval decorator."""
    print("Hello world!")


my_function()  # Caution! Non-blocking: fires on a background timer.
time.sleep(10)  # Keep the main thread alive so the interval can fire.
|
# Method 1: closed form
def cubeSum0(n):
    """Sum of cubes 1^3 + ... + n^3 via the identity (n(n+1)/2)^2."""
    half_sum = n * (n + 1) // 2  # n(n+1) is always even, so // is exact
    return half_sum * half_sum
# Method 2: direct iteration
def cubeSum1(n):
    """Sum of cubes 1^3 + ... + n^3 by iteration.

    Bug fix: the original iterated ``range(n)`` (i.e. 0..n-1) and therefore
    returned the sum only up to (n-1)^3, disagreeing with cubeSum0's closed
    form for every n >= 1.
    """
    s = 0
    for i in range(1, n + 1):
        s = s + i ** 3
    return s
# Print the closed-form cube sum for an n read from stdin.
print(cubeSum0(int(input())))
#print(cubeSum1(int(input())))
|
import pandas as pd
import numpy as np
import cv2
import os
import scipy
from sklearn.model_selection import train_test_split
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils.np_utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
# defining file paths (raw strings so backslashes in the Windows paths are
# never interpreted as escape sequences)
filepath = r"C:\datasets\data\project\daily activity"
filepath2 = r"C:\datasets\data\project\FA cam1"
DA = os.listdir(filepath)
fall = os.listdir(filepath2)
images = []
label = []
# loading daily activity images (label 0)
# Bug fix: scipy.misc.imread was removed in SciPy 1.2; cv2.imread (cv2 is
# already imported at the top of the file) is used instead. cv2 loads BGR
# rather than RGB, but the ordering is consistent across both classes, so
# training is unaffected.
for i in DA:
    image = cv2.imread(os.path.join(filepath, i))
    images.append(image)
    label.append(0)  # daily-activity class
# loading fall activity images (label 1)
for i in fall:
    image = cv2.imread(os.path.join(filepath2, i))
    images.append(image)
    label.append(1)  # fall class
# Resize every image to the network's input size. The original hard-coded
# range(0, 118), silently skipping any images beyond that count.
for i in range(len(images)):
    images[i] = cv2.resize(np.array(images[i]), (224, 224))
images = np.array(images)
label = np.array(label)
label = to_categorical(label)
# Splitting data into training and testing set
Xtrain, Xtest, ytrain, ytest = train_test_split(images, label, test_size=0.2)
# (3) Create a sequential AlexNet-style model for 2-class classification.
model = Sequential()
model.add(keras.layers.InputLayer(input_shape = (224,224,3)) )
# 1st Convolutional Layer
model.add(Conv2D(filters=96, input_shape=(224,224,3), kernel_size=(11,11), strides=(4,4), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation before passing it to the next layer
model.add(BatchNormalization())
# 2nd Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation
model.add(BatchNormalization())
# 3rd Convolutional Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# 4th Convolutional Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# 5th Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation
model.add(BatchNormalization())
# Passing it to a dense layer
model.add(Flatten())
# 1st Dense Layer
# NOTE(review): input_shape on a non-first layer should be ignored by
# Keras -- confirm; the actual flattened size comes from the conv stack.
model.add(Dense(4096, input_shape=(224*224*3,)))
model.add(Activation('relu'))
# Add Dropout to prevent overfitting
model.add(Dropout(0.4))
# Batch Normalisation
model.add(BatchNormalization())
# 2nd Dense Layer
model.add(Dense(4096))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.4))
# Batch Normalisation
model.add(BatchNormalization())
# 3rd Dense Layer
model.add(Dense(1000))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.4))
# Batch Normalisation
model.add(BatchNormalization())
# Output Layer: 2 classes (daily activity / fall)
model.add(Dense(2))
model.add(Activation('softmax'))
model.summary()
# compiling
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# data augmentation: small rotations, zooms, shifts, and horizontal flips
datagen = ImageDataGenerator(
        rotation_range=10,
        zoom_range=0.1,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True)
datagen.fit(Xtrain)
# NOTE(review): fit_generator is deprecated in modern Keras (model.fit
# accepts generators) -- fine for the old Keras this file imports.
model.fit_generator(datagen.flow(Xtrain, ytrain, batch_size=10),
                    steps_per_epoch=len(Xtrain) // 10, epochs=6)
# prediction on the held-out test split
scores = model.predict(Xtest, verbose=1)
score = pd.DataFrame(scores)
nn = score.apply(np.argmax, axis=1)   # predicted class per sample
ytest = pd.DataFrame(ytest)
n = ytest.apply(np.argmax, axis=1)    # true class per sample
error = pd.concat([n, nn], axis=1)
error['error'] = error[0] - error[1]
# Bug fix: DataFrame.ix was deprecated and removed from pandas; .loc performs
# the same label-based boolean row selection here.
efficiency_CNN = error.loc[error['error'] == 0, :].shape[0] / error.shape[0]
|
# Simple greeting demo: prompt (in Chinese: "please enter your name") for a
# name, then greet the user ("hello, <name>!"). Runtime strings kept as-is.
print("--------hellp.py--------")
temp=input("请输入您的姓名:")
print("你好,"+temp+"!")
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/8 9:06
# @Author : LQX
# @Email : qixuan.lqx@qq.com
# @File : ordinalloss.py
# @Software: PyCharm
import torch as t
from torch import nn, autograd
from torch.nn import Parameter as P
from torch.nn.modules.loss import _Loss, _WeightedLoss
from torch.nn import functional as F
import math
import ipdb
class OrdinalLoss(_WeightedLoss):
    """Experimental loss that scores whether pairwise orderings of the
    predictions agree with the target orderings, plus an MSE term.

    NOTE(review): several defects in ``forward`` --
      * ``F.mseloss`` does not exist; the torch function is ``F.mse_loss``
        (conventionally called as ``F.mse_loss(input, target)``), so this
        line raises AttributeError whenever it is reached.
      * ``math.exp`` cannot convert the multi-element tensor
        ``self.w * score`` to a float; ``torch.exp`` would be needed.
      * ``self.w = P(t.Tensor(2))`` is an *uninitialised* two-element
        parameter (arbitrary memory contents, non-deterministic).
      * In the correct/wrong comparison loops the indices appear swapped:
        ``i`` ranges over ``correct_idx`` but indexes ``wrong_idx`` (and
        vice versa), so ``correct_idx[j]`` can go out of bounds.
    The intended semantics are ambiguous, so the code is left untouched and
    only documented.
    """
    def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean'):
        super(OrdinalLoss, self).__init__(weight, size_average, reduce, reduction)
        # Learnable 2-element weight (uninitialised -- see class NOTE).
        self.w = P(t.Tensor(2))

    def forward(self, input: t.Tensor, target: t.Tensor):
        # Hard class predictions from the logits.
        predict = t.argmax(input, dim=-1)
        correct_idx = t.nonzero(predict == target).squeeze_()
        wrong_idx = t.nonzero(predict != target).squeeze_()
        # compare ordering agreement within the wrongly-predicted samples
        w_score = 0
        for i in range(wrong_idx.shape[0]):
            for j in range(i + 1, wrong_idx.shape[0]):
                target_ordinal = target[wrong_idx[j]] - target[wrong_idx[i]]
                predict_ordinal = predict[wrong_idx[j]] - predict[wrong_idx[i]]
                w_score += target_ordinal * predict_ordinal
        # compare between correct_idx and wrong_idx (see class NOTE: the
        # i/j indexing here looks swapped)
        b_score = 0
        for i in range(correct_idx.shape[0]):
            for j in range(wrong_idx.shape[0]):
                target_ordinal = target[correct_idx[j]] - target[wrong_idx[i]]
                predict_ordinal = predict[correct_idx[j]] - predict[wrong_idx[i]]
                b_score += target_ordinal * predict_ordinal
        score = w_score + b_score
        return math.exp(0 - self.w * score) + F.mseloss(target, input)
if __name__ == "__main__":
    # Scratch experiment with autograd plumbing (not a real test of the loss).
    loss_fn = OrdinalLoss()
    x = t.ones((5, 3), requires_grad=True)
    x = x.view((-1, 1))  # reshape to a 15x1 column (x is now a non-leaf)
    w = t.randn(2, 15)
    b = t.rand(2, 1)
    y = t.mm(w, x) + b
    print(y.requires_grad)
    y.requires_grad_()
    print(y.requires_grad)
    print(y)
    print(w, b)
    print(y.grad)
    # NOTE(review): y is a (2, 1) non-scalar, so backward() without an
    # explicit gradient argument raises RuntimeError; y is also a non-leaf,
    # so y.grad would stay None even if backward succeeded.
    y.backward()
    print(y.grad)
    print(w, b)
|
from numpy import*
from scipy import optimize
def polynomial(x, w):
    """Evaluate the polynomial sum_i w[i] * x**i at x.

    ``w`` is a 1-D numpy array of coefficients, lowest power first.
    """
    total = 0.0
    for power, coeff in enumerate(w):
        total += coeff * pow(x, power)
    return total
def leastSqure(xone, y, w_init):
    """Ordinary least-squares polynomial fit of degree ``w_init.size - 1``.

    Builds the Vandermonde design matrix X (X[i, j] = xone[i]**j) and solves
    the normal equations w = (X^T X)^-1 X^T y. Prints and returns the fitted
    coefficients as a 1-D array.

    Parameters: xone -- 1-D array of sample x values; y -- 1-D array of
    targets; w_init -- array whose size fixes the coefficient count.
    """
    n_coef = w_init.size
    X = mat(zeros((xone.size, n_coef)))
    for i in range(xone.size):
        for j in range(n_coef):
            X[i, j] = pow(xone[i], j)
    # Perf fix: the original recomputed the entire normal-equation solve
    # once per coefficient inside the loop; compute it once and copy out.
    solution = dot(dot(dot(X.T, X).I, X.T), y)
    w = zeros(n_coef)
    for i in range(n_coef):
        w[i] = solution[0, i]
    print("w: ", w)
    return w
|
from rest_framework import viewsets, permissions
from .models import Location, User, Deliveryman, Delivery
from .serializer import LocationSerializer, UserSerializer, DeliverymanSerializer, DeliverySerializer
class LocationViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoint for Location objects; open to any caller."""
    queryset = Location.objects.all()
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = LocationSerializer
class UserViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoint for User objects; open to any caller."""
    queryset = User.objects.all()
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = UserSerializer
class DeliverymanViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoint for Deliveryman objects; open to any caller."""
    queryset = Deliveryman.objects.all()
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = DeliverymanSerializer
class DeliveryViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoint for Delivery objects; open to any caller."""
    queryset = Delivery.objects.all()
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = DeliverySerializer
|
import datetime
from core.mixins import ReadFromCSVMixin
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils.text import slugify
from organisations.models import (
Organisation,
OrganisationDivision,
OrganisationDivisionSet,
)
class Command(ReadFromCSVMixin, BaseCommand):
    """
    Bespoke import command for importing NI District Electoral Areas
    from a custom CSV assembled from
    http://www.legislation.gov.uk/uksi/2014/270/made
    https://github.com/mysociety/mapit/blob/master/mapit_gb/data/ni-electoral-areas-2015.csv
    https://www.registers.service.gov.uk/registers/statistical-geography-local-government-district-nir
    """

    division_sets = {}   # org official_identifier -> OrganisationDivisionSet
    divisions = []       # OrganisationDivision objects awaiting save
    start_date = "2014-05-22"

    def handle(self, *args, **options):
        csv_data = self.load_data(options)
        # first pass over the csv builds the division sets
        self.create_division_sets(csv_data)
        # second pass over the csv builds the divisions
        self.create_divisions(csv_data)
        # now we've created all the objects,
        # save them all inside a transaction
        self.save_all()

    def get_org_from_line(self, line):
        """Resolve the local authority a CSV row refers to."""
        return Organisation.objects.all().get_by_date(
            organisation_type="local-authority",
            official_identifier=line["District Register Code"],
            date=datetime.datetime.strptime("2015-04-01", "%Y-%m-%d").date(),
        )

    def create_division_sets(self, csv_data):
        """Build one (unsaved) OrganisationDivisionSet per organisation."""
        for line in csv_data:
            org = self.get_org_from_line(line)
            self.division_sets[
                org.official_identifier
            ] = OrganisationDivisionSet(
                organisation=org,
                start_date=self.start_date,
                end_date=None,
                legislation_url="http://www.legislation.gov.uk/uksi/2014/270/made",
                short_title="The District Electoral Areas (Northern Ireland) Order 2014",
                notes="",
                consultation_url="",
            )

    def create_divisions(self, csv_data):
        """Build one (unsaved) OrganisationDivision per CSV row."""
        for line in csv_data:
            org = self.get_org_from_line(line)
            id_ = "gss:{}".format(line["District Electoral Area GSS code"])
            div_set = self.division_sets[org.official_identifier]
            div = OrganisationDivision(
                official_identifier=id_,
                temp_id="",
                divisionset=div_set,
                name=line["District Electoral Area"],
                slug=slugify(line["District Electoral Area"]),
                division_type="LGE",
                seats_total=line["Number of councillors"],
            )
            self.divisions.append(div)

    @transaction.atomic
    def save_all(self):
        """Persist every division (and its division set) in one transaction."""
        for record in self.divisions:
            record.divisionset.save()
            # hack: see https://code.djangoproject.com/ticket/29085
            # This should fix it when we use Django>=3.03:
            # https://github.com/django/django/commit/519016e5f25d7c0a040015724f9920581551cab0
            record.divisionset = record.divisionset
            record.save()
            # Bug fix: the original repeated the re-assignment and
            # record.save() a second time (copy/paste duplication), writing
            # every record to the database twice.
|
from waitlist.permissions import perm_manager
from ... import roles_changed_sig, role_created_sig, role_removed_sig
from typing import Iterable, Sequence, Any
from waitlist.storage.database import AccountNote, Role, RoleChangeEntry,\
Account
from waitlist.base import db
from sqlalchemy.sql.expression import or_
from waitlist.utility.constants import account_notes
perm_manager.define_permission('trainee')
perm_trainee = perm_manager.get_permission('trainee')
def on_roles_changed_history_entry(_, to_id: int, by_id: int,
                                   added_roles: Sequence[str],
                                   removed_roles: Sequence[str],
                                   note: str) -> None:
    """Record an AccountNote documenting a role change on an account.

    Completely empty events (no roles added or removed and no note) are
    skipped. The added/removed display names are also stored as a JSON
    payload on the note.
    """
    if note == '':
        note = None
    # Bug fix: the original compared ``note == ""`` *after* normalising empty
    # strings to None, so this no-op early-return could never fire.
    if len(added_roles) <= 0 and len(removed_roles) <= 0 and note is None:
        return
    history_entry = AccountNote(accountID=to_id, byAccountID=by_id, note=note,
                                type=account_notes.TYPE_ACCOUNT_ROLES_CHANGED)
    role_changes_dict = dict()
    if len(added_roles) > 0:
        # or_() takes its clauses as positional arguments; unpack the list
        # instead of passing a generator (deprecated in SQLAlchemy).
        db_roles = db.session.query(Role).filter(
            or_(*[Role.name == name for name in added_roles])).all()
        role_changes_dict['added'] = []
        for role in db_roles:
            # get role from db
            role_change = RoleChangeEntry(added=True, role=role)
            history_entry.role_changes.append(role_change)
            role_changes_dict['added'].append(role.displayName)
    if len(removed_roles) > 0:
        db_roles = db.session.query(Role).filter(
            or_(*[Role.name == name for name in removed_roles])).all()
        role_changes_dict['removed'] = []
        for role in db_roles:
            role_change = RoleChangeEntry(added=False, role=role)
            history_entry.role_changes.append(role_change)
            role_changes_dict['removed'].append(role.displayName)
    if len(role_changes_dict) > 0:
        history_entry.jsonPayload = role_changes_dict
    db.session.add(history_entry)
    db.session.commit()
# handler to reset welcome mail status
def on_roles_changed_check_welcome_mail(_: Any, to_id: int, by_id: int,
                                        added_roles: Iterable[str],
                                        removed_roles: Iterable[str], note: str) -> None:
    """
    Handler to reset welcome mail status.

    If any newly added role matches one of the 'trainee' permission needs,
    clear the target account's had_welcome_mail flag so the welcome mail is
    sent again. NOTE(review): no db.session.commit() here -- presumably a
    later handler or request teardown commits the change; confirm the flag
    actually persists.
    """
    for role in added_roles:
        for need in perm_trainee.needs:
            if role == need.value:
                acc = db.session.query(Account).get(to_id)
                acc.had_welcome_mail = False
                return
def on_role_created_history_entry(_: Any, by_id: int, role_name: str,
                                  role_display_name: str) -> None:
    """Persist an AccountNote recording that a role was created."""
    if by_id is None or role_name is None or role_display_name is None:
        return
    entry: AccountNote = AccountNote(accountID=by_id, byAccountID=by_id,
                                     restriction_level=1000,
                                     type=account_notes.TYPE_ROLE_CREATED)
    entry.jsonPayload = {
        'role_name': role_name,
        'role_display_name': role_display_name
    }
    db.session.add(entry)
    db.session.commit()
def on_role_removed_history_entry(_: Any, by_id: int, role_name: str,
                                  role_display_name: str) -> None:
    """Persist an AccountNote recording that a role was removed."""
    if by_id is None or role_name is None or role_display_name is None:
        return
    entry: AccountNote = AccountNote(accountID=by_id, byAccountID=by_id,
                                     restriction_level=1000,
                                     type=account_notes.TYPE_ROLE_REMOVED)
    entry.jsonPayload = {
        'role_name': role_name,
        'role_display_name': role_display_name
    }
    db.session.add(entry)
    db.session.commit()
def connect() -> None:
    """Wire all role-related signal handlers to their signals."""
    roles_changed_sig.connect(on_roles_changed_history_entry)
    roles_changed_sig.connect(on_roles_changed_check_welcome_mail)
    role_created_sig.connect(on_role_created_history_entry)
    role_removed_sig.connect(on_role_removed_history_entry)
|
from django.contrib import admin
from .models import BlogCategory, BlogPost
# Expose the blog models in the Django admin with default ModelAdmin options.
admin.site.register(BlogCategory)
admin.site.register(BlogPost)
|
# BSD 3-Clause License
#
# Adapted from ValentyUSB.
#
# Copyright (c) 2020, Great Scott Gadgets <ktemkin@greatscottgadgets.com>
# Copyright (c) 2018, Luke Valenty
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from amaranth import Elaboratable, Module, Signal, Cat, Const
from amaranth.lib.cdc import FFSynchronizer
from amaranth.hdl.xfrm import ResetInserter
class TxShifter(Elaboratable):
    """Transmit Shifter
    TxShifter accepts parallel data and shifts it out serially.
    Parameters
    ----------
    Parameters are passed in via the constructor.
    width : int
        Width of the data to be shifted.
    Input Ports
    -----------
    Input ports are passed in via the constructor.
    i_data: Signal(width)
        Data to be transmitted.
    i_enable: Signal(), input
        When asserted, shifting will be allowed; otherwise, the shifter will be stalled.
    i_clear: Signal(), input
        Synchronously clears the shift register and position marker.
    Output Ports
    ------------
    Output ports are data members of the module. All outputs are flopped.
    o_data : Signal()
        Serial data output.
    o_empty : Signal()
        Asserted the cycle before the shifter loads in more i_data.
    o_get : Signal()
        Asserted the cycle after the shifter loads in i_data.
    """
    def __init__(self, width):
        self._width = width
        #
        # I/O Port
        #
        self.i_data = Signal(width)
        self.i_enable = Signal()
        self.i_clear = Signal()
        self.o_get = Signal()
        self.o_empty = Signal()
        self.o_data = Signal()

    def elaborate(self, platform):
        m = Module()
        shifter = Signal(self._width)
        # One-hot marker tracking the bit position; bit 0 set means the
        # register is empty and will reload on the next enabled cycle.
        pos = Signal(self._width, reset=0b1)
        with m.If(self.i_enable):
            empty = Signal()
            m.d.usb += [
                pos.eq(pos >> 1),
                shifter.eq(shifter >> 1),
                self.o_get.eq(empty),
            ]
            with m.If(empty):
                # Reload: latch the next parallel word and move the marker
                # back to the MSB position.
                m.d.usb += [
                    shifter.eq(self.i_data),
                    pos.eq(1 << (self._width-1)),
                ]
        with m.If(self.i_clear):
            # Clear takes priority over shifting (written last in `usb`).
            m.d.usb += [
                shifter.eq(0),
                pos.eq(1)
            ]
        m.d.comb += [
            empty.eq(pos[0]),
            self.o_empty.eq(empty),
            self.o_data.eq(shifter[0]),
        ]
        return m
class TxNRZIEncoder(Elaboratable):
    """
    NRZI Encode
    In order to ensure there are enough bit transitions for a receiver to
    recover the clock, USB uses NRZI encoding. This module serialises the
    incoming data/oe/valid strobes into the raw J/K/SE0 line states driven
    onto USB, appending the end-of-packet sequence (SE0, SE0, J) when the
    output-enable drops.
    https://www.pjrc.com/teensy/beta/usb20.pdf, USB2 Spec, 7.1.8
    https://en.wikipedia.org/wiki/Non-return-to-zero
    Clock Domain
    ------------
    usb_48 : 48MHz
    Input Ports
    -----------
    i_valid : Signal()
        Qualifies oe and data (one assertion per bit time).
    i_oe : Signal()
        Indicates that the transmit pipeline should be driving USB.
    i_data : Signal()
        Data bit to be transmitted on USB. Qualified by i_valid.
        NRZI: a '1' keeps the current line state, a '0' toggles it.
    Output Ports
    ------------
    Output ports are all flopped (registered in the usb_io domain).
    o_usbp : Signal()
        Raw value of USB+ line.
    o_usbn : Signal()
        Raw value of USB- line.
    o_oe : Signal()
        When asserted it indicates that the tx pipeline should be driving USB.
    """
    def __init__(self):
        self.i_valid = Signal()
        self.i_oe = Signal()
        self.i_data = Signal()
        # flop all outputs
        self.o_usbp = Signal()
        self.o_usbn = Signal()
        self.o_oe = Signal()

    def elaborate(self, platform):
        m = Module()
        usbp = Signal()
        usbn = Signal()
        oe = Signal()
        # wait for new packet to start
        with m.FSM(domain="usb_io"):
            with m.State("IDLE"):
                m.d.comb += [
                    usbp.eq(1),
                    usbn.eq(0),
                    oe.eq(0),
                ]
                with m.If(self.i_valid & self.i_oe):
                    # first bit of sync always forces a transition, we idle
                    # in J so the first output bit is K.
                    m.next = "DK"
            # the output line is in state J
            with m.State("DJ"):
                m.d.comb += [
                    usbp.eq(1),
                    usbn.eq(0),
                    oe.eq(1),
                ]
                with m.If(self.i_valid):
                    with m.If(~self.i_oe):
                        m.next = "SE0A"
                    with m.Elif(self.i_data):
                        # '1' -> no transition, stay in J
                        m.next = "DJ"
                    with m.Else():
                        m.next = "DK"
            # the output line is in state K
            with m.State("DK"):
                m.d.comb += [
                    usbp.eq(0),
                    usbn.eq(1),
                    oe.eq(1),
                ]
                with m.If(self.i_valid):
                    with m.If(~self.i_oe):
                        m.next = "SE0A"
                    with m.Elif(self.i_data):
                        # '1' -> no transition, stay in K
                        m.next = "DK"
                    with m.Else():
                        m.next = "DJ"
            # first bit of the SE0 state
            with m.State("SE0A"):
                m.d.comb += [
                    usbp.eq(0),
                    usbn.eq(0),
                    oe.eq(1),
                ]
                with m.If(self.i_valid):
                    m.next = "SE0B"
            # second bit of the SE0 state
            with m.State("SE0B"):
                m.d.comb += [
                    usbp.eq(0),
                    usbn.eq(0),
                    oe.eq(1),
                ]
                with m.If(self.i_valid):
                    m.next = "EOPJ"
            # drive the bus back to J before relinquishing control
            with m.State("EOPJ"):
                m.d.comb += [
                    usbp.eq(1),
                    usbn.eq(0),
                    oe.eq(1),
                ]
                with m.If(self.i_valid):
                    m.next = "IDLE"
        # register all outputs in the line-rate domain
        m.d.usb_io += [
            self.o_oe.eq(oe),
            self.o_usbp.eq(usbp),
            self.o_usbn.eq(usbn),
        ]
        return m
class TxBitstuffer(Elaboratable):
    """
    Bitstuff Insertion
    Long sequences of 1's would cause the receiver to lose its lock on the
    transmitter's clock. USB solves this with bitstuffing. A '0' is stuffed
    after every 6 consecutive 1's.
    The TxBitstuffer is the only component in the transmit pipeline that can
    delay transmission of serial data. It is therefore responsible for
    generating the bit_strobe signal that keeps the pipe moving forward.
    https://www.pjrc.com/teensy/beta/usb20.pdf, USB2 Spec, 7.1.9
    https://en.wikipedia.org/wiki/Bit_stuffing
    Clock Domain
    ------------
    usb : 12MHz bit clock
    (NOTE(review): the original header said "usb_12 : 48MHz"; the logic below
    is written to the "usb" domain -- confirm the intended rate.)
    Input Ports
    ------------
    i_data : Signal()
        Data bit to be transmitted on USB.
    Output Ports
    ------------
    o_data : Signal()
        Data bit to be transmitted on USB (0 when a stuff bit is inserted).
    o_stall : Signal()
        Used to apply backpressure on the tx pipeline.
    o_will_stall : Signal()
        Early warning, asserted one bit before a stuff bit is inserted.
    """
    def __init__(self):
        self.i_data = Signal()
        self.o_stall = Signal()
        self.o_will_stall = Signal()
        self.o_data = Signal()

    def elaborate(self, platform):
        m = Module()
        stuff_bit = Signal()
        # States D0..D6 count consecutive '1's seen so far.
        with m.FSM(domain="usb"):
            for i in range(5):
                with m.State(f"D{i}"):
                    # Receiving '1' increments the bitstuff counter.
                    with m.If(self.i_data):
                        m.next = f"D{i+1}"
                    # Receiving '0' resets the bitstuff counter.
                    with m.Else():
                        m.next = "D0"
            with m.State("D5"):
                with m.If(self.i_data):
                    # There's a '1', so indicate we might stall on the next loop.
                    m.d.comb += self.o_will_stall.eq(1),
                    m.next = "D6"
                with m.Else():
                    m.next = "D0"
            with m.State("D6"):
                # Six '1's seen: emit the stuffed '0' this bit time.
                m.d.comb += stuff_bit.eq(1)
                m.next = "D0"
        m.d.comb += [
            self.o_stall.eq(stuff_bit)
        ]
        # flop outputs: force a '0' onto the line during the stuff bit
        with m.If(stuff_bit):
            m.d.usb += self.o_data.eq(0),
        with m.Else():
            m.d.usb += self.o_data.eq(self.i_data)
        return m
class TxPipeline(Elaboratable):
    """Complete USB transmit pipeline: 8-bit words in, NRZI-encoded
    differential line states out.

    Chains TxShifter (parallel-to-serial) -> TxBitstuffer -> TxNRZIEncoder,
    prefixes each packet with the sync byte, and crosses from the bit-clock
    domain ("usb") to the line-rate domain ("usb_io").
    """
    def __init__(self):
        # Strobe marking each line-rate bit time (qualifies the NRZI stage).
        self.i_bit_strobe = Signal()
        # Next payload byte to serialise; o_data_strobe requests a new one.
        self.i_data_payload = Signal(8)
        self.o_data_strobe = Signal()
        self.i_oe = Signal()
        # Raw line outputs from the NRZI encoder.
        self.o_usbp = Signal()
        self.o_usbn = Signal()
        self.o_oe = Signal()
        self.o_pkt_end = Signal()
        # Serial data / output-enable at the bit-clock rate, pre-NRZI.
        self.fit_dat = Signal()
        self.fit_oe = Signal()

    def elaborate(self, platform):
        m = Module()
        # Shift register that emits the 8-bit sync pattern, MSB first.
        sync_pulse = Signal(8)
        da_reset_shifter = Signal()
        da_reset_bitstuff = Signal() # Need to reset the bit stuffer 1 cycle after the shifter.
        stall = Signal()
        # These signals are set during the sync pulse
        sp_reset_bitstuff = Signal()
        sp_reset_shifter = Signal()
        sp_bit = Signal()
        sp_o_data_strobe = Signal()
        # 12MHz domain
        bitstuff_valid_data = Signal()
        # Keep a Gray counter around to smoothly transition between states
        state_gray = Signal(2)
        state_data = Signal()
        state_sync = Signal()
        #
        # Transmit gearing.
        #
        m.submodules.shifter = shifter = TxShifter(width=8)
        m.d.comb += [
            shifter.i_data .eq(self.i_data_payload),
            shifter.i_enable .eq(~stall),
            shifter.i_clear .eq(da_reset_shifter | sp_reset_shifter)
        ]
        #
        # Bit-stuffing and NRZI.
        #
        bitstuff = ResetInserter(da_reset_bitstuff)(TxBitstuffer())
        m.submodules.bitstuff = bitstuff
        m.submodules.nrzi = nrzi = TxNRZIEncoder()
        #
        # Transmit controller.
        #
        m.d.comb += [
            # Send a data strobe when we're two bits from the end of the sync pulse.
            # This is because the pipeline takes two bit times, and we want to ensure the pipeline
            # has spooled up enough by the time we're there.
            bitstuff.i_data.eq(shifter.o_data),
            stall.eq(bitstuff.o_stall),
            sp_bit.eq(sync_pulse[0]),
            sp_reset_bitstuff.eq(sync_pulse[0]),
            # The shifter has one clock cycle of latency, so reset it
            # one cycle before the end of the sync byte.
            sp_reset_shifter.eq(sync_pulse[1]),
            sp_o_data_strobe.eq(sync_pulse[5]),
            # Decode the Gray-coded state into "sending data"/"sending sync".
            state_data.eq(state_gray[0] & state_gray[1]),
            state_sync.eq(state_gray[0] & ~state_gray[1]),
            self.fit_oe.eq(state_data | state_sync),
            self.fit_dat.eq((state_data & shifter.o_data & ~bitstuff.o_stall) | sp_bit),
            self.o_data_strobe.eq(state_data & shifter.o_get & ~stall & self.i_oe),
        ]
        # If we reset the shifter, then o_empty will go high on the next cycle.
        #
        m.d.usb += [
            # If the shifter runs out of data, percolate the "reset" signal to the
            # shifter, and then down to the bitstuffer.
            # da_reset_shifter.eq(~stall & shifter.o_empty & ~da_stalled_reset),
            # da_stalled_reset.eq(da_reset_shifter),
            # da_reset_bitstuff.eq(~stall & da_reset_shifter),
            bitstuff_valid_data.eq(~stall & shifter.o_get & self.i_oe),
        ]
        # Packet sequencing: idle -> 8 sync bits -> payload -> idle.
        with m.FSM(domain="usb"):
            with m.State('IDLE'):
                with m.If(self.i_oe):
                    m.d.usb += [
                        sync_pulse.eq(1 << 7),
                        state_gray.eq(0b01)
                    ]
                    m.next = "SEND_SYNC"
                with m.Else():
                    m.d.usb += state_gray.eq(0b00)
            with m.State('SEND_SYNC'):
                m.d.usb += sync_pulse.eq(sync_pulse >> 1)
                with m.If(sync_pulse[0]):
                    m.d.usb += state_gray.eq(0b11)
                    m.next = "SEND_DATA"
                with m.Else():
                    m.d.usb += state_gray.eq(0b01)
            with m.State('SEND_DATA'):
                with m.If(~self.i_oe & shifter.o_empty & ~bitstuff.o_stall):
                    with m.If(bitstuff.o_will_stall):
                        # One more bit time needed for the final stuff bit.
                        m.next = 'STUFF_LAST_BIT'
                    with m.Else():
                        m.d.usb += state_gray.eq(0b10)
                        m.next = 'IDLE'
                with m.Else():
                    m.d.usb += state_gray.eq(0b11)
            with m.State('STUFF_LAST_BIT'):
                m.d.usb += state_gray.eq(0b10)
                m.next = 'IDLE'
        # 48MHz domain
        # NRZI encoding
        nrzi_dat = Signal()
        nrzi_oe = Signal()
        # Cross the data from the 12MHz domain to the 48MHz domain
        cdc_dat = FFSynchronizer(self.fit_dat, nrzi_dat, o_domain="usb_io", stages=3)
        cdc_oe = FFSynchronizer(self.fit_oe, nrzi_oe, o_domain="usb_io", stages=3)
        m.submodules += [cdc_dat, cdc_oe]
        m.d.comb += [
            nrzi.i_valid.eq(self.i_bit_strobe),
            nrzi.i_data.eq(nrzi_dat),
            nrzi.i_oe.eq(nrzi_oe),
            self.o_usbp.eq(nrzi.o_usbp),
            self.o_usbn.eq(nrzi.o_usbn),
            self.o_oe.eq(nrzi.o_oe),
        ]
        return m
|
#!/usr/bin/env python
import difflib
import sys
import os
gold_config = os.sys.argv[1]
device_config = os.sys.argv[2]
with open('{}'.format(gold_config)) as gold:
lines1 = sorted(gold.readlines())
with open('{}'.format(device_config)) as device:
lines2 = sorted(device.readlines())
for line in sorted(difflib.unified_diff(lines1, lines2, fromfile='file1', tofile='file2', lineterm='', n=0)):
for prefix in ('---', '+++', '@@', '#'):
if line.startswith(prefix):
break
elif line.strip().startswith('-set snmp v3 usm'):
break
elif line.strip().startswith('+set snmp v3 usm'):
break
elif line.strip().startswith('+set'):
line = re.sub('^\+set', 'delete', line, flags=re.M)
elif line.strip().startswith('-set'):
line = re.sub('^\-set', 'set', line, flags=re.M)
else:
print line,
#####
## need to ignore "set version x.x.x" in configs, use another method to verify correct junos version
## need to handle deactivated sections of code... if deactivated, throw warning to intervene
## need to present a message saying "device in compliance statement" and have follow up
## ansible playbook play take that message and stop moving forward
## later, add in collection of device config so that if we ever use apply-groups, we include
## the "display inheritance" extra bits so items are not lost. (if needed?? -- jinja will likely present the same data)
## will need to deal with strings being quoted in jinja output vs not quoted in junos config output
## ***WILL also need to deal with delete then set order
## *** also, need to exclude snmp v3 users since the stored key will always be different
#old
# elif line.strip().startswith('+set'):
# line = line.replace('+set','delete')
# elif line.strip().startswith('-set'):
# line = line.replace('-set','set')
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, fcluster
from dtaidistance import dtw, dtw_visualisation, clustering
from dtaidistance import dtw_visualisation as dtwvis
from sklearn.preprocessing import RobustScaler, StandardScaler
import pickle
from src.models.dimred_clustering import *
run_plots = False
class Preprocessing(object):
    """Reshape raw sales records into per-product time series, plus helpers
    for ordering/splitting products by missing history."""

    def __init__(self):
        pass

    def pivot_table(self, df):
        """Pivot raw sales rows into a (sku_key x tran_date) sales table.

        NOTE: drops the descriptive sku_* columns in place on the caller's df.
        """
        print('Manipulate and pivot table...')
        df['sku_key'] = df['sku_key'].astype(int)
        df.drop(['sku_department', 'sku_subdepartment',
                 'sku_category', 'sku_subcategory', 'sku_label'],
                axis=1, inplace=True)
        product_ts = pd.pivot_table(df, values='sales',
                                    index='sku_key', columns='tran_date')
        return product_ts

    def sort_nas(self, df):
        """Sort rows by NaN count (fewest missing entries first).

        Stores the result on ``self.df_ordered`` and returns it. Also adds
        (and then drops) a temporary 'nas' column on the input frame.
        """
        # Idiom fix: df.isna().sum(axis=1) is the direct spelling of the
        # original df.apply(lambda x: x.isna()).sum(axis=1).
        df['nas'] = df.isna().sum(axis=1)
        print('There are {} products with less than 50% entries'
              .format(len(df[df['nas'] > len(df.columns) / 2])))
        self.df_ordered = df.sort_values('nas', ascending=True).drop('nas', axis=1)
        return self.df_ordered

    def split_nans(self, pivot_table, feature_df):
        """Split products by whether their *first* time-series column is NaN.

        NOTE(review): only column 0 is inspected; rows with NaNs elsewhere
        still land in the "no_nans" group -- confirm this is intended.
        """
        sorted_df = pivot_table.copy()
        sorted_df['nan'] = sorted_df.iloc[:, 0].apply(np.isnan)
        pivot_no_nans = sorted_df[sorted_df['nan'] == False].drop('nan', axis=1)
        pivot_nans = sorted_df[sorted_df['nan'] == True].drop('nan', axis=1)
        nans = feature_df.loc[list(pivot_nans.index), :]
        no_nans = feature_df.loc[list(pivot_no_nans.index), :]
        return pivot_nans, nans, pivot_no_nans, no_nans

    def plot_nas(self, df):
        """Heatmap visualisation of missing values."""
        plt.figure(figsize=(5, 10))
        plt.imshow(df, cmap='hot', interpolation='nearest')
        plt.show()

    def make_diff_length_list(self, df):
        """Build per-product series with NaNs stripped (variable lengths).

        Stores zero-filled and raw matrices plus ``product_dict`` (keyed by
        df.index) and ``product_list`` on self.
        """
        self.product_ts_fill = df.fillna(0)
        self.product_matrix_fill = self.product_ts_fill.values
        self.product_matrix = df.values
        product_dict = {}
        product_list = []
        for row, sku in zip(range(len(self.product_matrix)), df.index):
            series = self.product_matrix[row][~np.isnan(self.product_matrix[row])]
            product_dict[sku] = series
            product_list.append(series)
        self.product_dict = product_dict
        self.product_list = product_list
class DynamicTimeWarping(object):
def __init__(self):
pass
def distance_matrix(self, df):
print('Producing distance matrix...')
ds = dtw.distance_matrix_fast(df)
if run_plots == True:
f, ax = dtw_visualisation.plot_matrix(ds)
f.set_size_inches(12, 12)
return ds
def linkage_tree(self, df):
print('Producing linkage Tree')
self.model = clustering.LinkageTree(dtw.distance_matrix_fast, {})
clusters_dtw = self.model.fit(df)
return clusters_dtw
pickle.dump(self.model, open('model.pkl', 'wb'))
if run_plots == True:
f, ax = self.model.plot()
f.set_size_inches(17, 20)
def cluster(self, model, cluster_nr):
threshold = cluster_nr
clusters = fcluster(model.linkage, threshold,
criterion='inconsistent', depth=10)
return clusters
if run_plots == True:
fig = plt.figure(figsize=(20, 20))
dendrogram(model.linkage, orientation='left', leaf_font_size=15,
color_threshold=100, labels=product_ts.index[:subsample])
plt.show()
def main():
dp = DataPreprocess()
labels, df = dp.read_data('sku_labels.csv', 'extracted_features.csv')
product_sales = pd.read_csv('aggregate_products.csv')
scaler = StandardScaler()
X = dp.scale_data(df, scaler)
names = df.columns
pp = Preprocessing()
product_ts = pp.pivot_table(product_sales)
product_ts = pp.sort_nas(product_ts)
product_ts.to_csv('pivot_products.csv')
if run_plots == True:
pp.plot_nas(product_ts)
pp.make_diff_length_list(product_ts)
dtw = DynamicTimeWarping()
clusters_dtw = dtw.linkage_tree(pp.product_matrix_fill)
clusters = dtw.cluster(dtw.model, 6)
dtw_df = product_ts.reset_index()
dtw_df['cluster'] = clusters
output_df = dtw_df[['sku_key', 'cluster']]
print('Outputting...')
output_df.to_csv('dtw_clusters.csv', index=False)
if __name__ == '__main__':
main()
|
# Generated by Django 2.0 on 2018-01-30 15:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('karbar', '0002_auto_20180130_1509'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('myuser_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='karbar.MyUser')),
],
bases=('karbar.myuser',),
),
]
|
#이터레이터를 병렬로 처리하면 zip을 사용하자
#1) 리스트 컴프리헨션
names = ['Cecilia', 'Lise', 'Marie']
letters = [len(n) for n in names]
print("c1:",letters)
#2)if
longest_name = None
max_letters = 0
for i in range(len(names)):
count = letters[i]
if count > max_letters:
longest_name = names[i]
max_letters = count
print("c2:", longest_name)
#3) enumerate
for i , name in enumerate(names):
count = letters[i]
if count > max_letters:
longest_name = name
max_letters = count
print("c3:" ,longest_name)
#4) zip => 튜플로 저장 됨.
#제너레이터가 아니기 때문에 반환하는 작업을 해야함
for name, count in zip(names, letters):
if count > max_letters:
longest_name = name
max_letters = count
#5) 추가 한 값이 아닌 리스트 컴프리헨션한 값만 들어온 것을 알 수 있음.
names.append('Rosalind')
for name, count in zip(names, letters):
print("c5:", name) |
from .normalize_adj import NormalizeAdj, normalize_adj
from .add_selfloops import AddSelfloops, EliminateSelfloops, add_selfloops, eliminate_selfloops
from .wavelet import WaveletBasis, wavelet_basis
from .chebyshef import ChebyBasis, cheby_basis
from .neighbor_sampler import NeighborSampler, neighbor_sampler
from .gdc import GDC, gdc
from .augment_adj import augment_adj
from .reshape import SparseReshape, sparse_reshape
from .sample import find_4o_nbrs
from .flip import *
from .ppr import ppr, PPR, topk_ppr_matrix
from .clip import sparse_clip
from .topk import sparse_topk
from .power import adj_power, AdjPower
from .to_dense import to_dense, ToDense
from .to_edge import sparse_adj_to_edge, SparseAdjToEdge
from .to_dense import to_dense, ToDense |
from django.template import loader, Context
from django.template import Template
import os
class AdvertisePlugin():
plugin_path = os.path.dirname(os.path.abspath(__file__))
def header_extra_content(self):
print self.plugin_path
#t = loader.get_template(self.plugin_path + "/templates/advertise/header_advertise.html")
#return t.render(), 3
return Template("<h1>Advertise Plugin Header</h1>").render(Context())
def footer_extra_content(self):
t = loader.get_template("./templates/advertise/header_advertise.html")
return t.render(), 3
|
from flask import Flask, request, g
from pytrends.request import TrendReq
import requests
import json
from datetime import datetime
from time import mktime
import sqlite3
import math
import statistics
app = Flask(__name__)
pytrends = TrendReq()
DATABASE = '/home/luka/Desktop/pineapple/pineapple.sqlite'
def make_dicts(cursor, row):
return dict((cursor.description[idx][0], value)
for idx, value in enumerate(row))
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE, isolation_level=None)
db.row_factory = make_dicts
return db
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
@app.route('/trend', methods=['GET'])
def trend():
pytrends.build_payload(["grippe"], cat=0, timeframe='today 12-m', geo='CH-ZH', gprop='')
response = pytrends.interest_over_time()
return response['grippe'][-1] / 10
@app.route('/weather/<lat>/<lon>', methods=['GET'])
def weather(lat, lon):
weather_forecast = json.loads(requests.get(
'http://api.openweathermap.org/data/2.5/forecast?lat=' + lat + '&lon=' + lon + '&appid=a5a6619306cab76d5164937aa70c2410').content.decode(
"utf-8"))
temperature_avg_pred = (weather_forecast["list"][0]["main"]["temp"] + weather_forecast["list"][1]["main"]["temp"] + \
weather_forecast["list"][2]["main"]["temp"] + weather_forecast["list"][3]["main"]["temp"] + \
weather_forecast["list"][4]["main"]["temp"] + weather_forecast["list"][5]["main"]["temp"] + \
weather_forecast["list"][6]["main"]["temp"] + weather_forecast["list"][7]["main"]["temp"] + \
weather_forecast["list"][8]["main"]["temp"] + weather_forecast["list"][9]["main"]["temp"] + \
weather_forecast["list"][10]["main"]["temp"] + weather_forecast["list"][11]["main"][
"temp"] + \
weather_forecast["list"][12]["main"]["temp"] + weather_forecast["list"][13]["main"][
"temp"] + \
weather_forecast["list"][14]["main"]["temp"] + weather_forecast["list"][15]["main"][
"temp"] + \
weather_forecast["list"][16]["main"]["temp"] + weather_forecast["list"][17]["main"][
"temp"] + \
weather_forecast["list"][18]["main"]["temp"] + weather_forecast["list"][19]["main"][
"temp"] + \
weather_forecast["list"][20]["main"]["temp"] + weather_forecast["list"][21]["main"][
"temp"] + \
weather_forecast["list"][22]["main"]["temp"] + weather_forecast["list"][23]["main"][
"temp"]) / 24
temperature = abs(weather_forecast["list"][0]["main"]["temp"] - temperature_avg_pred)
temperature = min(temperature, 10)
humidity = abs(50 - weather_forecast["list"][0]["main"]["humidity"]) / 5
wind = 10 / (1 + math.exp(-0.15 * (weather_forecast["list"][0]["wind"]["speed"] - 15)))
rain = 0
if "rain" in weather_forecast["list"][0]:
rain = 10 / (1 + math.exp(-0.04 * (weather_forecast["list"][0]["3h"] - 50)))
return {"temperature": temperature, "humidity": humidity, "wind": wind, "rain": rain}
@app.route('/bmi', methods=['GET'])
def bmi():
response = []
for row in query_db('''
SELECT weight, height
FROM PhoneMeasurements
ORDER BY timestamp DESC
LIMIT 1'''):
response.append(row)
if len(response) == 0:
return 'No measurements taken'
weight = response[0]["weight"]
height = response[0]["height"]
bmi = weight / (height / 100) ** 2
if bmi < 18.5:
bmi = 10
elif bmi > 30:
bmi = 4 * math.log(bmi - 30)
bmi = min(bmi, 10)
else:
bmi = 10 - 1.53846 * (bmi - 18.5)
return bmi
@app.route('/heartrate', methods=['GET'])
def heartrate():
response = []
for row in query_db('''
SELECT resting_heartrate
FROM PhoneMeasurements
ORDER BY timestamp DESC
LIMIT 1'''):
response.append(row)
if len(response) == 0:
return 'No measurements taken'
heartrate = response[0]['resting_heartrate']
if heartrate < 70:
heartrate = 0
else:
heartrate = 10 / (1 + math.exp(-0.5 * (heartrate - 80)))
return heartrate
@app.route('/steps', methods=['GET'])
def steps():
response = []
for row in query_db('''
SELECT steps
FROM PhoneMeasurements
ORDER BY timestamp DESC
LIMIT 14'''):
response.append(row)
if len(response) == 0:
return 'No measurements taken'
steps_median = statistics.median([x['steps'] for x in response])
steps = 0
if steps_median < 6000:
steps = (6000 - steps_median) / 600
return steps
@app.route('/pollution/<lat>/<lon>', methods=['GET'])
def pollution(lat, lon):
pollution = json.loads(requests.get(
'http://api.airvisual.com/v2/nearest_city?lat=' + lat + '&lon=' + lon + '&key=5e94f1ac-37e1-412c-8660-15000713e46a').content.decode(
"utf-8"))
aqi = pollution['data']['current']['pollution']['aqius'] / 30
return aqi
@app.route('/age', methods=['GET'])
def age():
response = []
for row in query_db('''
SELECT age
FROM PhoneMeasurements
ORDER BY timestamp DESC
LIMIT 1'''):
response.append(row)
if len(response) == 0:
return 'No measurements taken'
age = response[0]['age']
if age >= 65:
age = 10
elif age < 7:
age = 8
elif age < 18:
age = 2
elif age < 65:
age = 4
return age
@app.route('/cigarettes', methods=['GET'])
def cigarettes():
response = []
for row in query_db('''
SELECT cigarettes
FROM PhoneMeasurements
ORDER BY timestamp DESC
LIMIT 30'''):
response.append(row)
if len(response) == 0:
return 'No measurements taken'
cigarettes = statistics.median([x['cigarettes'] for x in response])
cigarettes = min(cigarettes, 10)
return cigarettes
@app.route('/sleep', methods=['GET'])
def sleep():
response = []
for row in query_db('''
SELECT sleep
FROM PhoneMeasurements
ORDER BY sleep DESC
LIMIT 7'''):
response.append(row)
if len(response) == 0:
return 'No measurements taken'
sleep = abs(8 - statistics.mean([x['sleep'] for x in response]))
sleep = min(sleep, 10)
return sleep
@app.route('/event/<zip>', methods=['GET'])
def event(zip):
response = []
print(zip)
for row in query_db('''
SELECT capacity
FROM Events
WHERE zip=?
LIMIT 1''', (int(zip),)):
response.append(row)
if len(response) == 0:
return 'No events'
event = 10 / (1 + math.exp(-0.03 * (response[0]['capacity'] - 200)))
return event
@app.route(
'/fusion/<lat>/<lon>/<zip>/<resting_heartrate_in>/<steps_in>/<cigarettes_in>/<sleep_in>/<calories_in>/<weight_in>/<height_in>/<age_in>',
methods=['GET'])
def fusion(lat, lon, zip, resting_heartrate_in, steps_in, cigarettes_in, sleep_in, calories_in, weight_in, height_in,
age_in):
timestamp = mktime(datetime.now().replace(microsecond=0, second=0, minute=0, hour=0).timetuple())
print(resting_heartrate_in, steps_in, cigarettes_in, sleep_in, calories_in, height_in, weight_in,
age_in, timestamp)
query_db(
'INSERT INTO PhoneMeasurements (resting_heartrate, steps, cigarettes, sleep, calories, height, weight, age, timestamp) '
'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
(resting_heartrate_in, steps_in, cigarettes_in, sleep_in, calories_in, height_in, weight_in, age_in, timestamp))
weather_data = weather(lat, lon)
scores = {'trend': trend(),
'temperature': weather_data['temperature'],
'humidity': weather_data['humidity'],
'wind': weather_data['wind'],
'rain': weather_data['rain'],
'bmi': bmi(),
'heartrate': heartrate(),
'steps': steps(),
'pollution': pollution(lat, lon),
'age': age(),
'cigarettes': cigarettes(),
'sleep': sleep(),
'event': event(zip)
}
print(scores)
diagnostic = []
for key, value in scores.items():
if value > 5:
diagnostic.append(key)
scores = {'trend': scores['trend'] * 2,
'temperature': scores['temperature'] * 1,
'humidity': scores['humidity'] * 1.5,
'wind': scores['wind'] * 1,
'rain': scores['rain'] * 1,
'bmi': scores['bmi'] * 0.5,
'heartrate': scores['heartrate'] * 2,
'steps': scores['steps'] * 1,
'pollution': scores['pollution'] * 2,
'age': scores['age'] * 2.5,
'cigarettes': scores['cigarettes'] * 1,
'sleep': scores['sleep'] * 1.25,
'event': scores['event'] * 1
}
final_score = 0.0
for key, value in scores.items():
if key != 'age':
final_score = final_score + value
final_score = int(final_score / 1.775)
return json.dumps({'score': final_score, 'diagnostic': diagnostic})
if __name__ == '__main__':
app.run()
|
#!/usr/bin/python
# Copyright (C) 2018 Ion Torrent Systems, Inc. All Rights Reserved
#
# Plugin: Meta16S
# This plugin is developed for 16S Metagenomics data analysis
#
# Author: Lucius Zheng
# Last modified: 2018/11/07
#
# Main revisions:
# 1. Add "Barcode Sample Settings +" in the plugin configure page to select which barcodes to process;
# 2. Update "Advanced options", as well as the default values for some parameters;
# 3. Include an HTML file named plan.html to enable plan configuration;
# 4. Add one download option for downloading all result files;
# 5. Add exceptions process to ensure a successful run.
#
# Last modified: 2019/02/14
import json
import os
from django.utils.functional import cached_property
from ion.plugin import *
import subprocess
from subprocess import check_output
from django.conf import settings
from django.template.loader import render_to_string
def createReport(reportName,reportTemplate,reportData):
with open(reportName,'w') as bcsum:
bcsum.write( render_to_string(reportTemplate,reportData) )
class Meta16S(IonPlugin):
# The version number for this plugin
version = "2.0.0.1"
# this plugin can run on fullchip runs, thumbnail runs, and composite (merged via project page) runs
# note that when the plugin is manually launched, only the 'launch' method will be called
runtypes = [RunType.FULLCHIP, RunType.THUMB, RunType.COMPOSITE]
# specify when the plugin is called. For log parsing, stay simple and just get called when the run completes.
# but can also be called before the run starts, at the block level, or after all other default plugins run
runlevels = [RunLevel.DEFAULT]
# a simple cached version of the start plugin property
@cached_property
def startplugin_json(self):
return self.startplugin
@cached_property
def barcodes_json(self):
with open('barcodes.json', 'r') as barcodes_handle:
return json.load(barcodes_handle)
def launch(self, data=None):
"""This is the primary launch method for the plugin."""
# configure django to use the templates folder
#settings.configure(TEMPLATE_DIRS=(self.startplugin["runinfo"]["plugin_dir"] + '/templates'),)
if not settings.configured:
settings.configure( DEBUG=False, TEMPLATE_DEBUG=False,
INSTALLED_APPS=('django.contrib.humanize',),
TEMPLATE_DIRS=(os.path.join(self.startplugin["runinfo"]["plugin_dir"],'templates'),)
)
# define mothur command related environment variables
mothur = self.startplugin_json['runinfo']['plugin_dir'] + "/mothur.1.39.5/mothur/mothur"
mothur_bin = os.path.join(self.startplugin_json['runinfo']['plugin_dir'],'mothur.1.39.5/mothur')
#mothur_bin = "/results/plugins/Meta16S_V3/scripts/mothur.1.39.5/mothur"
database_dir = os.path.join(self.startplugin_json['runinfo']['plugin_dir'],'database')
#database_dir = "/results/plugins/Meta16S_V3/scripts/database"
script_dir = os.path.join(self.startplugin_json['runinfo']['plugin_dir'],'scripts')
# save input parameters
parameters_used = {}
parameters_used["Number of reads"] = self.startplugin_json['pluginconfig']['num_of_reads']
parameters_used["Forward primer"] = self.startplugin_json['pluginconfig']['primer_f']
parameters_used["Reverse primer"] = self.startplugin_json['pluginconfig']['primer_r']
parameters_used["Reference database"] = self.startplugin_json['pluginconfig']['database']
parameters_used["Minimum average quality score"] = self.startplugin_json['pluginconfig']['qaverage']
parameters_used["Maximum homopolymer length"] = self.startplugin_json['pluginconfig']['maxhomop']
parameters_used["Maximum ambiguous bases"] = self.startplugin_json['pluginconfig']['maxambig']
parameters_used["Minimum read length"] = self.startplugin_json['pluginconfig']['minlength']
parameters_used["Maximum differences to primer"] = self.startplugin_json['pluginconfig']['pdiffs']
parameters_used["Minimum reads required"] = self.startplugin_json['pluginconfig']['minnum']
# save primer sequences info.
primer_file= self.startplugin_json['runinfo']['results_dir'] +'/primers.txt'
with open(primer_file,'w') as primer_handle:
primer_f = "forward" + " " + self.startplugin_json['pluginconfig']['primer_f'] + "\n"
primer_handle.write(primer_f)
if not self.startplugin_json['pluginconfig']['primer_r'] == "":
primer_r = "reverse" + " " + self.startplugin_json['pluginconfig']['primer_r'] + "\n"
primer_handle.write(primer_r)
# start to analyze bam files
sample_num = 0
for barcode_name, barcode_values in self.barcodes_json.iteritems():
# do you work per barcode here!
# first check to see if the barcode was excluded using the frame work barcodes configuration table
selected = True
barcodeData = self.startplugin_json['pluginconfig'].get('barcodetable',None)
if barcodeData:
#print(barcodeData)
for bc in barcodeData:
if bc.get('barcode_name',"") == barcode_name:
selected = bc.get('selected',True)
break
if not selected:
continue
print("Barcode Name: " + barcode_name)
print("Bam Filepath: " + barcode_values['bam_filepath'])
print("Read count: " + str(barcode_values['read_count']))
if parameters_used.has_key("Barcodes selected"):
parameters_used["Barcodes selected"].append(barcode_name)
else:
parameters_used["Barcodes selected"] = [barcode_name]
# if no BAM file or file size is 0, then skip the sample
if not os.path.exists(barcode_values['bam_filepath']):
print "BAM file does not exist. We will skip the sample in the followed analysis.\n"
continue
if os.path.getsize(barcode_values['bam_filepath']) == 0:
print "BAM file size is zero. We will skip the sample in the followed analysis.\n"
continue
# subsample reads to a fixed number, e.g., 10000
if barcode_values['read_count'] < int(self.startplugin_json['pluginconfig']['minnum']):
print "BAM file reads number is less than minimum required. We will skip the sample in the followed analysis.\n"
continue
# self.startplugin_json['pluginconfig']['num_of_reads'] type is unicode,
# change to int type for comparison
subsample_num = self.startplugin_json['pluginconfig']['num_of_reads']
if barcode_values['read_count'] < int(self.startplugin_json['pluginconfig']['num_of_reads']):
print "The selected number of analysis reads is larger than the raw reads count. We will use all reads in the followed analysis.\n"
subsample_num = str(barcode_values['read_count'])
arg1 = barcode_values['bam_filepath']
cmd = "samtools view " + arg1 + " | shuf -n "+ subsample_num + "> output.sam"
sampling_results = check_output(cmd, shell=True, cwd=self.startplugin_json['runinfo']['results_dir'])
# convert sam/bam to fastq
arg2 = barcode_name + '.fastq'
fastq_results = check_output(["samtools", 'bam2fq', '-s', arg2, "output.sam"],cwd=self.startplugin_json['runinfo']['results_dir'])
# read fastq file and create a fasta and quality file
arg3 = "#fastq.info(fastq=" + arg2 + ")"
mothur_read = check_output([mothur,arg3],cwd=self.startplugin_json['runinfo']['results_dir'])
# remove the user-provided primers, barcodes, and sequences that drop below a quality threshold
#
# oligos: takes a file that can contain the sequences of the forward and reverse primers
# qaverage: tells mothur to calculate the average quality score for each sequence and
# to remove those sequences that have an average below the value provided to the option
# flip=T to get the reverse complement of the sequences
# maxambig: cull those sequences that have ambiguous bases
# maxhomop: cap the homopolymer length
# minlength & maxlength: trim the sequence according their length
# pdiffs: maximum number of differences to the primer sequence, default=0
fasta_file = arg2.replace('.fastq','.fasta')
qual_file = arg2.replace('.fastq','.qual')
qaverage = self.startplugin_json['pluginconfig']['qaverage']
maxhomop = self.startplugin_json['pluginconfig']['maxhomop']
maxambig = self.startplugin_json['pluginconfig']['maxambig']
minlength = self.startplugin_json['pluginconfig']['minlength']
pdiffs = self.startplugin_json['pluginconfig']['pdiffs']
arg4 = "#trim.seqs(fasta=" + fasta_file + ",oligos=" + primer_file + ",qfile=" + qual_file + ",flip=T,qaverage=" + qaverage + ",maxhomop=" + maxhomop + ",maxambig=" + maxambig + ",minlength=" + minlength + ",pdiffs=" + pdiffs + ",processors=4)"
mothur_trim = check_output([mothur,arg4],cwd=self.startplugin_json['runinfo']['results_dir'])
trim_fasta = fasta_file.replace('.fasta','.trim.fasta')
# if no sequence, then skip this sample
if os.path.getsize(self.startplugin_json['runinfo']['results_dir'] + "/" + trim_fasta) == 0:
print "No sequence after trimming. We will skip the sample in the followed analysis.\n"
continue
sample_num += 1
arg5 = "#make.group(fasta=" + trim_fasta + ", groups=" + barcode_name + ")"
mothur_group = check_output([mothur,arg5],cwd=self.startplugin_json['runinfo']['results_dir'])
# merge all groups and fasta
merge_fasta = check_output(['cat *trim.fasta > all.fa'],shell=True,cwd=self.startplugin_json['runinfo']['results_dir'])
merge_group = check_output(['cat *trim.groups > all.group'],shell=True,cwd=self.startplugin_json['runinfo']['results_dir'])
# when all required input files are prepared, we run mothur pipeline to analyze the 16S rRNA data
mothur_pipeline = os.path.join(self.startplugin_json['runinfo']['plugin_dir'],'mothur.sh')
database = self.startplugin_json['pluginconfig']['database']
print([mothur_pipeline,mothur_bin,database_dir,database,script_dir])
try:
mothur_16S = check_output([mothur_pipeline,mothur_bin,database_dir,database],cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
############################################################################################################################
## Visualization analysis
############################################################################################################################
# Rarefaction plot for number of observed OTUs
try:
R_result = check_output(["Rscript", script_dir + "/rarePlot.R",
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.groups.rarefaction",
"RarefactionCurve.png",
"Number of Different OTUs"
],
cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
# Rarefaction plot for Shannon measure
try:
R_result = check_output(["Rscript", script_dir + "/rarePlot.R",
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.groups.r_shannon",
"RarefactionCurve_shannon.png",
"Rarefaction Measure: shannon"
],
cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
# Rarefaction plot for Chao1 measure
try:
R_result = check_output(["Rscript", script_dir + "/rarePlot.R",
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.groups.r_chao",
"RarefactionCurve_chao.png",
"Rarefaction Measure: chao1"
],
cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
# taxonomy binning
#
# first, we need to reformat the outputs from mothur
try:
check_output(script_dir + "/prepare_input.sh",
cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
#
# then, get the relative abundance of each otu in a sample: Abundance / Total number of sequences in the group
try:
check_output(["Rscript", script_dir + "/relative_abundance.R"],
cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
#
# plot the distribution of taxonomic relative abundances across all taxonomic groups for all samples
try:
check_output(["Rscript", script_dir + "/taxonomic_binning.R"],
cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
# beta diversity
#
# 1st: PCoA plot in 2-D
if os.path.exists(self.startplugin_json['runinfo']['results_dir'] + "/all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.lt.pcoa.loadings"):
try:
check_output(["Rscript",script_dir + "/plotPCOA.R",
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.lt.pcoa.axes",
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.lt.pcoa.loadings"],
cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
# 2nd: PCoA plot in 3-D
if os.path.exists(self.startplugin_json['runinfo']['results_dir'] + "/all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.lt.pcoa.loadings"):
try:
check_output(["Rscript",script_dir + "/plotPCOA3d.R",
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.lt.pcoa.axes",
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.lt.pcoa.loadings"],
cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
# Krona visualization
if os.path.exists(self.startplugin_json['runinfo']['results_dir'] + "/all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.0.03.cons.tax.summary"):
try:
check_output(["python", script_dir + "/mothur_krona_XML.py",
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.0.03.cons.tax.summary","output.xml"],
cwd=self.startplugin_json['runinfo']['results_dir'])
except subprocess.CalledProcessError as e:
print e.output
check_output("mkdir krona_analysis", shell=True, cwd=self.startplugin_json['runinfo']['results_dir'])
check_output([script_dir + "/KronaTools-2.7/bin/ktImportXML","-o","krona_analysis/krona.html", "output.xml"],
cwd=self.startplugin_json['runinfo']['results_dir'])
# visualizaiton of tree of samples
if os.path.exists(self.startplugin_json['runinfo']['results_dir'] + "/all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.tre"):
check_output(["./bin/figtree","-graphic","PNG",
self.startplugin_json['runinfo']['results_dir'] + "/all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.tre",
self.startplugin_json['runinfo']['results_dir'] + "/all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.tre.png"],
cwd= script_dir + "/FigTree_v1.4.3")
###################################################################################################################
## output in HTML
###################################################################################################################
adiversityData = []
with open(self.startplugin['runinfo']['results_dir'] + '/all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.groups.summary', 'r') as file_handle:
#with open('all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.groups.summary', 'r') as file_handle:
next(file_handle)
for line in file_handle:
data_entry = {}
data = line.strip().split("\t")
data_entry['label'] = data[0]
data_entry['group'] = data[1]
data_entry['coverage'] = data[2]
data_entry['nseqs'] = data[3]
data_entry['sobs'] = data[4]
data_entry['chao'] = data[5]
data_entry['chao_lci'] = data[6]
data_entry['chao_hci'] = data[7]
data_entry['shannon'] = data[8]
data_entry['shannon_lci'] = data[9]
data_entry['shannon_hci'] = data[10]
data_entry['invsimpson'] = data[11]
data_entry['invsimpson_lci'] = data[12]
data_entry['invsimpson_hci'] = data[13]
adiversityData.append(data_entry)
if database == "2015RDP": genome_name = "trainset14_032015.rdp.fasta"
else: genome_name = "gg_13_8_99.fasta"
render_context = {
"autorefresh" : False,
"genome_name" : genome_name,
"library_type" : "16S fusion primers amplification",
"reads_num": self.startplugin_json['pluginconfig']['num_of_reads'],
"adiversityData" : adiversityData
}
cp_cmd = "cp " + os.path.join(self.startplugin_json['runinfo']['plugin_dir'],'templates/Meta16S_workflow.png') + " ./"
check_output(cp_cmd,shell=True,cwd=self.startplugin_json['runinfo']['results_dir'])
# save parameters into a json file
json_file = self.startplugin_json['runinfo']['results_dir'] +'/parameters.json'
with open(json_file,"w") as f:
f.write(json.dumps(parameters_used))
if sample_num > 1:
createReport(os.path.join(self.startplugin['runinfo']['results_dir'],'Meta16S_report.html'), 'barcode_summary_all.html', render_context )
zip_cmd = "zip -r results.zip " + "*.png Meta16S_report.html parameters.json " + \
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.shared " + \
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.0.03.cons.taxonomy " + \
"all.OTU.summary.taxonomy " + \
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.groups.summary " + \
"Taxonomic-Binning/ " + \
"krona_analysis/ " + \
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.braycurtis.0.03.tre"
else:
createReport(os.path.join(self.startplugin['runinfo']['results_dir'],'Meta16S_report.html'), 'barcode_specific.html', render_context )
zip_cmd = "zip -r results.zip " + "*.png Meta16S_report.html parameters.json " + \
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.shared " + \
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.0.03.cons.taxonomy " + \
"all.OTU.summary.taxonomy " + \
"all.unique.good.filter.unique.precluster.pick.pick.opti_mcc.groups.summary " + \
"Taxonomic-Binning/ " + \
"krona_analysis/"
check_output(zip_cmd,shell=True,cwd=self.startplugin_json['runinfo']['results_dir'])
results = '<h4> <a href="' + 'results.zip' + '" target="_blank">Download all result files</a></h4>'
with open("download_block.html","w") as html_dl:
html_dl.write("<html><body><pre>")
html_dl.write(results)
html_dl.write("</pre></body></html>")
return True
# Return list of columns you want the plugin table UI to show.
# Columns will be displayed in the order listed.
def barcodetable_columns(self):
return [
{ "field": "selected", "editable": True },
{ "field": "barcode_name", "editable": False },
{ "field": "sample", "editable": False } ]
# Devel use - running directly
if __name__ == "__main__":
PluginCLI()
|
"""
Practice file: implement quicksort and heap sort -- two of the
higher-performance sorting algorithms. (translated from Chinese)
"""
# 快速排序,其实就是快速的找到某个位置的数字(元素)应该在的位置,通过递归的方式来实现这个代码。
def quickSort(numList):
if len(numList) < 2:
return numList
lowPoint = 0
highPoint = len(numList) - 1
tempNum = numList[0]
while lowPoint != highPoint:
while lowPoint != highPoint and numList[highPoint] > tempNum:
highPoint -= 1
numList[lowPoint] = numList[highPoint]
while lowPoint != highPoint and numList[lowPoint] < tempNum:
lowPoint += 1
numList[highPoint] = numList[lowPoint]
return quickSort(numList[:lowPoint]) + [tempNum] + quickSort(numList[lowPoint + 1:])
# 堆排,堆排里边比较复杂的,其实是构建堆和堆调整,其实树是可以用数组来表示的,
def justifyHeap(arr, i):
if 2*i+1 < len(arr) and arr[i] < arr[2 * i + 1]:
temp = arr[i]
arr[i] = arr[2 * i + 1]
arr[2 * i + 1] = temp
if 2*i < len(arr) and arr[i] < arr[2 * i]:
temp = arr[i]
arr[i] = arr[2 * i]
arr[2 * i] = temp
def heapSort(arr):
temList = []
arr.insert(0,"#")
while len(arr)!=1:
lenArr = len(arr)
startPoint = lenArr//2
for i in range(startPoint,0,-1):
justifyHeap(arr,i)
temList.append(arr.pop(1))
return temList
if __name__ == '__main__':
    # numList = [3, 2, 5, 7, 1, 10, 33, 21]
    # res = quickSort([3, 2])
    # print(res)
    # heapSort returns the elements in descending order.
    res = heapSort([1,2,333,42,11111])
    print(res)
import os
import tkinter as tk
from tkinter import *
# filedialog/messagebox are submodules and are NOT imported by
# `import tkinter` or `from tkinter import *`; without this line the
# `tk.filedialog` / `tk.messagebox` calls below raise AttributeError.
from tkinter import filedialog, messagebox

from obspy import read
# Window configuration
root=tk.Tk()
root.title("Convertidor de archivos")
root.geometry("450x230")
root.resizable(0, 0)
color="honeydew3"
root.configure(bg=color)
# Variables for the user-selected paths (source folder, its file list, destination)
path_guardar=" "
archivo=" "
f_path=" "
# Titles
Show= Label(root,text="Archivos reftek130 a mseed \n Guatemala 2021",font= "Times 8",bg=color, fg="RoyalBlue4")
Show.grid(row=0, column=0,sticky="W")
k= Label(root,text="Convertidor de archivos",font= "Cambria 20",bg=color, fg="white")
k.grid(row=3, column=0,columnspan=2)
espace= Label(root,text=" ",font= "times 12",bg=color)
espace.grid(row=4, column=0)
# Store the extraction (source) directory
def extraer():
    """Ask the user for a source directory and remember its file listing."""
    global archivo
    global f_path
    chosen_dir = tk.filedialog.askdirectory()
    f_path = chosen_dir
    archivo = os.listdir(chosen_dir)
# Widgets to pick the folder whose files will be converted
Show= Label(root,text="Seleccione una carpeta para la extracción: ",font= "Times 12",bg=color, fg="RoyalBlue4")
Show.grid(row=5, column=0)
Get_Button = Button(root,bd=1,font="cambria 9 ", background="white", text="Selección",relief="sunken",command= extraer)
Get_Button.grid(row=5, column=1)
# Get the destination path for saving
def getpath():
    """Ask the user where the converted .mseed files should be written."""
    global path_guardar
    path_guardar = tk.filedialog.askdirectory()
# Save into the chosen path IF the file type is suitable
def guardar():
    """Convert every file of the selected source folder to MiniSEED.

    Reads each entry with obspy and writes it to the destination
    directory with a ``.mseed`` suffix; shows an info box on success
    and an error box on any conversion failure.
    """
    try:
        for i in archivo:
            a = read(f_path + "/" + i)
            a.write(path_guardar + "/" + i + ".mseed", format="MSEED")
        tk.messagebox.showinfo(message="Conversión completada")
    # The original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception so only real errors are reported.
    except Exception:
        tk.messagebox.showerror(title=None, message="El tipo de archivo no coincide con los esperados. Conversión no realizada.")
# Buttons to pick the destination path and launch the conversion
Show2= Label(root,text="Seleccione una carpeta para guardar los archivos mseed: ",font= "Times 12",bg=color, fg="RoyalBlue4")
Show2.grid(row=7, column=0)
Get_Button2 = Button(root,bd=1,font="cambria 9", background="white", text="Selección",relief="sunken",command= getpath)
Get_Button2.grid(row=7, column=1)
space= Label(root,text="",font= "Times 12",bg=color, fg="RoyalBlue4")
space.grid(row=8, column=0)
# Start the conversion
Aceptar = Button(root,bd=1,font="cambria 11", background="tomato2",fg="white", text="Aceptar",relief="flat",command= guardar)
Aceptar.grid(row=9, column=0,columnspan=2)
root.mainloop()
|
def isAnagram(str1, str2):
    """Print "anagram" if *str1* and *str2* are anagrams, else "not anagram".

    Fix: the original pairwise match-count over-counted duplicate letters
    and never compared lengths (e.g. "aa" vs "ab" produced count == 2 and
    was reported as an anagram). Comparing the sorted character sequences
    is exact. Returns None (the result of print), as before.
    """
    if sorted(str1) == sorted(str2):
        return print("anagram")
    else:
        return print("not anagram")
str1=input("string 1:")
str2=input("string 2:")
isAnagram(str1,str2) |
#!/usr/bin/env python3
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import os
import pandas as pd
import data.transforms_mitorch as tf
import torchvision.transforms as torch_tf
from test_net_single import test as test_single
from data.build_test_pipeline import build_transformations
import copy
import logging
from datetime import datetime
import pprint
import pickle as pkl
# Transformation names accepted in a robustness-experiment spec
# (validated by sanity_check_exp below).
KNOWN_TRANSFORMATIONS = (
    'noise',
    'noisechannel',
    'contrast',
    'contrastchannel',
    'gamma',
    'rotate',
    'shear',
    'translate',
    'scale',
    'spike',
    'ghosting',
    'blur',
    'biasfield',
    'swap',
    'motion',
    'anisotropy',
    'elasticdeformation',
    'zoom',
)
# The only keys allowed in each transformation spec dict.
KNOWN_T_KEYS = (
    't_name',
    't_params',
)
def setup_logger():
    """Build a module logger: INFO+ to the console, ERROR+ to a /tmp file.

    The file handler writes to a timestamped log under /tmp so failing
    test runs leave a persistent trace on disk.
    """
    log = logging.getLogger(__name__)
    log.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # File handler: errors only, timestamped filename.
    error_log = logging.FileHandler(
        '/tmp/test_error_output_{}.log'.format(datetime.now().strftime('%Y%m%d_%H%M'))
    )
    error_log.setLevel(logging.ERROR)
    error_log.setFormatter(fmt)
    # Stream handler: inherits the logger's INFO level.
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    log.addHandler(error_log)
    log.addHandler(console)
    return log
logger = setup_logger()
def sanity_check_exp(exp):
    """Validate one experiment spec and return its permutation count.

    Each entry must name a known transformation, use only known keys,
    and all of its t_params value lists must share one positive length.
    Raises AssertionError on any violation.
    """
    perm_len = None
    for spec in exp:
        assert spec['t_name'] in KNOWN_TRANSFORMATIONS, 'requested transformation is unknown'
        assert all(key in KNOWN_T_KEYS for key in spec.keys()), \
            f'unknown keys are defined in the transformations {spec}'
        for p_name, p_values in spec['t_params'].items():
            if perm_len is None:
                perm_len = len(p_values)
                continue
            assert perm_len == len(p_values), 'expect t_params have a fixed length, getting {} and {} for {}'.format(
                perm_len, len(p_values), p_name
            )
    assert perm_len > 0, 'length of experiment permutation must be > 0'
    return perm_len
def create_transformations(cfg, exp):
    """Compose the fixed head/tail volume transforms around the experiment body."""
    head = [
        tf.ToTensorImageVolume(),
        tf.RandomOrientationTo('RPI'),
        # tf.RandomResampleTomm(target_spacing=(1, 1, 1)),
    ]
    # The experiment-specific perturbations sit between head and tail.
    body = build_transformations(cfg, exp)
    tail = [
        tf.ConcatAnnot2ImgVolume(num_channels=-1),  # concat all except the last to the image
        tf.MaskIntensityVolume(mask_data=None),  # crop a tight 3D box
        tf.ConcatAnnot2ImgVolume(num_channels=-1),  # concat all annot to the image
        tf.CropForegroundVolume(margin=1),  # crop the brain region
        tf.ConcatImg2AnnotVolume(num_channels=2),
        tf.NormalizeMinMaxVolume(max_div=True, inplace=True),
    ]
    return torch_tf.Compose([*head, *body, *tail])
def define_exp_current(exp, j):
    """Materialize permutation *j* of *exp* plus a flat description of it.

    Returns (exp_current, exp_description): a deep copy of *exp* in which
    every t_params list is replaced by its j-th element, and a dict mapping
    't_name_<k>' / 't_name_<k>_<param>' to the chosen names/values.
    The input *exp* is left untouched.
    """
    exp_current = copy.deepcopy(exp)
    exp_description = {}
    for k, spec in enumerate(exp_current):
        name_key = f't_name_{k}'
        exp_description[name_key] = spec['t_name']
        params = spec['t_params']
        for p_key in params.keys():
            params[p_key] = params[p_key][j]
            exp_description[f'{name_key}_{p_key}'] = params[p_key]
    return exp_current, exp_description
def test_single_exp(cfg, exp):
    """Run one experiment spec across all of its parameter permutations.

    Returns a list with one result dict per permutation, each augmented
    with the permutation's description entries.
    """
    # Validate t_params lengths up front; this is the permutation count.
    exp_perm_len = sanity_check_exp(exp)
    exp_results = []
    for j in range(exp_perm_len):
        logger.info(f'experiment permutation: {j}|{exp_perm_len}')
        exp_current, exp_description = define_exp_current(exp, j)
        transformations = create_transformations(cfg, exp_current)
        output_single = test_single(cfg, transformations=transformations, save_pred_flag=False, eval_pred_flag=True)
        output_single.update(exp_description)
        exp_results.append(output_single)
        logger.info(f'{"".join(["-"*20])}\n')
    return exp_results
def process_output_results(exp_ls_results, file_path, save=False):
    """Optionally pickle the collected results, then log every experiment.

    *exp_ls_results* is a list of (index, spec, DataFrame) tuples; when
    *save* is true they are pickled as exp_ls_results.pkl under *file_path*.
    """
    if save:
        pkl_path = os.path.join(file_path, 'exp_ls_results.pkl')
        with open(pkl_path, 'wb') as fh:
            pkl.dump(exp_ls_results, fh)
    # TODO downstream consumers may visualize these; for now just log them.
    for i, exp, output_df in exp_ls_results:
        logger.info(f'{i} --- \n{pprint.pformat(exp)}:\n{output_df}\n\n')
def test(cfg):
    """Entry point: run every experiment in cfg.TEST.ROBUST_EXP_LIST and save results."""
    all_results = []
    for i, exp in enumerate(cfg.TEST.ROBUST_EXP_LIST):
        logger.info(f'started testing experiment --- \n{i:03d}:{pprint.pformat(exp)}')
        # One DataFrame row per parameter permutation of this experiment.
        output_df = pd.DataFrame(test_single_exp(cfg, exp))
        all_results.append((i, exp, output_df))
        logger.info(f'experiment {i} is done --- \n{output_df}\n')
    logger.info(f'*** experiments saved')
    process_output_results(all_results, cfg.OUTPUT_DIR, save=True)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pathlib import PurePath
from pants.backend.debian.target_types import (
DebianInstallPrefix,
DebianPackageDependencies,
DebianSources,
)
from pants.core.goals.package import (
BuiltPackage,
BuiltPackageArtifact,
OutputPathField,
PackageFieldSet,
)
from pants.core.util_rules.system_binaries import BinaryPathRequest, BinaryPaths, TarBinary
from pants.engine.fs import CreateDigest, DigestEntries, FileEntry
from pants.engine.internals.native_engine import Digest
from pants.engine.internals.selectors import Get
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.target import HydratedSources, HydrateSourcesRequest
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
@dataclass(frozen=True)
class DebianPackageFieldSet(PackageFieldSet):
    """Field set consumed by `package_debian_package` to build a .deb."""

    required_fields = (DebianSources, DebianInstallPrefix, DebianPackageDependencies)

    # Directory containing the Debian control files and payload.
    sources_dir: DebianSources
    install_prefix: DebianInstallPrefix
    packages: DebianPackageDependencies
    # Controls the name/location of the built .deb artifact.
    output_path: OutputPathField
@rule(level=LogLevel.INFO)
async def package_debian_package(
    field_set: DebianPackageFieldSet, tar_binary_path: TarBinary
) -> BuiltPackage:
    """Build a Debian package by running the host's `dpkg-deb` on the target's sources.

    Raises OSError if `dpkg-deb` cannot be found under /usr/bin.
    """
    dpkg_deb_path = await Get(
        BinaryPaths,
        BinaryPathRequest(
            binary_name="dpkg-deb",
            search_path=["/usr/bin"],
        ),
    )
    if not dpkg_deb_path.first_path:
        raise OSError(f"Could not find the `{dpkg_deb_path.binary_name}` program in `/usr/bin`.")
    hydrated_sources = await Get(HydratedSources, HydrateSourcesRequest(field_set.sources_dir))
    # Since all the sources are coming only from a single directory, it is
    # safe to pick an arbitrary file and get its root directory name.
    # Validation of the resolved files has been called on the target, so it is known that
    # snapshot.files isn't empty.
    sources_directory_name = PurePath(hydrated_sources.snapshot.files[0]).parts[0]
    result = await Get(
        ProcessResult,
        Process(
            argv=(
                dpkg_deb_path.first_path.path,
                "--build",
                sources_directory_name,
            ),
            description="Create a Debian package from the produced packages.",
            input_digest=hydrated_sources.snapshot.digest,
            # dpkg-deb produces a file with the same name as the input directory
            output_files=(f"{sources_directory_name}.deb",),
            # PATH points at tar's directory — presumably dpkg-deb invokes
            # tar internally; confirm before changing.
            env={"PATH": str(PurePath(tar_binary_path.path).parent)},
        ),
    )
    # The output Debian package file needs to be renamed to match the output_path field.
    output_filename = field_set.output_path.value_or_default(
        file_ending="deb",
    )
    digest_entries = await Get(DigestEntries, Digest, result.output_digest)
    # Exactly one file (the .deb) is expected in the process output digest.
    assert len(digest_entries) == 1
    result_file_entry = digest_entries[0]
    assert isinstance(result_file_entry, FileEntry)
    new_file = FileEntry(output_filename, result_file_entry.file_digest)
    final_result = await Get(Digest, CreateDigest([new_file]))
    return BuiltPackage(final_result, artifacts=(BuiltPackageArtifact(output_filename),))
def rules():
    """Register this backend's rules plus the package field-set union."""
    backend_rules = list(collect_rules())
    backend_rules.append(UnionRule(PackageFieldSet, DebianPackageFieldSet))
    return backend_rules
|
from flask_wtf import Form, FlaskForm
from flask_wtf.file import FileField, FileRequired
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, Length
# Signup form class used to initiate object instances to capture the different fields of user information.
# NOTE(review): passwords are presumably hashed before storage in the model layer — confirm.
class SignupForm(FlaskForm):
    """Registration form: names, email and a minimum-length password.

    Fixes: subclasses FlaskForm directly (the `Form` alias is deprecated
    in Flask-WTF), and the Email validator now reports an *invalid*
    address instead of repeating the DataRequired "missing" message.
    """
    first_name = StringField('First name', validators=[DataRequired("Please enter your first name.")])
    last_name = StringField('Last name', validators=[DataRequired("Please enter your last name.")])
    email = StringField('Email', validators=[DataRequired("Please enter your email address."), Email("Please enter a valid email address.")])
    password = PasswordField('Password', validators=[DataRequired("Please enter a password."), Length(min=6, message="Passwords must be 6 characters or more.")])
    submit = SubmitField('Sign up')
# Login form class is used to verify and authenticate the users.
class LoginForm(FlaskForm):
    """Sign-in form: email + password.

    Fixes: subclasses FlaskForm (the `Form` alias is deprecated in
    Flask-WTF), and the Email validator message now reports an invalid
    address instead of duplicating the "missing" message.
    """
    email = StringField('Email', validators=[DataRequired("Please enter your email address"), Email("Please enter a valid email address.")])
    password = PasswordField('Password', validators=[DataRequired("Please enter a password")])
    submit = SubmitField('Sign in')
# Caption field class is used to obtain the caption and file information for user images.
class CaptionField(FlaskForm):
    """Image upload form: a required file plus an optional caption."""
    photo = FileField('Choose File', validators=[FileRequired()])
    caption = StringField('Caption')
    submit = SubmitField('Upload')
|
import glob
import os
import cnn_models
import keras
from keras.callbacks import EarlyStopping
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keraspipelines import KerasDirectoryFlowPipeline
# Data source layout on disk.
src = '/images_data/'
src_full_train = src + 'train_full/'
src_train = src + 'train_split/'
src_val = src + 'val_split/'
src_test = src + 'test/'

# Class names (one sub-directory per class).
classes = ['A', 'B', 'C']


def _count_samples(split_dir):
    """Count image files one directory level below *split_dir*."""
    return len(glob.glob(split_dir + '*/*.*'))


# Keras generators need explicit sample counts for each data split.
nb_train_samples = _count_samples(src_train)
nb_validation_samples = _count_samples(src_val)
nb_test_samples = _count_samples(src_test)
print('Number of training samples:', nb_train_samples)
print('Number of validation samples:', nb_validation_samples)
print('Number of test samples:', nb_test_samples)
# Define model callbacks and parameters passed directly to the model definition
# as specified in cnn_models.py
model_callbacks = [EarlyStopping(monitor='val_loss', patience=3, verbose=1)]
model_parameters = {
    'img_size': (32, 32, 3),
    # Fix: `number_classes` was never defined anywhere (NameError at
    # runtime); the class count is the length of the `classes` list.
    'num_classes': len(classes),
}
# Define ImageDataGenerators with image augmentation parameters.
# Training generator: rescale plus geometric/photometric augmentation.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.1,
    zoom_range=0.25,
    rotation_range=45,
    width_shift_range=0.25,
    height_shift_range=0.25,
    horizontal_flip=True,
    channel_shift_range=0.07)
# Validation generator: rescaling only, no augmentation.
valid_datagen = ImageDataGenerator(rescale=1. / 255,)
# Run parameters for bagging & for KFold using .flow method
# to augment training & test data for improved model performance
directoryflow_bag_parameters = {
    'model_name': getattr(cnn_models, 'basic_cnn'),
    'model_params': model_parameters,
    'predict_test': True,
    'n_bags': 2,
    'split_size': 0.2,
    'seed': 1337,
    'verbose': True,
    'number_epochs': 1,
    'batch_size': 16,
    'callbacks': model_callbacks,
    'src_dir': os.getcwd(),
    # Fix: `src_total` was never defined (NameError at runtime); the full
    # training-set directory declared above is `src_full_train`.
    'full_train_dir': src_full_train,
    'train_dir': src_train,
    'valid_dir': src_val,
    'test_dir': src_test,
    'image_size': (32, 32),
    'classes': classes,
    'train_datagen': train_datagen,
    'valid_datagen': valid_datagen,
    # Test-time augmentation reuses the training augmentations.
    'test_datagen': train_datagen,
    'number_train_samples': nb_train_samples,
    'number_validation_samples': nb_validation_samples,
    'number_test_samples': nb_test_samples,
    'number_test_augmentations': 5,
    'run_save_name': 'basic_cnn_bag_directoryflow',
    'save_statistics': True,
    'save_model': True,
    'output_statistics': True,
}
# Pipeline definition for bagging using .flow_from_directory.
# Every constructor keyword comes verbatim from the parameter dict above,
# so unpack it instead of spelling each argument out by hand.
bag_pipeline = KerasDirectoryFlowPipeline(**directoryflow_bag_parameters)

# Run bagged model
bagging_model, bagging_preds_test, test_filenames = bag_pipeline.bag_flow_run(
    split_every_bag=True)
|
#http://www.codeskulptor.org/#user43_pZhqbpJHlu_1.py
# template for "Stopwatch: The Game"
import simplegui
import math
# define global variables
time_interval = 0       # elapsed time in tenths of a second
width = 300             # canvas width in pixels
higth = 200             # canvas height in pixels (typo kept: other handlers reference it)
stop_count = 0          # total number of stop presses
success_count = 0       # stops that landed exactly on a whole second
running_status = False  # True while the timer is running
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
    """Format *t* (tenths of seconds) as 'A:BC.D' = minutes:seconds.tenths.

    Cleanup: the original shadowed the builtin `min`, and its
    `else: sec` branch was a dead no-op statement; the seconds field is
    zero-padded to two digits with zfill. Output is unchanged.
    """
    total_seconds = t // 10
    minutes = total_seconds // 60
    seconds = str(total_seconds % 60).zfill(2)
    tenths = t % 10
    return '{}:{}.{}'.format(minutes, seconds, tenths)
# define event handlers for buttons; "Start", "Stop", "Reset"
def start_handler():
global running_status
timer.start()
running_status = True
def stop_handler():
    """Stop the timer; if it was running, score this stop attempt."""
    global stop_count, success_count, time_interval, running_status
    timer.stop()
    if running_status:
        stop_count += 1
        # A "success" is stopping exactly on a whole second (tenths digit 0).
        if str(time_interval % 10) == '0':
            success_count += 1
        running_status = False
def reset_handler():
    """Stop the timer and clear the elapsed time and both score counters."""
    global stop_count, success_count, time_interval, running_status
    timer.stop()
    running_status = False
    time_interval = 0
    stop_count = 0
    success_count = 0
# define event handler for timer with 0.1 sec interval
def timer_handler():
    """Advance the elapsed time by one tenth of a second."""
    global time_interval
    time_interval += 1
# define draw handler
def draw_handler(canvas):
    """Render the formatted elapsed time and the success/attempts score."""
    elapsed_text = format(time_interval)
    canvas.draw_text(elapsed_text, (width / 3.0, higth / 2.0), 40, 'White')
    score_text = str(success_count) + '/' + str(stop_count)
    canvas.draw_text(score_text, (250, 30), 20, 'Lime')
# create frame
frame = simplegui.create_frame('Testing', width, higth)
# register event handlers
frame.set_draw_handler(draw_handler)
button1 = frame.add_button('Start', start_handler,120)
button2 = frame.add_button('Stop', stop_handler,120)
button3 = frame.add_button('Reset', reset_handler,120)
# 100 ms tick -> time_interval counts tenths of a second
timer = simplegui.create_timer(100, timer_handler)
# start frame
frame.start()
# Please remember to review the grading rubric
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.