from database_connection import DatabaseConnection
from validation import Validation
from query import Queries
conn = DatabaseConnection.dbconnection()
cursor = conn.cursor()
class Supervisor:
def __init__(self, id):
"""
Initialize the supervisor and look up their team number.
:param id:
"""
self.id = id
get_tid = "select Team_Number from Supervisors where id = ?"
res = cursor.execute(get_tid, (self.id,))
for i in res:
Supervisor.team_id = i[0]
Supervisor.supervisor_tasks(self)
def supervisor_tasks(self):
"""
Supervisor selects a task.
:return:
"""
try:
option = {
"1": ("Show Complaint", Supervisor.show_complaint),
"2": ("Create Reports", Supervisor.create_report),
"3": ("Show Reports", Supervisor.show_reports)
}
ans = input("Choose:\n"
"1.Show Complaint.\n"
"2.Create Reports.\n"
"3.Show Reports.\n")
option.get(ans)[1](self)
except Exception as e:
print("Invalid Choice. Please select again!")
Supervisor.supervisor_tasks(self)
def show_complaint(self):
"""
Shows list of all the complaints assigned to their team by admin.
:return:
"""
try:
sql = "select c.id,c.accident_name,c.comments from Complaints c where assigned_team = ?"
result = cursor.execute(sql, (Supervisor.team_id,))
for i in result:
print("Complaint_id : {}".format(i[0]))
print("Accident Name : {}".format(i[1]))
print("Comments : {}".format(i[2]))
# print("Complaint Status : {}".format(i[3]))
print("----------------------------")
except Exception as e:
print("Error in reading data")
def create_report(self):
"""
Creating a report for a complaint assigned to them.
:return:
"""
Supervisor.show_complaint(self)
result = Supervisor.input_create_report_data(self)
Queries.create(self, 'Report',
(int(result[0]), Supervisor.team_id, result[1], result[2], int(result[3]), int(result[4]))
)
Supervisor.supervisor_tasks(self)
def input_create_report_data(self):
"""
Validating the inputs for creating a report.
:return:
"""
complaint_no = Validation.input_int_for_create(self, "Enter Complaint id: ")
root_cause = Validation.input_str_for_create(self, "Enter root cause: ")
details = Validation.input_str_for_create(self, "Enter details: ")
no_of_people_affected = Validation.input_int_for_create(self, "Enter total no of people affected: ")
death_rate = Validation.input_int_for_create(self, "Enter number of deaths: ")
return complaint_no, root_cause, details, no_of_people_affected, death_rate
def show_reports(self):
"""
Shows the list of all the reports assigned to them.
:return:
"""
try:
sql = "select * from Report where team_no = ?"
result = cursor.execute(sql, (Supervisor.team_id,))
for i in result:
print("Report Id : {}".format(i[0]))
print("Root Cause : {}".format(i[3]))
print("Details : {}".format(i[4]))
print("Status : {}".format(i[5]))
print("Death Rate : {}".format(i[6]))
print("----------------------------")
except Exception as e:
print("Error in reading data")
finally:
Supervisor.supervisor_tasks(self)
|
import time
import dash_bootstrap_components as dbc
from dash import Input, Output, html
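# Note: this snippet assumes an existing Dash app instance named `app`
# (e.g. app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])),
# which is not created in this file.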
placeholder = html.Div(
[
dbc.Button("Load", id="loading-placeholder-button", n_clicks=0),
dbc.Placeholder(
html.Div(id="loading-placeholder-output"),
className="w-100",
animation="wave",
),
],
)
@app.callback(
Output("loading-placeholder-output", "children"),
[Input("loading-placeholder-button", "n_clicks")],
)
def load_output(n):
if n:
time.sleep(2)
return f"Output loaded {n} times"
return "Output not reloaded yet"
|
from GF2 import one
from itertools import chain, combinations
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
a=[one,one,one,0,0,0,0]
b=[0,one,one,one,0,0,0]
c=[0,0,one,one,one,0,0]
d=[0,0,0,one,one,one,0]
e=[0,0,0,0,one,one,one]
f=[0,0,0,0,0,one,one]
want=[0,one,0,0,0,one,0]
# [sum(col) for col in zip(a, b)] is a cleaner way to produce the element-wise sum of two of these vectors
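# Brute-force search over GF(2): try every subset of the rows a..f and print the
# subsets whose element-wise sum equals the target vector `want`.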
for subset in powerset([a,b,c,d,e,f]):
if [sum(col) for col in zip(*subset)] == want:
print(subset)
|
#
# Contains the FPSession class, which maintains an authenticated
# session with the fadedpage server.
# Requires the requests package (pip install requests).
#
import requests
import json
from requests.exceptions import ConnectionError
from os import mkdir, path, rmdir, listdir, remove
import os
class FPSession(object): #{
def __init__(self, user, password, sandbox=False):
self.site = "https://www.sandbox.fadedpage.com/" if sandbox else \
"https://www.fadedpage.com/"
print("Logging into " + self.site)
self.session = requests.Session()
content = self.request("login2.php", data = {
'myusername' : user,
'mypassword' : password
})
# The result is English HTML saying whether or not we logged in
if 'wrong' in str(content):
raise Exception(content.decode('utf-8'))
def __enter__(self):
return self
def __exit__(self, type, value, t):
print("Logging out...");
try:
self.request("logout.php", {})
except ConnectionError:
print("Ignoring log out failure")
print("closing...");
self.session.close()
def request(self, page, data):
r = self.session.post(self.site + page, data,
headers = { 'user-agent' : 'Mozilla/5.0' })
r.raise_for_status()
return r.content
def requestStream(self, page, data):
# Note that setting the user agent is supposed to stop Apache from treating
# us as a bot, but that alone isn't working; the sleep() in writeFile() below seems to do the trick.
r = self.session.post(self.site + page, data, stream=True,
headers = { 'user-agent' : 'Mozilla/5.0' })
r.raise_for_status()
#print(str(r))
#print(str(r.cookies))
#print(str(r.headers))
#print(str(r.content))
return r
def writeFileJSON(self, filename, bytes):
f = open(filename, 'wb')
f.write(bytearray(bytes))
f.close()
def writeFile(self, filename, response):
print("Downloading: " + filename)
with open(filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
# Next download fails with a 403 if we don't sleep
# Apache thinks we are a bot
import time
time.sleep(1)
def downloadSC(self, file):
with self.requestStream("admin/file_rmi.php", {
'operation' : 'fetch-sc',
'file' : file
}) as response:
self.writeFile(file, response)
def downloadHTML(self, bookid):
with self.requestStream("admin/file_rmi.php", {
'operation' : 'fetch-html',
'bookid' : bookid
}) as response:
self.writeFile(bookid + ".html", response)
formats = {
'mobi' : '.mobi',
'pdf' : '-a5.pdf',
'epub' : '.epub',
}
def downloadFormat(self, bookid, format):
with self.requestStream("admin/file_rmi.php", {
'operation' : 'fetch-format',
'bookid' : bookid,
'format' : format
}) as response:
self.writeFile(bookid + self.formats[format], response)
#results = json.loads(content)
#print(results['msg'] + "\n")
#for f, v in results.items():
# if bookid in f:
# self.writeFile(f, v)
# Download all image files. Deletes any current images directory
# and rebuilds it!
def downloadImages(self, bookid):
content = self.request("admin/file_rmi.php", {
'operation' : 'fetch-images',
'bookid' : bookid
})
results = json.loads(content)
dir = results['dir']
if path.isdir('images'):
for f in listdir('images'):
remove("images/" + f)
rmdir('images')
mkdir('images')
print("Image download:")
for f in dir:
print("\t" + f + ": " + str(len(dir[f])))
self.writeFileJSON("images/" + f, dir[f])
def uploadFormat(self, bookid, format):
file = bookid + self.formats[format]
print("Uploading: " + file)
with open(file, 'rb') as f:
r = self.session.post(self.site +
"admin/file_rmi.php?operation=upload-format&bookid=" +
bookid + "&format=" + format, f)
r.raise_for_status()
def uploadOne(self, bookid, file):
print("Uploading: " + file)
with open(file, 'rb') as f:
r = self.session.post(self.site +
"admin/file_rmi.php?operation=upload-file&bookid=" +
bookid + "&file=" + file, f)
r.raise_for_status()
def uploadSC(self, file):
print("Uploading to special collections: " + file)
with open(file, 'rb') as f:
r = self.session.post(self.site +
"admin/file_rmi.php?operation=upload-sc&file=" + file, f)
r.raise_for_status()
#}
if False:
from http.client import HTTPConnection
HTTPConnection.debuglevel = 1
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger('urllib3')
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# Test code
if False:
with FPSession(os.environ['FPUSER'], os.environ['FPPASSWORD']) as fps:
bookid = '20190750'
if False:
fps.downloadHTML(bookid)
fps.downloadImages(bookid)
print("Mobi...")
fps.downloadFormat(bookid, 'mobi')
print("Epub...")
fps.downloadFormat(bookid, 'epub')
print("PDF...")
fps.downloadFormat(bookid, 'pdf')
fps.uploadFormat(bookid, 'pdf')
|
# albus.db
from .base_types import IntegerType, StringType
__all__ = [
'IntegerType',
'StringType',
]
|
# encoding: UTF-8
# Copyright 2017 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import rnn # rnn stuff temporarily in contrib, moving back to code in TF 1.1
import sys
import os
import os.path
import time
import math
import numpy as np
import my_txtutils as txt
tf.set_random_seed(0)
# model parameters
#
# Usage:
# Training only:
# Leave all the parameters as they are
# Disable validation to run a bit faster (set validation=False below)
# You can follow progress in Tensorboard: tensorboard --log-dir=log
# Training and experimentation (default):
# Keep validation enabled
# You can now play with the parameters and follow the effects in Tensorboard
# A good choice of parameters ensures that the testing and validation curves stay close
# To see the curves drift apart ("overfitting") try to use an insufficient amount of
# training data (shakedir = "shakespeare/t*.txt" for example)
#
FLAGS = None
# SEQLEN = 30
# BATCHSIZE = 100
ALPHASIZE = txt.ALPHASIZE
INTERNALSIZE = 512
NLAYERS = 3
# learning_rate = 0.001 # fixed learning rate
# dropout_pkeep = 1.0 # no dropout
def main(_):
# load data, either shakespeare, or the Python source of Tensorflow itself
shakedir = FLAGS.text_dir
# shakedir = "../tensorflow/**/*.py"
codetext, valitext, bookranges = txt.read_data_files(shakedir, validation=True)
# display some stats on the data
epoch_size = len(codetext) // (FLAGS.train_batch_size * FLAGS.seqlen)
txt.print_data_stats(len(codetext), len(valitext), epoch_size)
#
# the model (see FAQ in README.md)
#
lr = tf.placeholder(tf.float32, name='lr') # learning rate
pkeep = tf.placeholder(tf.float32, name='pkeep') # dropout parameter
batchsize = tf.placeholder(tf.int32, name='batchsize')
# inputs
X = tf.placeholder(tf.uint8, [None, None], name='X') # [ BATCHSIZE, FLAGS.seqlen ]
Xo = tf.one_hot(X, ALPHASIZE, 1.0, 0.0) # [ BATCHSIZE, FLAGS.seqlen, ALPHASIZE ]
# expected outputs = same sequence shifted by 1 since we are trying to predict the next character
Y_ = tf.placeholder(tf.uint8, [None, None], name='Y_') # [ BATCHSIZE, FLAGS.seqlen ]
Yo_ = tf.one_hot(Y_, ALPHASIZE, 1.0, 0.0) # [ BATCHSIZE, FLAGS.seqlen, ALPHASIZE ]
# input state
Hin = tf.placeholder(tf.float32, [None, INTERNALSIZE*NLAYERS], name='Hin') # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
# using a NLAYERS=3 layers of GRU cells, unrolled FLAGS.seqlen=30 times
# dynamic_rnn infers FLAGS.seqlen from the size of the inputs Xo
onecell = rnn.GRUCell(INTERNALSIZE)
dropcell = rnn.DropoutWrapper(onecell, input_keep_prob=pkeep)
multicell = rnn.MultiRNNCell([dropcell]*NLAYERS, state_is_tuple=False)
multicell = rnn.DropoutWrapper(multicell, output_keep_prob=pkeep)
Yr, H = tf.nn.dynamic_rnn(multicell, Xo, dtype=tf.float32, initial_state=Hin)
# Yr: [ BATCHSIZE, FLAGS.seqlen, INTERNALSIZE ]
# H: [ BATCHSIZE, INTERNALSIZE*NLAYERS ] # this is the last state in the sequence
H = tf.identity(H, name='H') # just to give it a name
# Softmax layer implementation:
# Flatten the first two dimension of the output [ BATCHSIZE, FLAGS.seqlen, ALPHASIZE ] => [ BATCHSIZE x FLAGS.seqlen, ALPHASIZE ]
# then apply softmax readout layer. This way, the weights and biases are shared across unrolled time steps.
# From the readout point of view, a value coming from a cell or a minibatch is the same thing
Yflat = tf.reshape(Yr, [-1, INTERNALSIZE]) # [ BATCHSIZE x FLAGS.seqlen, INTERNALSIZE ]
Ylogits = layers.linear(Yflat, ALPHASIZE) # [ BATCHSIZE x FLAGS.seqlen, ALPHASIZE ]
Yflat_ = tf.reshape(Yo_, [-1, ALPHASIZE]) # [ BATCHSIZE x FLAGS.seqlen, ALPHASIZE ]
loss = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Yflat_) # [ BATCHSIZE x FLAGS.seqlen ]
loss = tf.reshape(loss, [batchsize, -1]) # [ BATCHSIZE, FLAGS.seqlen ]
Yo = tf.nn.softmax(Ylogits, name='Yo') # [ BATCHSIZE x FLAGS.seqlen, ALPHASIZE ]
Y = tf.argmax(Yo, 1) # [ BATCHSIZE x FLAGS.seqlen ]
Y = tf.reshape(Y, [batchsize, -1], name="Y") # [ BATCHSIZE, FLAGS.seqlen ]
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
# stats for display
seqloss = tf.reduce_mean(loss, 1)
batchloss = tf.reduce_mean(seqloss)
accuracy = tf.reduce_mean(tf.cast(tf.equal(Y_, tf.cast(Y, tf.uint8)), tf.float32))
loss_summary = tf.summary.scalar("batch_loss", batchloss)
acc_summary = tf.summary.scalar("batch_accuracy", accuracy)
summaries = tf.summary.merge([loss_summary, acc_summary])
# Init Tensorboard stuff. This will save Tensorboard information into a different
# folder at each run named 'log/<timestamp>/'. Two sets of data are saved so that
# you can compare training and validation curves visually in Tensorboard.
timestamp = str(math.trunc(time.time()))
summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.summaries_dir, timestamp + "-training"))
validation_writer = tf.summary.FileWriter(os.path.join(FLAGS.summaries_dir, timestamp + "-validation"))
# Init for saving models. They will be saved into a directory named 'checkpoints'.
# Only the last checkpoint is kept.
if not os.path.exists(FLAGS.checkpoint_dir):
os.mkdir(FLAGS.checkpoint_dir)
saver = tf.train.Saver(max_to_keep=1)
# for display: init the progress bar
DISPLAY_FREQ = 50
_50_BATCHES = DISPLAY_FREQ * FLAGS.train_batch_size * FLAGS.seqlen
progress = txt.Progress(DISPLAY_FREQ, size=111+2, msg="Training on next "+str(DISPLAY_FREQ)+" batches")
# init
istate = np.zeros([FLAGS.train_batch_size, INTERNALSIZE*NLAYERS]) # initial zero input state
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
step = 0
# training loop
for x, y_, epoch in txt.rnn_minibatch_sequencer(codetext, FLAGS.train_batch_size, FLAGS.seqlen, nb_epochs=1000):
# train on one minibatch
feed_dict = {X: x, Y_: y_, Hin: istate, lr: FLAGS.learning_rate, pkeep: FLAGS.dropout_pkeep, batchsize: FLAGS.train_batch_size}
_, y, ostate, smm = sess.run([train_step, Y, H, summaries], feed_dict=feed_dict)
# save training data for Tensorboard
summary_writer.add_summary(smm, step)
# display a visual validation of progress (every 50 batches)
if step % _50_BATCHES == 0:
feed_dict = {X: x, Y_: y_, Hin: istate, pkeep: 1.0, batchsize: FLAGS.train_batch_size} # no dropout for validation
y, l, bl, acc = sess.run([Y, seqloss, batchloss, accuracy], feed_dict=feed_dict)
txt.print_learning_learned_comparison(x, y, l, bookranges, bl, acc, epoch_size, step, epoch)
# run a validation step every 50 batches
# The validation text should be a single sequence but that's too slow (1s per 1024 chars!),
# so we cut it up and batch the pieces (slightly inaccurate)
# tested: validating with 5K sequences instead of 1K is only slightly more accurate, but a lot slower.
if step % _50_BATCHES == 0 and len(valitext) > 0:
VALI_SEQLEN = 1*1024 # Sequence length for validation. State will be wrong at the start of each sequence.
bsize = len(valitext) // VALI_SEQLEN
txt.print_validation_header(len(codetext), bookranges)
vali_x, vali_y, _ = next(txt.rnn_minibatch_sequencer(valitext, bsize, VALI_SEQLEN, 1)) # all data in 1 batch
vali_nullstate = np.zeros([bsize, INTERNALSIZE*NLAYERS])
feed_dict = {X: vali_x, Y_: vali_y, Hin: vali_nullstate, pkeep: 1.0, # no dropout for validation
batchsize: bsize}
ls, acc, smm = sess.run([batchloss, accuracy, summaries], feed_dict=feed_dict)
txt.print_validation_stats(ls, acc)
# save validation data for Tensorboard
validation_writer.add_summary(smm, step)
# display a short text generated with the current weights and biases (every 150 batches)
if step // 3 % _50_BATCHES == 0:
txt.print_text_generation_header()
ry = np.array([[txt.convert_from_alphabet(ord("K"))]])
rh = np.zeros([1, INTERNALSIZE * NLAYERS])
for k in range(1000):
ryo, rh = sess.run([Yo, H], feed_dict={X: ry, pkeep: 1.0, Hin: rh, batchsize: 1})
rc = txt.sample_from_probabilities(ryo, topn=10 if epoch <= 1 else 2)
print(chr(txt.convert_to_alphabet(rc)), end="")
ry = np.array([[rc]])
txt.print_text_generation_footer()
# save a checkpoint (every 500 batches)
if step // 10 % _50_BATCHES == 0:
saver.save(sess, FLAGS.checkpoint_dir + '/rnn_train_' + timestamp, global_step=step)
# display progress bar
progress.step(reset=step % _50_BATCHES == 0)
# loop state around
istate = ostate
step += FLAGS.train_batch_size * FLAGS.seqlen
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--text_dir',
type=str,
default='shakespeare/*.txt',
help='Path to input text files.'
)
parser.add_argument(
'--output_graph',
type=str,
default='/tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='log',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--seqlen',
type=int,
default=30,
help='How long of a sequence to consider.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.001,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--dropout_pkeep',
type=float,
default=1.0,
help='What pct to keep in the dropout layers.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of the data to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of the data to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=50,
help='How many text sequences to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many text sequences to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many text sequences to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_text',
default=False,
help="""\
Whether to print out a list of all misclassified test text.\
""",
action='store_true'
)
parser.add_argument(
'--checkpoint_dir',
type=str,
default='checkpoints',
help="""\
Path to keep model checkpoints.\
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
# all runs: FLAGS.seqlen = 30, BATCHSIZE = 100, ALPHASIZE = 98, INTERNALSIZE = 512, NLAYERS = 3
# run 1477669632 decaying learning rate 0.001-0.0001-1e7 dropout 0.5: not good
# run 1477670023 lr=0.001 no dropout: very good
# Tensorflow runs:
# 1485434262
# trained on shakespeare/t*.txt only. Validation on 1K sequences
# validation loss goes up from step 5M
# 1485436038
# trained on shakespeare/t*.txt only. Validation on 5K sequences
# On 5K sequences validation accuracy is slightly higher and loss slightly lower
# => sequence breaks do introduce inaccuracies but the effect is small
# 1485437956
# Trained on shakespeare/*.txt only. Validation on 1K sequences
# On this much larger dataset, validation loss still decreasing after 6 epochs (step 35M)
# 1485440785
# Dropout = 0.5 - Trained on shakespeare/*.txt only. Validation on 1K sequences
# Much worse than before. Not very surprising since overfitting was not apparent
# on the validation curves before so there is nothing for dropout to fix.
|
#!/usr/bin/env python3
# Python 3.2.3
# Linux/Unix
# James Jessen
# 10918967
# CptS 355
#-------------------------------------------------------------------------------
# Used this value because it's less than 80 and
# aligns nicely with both the histogram and digraph output.
_PRINT_WIDTH = 78
#debugging = True # view debugging output
debugging = False # hide debugging output
#================================= Test/Debug ==================================
def debug(*s):
"""Print but only when debugging"""
if debugging:
print(*s)
def test(function, outputs, *inputs):
"""Test function with inputs and compare actual outputs with expected"""
result = True
for o, i in zip(outputs, *inputs):
actual = function(*i)
if(actual != o):
result = False
# Create visual separation between failures
debug('=' * _PRINT_WIDTH)
debug(function.__name__ + "(" + str(i).strip('[]()') + ")")
debug('-' * _PRINT_WIDTH)
debug("Actual:")
debug(actual)
debug('.' * _PRINT_WIDTH)
debug("Expected:")
debug(o)
# Create visual separation between tested functions, if there is a need
if(result == False):
debug(('#' * _PRINT_WIDTH) + '\n')
return result
#================================ Translation ==================================
# Assumes that the characters in s1 are unique and
# that the two strings are the same length.
def makettable(s1, s2):
"""Return a dictionary mapping each char in s1 to corresponding char in s2"""
ttable = {}
for c1, c2 in zip(s1, s2):
debug(c1, c2)
ttable[c1] = c2
return ttable
# The translation table is a dictionary.
# If a character is not in the translation table, it remains unchanged.
def trans(ttable, s):
"""Translate string s using translation table ttable"""
translation = ""
for c in s:
translation += ttable.get(c, c)
return translation
def testtrans():
"""Test trans(), return false if there are any failures"""
ttable = makettable('abc', 'xyz')
revttable = makettable('xyz', 'abc')
tests = "Now I know my abc's"
answer = "Now I know my xyz's"
inputs = []
outputs = []
inputs.append((ttable, tests))
outputs.append(answer)
inputs.append((revttable, trans(ttable, tests)))
outputs.append("Now I know mb abc's")
inputs.append((ttable, ''))
outputs.append('')
inputs.append((makettable('', ''), "abc"))
outputs.append('abc')
return test(trans, outputs, inputs)
#================================= Histogram ===================================
def histo(s):
"""Return a histogram depicting the frequency of each char in string s"""
D = {}
for c in s:
D[c] = D.get(c, 0) + 1
# Primarily sort by frequency (High->Low)
# Secondarily sort alphabetically (A->Z)
histogram = sorted(D.items(), key=lambda t: (-t[1], t[0]))
return histogram
def testhisto():
"""Test histo(), return false if there are any failures"""
inputs = []
outputs = []
inputs.append(('implemented',))
outputs.append([('e', 3), ('m', 2), ('d', 1), ('i', 1), ('l', 1), ('n', 1),
('p', 1), ('t', 1)])
inputs.append(('abbccddd',))
outputs.append([('d', 3), ('b', 2), ('c', 2), ('a', 1)])
inputs.append(('aaabbccd',))
outputs.append([('a', 3), ('b', 2), ('c', 2), ('d', 1)])
return test(histo, outputs, inputs)
#================================== Digraphs ===================================
def digraphs(s):
"""Return digraphs depicting the frequency of adjacent characters in s"""
D = {}
for i in range(len(s)-1):
pair = '/' + s[i:i+2] + '/'
D[pair] = D.get(pair, 0) + 1
# Primarily sort alphabetically (A->Z)
# Secondarily sort by frequency (High->Low)
digraph = sorted(D.items(), key=lambda t : (t[0], -t[1]))
return digraph
def testdigraphs():
"""Test digraphs(), return false if there are any failures"""
inputs = []
outputs = []
inputs.append(('abbccddddab',))
outputs.append([('/ab/', 2), ('/bb/', 1), ('/bc/', 1), ('/cc/', 1),
('/cd/', 1), ('/da/', 1), ('/dd/', 3)])
inputs.append(('aaabbccd',))
outputs.append([('/aa/', 2), ('/ab/', 1), ('/bb/', 1), ('/bc/', 1),
('/cc/', 1), ('/cd/', 1)])
inputs.append(('dccbbaaa',))
outputs.append([('/aa/', 2), ('/ba/', 1), ('/bb/', 1), ('/cb/', 1),
('/cc/', 1), ('/dc/', 1)])
return test(digraphs, outputs, inputs)
#==================================== Main =====================================
if __name__ == '__main__':
# Saying a function failed testing is not helpful.
# Failures are printed in detail upon discovery.
# Conflicted on whether or not to include this if-statement.
# It is used for debugging, but this program does nothing else!
#if(debugging):
print()
# A function is considered OK if it passed every test.
if(testtrans()):
print("Translation...OK")
if(testhisto()):
print("Histogram.....OK")
if(testdigraphs()):
print("Digraphs......OK")
print()
#================================ Extra Credit =================================
# ZYX WVWUTSZRVQ VP OUNMXLX WKZYVQL WNXLXQZTK XLZSOTRLYXJ RQ
# ZYX WSNI RL ZYX NXLUTZ VP SHHRJXQZST SQJ/VN RQZXQZRVQST NXTXSLXL OK
# WXZ VGQXNL. ZYXLX RQZNVJUHZRVQL HSQ YSFX JXFSLZSZRQE HVQLXDUXQHXL ZV
# VUN XHVLKLZXM. OUNMXLX WKZYVQL YSFX OXXQ PVUQJ ZV PXXJ VQ S GRJX
# FSNRXZK VP MSMMSTL SQJ ORNJL RQ ZYX XFXNETSJXL-XFXQ ZYX VHHSLRVQST
# STTRESZVN! OK WNXKRQE VQ QSZRFX GRTJTRPX, SQJ HVMWXZRQE GRZY VZYXN
# QSZRFX WNXJSZVNL, WKZYVQL SNX LXNRVULTK RMWSHZRQE ZYX QSZUNST VNJXN
# VP LVUZY PTVNRJS'L XHVTVERHST HVMMUQRZRXL. ZYX HVQZRQUXJ
# WNVTRPXNSZRVQ VP OUNMXLX WKZYVQL-SQJ ZYX HVQZRQUXJ RQZNVJUHZRVQ VP
# QXG PVNXREQ LWXHRXL-HSQ PUNZYXN ZYNXSZXQ MSQK VP ZYX XQJSQEXNXJ
# WTSQZL SQJ SQRMSTL GX'NX GVNIRQE JRTREXQZTK
# ZV WNVZXHZ. (GGG.QWL.EVF/XFXN/QSZUNXLHRXQHX/OUNMXLXWKZYVQLRQZNV.YZM)
thisIsTheCryptogramAnswer = """
the population of burmese pythons presently established in
the park is the result of accidental and/or intentional releases by
pet owners. these introductions can have devastating consequences to
our ecosystem. burmese pythons have been found to feed on a wide
variety of mammals and birds in the everglades-even the occasional
alligator! by preying on native wildlife, and competing with other
native predators, pythons are seriously impacting the natural order
of south florida's ecological communities. the continued
proliferation of burmese pythons-and the continued introduction of
new foreign species-can further threaten many of the endangered
plants and animals we're working diligently
to protect. (www.nps.gov/ever/naturescience/burmesepythonsintro.htm)
"""
|
#!/usr/bin/env python
import math
import random
import csv
VERBOSE = False
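# prob(r1, r2): treating the two ratings as win probabilities, this is the
# log5-style estimate of team 1 beating team 2 (an interpretation; the original
# source does not document the formula).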
def prob(r1, r2):
return r1*(1.-r2)/(r1*(1.-r2)+r2*(1.-r1))
class Outcome(object):
def __init__(self, name):
self.name = name
def __call__(self, team1, team2):
raise NotImplementedError
class SeedOutcome(Outcome):
def __init__(self):
super(SeedOutcome, self).__init__('SeedOutcome')
def __call__(self, team1, team2):
return team1 if team1<team2 else team2
class RatingOutcome(Outcome):
def __init__(self):
super(RatingOutcome, self).__init__('RatingOutcome')
def __call__(self, team1, team2):
return team1 if team1.rating>team2.rating else team2
class RatingRandomOutcome(Outcome):
def __init__(self, rng=None):
super(RatingRandomOutcome, self).__init__('RatingRandomOutcome')
self.rng = rng or random.Random()
def __call__(self, team1, team2):
p1 = team1.rating/10000.
p2 = team2.rating/10000.
p = (p1 - p1*p2)/(p1 + p2 - 2*p1*p2)
if self.rng.random() <= p:
return team1
else:
return team2
class RandomOutcome(Outcome):
def __init__(self, rng=None):
super(RandomOutcome, self).__init__('RandomOutcome')
self.rng = rng or random.Random()
def __call__(self, team1, team2):
if self.rng.random() <= 0.5:
return team1
else:
return team2
class Team(object):
def __init__(self, name, region, seed, rating):
self.name = name
self.region = region
self.seed = seed
self.rating = rating
self.initial_slot = None
self.current_slot = None
def __eq__(self, other):
return (self.seed == other.seed) and (self.region == other.region)
def __lt__(self, other):
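# Note: `region_order` is assumed to be a module-level dict mapping region name to
# an ordering index; it is not defined anywhere in this snippet.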
if self.seed == other.seed:
if region_order[self.region] < region_order[other.region]:
return True
else:
return False
else:
return self.seed < other.seed
def __str__(self):
return self.name
def __repr__(self):
return "%s(name=%s, region=%s, seed=%d, rating=%d)" % (self.__class__.__name__,
self.name,
self.region,
self.seed,
self.rating)
class Bracket(object):
def __init__(self, n_teams=128):
self.n_teams = n_teams
self.n_rounds = int(math.ceil(math.log(self.n_teams, 2)))
self.n_slots = int(2*self.n_teams - 1)
self.slots = {}
for i in range(self.n_slots):
self.slots[i] = []
def fill_teams(self, filename='2014_KP_round3.csv'):
self.teams = [None]*self.n_slots
with open(filename, 'rU') as f:
teams = csv.DictReader(f)
for row in teams:
if VERBOSE:
print row
team = Team(row['Team'], row['Region'], int(row['Seed']), int(row['Rating']))
self.teams[int(row['Slot'])-1] = team
if VERBOSE:
print self.teams
def slot_range(self, round):
return (int(self.n_teams/pow(2,round)), int(self.n_teams/pow(2,round-1)-1))
def parent_slot(self, slot):
return slot/2
def child_slots(self, slot):
return (2*slot, 2*slot+1)
def round(self, slot):
r = self.n_rounds + 1
for i in range(1,self.n_rounds+1):
min_slot, max_slot = self.slot_range(i)
if min_slot <= slot and max_slot >= slot:
r = i
break
return r
def cycle(self, n_sims=1, outcome=None):
for i in range(n_sims):
self.fill_teams()
self.resolve(outcome)
# print self.slots
def resolve(self, outcome=None):
current_slot = 2
while current_slot != 1:
if VERBOSE:
print "Current slot is %d" % current_slot
current_round = self.round(current_slot)
current_team = self.teams[current_slot-1]
if current_team:
if VERBOSE:
print " %s is in this slot" % current_team
opponent_slot = current_slot+1 if (current_slot%2 == 0) else current_slot-1
opponent_team = self.teams[opponent_slot-1]
if opponent_team:
# we have a game to resolve
winner_team = current_team
if outcome:
winner_team = outcome(current_team, opponent_team)
winner_slot = current_slot/2
self.teams[winner_slot-1] = winner_team
self.slots[winner_slot-1].append(winner_team.name)
current_slot = winner_slot
if VERBOSE:
print " Opponent slot is %d" % opponent_slot
print " GAME: %s vs %s" % (current_team, opponent_team)
print " %s wins and goes to slot %d" % (current_team, current_slot)
else:
# we do not have a game, need to go down the other branch
if VERBOSE:
print " No opponent in slot %d" % opponent_slot
current_slot = opponent_slot
else:
if VERBOSE:
print " No Team in this slot, going deeper"
current_slot *= 2
# print self.slots
if __name__ == '__main__':
import doctest
doctest.testmod()
# outcome = SeedOutcome()
# outcome = RatingOutcome()
outcome = RatingRandomOutcome()
# outcome = RandomOutcome()
bracket = Bracket()
nsim = 10000
bracket.cycle(nsim, outcome=outcome)
slot_team_count = {}
for s,ts in bracket.slots.iteritems():
for t in ts:
if s not in slot_team_count:
slot_team_count[s] = {}
if t in slot_team_count[s]:
slot_team_count[s][t] += 1
else:
slot_team_count[s][t] = 1
for s,cs in slot_team_count.iteritems():
print "Slot " + str(s+1).zfill(2) + ":",
for t,c in cs.iteritems():
slot_team_count[s][t] /= float(nsim)
print sorted(cs.items(), reverse=True, key=lambda x:x[1])
|
#This code appears to work, but it's too slow for leetcode
class ListNode():
def __init__(self, x):
self.val = x
self.next = None
def insertionSortList(head):
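# Builds the sorted portion starting at newHead; for each remaining node, walk the
# sorted list to find the insertion point, then splice the node in.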
newHead = head
if head == None or head.next==None:
return head
curr= head.next
while curr != None:
u=newHead
v=None
while curr.val > u.val:
v=u
u=u.next
if u == newHead:
newHead = curr
if u == curr:
curr=curr.next
else:
temp = curr.next
curr.next = u
if v != None:
v.next = curr
while u.next != curr:
u=u.next
u.next = temp
curr = temp
return newHead
t=ListNode(3)
t.next=ListNode(4)
t.next.next=ListNode(1)
t.next.next.next=ListNode(9)
t.next.next.next.next=ListNode(8)
s=insertionSortList(t)
|
from django.db import models
class Artist(models.Model):
artist = models.CharField(max_length=100, null=False)
def __str__(self):
return self.artist
class Song(models.Model):
id = models.AutoField(primary_key=True, null=False, verbose_name="ID")
song = models.CharField(max_length=100, null=False)
artist = models.ForeignKey(Artist, null=False, on_delete=models.PROTECT,
related_name="songs") # models.PROTECT prevents deleting a parent (the artist in our case)
# while it still has children: if deleting the parent were allowed, its children would go with it,
# so the children must be removed first.
# related_name="songs" names the reverse relation, so we can write, for example,
# artist.songs and get all the songs of that artist
def __str__(self):
return f"{self.song} - {self.artist}"
class Facts(models.Model):
facts = models.TextField()
song = models.ForeignKey(Song, null=False, on_delete=models.PROTECT,
related_name="facts")
author = models.CharField(max_length=100, null=False, default='none')
def __str__(self):
return (f"{self.facts}\n"
f"נכתב עי: {self.author}")
|
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.http.response import HttpResponse
import json
from jobs.dao import select_jobs
from jobs.controller import JobCraw
def into_jobs(req):
return render_to_response('jobs.html',RequestContext(req))
def get_jobs(req):
JobCraw()
return HttpResponse(json.dumps(select_jobs()),content_type="application/json")
|
import subprocess
import time
directoryRoot = "/home/damianossotirakis/__Zallpy/__tlog/"
directoryConfig = "portaloficinas-config/"
directoryEureka = "portaloficinas-eureka/"
directoryGateway = "portaloficinas-gateway/"
directoryApiClient = "portaloficinas-api-client/"
directoryApiAuth = "portaloficinas-api-auth/"
directoryApiEstablishment = "portaloficinas-api-establishment/"
directoryApiPromotion = "portaloficinas-api-promo/"
print("init - Config")
cmd = ['gnome-terminal']
cmd.extend(['-x', 'bash', '-c', (directoryRoot+directoryConfig)+'init.sh; exec $SHELL' ])
subprocess.Popen(cmd, stdout=subprocess.PIPE)
time.sleep(15)
print("init - Gatwey")
cmd = ['gnome-terminal']
cmd.extend(['-x', 'bash', '-c', (directoryRoot+directoryGateway)+'init-gateway.sh; exec $SHELL' ])
subprocess.Popen(cmd, stdout=subprocess.PIPE)
print("init - Eureka")
cmd = ['gnome-terminal']
cmd.extend(['-x', 'bash', '-c', (directoryRoot+directoryEureka)+'init-eureka.sh; exec $SHELL' ])
subprocess.Popen(cmd, stdout=subprocess.PIPE)
time.sleep(20)
print("init - API Auth")
cmd = ['gnome-terminal']
cmd.extend(['-x', 'bash', '-c', (directoryRoot+directoryApiAuth)+'init-gateway.sh; exec $SHELL' ])
subprocess.Popen(cmd, stdout=subprocess.PIPE)
print("init - API Client")
cmd = ['gnome-terminal']
cmd.extend(['-x', 'bash', '-c', (directoryRoot+directoryApiClient)+'init-eureka.sh; exec $SHELL' ])
subprocess.Popen(cmd, stdout=subprocess.PIPE)
print("init - API Establishment")
cmd = ['gnome-terminal']
cmd.extend(['-x', 'bash', '-c', (directoryRoot+directoryApiEstablishment)+'init-gateway.sh; exec $SHELL' ])
subprocess.Popen(cmd, stdout=subprocess.PIPE)
print("init - API Promotion")
cmd = ['gnome-terminal']
cmd.extend(['-x', 'bash', '-c', (directoryRoot+directoryApiPromotion)+'init-eureka.sh; exec $SHELL' ])
subprocess.Popen(cmd, stdout=subprocess.PIPE)
|
# A first Python script
import sys # Load a library module
print(sys.platform)
print(2 ** 10) # Raise 2 to a power
x = 'Spam!'
print(x * 8) # String repetition
nextInput = input()  # Take an input from the console
print(nextInput)  # Print the input
title="Welcome to the python world"
author="Minhaz"
|
#!/usr/bin/env python
# Copyright 2009-2014 Eucalyptus Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class Namespace(object):
"""
Convert dict (if provided) into attributes and return a somewhat
generic object
"""
def __init__(self, newdict=None):
if newdict:
for key in newdict:
value = newdict[key]
try:
if isinstance(value, dict):
setattr(self, key, Namespace(value))
else:
setattr(self, key, value)
except:
print '"{0}" ---> "{1}" , type: "{2}"'.format(key,
value,
type(value))
raise
def _to_json(self):
return json.dumps(self,
default=lambda o: o.__dict__,
sort_keys=True,
indent=4)
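# Illustrative usage (hypothetical values):
# ns = Namespace({'name': 'vm-1', 'tags': {'env': 'dev'}})
# ns.name        -> 'vm-1'
# ns.tags.env    -> 'dev'   (nested dicts become nested Namespace objects)
# ns._to_json()  -> pretty-printed JSON of the whole structure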
|
# TUPLES
# tup1 = ('MIT is the best!', 1, [], 'Yo!', 3)
# NOTE they are immutable (can't change elements' values)
# print(tup1)
# print(tup1[0])
# print(tup1[0:1])
# tup2 = ((1,'one'), (2, 'two'), (3, 'three'), (2, 'two'))
# def get_data(aTuple):
# nums = ()
# words = ()
# for t in aTuple:
# nums += (t[0],)
# if t[1] not in words:
# words += (t[1],)
# minNum = min(nums)
# maxNum = max(nums)
# uniqueWords = len(words)
# # NOTE you can return many objects using tuples
# return (minNum, maxNum, uniqueWords)
# (minNum, maxNum, uniqueWords) = get_data(tup2)
# print('Your tuple\'s minimal and maximal numbers are',minNum,'and '+str(maxNum)+', also it has',\
# uniqueWords,'unique words')
# LISTS
# ls1 = [0,1]
# ls2 = [3,4]
# ls3 = ls1+ls2
# ls1.extend([2])
# print(ls1,ls2,ls3)
# ls3.append(2)
# print(ls3)
# sorted Doesn't mutate
# sort Mutates
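# e.g. sorted([3, 1, 2]) returns a new list [1, 2, 3] and leaves the original unchanged,
# while [3, 1, 2].sort() reorders the list in place and returns None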
# ALIASES
# warm = ['orange', 'red', 'yellow']
# hot = warm
# hot.append('pink')
# print(hot)
# print(warm)
# # NOTE side effect -> if you change one list the other one will change too
# if you don't want a side effect you should clone a list
# hot = warm[:]
# hot.append('pink')
# print(hot)
# print(warm)
def removeDups(ls1, ls2):
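# iterate over a copy of ls1 so that removing elements doesn't skip items of the original list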
for i in ls1.copy():
if i in ls2:
ls1.remove(i)
return sorted(ls1+ls2)
print(removeDups([1,2,3,4,5,6,7,8], [1,2,3,4]))
|
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
from manage import app, db
class ContactUsModel(db.Model):
id = db.Column('id', db.Integer, primary_key=True)
name = db.Column('name', db.String)
email = db.Column(db.String, unique=True, nullable=False)
creation_date = db.Column('creation_date', db.Date, default=datetime.utcnow)
|
a = [[10, 20], [30, 40], [50, 60]]
print(a)
a = [[10, 20],
[30, 40],
[50, 60]]
# 2-D list indexing: [row][column]
a[0][0] = 1000
a[1][1] = 2000
a[2][1] = 3000
print(a)
for i, j in a:
print(i, j)
for i in range(len(a)): # number of rows
for j in range(len(a[i])): # number of columns
print(a[i][j],end=" ")
print()
print("\n===2중 for문 리스트 초기화===")
a = []
num = 10
for i in range(8): # rows
line = [] # inner list for this row
for j in range(8): # columns
line.append(num)
num += 10
a.append(line)
print(a)
print("\n===리스트 표현식===")
#중급자용 문법
num_list = [num for num in range(1, 6)]
print(num_list)
num_list = [num for num in range(1, 11) if num % 2 == 0]
print(num_list)
num = 10
a = [[num for j in range(2)] for i in range(3)]
print(a)
|
import numpy as np
import math
import os
import h5py
def ReadH5File(filename):
# return the first h5 dataset from this file
with h5py.File(filename, 'r') as hf:
keys = [key for key in hf.keys()]
data = np.array(hf[keys[0]])
return data
def WriteH5File(data, filename, dataset):
with h5py.File(filename, 'w') as hf:
# should cover all cases of affinities/images
hf.create_dataset(dataset, data=data, compression='gzip')
dsp = 8
volumesz = (5700,5456,5332)
all_blocksizes = [(128,1024,1024), (192,1536,1536), (256,2048,2048), (512,512,512), (768,768,768), (1024,1024,1024), (1536,1536,1536), (2048,2048,2048)]
input_folder = "/Users/Tim/Documents/Code/detectSomae/somae_in/"
output_folder = "/Users/Tim/Documents/Code/detectSomae/somae_blocks_out/"
somae_dsp = ReadH5File(input_folder+"somae_filled.h5")
os.mkdir(output_folder)
for blocksz in all_blocksizes:
blocksz_dsp = [int(blocksz[0]/dsp), int(blocksz[1]/dsp), int(blocksz[2]/dsp)]
dir_name = output_folder + "somaeblocks-dsp{}-{}x{}x{}/".format(dsp,blocksz[2],blocksz[1],blocksz[0])
os.mkdir(dir_name)
nb_z = math.ceil(volumesz[0]/blocksz[0])
nb_x = math.ceil(volumesz[1]/blocksz[1])
nb_y = math.ceil(volumesz[2]/blocksz[2])
print("blocksize: {},{},{}".format(blocksz[2],blocksz[1],blocksz[0]))
print("nbz:{}, nby:{}, nbx:{}".format(nb_z,nb_y,nb_x))
for bz in range(nb_z):
for by in range(nb_y):
for bx in range(nb_x):
labels_out = np.zeros((blocksz_dsp[0],blocksz_dsp[1],blocksz_dsp[2]),dtype=np.uint64)
somae_block_dsp = somae_dsp[bz*blocksz_dsp[0]:(bz+1)*blocksz_dsp[0],
by*blocksz_dsp[1]:(by+1)*blocksz_dsp[1],
bx*blocksz_dsp[2]:(bx+1)*blocksz_dsp[2]]
labels_out[:somae_block_dsp.shape[0],:somae_block_dsp.shape[1],:somae_block_dsp.shape[2]] = somae_block_dsp
filename_dsp = dir_name+'Zebrafinch-somae_filled_refined_dsp{}-{:04d}z-{:04d}y-{:04d}x.h5'.format(dsp,bz,by,bx)
WriteH5File(labels_out,filename_dsp, "main")
|
import pandas as pd
import geopandas as gpd
import os
from shapely.geometry import Point, LineString, shape
from geopandas.tools import sjoin
import contextily as ctx
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
import geoplot
# 0. A brief exploration of the data
# Check date range, the distribution of the points, the number of unique device id
# import data
parentDirectory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
file_name=os.path.join(parentDirectory,'data','location_data.csv')
location_data=pd.read_csv(file_name)
# date range
location_data['timestamp']=pd.to_datetime(location_data['timestamp'],unit='s')
print(f"The range of the datetime of the data is from {location_data['timestamp'].min()} to {location_data['timestamp'].max()}")
# distribution of the points
location_data['geometry']=location_data.apply(lambda x: Point((float(x.longitude), float(x.latitude))), axis=1)
location_data_gdf=gpd.GeoDataFrame(location_data, geometry='geometry')
location_data_gdf=location_data_gdf.set_crs(epsg=4326)
location_data_gdf=location_data_gdf.to_crs("EPSG:4398")
fig, ax = plt.subplots(1, figsize=(10, 10))
location_data_gdf.plot(figsize=(10, 10), alpha=0.5, edgecolor='k',ax=ax)
ctx.add_basemap(ax, crs="EPSG:4398", url=ctx.providers.OpenStreetMap.Mapnik,zoom=10)
ax.set_axis_off()
ax.set_title("Spatial Distribution of Devices in the Dataset", fontdict={"fontsize": "25", "fontweight" : "3"})
fig.savefig(os.path.join(parentDirectory,'output','point_distribution.png'))
# the number of unique device id
num_unique_device=len(location_data_gdf['device_id'].unique())
num_rows=len(location_data_gdf)
print(f"There are {num_unique_device} unique device ids among {num_rows} observations")
# 1. Create 500m buffers around the selected tourist points
def create_point(df,crs):
"""
This function is used to create a point geo dataframe
Input:
- df: a pandas dataframe that contains the location information
- crs: a string of target crs
Output:
- gdf: a geodataframe of the given point
"""
df['geometry']=df.apply(lambda x: Point((float(x.lon), float(x.lat))), axis=1)
gdf=gpd.GeoDataFrame(df, geometry='geometry')
gdf=gdf.set_crs(epsg=4326)
gdf=gdf.to_crs(crs)
return gdf
def create_buffer(gdf,buffer_distance):
"""
This function is used to create buffer around given geodataframe with a specified distance
Input:
- gdf: input geo dataframe
Output:
- buffer_poly: a buffer around the input gdf with the specified distance
"""
buffer_poly = gdf.copy()
buffer_poly["geometry"]=gdf.geometry.buffer(buffer_distance)
return buffer_poly
# location of the selected points
#df=pd.DataFrame(names=['name','lon','lat'])
location_dict=dict()
location_dict['perdona']=[101.6847, 3.1430]
location_dict['chinatown']=[101.6969, 3.1428]
location_dict['petronas']=[101.7120, 3.1579]
df=pd.DataFrame.from_dict(location_dict,orient='index',columns=['lon','lat']). reset_index().rename(columns={'index':'name'})
crs="EPSG:4398"
# create buffer
distance=500
gdf=create_point(df,crs)
buffer=create_buffer(gdf,distance)
print(buffer.head())
# 2. Extract those points that intersect with the buffer
# conduct a spatial join to extract the points that intersect with the 500m buffers
joined_buffer = gpd.sjoin(location_data_gdf, buffer, how="left", op='intersects')
print(joined_buffer.info())
# 3. Group by each device id to get first&last timestamp, duration, and distance moved
# Here, I cannot detect whether a device leaves the point area and comes back again.
# first & last timestamp & duration by tourist points
time_visited_by_device=joined_buffer.groupby(['device_id','name']).agg(['min','max'])
time_visited_by_device.columns = ['{}_{}'.format(x[0], x[1]) for x in time_visited_by_device.columns]
time_visited_by_device=time_visited_by_device.reset_index()
time_visited_by_device=time_visited_by_device[['device_id','name','timestamp_min','timestamp_max']]
time_visited_by_device['duration']=time_visited_by_device['timestamp_max']-time_visited_by_device['timestamp_min']
print(time_visited_by_device)
# plot first timestamp
sns.set_style(style="whitegrid")
time_visited_by_device['timestamp_min_hour']=time_visited_by_device['timestamp_min'].dt.hour
fist_visit_bplot = sns.violinplot(y='timestamp_min_hour', x='name',
data=time_visited_by_device,
width=0.5,
palette="colorblind")
fist_visit_bplot.axes.set_title("Timestamp of First Visits by Tourist Point",
fontsize=16)
fist_visit_bplot.figure.savefig(os.path.join(parentDirectory,'output','first_visit_timestamp.png'))
# plot last timestamp
time_visited_by_device['timestamp_max_hour']=time_visited_by_device['timestamp_max'].dt.hour
last_visit_bplot = sns.violinplot(y='timestamp_max_hour', x='name',
data=time_visited_by_device,
width=0.5,
palette="colorblind")
last_visit_bplot.axes.set_title("Timestamp of Last Visits by Tourist Point",
fontsize=16)
last_visit_bplot.figure.savefig(os.path.join(parentDirectory,'output','last_visit_timestamp.png'))
# plot duration
time_visited_by_device['duration_min']=time_visited_by_device['duration'].dt.total_seconds().div(60).astype(int)
duration_bplot = sns.boxplot(y='duration_min', x='name',
data=time_visited_by_device,
width=0.5,
palette="colorblind")
duration_bplot.axes.set_title("Duration of Stays by Tourist Point",
fontsize=16)
duration_bplot.figure.savefig(os.path.join(parentDirectory,'output','duration.png'))
# calculate the length of movements by each device
result_df=pd.DataFrame()
movements_gdf=gpd.GeoDataFrame()
for index,row in tqdm.tqdm(time_visited_by_device.iterrows()):
device_id=row[0]
name=row[1]
temp_gdf=joined_buffer.loc[(joined_buffer['name']==name)&(joined_buffer['device_id']==device_id)]
try:
movement=temp_gdf.groupby(['device_id', 'name'])['geometry'].apply(lambda x:LineString(x.tolist()))
movement = gpd.GeoDataFrame(movement, geometry='geometry')
movements_gdf=movements_gdf.append(movement)
except ValueError: # when we cannot convert it to linestring
continue
try:
length=movement['geometry'].length.tolist()[0]
except IndexError: # when there is nothing inside movement['geometry'].length
continue
temp_df = pd.DataFrame([[device_id,name,length]], columns=['device_id','name','length'])
result_df=result_df.append(temp_df)
movements_gdf=movements_gdf.reset_index()
print(result_df)
# plot distance travelled by each device
distance_bplot = sns.boxplot(y='length', x='name',
order=['perdona','petronas','chinatown'],
data=result_df,
width=0.5,
palette="colorblind")
distance_bplot.axes.set_title("Distance Travelled by Each Device (meters)",
fontsize=16)
distance_bplot.figure.savefig(os.path.join(parentDirectory,'output','distance.png'))
# plot the movement in perdona on a map
movements_perdona=movements_gdf.loc[movements_gdf['name']=='perdona']
fig, ax = plt.subplots(1, figsize=(10, 10))
movements_perdona.plot(figsize=(10, 10), alpha=0.2, color='blue',edgecolor='blue',ax=ax,markersize=5)
ctx.add_basemap(ax, crs="EPSG:4398", url=ctx.providers.OpenStreetMap.Mapnik)
ax.set_axis_off()
ax.set_title("Movements of devices in Perdona", fontdict={"fontsize": "25", "fontweight" : "3"})
fig.savefig(os.path.join(parentDirectory,'output','movement_perdona.png'))
# plot the movement in petronas on a map
movements_petronas=movements_gdf.loc[movements_gdf['name']=='petronas']
fig, ax = plt.subplots(1, figsize=(10, 10))
movements_petronas.plot(figsize=(10, 10), alpha=0.1, color='blue',edgecolor='blue',ax=ax,markersize=1)
ctx.add_basemap(ax, crs="EPSG:4398", url=ctx.providers.OpenStreetMap.Mapnik)
ax.set_axis_off()
ax.set_title("Movements of devices in petronas", fontdict={"fontsize": "25", "fontweight" : "3"})
fig.savefig(os.path.join(parentDirectory,'output','movement_petronas.png'))
# plot the movement in chinatown on a map
movements_chinatown=movements_gdf.loc[movements_gdf['name']=='chinatown']
fig, ax = plt.subplots(1, figsize=(10, 10))
movements_chinatown.plot(figsize=(10, 10), alpha=0.1, color='blue',edgecolor='blue',ax=ax,markersize=1)
ctx.add_basemap(ax, crs="EPSG:4398", url=ctx.providers.OpenStreetMap.Mapnik)
ax.set_axis_off()
ax.set_title("Movements of devices in chinatown", fontdict={"fontsize": "25", "fontweight" : "3"})
fig.savefig(os.path.join(parentDirectory,'output','movement_chinatown.png'))
# plot the cumulative total number of devices by hour
device_in_points=joined_buffer.dropna(subset=['name'])
device_in_points['timestamp_hour']=device_in_points['timestamp'].dt.hour
device_in_points_count=device_in_points.groupby(['name','timestamp_hour']).agg(['count'])
device_in_points_count.columns = ['{}_{}'.format(x[0], x[1]) for x in device_in_points_count.columns]
device_in_points_count=device_in_points_count.reset_index()
count_lplot=sns.lineplot(data=device_in_points_count, x="timestamp_hour", y="device_id_count", hue="name")
count_lplot.axes.set_title("Cumulative Number of Devices by Hour", fontsize=16)
count_lplot.figure.savefig(os.path.join(parentDirectory,'output','cumulative_devices.png'))
# plot spatial distribution of devices
name_list=['perdona','petronas','chinatown']
for name in name_list:
devices=device_in_points.loc[device_in_points['name']==name]
nrow=4
ncol=6
fig, axes = plt.subplots(nrow, ncol,figsize=(50, 50))
buffer_temp= buffer.loc[buffer['name']==name]
xlim = ([buffer_temp.total_bounds[0], buffer_temp.total_bounds[2]])
ylim = ([buffer_temp.total_bounds[1], buffer_temp.total_bounds[3]])
# plot counter
hour=0
for r in range(nrow):
for c in range(ncol):
devices_temp=devices.loc[devices['timestamp_hour']==hour]
devices_temp.plot(alpha=0.5, color='blue',ax=axes[r,c],markersize=50)
axes[r,c].set_xlim(xlim)
axes[r,c].set_ylim(ylim)
ctx.add_basemap(axes[r,c], crs="EPSG:4398", url=ctx.providers.Stamen.TonerLite)
axes[r,c].set_axis_off()
axes[r,c].set_title(f"Devices at {hour}", fontdict={"fontsize": "50", "fontweight" : "1"})
hour+=1
fig.tight_layout()
fig.suptitle(f"Spatial Distribution of Devices by Hour in {name.capitalize()}", fontsize=100)
fig.savefig(os.path.join(parentDirectory,'output',f'device_{name}_hour.png'))
location_data_gdf['timestamp_hour']=location_data_gdf['timestamp'].dt.hour
nrow=4
ncol=6
fig, axes = plt.subplots(nrow, ncol,figsize=(50, 50))
xlim = ([location_data_gdf.total_bounds[0], location_data_gdf.total_bounds[2]])
ylim = ([location_data_gdf.total_bounds[1], location_data_gdf.total_bounds[3]])
# plot counter
hour=0
for r in range(nrow):
for c in range(ncol):
devices_temp=location_data_gdf.loc[location_data_gdf['timestamp_hour']==hour]
devices_temp.plot(alpha=0.2, color='blue',ax=axes[r,c],markersize=20)
axes[r,c].set_xlim(xlim)
axes[r,c].set_ylim(ylim)
ctx.add_basemap(axes[r,c], crs="EPSG:4398", url=ctx.providers.Stamen.TonerLite)
axes[r,c].set_axis_off()
axes[r,c].set_title(f"Devices at {hour}", fontdict={"fontsize": "50", "fontweight" : "1"})
hour+=1
fig.tight_layout(h_pad=100)
fig.suptitle("Spatial Distribution of Devices by Hour in Kuala Lumpur", fontsize=100)
fig.subplots_adjust(top=0.8)
fig.savefig(os.path.join(parentDirectory,'output','device_hour.png'))
|
import asyncio
from asyncio.exceptions import CancelledError
async def producer(q):
for i in range(10):
await q.put(i)
await asyncio.sleep(0.1)
## finishing
await q.put(None)
async def watcher(q, name):
while True:
task = await q.get()
if task is not None:
print(f"{name} got {task}")
await asyncio.sleep(1)
else:
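# put the sentinel back so the other watcher tasks also see it and exit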
await q.put(None)
break
async def main():
q = asyncio.Queue()
p = asyncio.create_task(producer(q))
watchers = asyncio.gather(*[watcher(q, f"{i}") for i in range(3)])
await p
await watchers
asyncio.run(main())
|
"""Main module of the Dota 2 subreddit Responses Bot.
The main body of the script is running in this file. The comments are loaded from the subreddit
and the script checks if the comment or submission is a response from Dota 2. If it is, a proper reply for response is
prepared. The response is posted as a reply to the original comment/submission on Reddit.
"""
import time
from praw.exceptions import APIException
from praw.models import Comment
from prawcore import ServerError
import config
from bot import account
from util.caching import get_cache_api
from util.database.database import db_api
from util.logger import logger
from util.response_info import ResponseInfo
from util.str_utils import preprocess_text
__author__ = 'Jonarzz'
__maintainer__ = 'MePsyDuck'
cache_api = get_cache_api()
def work():
"""Main method executing the script.
It connects to an account, loads dictionaries from proper files (declared in config file).
Afterwards it executes process_comments method with proper arguments passed.
"""
reddit = account.get_account()
logger.info('Connected to Reddit account : ' + config.USERNAME)
comment_stream, submission_stream = get_reddit_stream(reddit)
while True:
try:
for comment in comment_stream:
if comment is None:
break
process_replyable(reddit, comment)
for submission in submission_stream:
if submission is None:
break
process_replyable(reddit, submission)
except ServerError as e:
comment_stream, submission_stream = get_reddit_stream(reddit)
logger.critical("Reddit server is down : " + str(e))
time.sleep(120)
except APIException as e:
comment_stream, submission_stream = get_reddit_stream(reddit)
logger.critical("API Exception occurred : " + str(e))
time.sleep(60)
def get_reddit_stream(reddit):
"""Returns the comment and submission stream.
Streams need to be restarted/re-obtained when they throw exception.
:param reddit: The reddit account instance
:return: The comment and subreddit stream
"""
comment_stream = reddit.subreddit(config.SUBREDDIT).stream.comments(pause_after=-1)
submission_stream = reddit.subreddit(config.SUBREDDIT).stream.submissions(pause_after=-1)
return comment_stream, submission_stream
def process_replyable(reddit, replyable):
"""Method used to check all the comments in a submission and add replies if they are responses.
PRAW generates past ~100 comments/submissions on the first iteration. Then the loop only runs if there is a new
comment/submission added to the stream. This also means that once PRAW is up and running, after the initial comments
list it won't generate any duplicate comments.
However, just as a safeguard, Caching is used to store replyable ids as they are processed for the first time.
Otherwise, when the bot is restarted it might reply twice to the same comments. If the replyable id is already present
in the cache_api, it is ignored; otherwise it is processed and added to the cache_api.
* Self comments are ignored.
* It is prepared for comparison to the responses in dictionary.
* If the replyable is not on the excluded responses list (loaded from config) and if it is in the responses db or
specific responses list, a reply replyable is prepared and posted.
:param reddit: The reddit account instance
:param replyable: comment or submission
:return: None
"""
if cache_api.exists(thing_id=replyable.fullname):
return
# Ignore thyself
if replyable.author == reddit.user.me():
return
logger.info("Found new replyable: " + replyable.fullname)
processed_text = process_text(replyable.body if isinstance(replyable, Comment) else replyable.title)
# TODO make use of assignment expression for all below
if is_excluded_response(processed_text):
pass
elif is_custom_response(processed_text):
add_custom_reply(replyable, processed_text)
elif (response_info := is_hero_specific_response(processed_text)) is not None:
add_hero_specific_reply(replyable, response_info)
elif (response_info := is_flair_specific_response(replyable, processed_text)) is not None:
add_flair_specific_reply(replyable, response_info)
elif (response_info := is_update_request(reddit, replyable, processed_text)) is not None:
update_reply(replyable, response_info)
elif (response_info := is_hero_response(processed_text)) is not None:
add_regular_reply(replyable, response_info)
def process_text(text):
"""Method used to clean the replyable body/title text.
If text contains a quote, the first quote text is considered as the text.
:param text: The replyable body/title text
:return: Processed text
"""
hero_name = None
if '>' in text:
text = get_quoted_text(text)
if '::' in text:
hero_name, text = text.split('::', 1)
hero_name = hero_name.strip() + '::'
return (hero_name or '') + preprocess_text(text)
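# Illustrative sketch of what process_text is expected to produce. The exact output
# depends on util.str_utils.preprocess_text (not shown here); the assumption below is
# that it lowercases the text and strips punctuation and surrounding whitespace.
#   process_text("> Haha!\n\nnice one")     -> "haha"                  (first quoted line wins)
#   process_text("Axe:: You get nothing!")  -> "Axe::you get nothing"  (hero prefix kept, body preprocessed)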
def get_quoted_text(text):
"""Method used to get quoted text.
If body/title text contains a quote, the first quote is considered as the text.
:param text: The replyable text
:return: The first quote in the text. If no quotes are found, then the entire text is returned
"""
lines = text.split('\n\n')
for line in lines:
if line.startswith('>'):
return line[1:]
return text
def is_excluded_response(text):
"""Method to check if the given body/title is in excluded responses set.
    Also returns True for single-word text (these are mostly common phrases).
:param text: The processed body/title text
:return: True if text is an excluded response, else False
"""
return ' ' not in text or text in config.EXCLUDED_RESPONSES
def is_custom_response(text):
"""Method to check if given body/title text is in custom response set.
:param text: The body/title text
:return: True if text is a custom response, else False
"""
return text in config.CUSTOM_RESPONSES
def add_custom_reply(replyable, body):
"""Method to create a custom reply for specific cases that match the custom responses set.
:param replyable: The comment/submission on reddit
:param body: The processed body/title text
:return: None
"""
custom_response = config.CUSTOM_RESPONSES[body]
original_text = replyable.body if isinstance(replyable, Comment) else replyable.title
reply = custom_response.format(original_text, config.COMMENT_ENDING)
replyable.reply(reply)
logger.info("Replied to: " + replyable.fullname)
def is_hero_specific_response(text):
"""Method that checks if response for specified hero name and text exists.
:param text: The processed body/title text
:return: ResponseInfo containing hero_id and link for response if the response for specified hero was found, otherwise None
"""
if '::' in text:
hero_name, text = text.split('::', 1)
if not hero_name or not text:
return None
hero_id = db_api.get_hero_id_by_name(hero_name=hero_name)
if hero_id:
link, _ = db_api.get_link_for_response(processed_text=text, hero_id=hero_id)
if link:
return ResponseInfo(hero_id=hero_id, link=link)
return None
def add_hero_specific_reply(replyable, response_info):
"""Method to add a hero specific reply to the comment/submission.
:param replyable: The comment/submission on reddit
:param response_info: ResponseInfo containing hero_id and link for response
:return: None
"""
create_and_add_reply(replyable=replyable, response_url=response_info.link, hero_id=response_info.hero_id)
def is_flair_specific_response(replyable, text):
"""Method that checks if response for hero in author's flair and text exists.
:param replyable: The comment/submission on reddit
:param text: The processed body/title text
:return: ResponseInfo containing hero_id and link for response if the response for author's flair's hero was found, otherwise None
"""
hero_id = db_api.get_hero_id_by_flair_css(flair_css=replyable.author_flair_css_class)
if hero_id:
link, _ = db_api.get_link_for_response(processed_text=text, hero_id=hero_id)
if link:
return ResponseInfo(hero_id=hero_id, link=link)
return None
def add_flair_specific_reply(replyable, response_info):
"""Method to add a author's flair specific reply to the comment/submission.
:param replyable: The comment/submission on reddit
:param response_info: ResponseInfo containing hero_id and link for response
:return: None
"""
create_and_add_reply(replyable=replyable, response_url=response_info.link, hero_id=response_info.hero_id)
def is_update_request(reddit, replyable, text):
"""Method to check whether the comment is a request to update existing response.
Only works if
* Comment begins with "try"
* Comment ends with valid hero name
* Given hero has the original response
* Root/Original comment/submission was not hero specific response.
Examples:
"Try legion commander" : Valid
"Try leGiOn ComManDer" : Valid - case does not matter
"legion commander" : Invalid - does not begin with `try`
"Try legion" : Invalid - invalid hero name
:param reddit: The reddit account instance
:param replyable: The comment/submission on reddit
:param text: The processed body/title text
:return: ResponseInfo containing hero_id and link for response if this is a valid update request, otherwise None
"""
if not text.startswith(config.UPDATE_REQUEST_KEYWORD):
return None
if not validate_update_request_comment_tree(reddit, replyable):
return None
hero_name = text.replace(config.UPDATE_REQUEST_KEYWORD, '', 1)
hero_id = db_api.get_hero_id_by_name(hero_name=hero_name)
if hero_id is None:
return None
root_replyable = replyable.parent().parent()
processed_text = process_text(root_replyable.body if isinstance(root_replyable, Comment) else root_replyable.title)
if is_hero_specific_response(processed_text):
return None
link, _ = db_api.get_link_for_response(processed_text=processed_text, hero_id=hero_id)
if link is None:
return None
return ResponseInfo(hero_id=hero_id, link=link)
def validate_update_request_comment_tree(reddit, replyable):
"""Method to check whether the comment in the request to update existing response is valid.
A valid comment tree is when:
* Comment was made as a reply to bot's comment
    * Comment was added by the OP who made the original request (comment/submission) for the response.
The comment tree should look something like below, where root(original) replyable can be Comment or Submission.
Only valid case is c3.
c1/s1 user: Foo
c2 bot: "Foo" response by Bar hero
c3 user: Try Bar2
c4 other_user: Try Bar2
:param reddit: The reddit account instance
:param replyable: The comment/submission on reddit
:return: True if this is a valid comment tree, else False
"""
if not isinstance(replyable, Comment):
return False
op = replyable.author
parent_comment = replyable.parent()
if not isinstance(parent_comment, Comment):
return False
if not parent_comment.author == reddit.user.me():
return False
root_replyable = parent_comment.parent()
if not root_replyable.author == op:
return False
return True
def update_reply(replyable, response_info):
"""Method to edit and update existing response comment by the bot with a new hero as requested.
:param replyable: The comment/submission on reddit
:param response_info: ResponseInfo containing hero_id and link for response
:return: None
"""
bot_comment = replyable.parent()
root_replyable = bot_comment.parent()
    # TODO maybe get original text from the bot's comment, rather than the original post, as the post might be edited by the time this command is called
original_text = root_replyable.body if isinstance(root_replyable, Comment) else root_replyable.title
original_text = original_text.strip()
if '>' in original_text:
original_text = get_quoted_text(original_text).strip()
    # Get the hero name with proper formatting
hero_name = db_api.get_hero_name(response_info.hero_id)
reply = "[{}]({}) (sound warning: {}){}".format(original_text, response_info.link, hero_name, config.COMMENT_ENDING)
bot_comment.edit(reply)
logger.info("Updated Reply: " + replyable.fullname)
def is_hero_response(text):
"""Method to create response for given replyable.
In case of multiple matches, it used to sort responses in descending order of heroes and get the first one,
but now it's random.
:param text: The processed body/title text
:return: ResponseInfo containing hero_id and link for response if this is a valid update request, otherwise None
"""
link, hero_id = db_api.get_link_for_response(processed_text=text)
if link and hero_id:
return ResponseInfo(hero_id=hero_id, link=link)
return None
def add_regular_reply(replyable, response_info):
"""Method to create response for given replyable.
In case of multiple matches, it used to sort responses in descending order of heroes and get the first one,
but now it's random.
:param replyable: The comment/submission on reddit
:param response_info: ResponseInfo containing hero_id and link for response
:return: None
"""
create_and_add_reply(replyable=replyable, response_url=response_info.link, hero_id=response_info.hero_id)
def create_and_add_reply(replyable, response_url, hero_id):
"""Method that creates a reply in reddit format and adds the reply to comment/submission.
The reply consists of a link to the response audio file, the response itself, a warning about the sound
and an ending added from the config file (post footer).
Image is currently ignored due to new reddit redesign not rendering flairs properly.
:param replyable: The comment/submission on reddit
:param response_url: The url to the response audio file
    :param hero_id: The hero_id to which the response belongs.
    :return: None
"""
original_text = replyable.body if isinstance(replyable, Comment) else replyable.title
original_text = original_text.strip()
if '>' in original_text:
original_text = get_quoted_text(original_text).strip()
if '::' in original_text:
original_text = original_text.split('::', 1)[1].strip()
hero_name = db_api.get_hero_name(hero_id)
reply = "[{}]({}) (sound warning: {}){}".format(original_text, response_url, hero_name, config.COMMENT_ENDING)
replyable.reply(reply)
logger.info("Replied to: " + replyable.fullname)
|
# coding=latin1
print("¿")
print("¿")
print("abc")
|
# pylint: disable=C0111
from setuptools import setup
with open("README.md", "r") as fh:
README = fh.read()
setup(
name='oneforge',
version='0.1.0',
description='1Forge REST API wrapper',
long_description=README,
long_description_content_type='text/markdown',
author='Renato Orgito',
author_email='orgito@gmail.com',
maintainer='Renato Orgito',
maintainer_email='orgito@gmail.com',
url='https://github.com/orgito/1forge-client',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
],
keywords='1forge forex',
packages=['oneforge'],
setup_requires=['setuptools>=38.6.0'],
install_requires=['requests'],
python_requires='>=3.6',
project_urls={
'Bug Reports': 'https://github.com/orgito/1forge-client/issues',
'Source': 'https://github.com/orgito/1forge-client',
},
)
|
import datetime

from django.db import models
# Create your models here.
class station(models.Model):
sno = models.CharField(max_length=4)
sna = models.CharField(max_length=128)
sarea = models.CharField(max_length=128)
lat = models.FloatField()
lng = models.FloatField()
ar = models.CharField(max_length=128)
sareaen = models.CharField(max_length=256)
snaen = models.CharField(max_length=256)
aren = models.CharField(max_length=256)
def __str__(self):
return self.sno
class info(models.Model):
sno = models.CharField(max_length=4)
tot = models.PositiveSmallIntegerField()
sbi = models.PositiveSmallIntegerField()
mday = models.DateTimeField()
bemp = models.PositiveSmallIntegerField()
act = models.BooleanField()
def __str__(self):
return self.sno
def outdated(self):
return datetime.datetime.now()-self.mday >= datetime.timedelta(days=1)
|
import os
import glob
import unittest
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def my_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='*_test.py')
return test_suite
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='annonex2embl',
version='1.0.3',
author='Michael Gruenstaeudl, PhD',
author_email='m.gruenstaeudl@fu-berlin.de',
description='Converts an annotated DNA multi-sequence alignment (in NEXUS format) to an EMBL flatfile for submission to ENA via the Webin-CLI submission tool',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/michaelgruenstaeudl/annonex2embl',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.7',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
keywords='novel DNA sequences, public sequence databases, European Nucleotide Archive, file conversion, flatfile',
license='BSD',
entry_points={
'console_scripts': [
'annonex2embl = annonex2embl.CLIOps:start_annonex2embl'
],
},
packages=['annonex2embl'], # So that the subfolder 'annonex2embl' is read immediately.
#packages = find_packages(),
install_requires=['biopython <= 1.77', 'argparse', 'requests', 'unidecode'],
scripts=glob.glob('scripts/*'),
test_suite='setup.my_test_suite',
include_package_data=True,
zip_safe=False
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask,render_template,request,redirect,url_for
from werkzeug.utils import secure_filename
import os,time
import base_func
app = Flask(__name__)
@app.route('/upload', methods=['POST', 'GET'])
def upload():
if request.method == 'POST':
base_func.base('service tomcat7 stop')
base_func.base('rm -rf /var/lib/tomcat7/webapps/mianyang.war')
base_func.base('rm -rf /var/lib/tomcat7/webapps/mianyang')
f = request.files['file']
        #basepath = os.path.dirname(__file__) # path of the current file
upload_path = os.path.join('/var/lib/tomcat7/webapps',secure_filename(f.filename))
f.save(upload_path)
base_func.base('service tomcat7 restart')
time.sleep(5)
base_func.backup('/var/lib/tomcat7/webapps/mianyang','/home/anyuan/Backtomcat')
base_func.backup('/var/lib/tomcat7/webapps/mianyang.war','/home/anyuan/Backtomcat')
return redirect(url_for('upload'))
return render_template('upload.html')
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True)
|
# Behold my beautifully inelegant solution to Project Euler 39
def pf(number):
if number == 1:
return [1]
factors = []
subnumber = number
divisor = 2
temp = subnumber/divisor
while temp % 1 == 0:
factors.append(divisor)
subnumber = temp
temp = subnumber / divisor
divisor = 3
temp = subnumber / divisor
while temp % 1 == 0:
factors.append(divisor)
subnumber = temp
temp = subnumber / divisor
divisor = 5
m = 2
while subnumber != 1:
temp = subnumber / divisor
if temp % 1 == 0:
factors.append(divisor)
subnumber = temp
else:
divisor += m
m = 6 - m
if divisor > number/2:
return [number]
return factors
def group_factors(prime_factors):
grouped = []
count = 1
value = prime_factors[0]
for i in range(1, len(prime_factors)):
if prime_factors[i] == value:
count += 1
else:
grouped.append([value, count])
value = prime_factors[i]
count = 1
grouped.append([value, count])
return grouped
def product(list):
total = 1
for i in list:
total *= i
return total
def generate_factors(prime_factors):
if len(prime_factors) == 1:
primes = [prime_factors[0][0]]
count = [prime_factors[0][1]]
else:
primes, count = zip(*prime_factors)
ccount = [0 for x in range(len(count))]
factors = []
while ccount[-1] <= count[-1]:
factors.append(product([primes[x] ** ccount[x] for x in range(len(ccount))]))
ccount[0] += 1
for i in range(len(ccount)-1):
if ccount[i] > count[i]:
ccount[i] = 0
ccount[i+1] += 1
return factors
max = 0
answer = 0
for i in range(2, 1002, 2):
grouped = group_factors(pf(i/2))
if len(grouped) > 1:
factors = generate_factors(grouped)
total = 0
sides = set()
for j in range(len(factors)):
for k in range(j+1, len(factors)):
a = factors[j]
b = factors[k]
k = factors[-1] / (a*b)
if k >= 1 and k == int(k):
k = int(k)
if a < b:
smallest = a
biggest = b
else:
smallest = b
biggest = a
if (biggest / smallest) < 2:
m = smallest
n = biggest-smallest
yeet = int(k*(m**2 - n**2))
boi = 2*k*m*n
if yeet < boi:
smallest = yeet
biggest = boi
else:
smallest = boi
biggest = yeet
sides.add((smallest, biggest))
total = len(sides)
if total > max:
max = total
answer = i
print("The answer is {} with {} solutions".format(answer, max))
|
"""
Hacked together router with Werkzeug because GCF does not support full Flask apps
"""
# hack together router with werkzeug because google cloud functions does not
# support full flask apps
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import MethodNotAllowed, HTTPException
url_map = Map([])
def route(path, methods=("GET",)):
def decorator(fn):
def endpoint(req):
if methods:
if req.method not in methods:
raise MethodNotAllowed(list(methods))
return fn(req)
        url_map.add(Rule(path, endpoint=endpoint))
        return fn  # return the original function so the decorated name stays usable
    return decorator
def enter_router(request):
urls = url_map.bind("localhost", path_info=request.path)
try:
endpoint, args = urls.match()
return endpoint(request)
except HTTPException as e:
raise e
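# Illustrative usage sketch (hypothetical handler names; not part of this module).
# On Google Cloud Functions, enter_router would be declared as the entry point so
# every incoming request is dispatched through the url_map built by @route.
#
#   @route("/hello", methods=("GET",))
#   def hello(req):
#       return "Hello, world"
#
#   def main(request):  # Cloud Function entry point
#       return enter_router(request)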
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from dataclasses import FrozenInstanceError, replace
from unittest.mock import patch
import numpy as np
import pytest
import pulser
from pulser.channels import Microwave, Raman, Rydberg
from pulser.channels.dmm import DMM
from pulser.devices import Chadoq2, Device, VirtualDevice
from pulser.register import Register, Register3D
from pulser.register.register_layout import RegisterLayout
from pulser.register.special_layouts import TriangularLatticeLayout
@pytest.fixture
def test_params():
return dict(
name="Test",
dimensions=2,
rydberg_level=70,
channel_ids=None,
channel_objects=(),
min_atom_distance=1,
max_atom_num=None,
max_radial_distance=None,
)
@pytest.mark.parametrize(
"param, value, msg",
[
("name", 1, None),
("supports_slm_mask", 0, None),
("reusable_channels", "true", None),
("max_atom_num", 1e9, None),
("max_radial_distance", 100.4, None),
("rydberg_level", 70.0, "Rydberg level has to be an int."),
(
"channel_ids",
{"fake_channel"},
"When defined, 'channel_ids' must be a tuple or a list "
"of strings.",
),
(
"channel_ids",
("ch1", 2),
"When defined, 'channel_ids' must be a tuple or a list "
"of strings.",
),
(
"channel_objects",
("Rydberg.Global(None, None)",),
"All channels must be of type 'Channel', not 'str'",
),
(
"channel_objects",
(Microwave.Global(None, None),),
"When the device has a 'Microwave' channel, "
"'interaction_coeff_xy' must be a 'float',"
" not '<class 'NoneType'>'.",
),
(
"dmm_objects",
("DMM(bottom_detuning=-1)",),
"All DMM channels must be of type 'DMM', not 'str'",
),
("max_sequence_duration", 1.02, None),
("max_runs", 1e8, None),
],
)
def test_post_init_type_checks(test_params, param, value, msg):
test_params[param] = value
error_msg = msg or f"{param} must be of type"
with pytest.raises(TypeError, match=error_msg):
VirtualDevice(**test_params)
@pytest.mark.parametrize(
"param, value, msg",
[
(
"dimensions",
1,
re.escape("'dimensions' must be one of (2, 3), not 1."),
),
("rydberg_level", 49, "Rydberg level should be between 50 and 100."),
("rydberg_level", 101, "Rydberg level should be between 50 and 100."),
(
"min_atom_distance",
-0.001,
"'min_atom_distance' must be greater than or equal to zero",
),
("max_atom_num", 0, None),
("max_radial_distance", 0, None),
(
"max_layout_filling",
0.0,
"maximum layout filling fraction must be greater than 0. and"
" less than or equal to 1.",
),
(
"channel_ids",
("rydberg_global", "rydberg_global"),
"When defined, 'channel_ids' can't have repeated elements.",
),
(
"channel_ids",
("rydberg_global",),
"When defined, the number of channel IDs must"
" match the number of channel objects.",
),
("max_sequence_duration", 0, None),
("max_runs", 0, None),
],
)
def test_post_init_value_errors(test_params, param, value, msg):
test_params[param] = value
error_msg = msg or f"When defined, '{param}' must be greater than zero"
with pytest.raises(ValueError, match=error_msg):
VirtualDevice(**test_params)
# TODO: Add test of comptability SLM-DMM once DMM is added for serialization
# def test_post_init_slm_dmm_compatibility(test_params):
# test_params["supports_slm_mask"] = True
# test_params["dmm_objects"] = ()
# with pytest.raises(ValueError,
# match="One DMM object should be defined to support SLM mask."
# ):
# VirtualDevice(**test_params)
potential_params = ["max_atom_num", "max_radial_distance"]
always_none_allowed = ["max_sequence_duration", "max_runs"]
@pytest.mark.parametrize("none_param", potential_params + always_none_allowed)
def test_optional_parameters(test_params, none_param):
test_params.update({p: 10 for p in potential_params})
test_params[none_param] = None
if none_param not in always_none_allowed:
with pytest.raises(
TypeError,
match=f"'{none_param}' can't be None in a 'Device' instance.",
):
Device(**test_params)
else:
Device(**test_params)
VirtualDevice(**test_params) # Valid as None on a VirtualDevice
def test_default_channel_ids(test_params):
# Needed because of the Microwave global channel
test_params["interaction_coeff_xy"] = 10000.0
test_params["channel_objects"] = (
Rydberg.Local(None, None),
Raman.Local(None, None),
Rydberg.Local(None, None),
Raman.Global(None, None),
Microwave.Global(None, None),
)
dev = VirtualDevice(**test_params)
assert dev.channel_ids == (
"rydberg_local",
"raman_local",
"rydberg_local_2",
"raman_global",
"mw_global",
)
def test_tuple_conversion(test_params):
test_params["channel_objects"] = [Rydberg.Global(None, None)]
test_params["channel_ids"] = ["custom_channel"]
dev = VirtualDevice(**test_params)
assert dev.channel_objects == (Rydberg.Global(None, None),)
assert dev.channel_ids == ("custom_channel",)
def test_valid_devices():
for dev in pulser.devices._valid_devices:
assert dev.dimensions in (2, 3)
assert dev.rydberg_level > 49
assert dev.rydberg_level < 101
assert dev.max_atom_num > 10
assert dev.max_radial_distance > 10
assert dev.min_atom_distance > 0
assert dev.interaction_coeff > 0
assert 0 < dev.max_layout_filling <= 1
assert isinstance(dev.channels, dict)
with pytest.raises(FrozenInstanceError):
dev.name = "something else"
assert Chadoq2 in pulser.devices._valid_devices
assert Chadoq2.supported_bases == {"digital", "ground-rydberg"}
with patch("sys.stdout"):
Chadoq2.print_specs()
assert Chadoq2.__repr__() == "Chadoq2"
def test_change_rydberg_level():
dev = pulser.devices.MockDevice
dev.change_rydberg_level(60)
assert dev.rydberg_level == 60
assert np.isclose(dev.interaction_coeff, 865723.02)
with pytest.raises(TypeError, match="Rydberg level has to be an int."):
dev.change_rydberg_level(70.5)
with pytest.raises(
ValueError, match="Rydberg level should be between 50 and 100."
):
dev.change_rydberg_level(110)
dev.change_rydberg_level(70)
def test_rydberg_blockade():
dev = pulser.devices.MockDevice
assert np.isclose(dev.rydberg_blockade_radius(3 * np.pi), 9.119201)
assert np.isclose(dev.rabi_from_blockade(9), 10.198984)
rand_omega = np.random.rand() * 2 * np.pi
assert np.isclose(
rand_omega,
dev.rabi_from_blockade(dev.rydberg_blockade_radius(rand_omega)),
)
def test_validate_register():
with pytest.raises(ValueError, match="The number of atoms"):
Chadoq2.validate_register(Register.square(50))
coords = [(100, 0), (-100, 0)]
with pytest.raises(TypeError):
Chadoq2.validate_register(coords)
with pytest.raises(ValueError, match="at most 50 μm away from the center"):
Chadoq2.validate_register(Register.from_coordinates(coords))
with pytest.raises(ValueError, match="at most 2D vectors"):
coords = [(-10, 4, 0), (0, 0, 0)]
Chadoq2.validate_register(Register3D(dict(enumerate(coords))))
with pytest.raises(ValueError, match="The minimal distance between atoms"):
Chadoq2.validate_register(
Register.triangular_lattice(3, 4, spacing=3.9)
)
with pytest.raises(
ValueError, match="associated with an incompatible register layout"
):
tri_layout = TriangularLatticeLayout(200, 20)
Chadoq2.validate_register(tri_layout.hexagonal_register(10))
Chadoq2.validate_register(Register.rectangle(5, 10, spacing=5))
def test_validate_layout():
coords = [(100, 0), (-100, 0)]
with pytest.raises(TypeError):
Chadoq2.validate_layout(Register.from_coordinates(coords))
with pytest.raises(ValueError, match="at most 50 μm away from the center"):
Chadoq2.validate_layout(RegisterLayout(coords))
with pytest.raises(ValueError, match="at most 2 dimensions"):
coords = [(-10, 4, 0), (0, 0, 0)]
Chadoq2.validate_layout(RegisterLayout(coords))
with pytest.raises(ValueError, match="The minimal distance between traps"):
Chadoq2.validate_layout(
TriangularLatticeLayout(12, Chadoq2.min_atom_distance - 1e-6)
)
valid_layout = RegisterLayout(
Register.square(int(np.sqrt(Chadoq2.max_atom_num * 2)))._coords
)
Chadoq2.validate_layout(valid_layout)
valid_tri_layout = TriangularLatticeLayout(
Chadoq2.max_atom_num * 2, Chadoq2.min_atom_distance
)
Chadoq2.validate_layout(valid_tri_layout)
@pytest.mark.parametrize(
"register",
[
TriangularLatticeLayout(100, 5).hexagonal_register(80),
TriangularLatticeLayout(100, 5).make_mappable_register(51),
],
)
def test_layout_filling(register):
assert Chadoq2.max_layout_filling == 0.5
assert register.layout.number_of_traps == 100
with pytest.raises(
ValueError,
match=re.escape(
"the given register has too many qubits "
f"({len(register.qubit_ids)}). "
"On this device, this layout can hold at most 50 qubits."
),
):
Chadoq2.validate_layout_filling(register)
def test_layout_filling_fail():
with pytest.raises(
TypeError,
match="'validate_layout_filling' can only be called for"
" registers with a register layout.",
):
Chadoq2.validate_layout_filling(Register.square(5))
def test_calibrated_layouts():
with pytest.raises(ValueError, match="The minimal distance between traps"):
Device(
name="TestDevice",
dimensions=2,
rydberg_level=70,
max_atom_num=100,
max_radial_distance=50,
min_atom_distance=4,
channel_objects=(),
pre_calibrated_layouts=(TriangularLatticeLayout(201, 3),),
)
TestDevice = Device(
name="TestDevice",
dimensions=2,
rydberg_level=70,
max_atom_num=100,
max_radial_distance=50,
min_atom_distance=4,
channel_objects=(),
pre_calibrated_layouts=(
TriangularLatticeLayout(100, 6.8),
TriangularLatticeLayout(200, 5),
),
)
assert TestDevice.calibrated_register_layouts.keys() == {
"TriangularLatticeLayout(100, 6.8µm)",
"TriangularLatticeLayout(200, 5.0µm)",
}
def test_device_with_virtual_channel():
with pytest.raises(
ValueError,
match="A 'Device' instance cannot contain virtual channels.",
):
Device(
name="TestDevice",
dimensions=2,
rydberg_level=70,
max_atom_num=100,
max_radial_distance=50,
min_atom_distance=4,
channel_objects=(Rydberg.Global(None, 10),),
)
def test_convert_to_virtual():
params = dict(
name="Test",
dimensions=2,
rydberg_level=80,
min_atom_distance=1,
max_atom_num=20,
max_radial_distance=40,
channel_objects=(Rydberg.Global(0, 10),),
)
assert Device(
pre_calibrated_layouts=(TriangularLatticeLayout(40, 2),), **params
).to_virtual() == VirtualDevice(
supports_slm_mask=False, reusable_channels=False, **params
)
def test_device_params():
all_params = Chadoq2._params()
init_params = Chadoq2._params(init_only=True)
assert set(all_params) - set(init_params) == {"reusable_channels"}
virtual_chadoq2 = Chadoq2.to_virtual()
all_virtual_params = virtual_chadoq2._params()
init_virtual_params = virtual_chadoq2._params(init_only=True)
assert all_virtual_params == init_virtual_params
assert set(all_params) - set(all_virtual_params) == {
"pre_calibrated_layouts"
}
def test_dmm_channels():
dmm = DMM(
bottom_detuning=-1,
clock_period=1,
min_duration=1,
max_duration=1e6,
mod_bandwidth=20,
)
device = replace(Chadoq2, dmm_objects=(dmm,))
assert len(device.dmm_channels) == 1
assert device.dmm_channels["dmm_0"] == dmm
with pytest.raises(
ValueError,
match=(
"When defined, the names of channel IDs must be different"
" than the names of DMM channels 'dmm_0', 'dmm_1', ... ."
),
):
device = replace(
Chadoq2,
dmm_objects=(dmm,),
channel_objects=(Rydberg.Global(None, None),),
channel_ids=("dmm_0",),
)
assert not dmm.is_virtual()
assert DMM().is_virtual()
|
"""relaydomains API v1 unit tests."""
import json
from django.urls import reverse
from modoboa.admin import factories as admin_factories, models as admin_models
from modoboa.lib.tests import ModoAPITestCase
from modoboa.transport import factories as tr_factories, models as tr_models
class DataMixin(object):
"""A mixin to provide test data."""
@classmethod
def setUpTestData(cls): # NOQA:N802
super(DataMixin, cls).setUpTestData()
transport = tr_factories.TransportFactory(
pattern="test.com", _settings={
"relay_target_host": "external.host.tld",
"relay_target_port": 25,
"relay_verify_recipients": False
}
)
cls.domain1 = admin_factories.DomainFactory(
name="test.com", type="relaydomain", transport=transport)
transport = tr_factories.TransportFactory(
pattern="domain2.test", _settings={
"relay_target_host": "external.host.tld",
"relay_target_port": 25,
"relay_verify_recipients": True
}
)
cls.domain2 = admin_factories.DomainFactory(
name="test2.com", type="relaydomain", transport=transport)
class RelayDomainAPITestCase(DataMixin, ModoAPITestCase):
"""API test cases."""
def test_list(self):
"""Test list service."""
url = reverse("api:relaydomain-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
def test_create(self):
"""Test create service."""
url = reverse("api:relaydomain-list")
settings = {
"relay_target_host": "1.2.3.4"
}
data = {
"name": "test3.com",
"transport": {
"service": "relay",
"_settings": json.dumps(settings)
}
}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.data["transport"]["_settings"],
["relay_target_port: This field is required"]
)
settings.update({"relay_target_port": 25})
data["transport"]["_settings"] = json.dumps(settings)
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
domain = admin_models.Domain.objects.get(name="test3.com")
self.assertEqual(
domain.transport.next_hop, "[{}]:{}".format(
settings["relay_target_host"], settings["relay_target_port"])
)
def test_update(self):
"""Test update service."""
url = reverse("api:relaydomain-detail", args=[self.domain1.pk])
settings = self.domain1.transport._settings.copy()
settings.update({"relay_target_port": 1000})
data = {
"name": "test3.com",
"transport": {
"service": "relay",
"_settings": json.dumps(settings)
}
}
response = self.client.put(url, data, format="json")
self.assertEqual(response.status_code, 200)
self.domain1.refresh_from_db()
self.domain1.transport.refresh_from_db()
self.assertEqual(self.domain1.name, data["name"])
self.assertEqual(
self.domain1.transport.next_hop, "[{}]:{}".format(
settings["relay_target_host"], settings["relay_target_port"])
)
def test_delete(self):
"""Test delete service."""
url = reverse("api:relaydomain-detail", args=[self.domain1.pk])
response = self.client.delete(url)
self.assertEqual(response.status_code, 204)
with self.assertRaises(admin_models.Domain.DoesNotExist):
self.domain1.refresh_from_db()
self.assertFalse(
tr_models.Transport.objects.filter(pattern="test.com").exists())
|
from django.urls import re_path
from elections.views.sync import get_election_fixture
from .views import (
CONDITION_DICT,
FORMS,
AllElectionsView,
ElectionTypesView,
IDCreatorWizard,
ReferenceDefinitionView,
SingleElection,
)
id_creator_wizard = IDCreatorWizard.as_view(
FORMS,
url_name="id_creator_step",
done_step_name="home",
condition_dict=CONDITION_DICT,
)
urlpatterns = [
re_path(
r"^election_types/$",
ElectionTypesView.as_view(),
name="election_types_view",
),
re_path(
r"^reference_definition/$",
ReferenceDefinitionView.as_view(),
name="reference_definition_view",
),
re_path(r"^elections/$", AllElectionsView.as_view(), name="elections_view"),
re_path(
r"^elections/(?P<election_id>.+)/$",
SingleElection.as_view(),
name="single_election_view",
),
re_path(
r"^id_creator/(?P<step>.+)/$", id_creator_wizard, name="id_creator_step"
),
re_path(r"^id_creator/$", id_creator_wizard, name="id_creator"),
re_path(r"^sync/$", get_election_fixture),
]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 13:03:35 2020
@author: Ayax
"""
import pandas as pd
import numpy as np
datos = pd.read_csv('crx.data', sep=',',header=None)
datos = datos.replace(np.nan, '0')
datos = datos.replace('?', '10')
print(datos)
from sklearn.preprocessing import LabelEncoder
encoder=LabelEncoder()
datos['columna1']=encoder.fit_transform(datos[1].values)
arreglo = np.array(datos[4])
datos[4] = np.where(arreglo == '10', 'g', arreglo)
datos['columna4']=encoder.fit_transform(datos[4].values)
datos['columna12']=encoder.fit_transform(datos[12].values)
#print(datos.columna1.unique())
#print(datos.columna4.unique())
#print(datos.columna12.unique())
# Input data
X=datos[['columna1','columna4','columna12']]
#X=datos[['columna1','columna4']]
#X=datos['columna1']
#print(X)
# Output data
y=datos['columna12']
#y=datos['columna4']
#print(y)
# How much data goes to training and how much to testing (80-20 split)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Neural network
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(hidden_layer_sizes=(100,), max_iter=500, alpha=1e-5, solver='lbfgs', random_state=300,tol=1e-2)
# Training
mlp.fit(X_train,y_train)
predictions=mlp.predict(X_test)
from sklearn.metrics import confusion_matrix
matriz = confusion_matrix(y_test, predictions)
print(matriz)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from mock import patch
from wadebug.wa_actions.curl_utils import (
CURLExitCode,
CURLTestResult,
https_get_request_from_container,
)
class MockContainer:
def exec_run(self):
pass
mock_container = MockContainer()
class TestCurlUtils(unittest.TestCase):
@patch.object(mock_container, "exec_run", return_value=(CURLExitCode.OK, b"404:1"))
def test_request_should_return_non_http_ok_if_status_code_not_200(self, *_):
result, response_time = https_get_request_from_container(
mock_container, "url", "timeout"
)
assert result == CURLTestResult.HTTP_STATUS_NOT_OK
@patch.object(mock_container, "exec_run", return_value=(CURLExitCode.OK, b"200:1"))
def test_request_should_return_ok_if_http_200_exit_code_0(self, *_):
result, response_time = https_get_request_from_container(
mock_container, "url", "timeout"
)
assert result == CURLTestResult.OK
@patch.object(
mock_container, "exec_run", return_value=(CURLExitCode.TIMEOUT, b"200:1")
)
def test_request_should_return_timeout_if_exit_code_28(self, *_):
result, response_time = https_get_request_from_container(
mock_container, "url", "timeout"
)
assert result == CURLTestResult.CONNECTION_TIMEOUT
@patch.object(
mock_container,
"exec_run",
return_value=(CURLExitCode.SSL_CERT_UNKNOWN, b"200:1"),
)
def test_request_should_return_cert_unknown_if_exit_code_60(self, *_):
result, response_time = https_get_request_from_container(
mock_container, "url", "timeout"
)
assert result == CURLTestResult.SSL_CERT_UNKNOWN
|
#!/usr/bin/python3
#import main mongo connect class
from joiningSegments.mongoConnect import mongoConn
config = {
"host" : "127.0.0.1",
"port" : 27017,
"db" : "testing"
}
print (config["db"])
if __name__ == "__main__":
"""make new monogo conn"""
dbConn = mongoConn(config["host"], config["port"])
dbConn.getDbHandle(config["db"])
dbConn.selectDistinct("users")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt, getdate, nowdate, now_datetime
from frappe import msgprint, _
from datetime import date
import json
class SkillMapping(Document):
def update_skill_mapping_details(self, args):
self.set('skill_mapping_details', [])
for data in args.get('data'):
if data.get('industry')!=None:
nl = self.append('skill_mapping_details',{})
nl.skill = data.get('master_industry')
nl.sub_skill = data.get('industry')
nl.beginner = data.get('beginner')
nl.imtermediatory = data.get('imtermediatory')
nl.expert = data.get('expert')
nl.none_field = data.get('none_field')
self.save()
frappe.msgprint("Skill Mapping Details Saved")
def validate(self):
pass
@frappe.whitelist()
def get_sample_data():
return {
"get_sample_data": frappe.db.sql("""select skill_matrix_18,sub_skill,1 from `tabSkill Matrix 120` order by skill_matrix_18 asc, sub_skill asc""", as_list=1)
}
@frappe.whitelist()
def get_sample_data_from_table(doc_name):
return {
"get_sample_data": frappe.db.sql("""select skill,sub_skill,none_field,beginner,imtermediatory,expert from `tabSkill Mapping Details` where parent='%s' order by skill asc, sub_skill asc"""%doc_name, as_list=1)
}
|
import os
import tarfile
from six.moves import urllib
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import hashlib
from sklearn.datasets import fetch_mldata
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, cross_val_predict
from sklearn.preprocessing import Imputer, OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.linear_model import LinearRegression, SGDClassifier
from sklearn.metrics import mean_squared_error, confusion_matrix, precision_score, recall_score, f1_score, precision_recall_curve, roc_curve, roc_auc_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
Download_root = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
Housing_path = os.path.join("datasets","housing")
Housing_url = Download_root + "datasets/housing/housing.tgz"
def fetch_housing__data(housing_url = Housing_url, housing_path = Housing_path):
if not os.path.isdir(housing_path): # if the file doesnt exist
os.makedirs(housing_path)
print('creating the file datasets/housing')
tgz_path = os.path.join(housing_path, "housing.tar")
urllib.request.urlretrieve(housing_url,tgz_path) # download the zip file
housing_tgz = tarfile.open(tgz_path) # go in the right directory
housing_tgz.extractall(path=housing_path) # and extract the file in the new file
housing_tgz.close()
def load_housing_data(housing_path = Housing_path):
csv_path = os.path.join(housing_path,"housing.csv")
return pd.read_csv(csv_path)
######################## DATA ANALYSIS #####################################
housing = load_housing_data()
## QUICK LOOK
housing.head()
housing.info()
housing.ocean_proximity.value_counts()
housing.describe()
housing.hist(bins=50)
plt.show()
## CREATE A TEST SET
# option 1 (simple): but the split is different on every run, so over time the algorithm ends up being trained on the whole dataset
def split_train_test(data, ratio):
suffled_indices = np.random.permutation(len(data))
test_set_size = int(len(data)*ratio)
test_indices = suffled_indices[:test_set_size]
train_indices = suffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
train_set, test_set = split_train_test(housing, 0.2)
# option 2 (more complex): hash-based split, so each row always lands in the same set
def test_set_check(identifier, ratio, hash):
    return hash(np.int64(identifier)).digest()[-1] < 256 * ratio  # keep the row if its last hash byte falls below 256 * ratio
def split_train_test_by_id(data, ratio, id_column, hash=hashlib.md5):
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, ratio, hash))
print(in_test_set)
return data.loc[~in_test_set], data[in_test_set]
housing = housing.reset_index() # to add the index as the id column
train_set, test_set = split_train_test_by_id(housing, 0.2, "index")
print(str(len(train_set)) + " in the train set and " + str(len(test_set)) + " in the test set")
# option 3 with sklearn
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
# create a income category to check
housing['income_cat'] = np.ceil(housing.median_income/1.5)
housing.income_cat.unique()
housing.income_cat.where(housing.income_cat < 5, 5, inplace=True)  # cap all categories above 5 at 5
housing.income_cat.unique()
housing.income_cat.hist()
plt.show()
# option 2 with sklearn (stratified)
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing.income_cat):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
strat_test_set["income_cat"].value_counts() / len(strat_test_set)
# remove the income category column to get back to the original data
for set_ in (strat_train_set, strat_test_set):
    print(set_)
    set_.drop("income_cat", axis=1, inplace=True)
## DATA VIZUALISATION
housing = test_set.copy()
housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4, s=housing.population/100, label='population',
c='median_house_value', cmap=plt.get_cmap('jet'), colorbar=True)
## CORRELATION
corr_matrix = housing.corr()
corr_matrix.median_house_value.sort_values(ascending=False)
attributes = ['median_house_value', 'median_income', 'total_rooms', 'housing_median_age']
scatter_matrix(housing[attributes], figsize =(2,8))
housing.plot(kind='scatter', x='median_income', y='median_house_value')
# combination of attributes
housing['rooms_per_household'] = housing.total_rooms/housing.households
corr_matrix = housing.corr()
corr_matrix.median_house_value.sort_values(ascending=False)
## PREP FOR ML
housing = strat_train_set.drop('median_house_value', axis=1)
housing_label = strat_train_set['median_house_value'].copy()
#Data cleaning
# 3 ways to deal with missing values
housing.dropna(subset=["total_bedrooms"]) # get rid of lines with missing values
housing.drop("total_bedrooms", axis=1) # get rif of the attribute
mean = housing.total_bedrooms.mean()
housing.total_bedrooms.fillna(mean, inplace=True) # replace with median
# same with sk learn
housing_num = housing.drop('ocean_proximity', axis=1)
imputer =Imputer(strategy="median")
imputer.fit(housing_num)# Imputer can only be run on numerical dataframe
imputer.statistics_
housing_num.median().values
X = imputer.transform(housing_num) # numpy
housing_tr= pd.DataFrame(X, columns=housing_num.columns)
# Text category
housing_cat = housing.ocean_proximity
encoder = OneHotEncoder()
housing_cat_encoded, housing_categories = housing_cat.factorize() # convert categories into integers
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1,1)) # -1 is the length of the 1D dimension given
binarycat = pd.DataFrame(housing_cat_1hot.toarray(), columns=housing_categories)
housing_notext = pd.concat([housing.drop('ocean_proximity', axis=1).reset_index(drop=True), binarycat], axis=1)
housing = housing_notext
# Custom Transformers
room_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
"""Herite des 2 classes de sklearn"""
def __init__(self, add_bedrooms_per_room = True): # no *argpr **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self # nothing else to do
def transform(self, X, y=None):
rooms_per_household = X[:, room_ix] / X[:, household_ix]
population_per_household = X[:, population_ix]/ X[:, household_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room= X[:, bedrooms_ix ] / X[:, room_ix]
return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
else:
return np.c_[X, rooms_per_household, population_per_household]
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
# Feature scaling of the training data
# Normalization (min-max scaling): (x - min) / (max - min)
#fit_transform(X[, y]) Fit to data, then transform it.
scaler = MinMaxScaler()
housing_norm = scaler.fit_transform(housing)
housing_norm = pd.DataFrame(housing_norm, columns=housing.columns)
print(scaler.data_max_)
print(scaler.data_min_)
housing_norm.max()
# Standardize: (x - mean) / std
scaler = StandardScaler()
housing_norm = scaler.fit_transform(housing)
housing_norm = pd.DataFrame(housing_norm, columns=housing.columns)
housing_norm.max() # not working with the binary variable
scatter_matrix(housing_num)
plt.style.use('seaborn-white')
housing_norm.hist(bins=np.arange(0,1.2,0.2), ec='w')
# Tranformation Pipelines
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
##################### SUM UP ###############
housing = load_housing_data()
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
housing = train_set.drop('median_house_value', axis=1)
housing_label = train_set['median_house_value'].copy()
housing_num = housing.drop('ocean_proximity', axis=1)
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
num_pipeline = Pipeline([
('selector', DataFrameSelector(num_attribs)),
('imputer', Imputer(strategy='median')),
('attributs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
])
cat_pipeline = Pipeline([
('selector', DataFrameSelector(cat_attribs)),
('cat_encoder', OneHotEncoder()),
])
full_pipeline = FeatureUnion(transformer_list=[
("num_pipeline", num_pipeline),
("cat_pipeline", cat_pipeline),
])
# # another way to build a pipeline
# model = make_pipeline(Imputer(strategy='mean'),
# PolynomialFeatures(degree=2),
# LinearRegression())
housing_prepared = full_pipeline.fit_transform(housing)
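# full_pipeline returns a sparse matrix: the scaled numerical attributes (plus the
# extra columns added by CombinedAttributesAdder) from num_pipeline, concatenated
# with the one-hot encoded ocean_proximity columns from cat_pipeline.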
col = list(housing.columns) + list(housing.ocean_proximity.unique())
housing_prepared_pd = pd.DataFrame(housing_prepared.toarray(), columns=col)
### SELECT AND TRAIN A MODEL
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_label)
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_label, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse) # underfitting!
# 2nd model: Decision Tree
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_label)
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_label, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
print(tree_rmse) # overfitting !
# Cross Validation
#K-fold cross-validation
scores = cross_val_score(tree_reg, housing_prepared, housing_label, scoring="neg_mean_squared_error", cv=10)
tree_rmse_scores = np.sqrt(-scores) # utility function and not cost
# Save your models
joblib.dump(tree_reg, "treemodel.pkl")
model = joblib.load("treemodel.pkl")
# See where the model is wrong (confusion_matrix expects (y_true, y_pred) and only
# applies to classification, so it cannot be called on the housing frame directly)
# mat = confusion_matrix(housing)
## FINE TUNE YOUR MODELS
# Grid search
## Once you selected the ifnal model, Evaluate model en test set
X_test = test_set.drop("median_house_value", axis=1)
y_test = test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test) # AND NOT FIT !!
final_predictions = lin_reg.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
|
from urllib.request import urlopen
url = "http://olympus.realpython.org/profiles/aphrodite"
page = urlopen(url)
html_bytes = page.read()
html = html_bytes.decode("utf-8")
tag = {"title":"", "head":"", "html":"", "body":""}
for i in tag:
tag_startindex = html.find("<" + i + ">") + len("<" + i +">")
tag_endindex = html.find("</" + i + ">")
tag[i] = html[tag_startindex:tag_endindex]
for i, j in tag.items():
print(i + " is " + j)
|
#!/usr/bin/env python
#
# Copyright (c) 2011 Polytechnic Institute of New York University
# Author: Adrian Sai-wah Tam <adrian.sw.tam@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of New York University.
#
# This program takes two input files: (1) a Rocketfuel format topology file and
# (2) a flow file in the format of
# <node> <node> <load> <begin> <end>
# where the <node> is the code correspond to the topology file and the <load>
# is a floating point number, <begin> and <end> are the time for this flow to
# start and finish.
#
# The program places these flows into the network using flow-based ECMP, i.e.
# each flow will only take one path, but the path is selected at random amongst
# all equal-cost shortest paths. The program simulates the flows'
# arrival/departure as discrete events and outputs the change of link loads
# against time.
#
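# Illustrative input sketch (hypothetical file contents, following the format parsed
# by ReadInput below; node names, link lengths/capacities and flow values are made up):
#
#   topology.txt:
#     N a
#     N b
#     N c
#     l a b 1 10
#     l b c 1 10
#
#   flow.txt:
#     a c 2.5 0.0 5.0
#     b c 1.0 1.0 4.0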
import getopt,sys,random,heapq,functools  # functools is needed by memoized.__get__
###########################################################
# Global parameters
topofile = 'topology.txt' # default topology file
flowfile = 'flow.txt' # default flow specification file
digraph = False # topology specification is a digraph
#random.seed(1) # Debug use: Uncomment this line for repeatible random numbers
optlist, userlist = getopt.getopt(sys.argv[1:], 't:f:dsh')
for opt, optarg in optlist:
if opt == '-t':
topofile = optarg
elif opt == '-f':
flowfile = optarg
elif opt == '-d':
digraph = True
else:
# getopt will fault for other options
print "Available options"
print " -t file : The topology file in Rocketfuel format, default is topology.txt"
print " -f file : The flow file, default is flow.txt"
print " -d : Treat the topology file as digraph, i.e. each link is unidirectional"
print " -h : This help message"
sys.exit(1)
###########################################################
# Helper functions
def ReadInput(f1, f3):
"""
Read in a Rocketfuel format topology file, and then the flow specification.
We assumed the link specification contains at least the two endpoints
refered by the name of nodes. Optionally, the 3rd and 4th argument in
the link specification are the length and capacity respectively. This
optional part is not in the Rocketfuel file.
The flow specification is in the following format:
<source> <destination> <load> <begin> <end>
    meaning that the flow from source to destination begins and ends at a certain
    time (number of seconds since start) and is of a certain
    load. The flow can only be routed on one path, no splitting allowed.
"""
print "Reading input file %s" % f1
topoFile = open(f1, "r") # Topology file
nodes = [] # names of nodes
links = [] # links as an ordered pair of node IDs
length = [] # lengths of links
capacity = [] # link capacities
nodeDic = {} # reverse lookup for node ID
for line in topoFile:
token = line.split()
if (len(token) < 2): continue
if token[0] == "N": # specifying a node by its name
            nodeDic[token[1]] = len(nodes)  # index this node will get once appended below
nodes.append(token[1])
elif token[0] == "l": # specifying a link as a connection between two nodes
e = (nodeDic[token[1]], nodeDic[token[2]])
links.append(e)
            length.append(1 if len(token) < 4 else float(token[3]))
            capacity.append(1 if len(token) < 5 else float(token[4]))
if not digraph:
links.append((e[1],e[0]))
length.append(length[-1])
capacity.append(capacity[-1])
topoFile.close()
print "Reading input file %s" % f3
flowFile = open(f3, "r") # Flow history file
flows = [] # flow specs (src,dst,size,begin,end)
events = [] # flow arrival/departure events (time, flowID, isArrival)
for line in flowFile:
token = line.split()
if (len(token) != 5): continue # Not a flow specification
begin, end = float(token[3]), float(token[4])
if end == begin: continue # Skip this malformed flow
heapq.heappush(events, (begin, len(flows), True))
heapq.heappush(events, (end, len(flows), False))
spec = (nodeDic[token[0]], nodeDic[token[1]], float(token[2]), begin, end)
flows.append(spec)
flowFile.close()
return nodes, links, length, capacity, flows, events
class memoized(object):
"""
Copied from http://wiki.python.org/moin/PythonDecoratorLibrary
Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned,
and not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
@memoized
def BellmanFord(t):
"""
Use Bellman-Ford to deduce the shortest path tree of any node to t
"""
d = [float('inf') for i in nodes] # Shortest distance to t
n = [-1 for i in nodes] # Next hop toward t
d[t] = 0
for i in range(len(nodes)-1):
nochange = True
for j,(u,v) in enumerate(links):
if d[u] > d[v] + length[j]:
nochange = False
d[u] = d[v] + length[j]
n[u] = v
if nochange: break
return n,d
###########################################################
# Step 1:
# Read in data
nodes, links, length, capacity, flows, events = ReadInput(topofile, flowfile)
###########################################################
# Step 2:
# Exhaust the event list to establish/remove a flow on the network, and in
# the meantime, print the link load if there is any change
clock = 0.0
linkload = [0 for l in links]
flowpaths = {} # Dictionary for flow:->set_of_links mapping
for e,l in enumerate(linkload):
# print initial link load
print "%f\t%d\t%f" % (clock, e, l)
while events:
time, fid, arrival = heapq.heappop(events)
if arrival:
# Find a path for this flow on the tree generated by Bellman-Ford
tree, dist = BellmanFord(flows[fid][1])
currentnode = flows[fid][0]
path = []
clock = time
while currentnode != flows[fid][1]:
# Find a random next hop on the shortest paths
neighbour = list(set(e[1] for e in links if e[0]==currentnode))
mindist = min(dist[i] for i in neighbour)
minneighbour = [i for i in neighbour if dist[i] == mindist]
nextnode = random.choice(minneighbour)
# Then look up the link, and distribute traffic to it
linkid = [i for i,e in enumerate(links) if e == (currentnode, nextnode)]
path.append(linkid[0])
linkload[linkid[0]] += flows[fid][2]
            # Print the updated link load
print "%f\t%d\t%f" % (clock, linkid[0], linkload[linkid[0]])
currentnode = nextnode
# Remember the path
flowpaths[fid] = path
else:
# Retrieve the path for this flow
path = flowpaths.pop(fid)
clock = time
# For each link in the path, decrease the load
for l in path:
linkload[l] -= flows[fid][2]
print "%f\t%d\t%f" % (clock, l, linkload[l])
sys.exit(1)
|
from turtle import*
setup(800,800)
pendown()
seth(0)
fd(150)
seth(90)
fd(150)
seth(180)
fd(150)
seth(270)
fd(150)
done()
|
class Check:
def __init__(self, item_name, location, email):
self.item_name = item_name
self.location = location
self.email = email
def dictify(self):
return {
'item_name' : self.item_name,
'location' : self.location,
'email' : self.email
}
def adapt(self, li):
self.item_name = li[0]
self.location = li[1]
self.email = li[2]
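# Illustrative usage (added; the values below are examples only):
#
#     c = Check('umbrella', 'lobby', 'owner@example.com')
#     c.adapt(['laptop', 'lab', 'admin@example.com'])
#     c.dictify()  # -> {'item_name': 'laptop', 'location': 'lab',
#                  #     'email': 'admin@example.com'}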
|
import re
from datetime import timedelta
from django import forms
from django.db.models import Q, F
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.postgres.search import SearchQuery, SearchRank
from core.forms.widgets import (AutocompleteSelectMultiple,
MultipleChoiceFilterWidget)
from backers.models import Backer
from geofr.models import Perimeter
from geofr.forms.fields import PerimeterChoiceField
from tags.models import Tag
from tags.fields import TagChoiceField
from aids.models import Aid
AID_TYPES = (
(_('Financial aids'), (
('grant', _('Grant')),
('loan', _('Loan')),
('recoverable_advance', _('Recoverable advance')),
('interest_subsidy', _('Interest subsidy')),
)),
(_('Technical and methodological aids'), (
('guidance', _('Guidance')),
('networking', _('Networking')),
('valorisation', _('Valorisation')),
)),
)
class BaseAidForm(forms.ModelForm):
tags = TagChoiceField(
label=_('Tags'),
choices=list,
required=False)
class Meta:
widgets = {
'mobilization_steps': forms.CheckboxSelectMultiple,
'targeted_audiances': forms.CheckboxSelectMultiple,
'aid_types': forms.CheckboxSelectMultiple,
'destinations': forms.CheckboxSelectMultiple,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['aid_types'].choices = AID_TYPES
# We set the existing tags as the `choices` value so the existing
# tags will be displayed in the widget
all_tags = self.instance.tags
if self.is_bound:
if hasattr(self.data, 'getlist'):
all_tags += self.data.getlist('tags')
else:
all_tags += self.data.get('tags', [])
self.fields['tags'].choices = zip(all_tags, all_tags)
custom_labels = {
'name': _('Aid title'),
'targeted_audiances': _('Who can apply to this aid?'),
'backers': _('Aid backers'),
'destinations': _('The aid is destined to…'),
            'eligibility': _('Are there any other eligibility criteria?'),
'url': _('Link to a full description'),
'application_url': _('Link to an online application form'),
'contact_detail': _('Name of a contact in charge'),
'contact_email': _('E-mail address of a contact in charge'),
'contact_phone': _('Phone number of a contact in charge'),
}
for field, label in custom_labels.items():
self.fields[field].label = label
custom_help_text = {
'tags': _('Add up to 16 keywords to describe your aid'
' (separated by ",")'),
}
for field, help_text in custom_help_text.items():
self.fields[field].help_text = help_text
def _save_m2m(self):
super()._save_m2m()
self._save_tag_relations()
def _save_tag_relations(self):
"""Updtate the m2m keys to tag objects.
Tag that do not exist must be created.
"""
all_tag_names = self.instance.tags
existing_tag_objects = Tag.objects.filter(name__in=all_tag_names)
existing_tag_names = [tag.name for tag in existing_tag_objects]
missing_tag_names = list(set(all_tag_names) - set(existing_tag_names))
new_tags = [Tag(name=tag) for tag in missing_tag_names]
new_tag_objects = Tag.objects.bulk_create(new_tags)
all_tag_objects = list(existing_tag_objects) + list(new_tag_objects)
self.instance._tags_m2m.set(all_tag_objects, clear=True)
class AidAdminForm(BaseAidForm):
"""Custom Aid edition admin form."""
class Media:
js = ['admin/js/tags_autocomplete.js']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['tags'].widget.attrs['class'] = 'admin-autocomplete'
class AidSearchForm(forms.Form):
"""Main form for search engine."""
AID_CATEGORY_CHOICES = (
('', ''),
('funding', _('Funding')),
('non-funding', _('Non-funding')),
)
SCALES = (
(1, _('Commune')),
(5, _('EPCI')),
(10, _('Department')),
(15, _('Region')),
(20, _('France')),
(25, _('Europe')),
)
perimeter = PerimeterChoiceField(
label=_('Perimeter'),
required=False)
text = forms.CharField(
label=_('Text search'),
required=False)
    # We use a multiple choice field so the filter rendering remains
    # consistent with the other filters
recent_only = forms.MultipleChoiceField(
label=_('Recent aids'),
choices=(
('yes', _('Only display aids created less than 30 days ago')),),
required=False,
widget=MultipleChoiceFilterWidget)
apply_before = forms.DateField(
label=_('Apply before…'),
required=False,
widget=forms.TextInput(
attrs={'type': 'date', 'placeholder': _('yyyy-mm-dd')}))
aid_types = forms.MultipleChoiceField(
label=_('Aid type'),
required=False,
choices=AID_TYPES,
widget=MultipleChoiceFilterWidget)
mobilization_step = forms.MultipleChoiceField(
label=_('When to mobilize the aid?'),
required=False,
choices=Aid.STEPS,
widget=MultipleChoiceFilterWidget)
destinations = forms.MultipleChoiceField(
label=_('Destinations'),
required=False,
choices=Aid.DESTINATIONS,
widget=MultipleChoiceFilterWidget)
scale = forms.MultipleChoiceField(
label=_('Diffusion'),
required=False,
choices=SCALES,
widget=MultipleChoiceFilterWidget)
# This field is not related to the search, but is submitted
# in views embedded through an iframe.
integration = forms.CharField(
required=False,
widget=forms.HiddenInput)
def clean_zipcode(self):
zipcode = self.cleaned_data['zipcode']
        if zipcode and re.match(r'\d{5}', zipcode) is None:
msg = _('This zipcode seems invalid')
raise forms.ValidationError(msg)
return zipcode
def filter_queryset(self, qs):
"""Filter querysets depending of input data."""
if not self.is_bound:
return qs
        # Accessing self.errors triggers validation and populates cleaned_data
        if self.errors:
            pass
perimeter = self.cleaned_data.get('perimeter', None)
if perimeter:
qs = self.perimeter_filter(qs, perimeter)
mobilization_steps = self.cleaned_data.get('mobilization_step', None)
if mobilization_steps:
qs = qs.filter(mobilization_steps__overlap=mobilization_steps)
aid_types = self.cleaned_data.get('aid_types', None)
if aid_types:
qs = qs.filter(aid_types__overlap=aid_types)
destinations = self.cleaned_data.get('destinations', None)
if destinations:
qs = qs.filter(destinations__overlap=destinations)
scale = self.cleaned_data.get('scale', None)
if scale:
qs = qs.filter(perimeter__scale__in=scale)
apply_before = self.cleaned_data.get('apply_before', None)
if apply_before:
qs = qs.filter(submission_deadline__lt=apply_before)
text = self.cleaned_data.get('text', None)
if text:
query = SearchQuery(text, config='french')
qs = qs \
.filter(search_vector=query) \
.annotate(rank=SearchRank(F('search_vector'), query))
recent_only = self.cleaned_data.get('recent_only', False)
if recent_only:
a_month_ago = timezone.now() - timedelta(days=30)
qs = qs.filter(date_created__gte=a_month_ago.date())
return qs
def order_queryset(self, qs):
"""Set the order value on the queryset.
        We sort results by perimeter scale, unless the user submitted a
        search query, in which case we sort by query relevance.
"""
text = self.cleaned_data.get('text', None)
if text:
qs = qs.order_by(
'-rank', 'perimeter__scale', 'submission_deadline')
else:
qs = qs.order_by('perimeter__scale', 'submission_deadline')
return qs
def perimeter_filter(self, qs, perimeter):
"""Filter queryset depending on the given perimeter.
When we search for a given perimeter, we must return all aids:
- where the perimeter is wider and contains the searched perimeter ;
- where the perimeter is smaller and contained by the search
perimeter ;
        E.g. if we search for aids in "Hérault" (department), we must display all
aids that are applicable to:
- Hérault ;
- Occitanie ;
- France ;
- Europe ;
- M3M (and all other epcis in Hérault) ;
- Montpellier (and all other communes in Hérault) ;
"""
        # Since we only handle French aids, searching for European or
        # national aids will return all results
if perimeter.scale in (Perimeter.TYPES.country,
Perimeter.TYPES.continent):
return qs
# Exclude all other perimeters from the same scale.
# E.g We search for aids in "Herault", exclude all aids from other
# departments.
q_same_scale = Q(perimeter__scale=perimeter.scale)
q_different_code = ~Q(perimeter__code=perimeter.code)
qs = qs.exclude(q_same_scale & q_different_code)
# Exclude all perimeters that are more granular and that are not
# contained in the search perimeter.
# E.g we search for aids in "Hérault", exclude communes and epcis that
# are not in Hérault.
if perimeter.scale > Perimeter.TYPES.commune:
q_smaller_scale = Q(perimeter__scale__lt=perimeter.scale)
if perimeter.scale == Perimeter.TYPES.region:
q_not_contained = ~Q(
perimeter__regions__contains=[perimeter.code])
if perimeter.scale == Perimeter.TYPES.department:
q_not_contained = ~Q(
perimeter__departments__contains=[perimeter.code])
if perimeter.scale == Perimeter.TYPES.basin:
# Edge case, when we search by drainage basins, don't
# show aids from departments and regions, because that poorly
# overlaps.
qs = qs.exclude(perimeter__scale__in=(
Perimeter.TYPES.department,
Perimeter.TYPES.region))
q_not_contained = ~Q(perimeter__basin=perimeter.code)
if perimeter.scale == Perimeter.TYPES.epci:
q_not_contained = ~Q(perimeter__epci=perimeter.code)
qs = qs.exclude(q_smaller_scale & q_not_contained)
        # Exclude all perimeters that are wider and that do not
        # contain our search perimeter.
# E.g we search for aids in "Hérault", exclude regions that are not
# Occitanie.
if perimeter.regions:
q_scale_region = Q(perimeter__scale=Perimeter.TYPES.region)
q_different_region = ~Q(perimeter__code__in=perimeter.regions)
qs = qs.exclude(q_scale_region & q_different_region)
if perimeter.departments:
q_scale_department = Q(perimeter__scale=Perimeter.TYPES.department)
q_different_department = ~Q(
perimeter__code__in=perimeter.departments)
qs = qs.exclude(q_scale_department & q_different_department)
if perimeter.basin:
q_scale_basin = Q(perimeter__scale=Perimeter.TYPES.basin)
q_different_basin = ~Q(perimeter__code=perimeter.basin)
qs = qs.exclude(q_scale_basin & q_different_basin)
if perimeter.epci:
q_scale_epci = Q(perimeter__scale=Perimeter.TYPES.epci)
q_different_epci = ~Q(perimeter__code=perimeter.epci)
qs = qs.exclude(q_scale_epci & q_different_epci)
return qs
class AidEditForm(BaseAidForm):
backers = forms.ModelMultipleChoiceField(
label=_('Backers'),
queryset=Backer.objects.all(),
widget=AutocompleteSelectMultiple)
perimeter = PerimeterChoiceField(
label=_('Perimeter'))
class Meta(BaseAidForm.Meta):
model = Aid
fields = [
'name',
'description',
'tags',
'targeted_audiances',
'backers',
'recurrence',
'start_date',
'predeposit_date',
'submission_deadline',
'perimeter',
'aid_types',
'subvention_rate',
'mobilization_steps',
'destinations',
'eligibility',
'application_url',
'url',
'contact_detail',
'contact_email',
'contact_phone',
]
widgets = {
'description': forms.Textarea(attrs={'rows': 3}),
'eligibility': forms.Textarea(attrs={'rows': 3}),
'mobilization_steps': MultipleChoiceFilterWidget,
'targeted_audiances': MultipleChoiceFilterWidget,
'aid_types': MultipleChoiceFilterWidget,
'destinations': MultipleChoiceFilterWidget,
'start_date': forms.TextInput(
attrs={'type': 'date', 'placeholder': _('yyyy-mm-dd')}),
'predeposit_date': forms.TextInput(
attrs={'type': 'date', 'placeholder': _('yyyy-mm-dd')}),
'submission_deadline': forms.TextInput(
attrs={'type': 'date', 'placeholder': _('yyyy-mm-dd')}),
}
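# Illustrative usage sketch (added): assuming a configured Django project,
# the search form is fed request GET data and then applied to an Aid
# queryset, e.g.
#
#     form = AidSearchForm({'text': 'transport', 'aid_types': ['grant']})
#     if form.is_valid():
#         qs = form.order_queryset(form.filter_queryset(Aid.objects.all()))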
|
x = float(input())
print((x % 1), "\n", int(x % 1 * 10))
|
# -*- coding: utf-8 -*-
# Decimal to hexadecimal conversion
a=int(input())
print(format(a,"X"))
|
# from nipype.interfaces import fsl
from nipype.interfaces.fsl import (FLIRT, FAST, ConvertXFM, ImageMaths)
import nibabel as nib
import numpy as np
from scipy.ndimage.morphology import binary_erosion as erode
from nipype.pipeline import Node, Workflow
from nipype.interfaces.utility import IdentityInterface, Function
from nipype.interfaces.afni import Resample
def get_wf_tissue_priors(name='wf_tissue_priors3'):
'''
    This function returns a workflow that resamples the tissue priors and then thresholds them at 0.5
'''
# csf_tissue_prior_path, gm_tissue_prior_path, wm_tissue_prior_path,
# threshold = 0.5
wf_tissue_priors = Workflow(name=name)
inputspec = Node(IdentityInterface(fields=['csf_tissue_prior_path', 'wm_tissue_prior_path',
'threshold','std2func_mat_path', 'reference_func_file_path']),
name="inputspec")
'''
# 'gm_tissue_prior_path',
resample_tissue_prior_csf = Node(Resample(voxel_size=(3, 3, 3), resample_mode='Cu', # cubic interpolation
outputtype='NIFTI'),
name="resample_tissue_prior_csf")
# resample_tissue_prior_gm = Node(Resample(voxel_size=(3, 3, 3), resample_mode='Cu', # cubic interpolation
# outputtype='NIFTI'),
# name="resample_tissue_prior_gm")
resample_tissue_prior_wm = Node(Resample(voxel_size=(3, 3, 3), resample_mode='Cu', # cubic interpolation
outputtype='NIFTI'),
name="resample_tissue_prior_wm")
wf_tissue_priors.connect(inputspec, 'csf_tissue_prior_path', resample_tissue_prior_csf, 'in_file' )
# wf_tissue_priors.connect(inputspec, 'gm_tissue_prior_path', resample_tissue_prior_gm, 'in_file' )
wf_tissue_priors.connect(inputspec, 'wm_tissue_prior_path', resample_tissue_prior_wm, 'in_file' )
'''
# # Invert the func2anat matrix to get anat2func
# inv_mat = Node(ConvertXFM(invert_xfm=True), name='inv_mat')
# wf_tissue_priors.connect(inputspec, 'func2anat_mat_path', inv_mat, 'in_file')
# Transform the tissue priors to the functional space using the inverse matrix
std2func_xform_csf_prior = Node(FLIRT(output_type='NIFTI',
apply_xfm=True, interp='sinc'), name='std2func_xform_csf_prior')
wf_tissue_priors.connect(inputspec, 'reference_func_file_path', std2func_xform_csf_prior, 'reference')
wf_tissue_priors.connect(inputspec, 'std2func_mat_path', std2func_xform_csf_prior, 'in_matrix_file')
std2func_xform_wm_prior = Node(FLIRT(output_type='NIFTI',
apply_xfm=True, interp='sinc'), name='std2func_xform_wm_prior')
wf_tissue_priors.connect(inputspec, 'reference_func_file_path', std2func_xform_wm_prior, 'reference')
wf_tissue_priors.connect(inputspec, 'std2func_mat_path', std2func_xform_wm_prior, 'in_matrix_file')
    # Transform the priors:
    # connect the in_file(s) of std2func_xform_csf and std2func_xform_wm from the inputspec
wf_tissue_priors.connect(inputspec, 'csf_tissue_prior_path', std2func_xform_csf_prior, 'in_file')
wf_tissue_priors.connect(inputspec, 'wm_tissue_prior_path', std2func_xform_wm_prior, 'in_file')
# Threshold
def get_opstring(threshold, tissue_type):
if tissue_type == 'csf':
max = 216 # 216 is the highest intensity of the resampled afni output for CSF
elif tissue_type == 'wm':
max = 253 # 253 is the highest intensity of the resampled afni output for WM
threshold = int(threshold * max)
op = '-thr '+str(threshold)+' -bin'
return op
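    # Illustrative note (added): get_opstring(0.5, 'csf') returns
    # '-thr 108 -bin', since int(0.5 * 216) == 108; the thresholded
    # image is then binarised by the '-bin' flag.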
# ----- CSF ------
threshold_csf = Node(interface=ImageMaths(suffix='_thresh'),
name='threshold_csf')
wf_tissue_priors.connect(inputspec, ('threshold', get_opstring, 'csf'), threshold_csf, 'op_string' )
wf_tissue_priors.connect(std2func_xform_csf_prior, 'out_file', threshold_csf, 'in_file')
# ------- GM --------
# threshold_gm = Node(interface=ImageMaths(suffix='_thresh'),
# name='threshold_gm')
# wf_tissue_priors.connect(inputspec, ('threshold', get_opstring), threshold_gm, 'op_string' )
# wf_tissue_priors.connect(resample_tissue_prior_gm, 'out_file', threshold_gm, 'in_file')
# -------- WM --------
threshold_wm = Node(interface=ImageMaths(suffix='_thresh'),
name='threshold_wm')
wf_tissue_priors.connect(inputspec, ('threshold', get_opstring, 'wm'), threshold_wm, 'op_string' )
wf_tissue_priors.connect(std2func_xform_wm_prior, 'out_file', threshold_wm, 'in_file')
# -------------------
outputspec = Node(IdentityInterface(fields=['csf_tissue_prior_path', 'wm_tissue_prior_path', 'threshold']),
name="outputspec")
# , 'gm_tissue_prior_path'
wf_tissue_priors.connect(threshold_csf, 'out_file', outputspec, 'csf_tissue_prior_path')
# wf_tissue_priors.connect(threshold_gm, 'out_file', outputspec, 'gm_tissue_prior_path')
wf_tissue_priors.connect(threshold_wm, 'out_file', outputspec, 'wm_tissue_prior_path')
return wf_tissue_priors
if __name__ == "__main__":
tissue_priors = get_wf_tissue_priors()
tissue_priors.inputs.inputspec.csf_tissue_prior_path = '/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/avg152T1_csf.nii.gz'
# tissue_priors.inputs.inputspec.gm_tissue_prior_path = '/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/avg152T1_brain.nii.gz'
tissue_priors.inputs.inputspec.wm_tissue_prior_path = '/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/avg152T1_white.nii.gz'
tissue_priors.inputs.inputspec.threshold = 0.5
# tissue_priors.inputs.inputspec.resampled_anat_file_path = \
# '/mnt/project1/home1/varunk/fMRI/testScripts/_subject_id_0050002/resample_anat/sub-0050002_T1w_brain_resample.nii'
tissue_priors.inputs.inputspec.reference_func_file_path = \
'/mnt/project1/home1/varunk/fMRI/testScripts/func_subject_id_0050002/applyMask/sub-0050002_task-rest_run-1_bold_roi_st_mcf.nii_brain.nii.gz'
tissue_priors.inputs.inputspec.std2func_mat_path = \
'/mnt/project1/home1/varunk/fMRI/results/resultsABIDE1/preprocess/'+\
'motion_correction_bet/coreg_reg/atlas_resize_reg_directory/_subject_id_0050002/'+\
'std2func_xform/fullbrain_atlas_thr0-2mm_resample_flirt.mat'
tissue_priors.base_dir = 'results/'
TEMP_DIR_FOR_STORAGE = 'crash_files/'
tissue_priors.config = {"execution": {"crashdump_dir": TEMP_DIR_FOR_STORAGE}}
tissue_priors.write_graph(graph2use='flat', format='png', simple_form=True)
out = tissue_priors.run()
|
from django.conf import settings
from django.shortcuts import redirect
from django.urls import reverse
from . import views as user_views
from django.core.cache import cache
from datetime import datetime
from django.contrib import auth
import time
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.views import redirect_to_login
EXEMPT_URLS = [reverse(settings.LOGIN_URL)]
if hasattr(settings, 'EXEMPT_URLS'):
EXEMPT_URLS += [reverse(url) for url in settings.EXEMPT_URLS]
class LoginRequiredMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
    def __call__(self, request):
        response = self.get_response(request)
        return response
def process_view(self, request, view_func, view_args, view_kwargs):
assert hasattr(request,'user')
path = request.path_info
url_is_exempt = any(url == path for url in EXEMPT_URLS)
if request.user.is_authenticated and url_is_exempt:
return redirect('users-home')
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
SESSION_TIMEOUT_KEY = "_session_init_timestamp_"
class SessionTimeoutMiddleware(MiddlewareMixin):
def process_request(self, request):
if not hasattr(request, "session") or request.session.is_empty():
return
init_time = request.session.setdefault(SESSION_TIMEOUT_KEY, time.time())
expire_seconds = getattr(
settings, "SESSION_EXPIRE_SECONDS", settings.SESSION_COOKIE_AGE
)
session_is_expired = time.time() - init_time > expire_seconds
if session_is_expired:
logout(request)
request.session.flush()
messages.info(request, "You have been logged out due to inactivity")
return redirect_to_login(next=request.path)
expire_since_last_activity = getattr(
settings, "SESSION_EXPIRE_AFTER_LAST_ACTIVITY", True
)
grace_period = getattr(
settings, "SESSION_EXPIRE_AFTER_LAST_ACTIVITY_GRACE_PERIOD", 1
)
if expire_since_last_activity and time.time() - init_time > grace_period:
request.session[SESSION_TIMEOUT_KEY] = time.time()
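# Illustrative settings (added): the names below are the ones read via
# settings/getattr() in the middlewares above; the values are examples only.
#
#     LOGIN_URL = 'login'
#     EXEMPT_URLS = ['register']
#     SESSION_EXPIRE_SECONDS = 1800            # falls back to SESSION_COOKIE_AGE
#     SESSION_EXPIRE_AFTER_LAST_ACTIVITY = True
#     SESSION_EXPIRE_AFTER_LAST_ACTIVITY_GRACE_PERIOD = 60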
|
from operator import truediv
def mean(arr):
return truediv(sum(arr), len(arr))
def get_mean(arr, x, y):
if min(x, y) < 2 or max(x, y) > len(arr):
return -1
return truediv(mean(arr[:x]) + mean(arr[-y:]), 2)
# # Python 3
# from statistics import mean
#
#
# def get_mean(arr, x, y):
# if min(x, y) < 2 or max(x, y) > len(arr):
# return -1
# return (mean(arr[:x]) + mean(arr[-y:])) / 2
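# Worked example (added): get_mean([1, 3, 2, 5, 7], 2, 3)
#   mean of the first 2 elements = 2.0
#   mean of the last 3 elements  = 14/3 ~ 4.667
#   result ~ 3.333; get_mean([1, 3], 1, 2) returns -1 because min(x, y) < 2.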
|
################################################################################
# \file SConstruct
# \brief The SCons master build script for the persistent rnn kernels.
################################################################################
import os
def mkdir(name):
if not os.path.isdir(name):
os.mkdir(name)
if ARGUMENTS.get('mode', 'debug') == 'release':
mkdir('.release_build')
SConscript('SConscript', variant_dir='.release_build', duplicate=0,
exports={'mode':'release'})
else:
mkdir('.debug_build')
SConscript('SConscript', variant_dir='.debug_build', duplicate=0,
exports={'mode':'debug'})
|
import bitmex
import settings as s
client = bitmex.bitmex(api_key=s.API_KEY, api_secret=s.API_SECRET)
data = client.Trade.Trade_getBucketed(
binSize = '5m',
symbol='XBTUSD',
count=100,
reverse=True
).result()
|
# Generated by Django 3.1.7 on 2021-03-25 14:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('audio', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='audiobook',
options={'verbose_name': 'Audio Book', 'verbose_name_plural': 'Audio Books'},
),
migrations.AlterModelOptions(
name='podcast',
options={'verbose_name': 'Podcast', 'verbose_name_plural': 'Podcasts'},
),
migrations.AlterModelOptions(
name='song',
options={'verbose_name': 'Song', 'verbose_name_plural': 'Songs'},
),
]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 15:52:06 2017
@author: Diabetes.co.uk
"""
# This module needs to be run at the start: it automatically extracts all of the
# entities and nouns of the questions and answers from the questions-and-answers-with-class file,
# and appends them to create the sampledatabase.csv file
#
# Parse the first sentence of the answer and store it as a dataframe.
# This step can be omitted once we have an annotated database with its features extracted;
# then it can be a SQL query instead of a Python query,
# but it can also be done in Python, since the module can start by loading the database as a dataframe locally.
# The programs below focus on extracting the adjectives, nouns and entities, and use those for search and match.
import pandas as pd
import nltk
import os
import sys
os.chdir(os.path.dirname(sys.argv[0]))
import functions_for_extracting_pronouns_and_entities_using_api as extract
################################################################
##Parsing the 1st sentence of the answer and extracting the adjectives and nouns
################################################################
Questions = pd.read_csv('CSVfiles\\QuestionsWithAnswersAndClassCSV.csv', index_col = 'ID', encoding = 'utf-8')
Answers = Questions['ANSWER']
Questionsonly = Questions['QUESTION']
firstsent = []
for row in Answers:
results = nltk.sent_tokenize(row)
firstsent.append(results[0].lower())
Questions['Answerfirstsent'] = firstsent
#Extracting the adjectives, nouns, named entities of the sentences and storing it in new columns:
AnswerAdjectives = []
AnswerNouns = []
AnswerEntities = []
for rows in firstsent:
tokens1 = extract.get_tokens(rows)
aNOUN = extract.Nounswords(tokens1)
AnswerNouns.append(aNOUN)
#Adjectives
aADJECTIVE = extract.Adjectivewords(tokens1)
AnswerAdjectives.append(aADJECTIVE)
#Named entities
named_entities1 = extract.entities_name1(rows)
AnswerEntities.append(named_entities1)
QuestionsAdjectives = []
QuestionsNouns = []
QuestionsEntities = []
for rows in Questionsonly:
tokens1 = extract.get_tokens(rows)
aNOUN = extract.Nounswords(tokens1)
QuestionsNouns.append(aNOUN)
#Adjectives
aADJECTIVE = extract.Adjectivewords(tokens1)
QuestionsAdjectives.append(aADJECTIVE)
#Named entities
named_entities1 = extract.entities_name1(rows)
QuestionsEntities.append(named_entities1)
Questions['QuestionsAdjectives'] = QuestionsAdjectives
Questions['QuestionsNouns'] = QuestionsNouns
Questions['QuestionsEntities'] = QuestionsEntities
Questions['AnswerAdjectives'] = AnswerAdjectives
Questions['AnswerNouns'] = AnswerNouns
Questions['AnswerEntities'] = AnswerEntities
Questions.to_csv('CSVfiles\\sampledatabase.csv', encoding = 'utf-8')
#this part will take a while, so ill omit those part of the code, and store and load the result as another csv file
#can be implemented to parse a new annotated dataset
|
# ############################################################################ #
# #
# ::: :::::::: #
# config.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: charles <me@cacharle.xyz> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/09/27 11:05:38 by charles #+# #+# #
# Updated: 2020/10/01 10:15:26 by cacharle ### ########.fr #
# #
# ############################################################################ #
import os
# Location of your project directory
PROJECT_PATH = "../philosophers"
# Build your project before the test if set to True
BUILD_BEFORE = True
# Command to run before the test to build your project
# `{path}` is replaced by the philosophers directory (e.g `../philo_one` `../philo_two`)
BUILD_CMD = "make --no-print-directory -C {path}"
# Timeout for non infinite test
TIMEOUT = 1.0
# Timeout for error test
TIMEOUT_ERROR = 0.2
# Destination of the full summary of failed tests
RESULT_FILE = "result.log"
# Pager command
PAGER_CMD = ["less"]
################################################################################
# Do not edit
################################################################################
PHILO_PATHS = [
os.path.join(PROJECT_PATH, "philo_one"),
os.path.join(PROJECT_PATH, "philo_two"),
os.path.join(PROJECT_PATH, "philo_three")
]
PHILO_EXEC_PATHS = [
os.path.join(PHILO_PATHS[0], "philo_one"),
os.path.join(PHILO_PATHS[1], "philo_two"),
os.path.join(PHILO_PATHS[2], "philo_three")
]
INT_MIN = -2147483648
INT_MAX = 2147483647
UINT_MAX = 4294967295
LONG_MIN = -9223372036854775808
LONG_MAX = 9223372036854775807
ULONG_MAX = 18446744073709551615
|
import sys
input = sys.stdin.readline
numbagoose = int(input())
point = (0, 0)
dist = -1
for _ in range(numbagoose):
x, y = [int(i) for i in input().split()]
if (x ** 2 + y ** 2) ** 0.5 > dist:
dist = (x ** 2 + y ** 2) ** 0.5
point = (x, y)
print(point[0], point[1])
|
import poplib
serv = poplib.POP3_SSL( 'tamdil.iitg.ernet.in' , '995' ) # replace tamdil with your IITG Webmail server
serv.user( 'username' )
serv.pass_( 'password' )
|
'''
Created on Dec 8, 2016
@author: micro
'''
from skimage import feature
|
### Python basic syntax
## Data types and variables
# print('I\'m ok.')
# print('I\'m learning\nPython.')
# print('\\\n\\')
# print('\\\t\\')
# # Use r'' so that the string inside the quotes is not escaped by default
# print(r'\\\t\\')
# # Use the '''...''' form to write multi-line content
# print('''line1
# line2
# line3''')
# print(True)
# print(False)
# print(3>2)
# print(2<1)
#
# # Boolean values can be combined with and, or and not.
# # "and" is the logical AND: the result is True only when all operands are True:
# print(True and True)
# print(True and False)
# print(False and False)
# # "or" is the logical OR: the result is True as long as at least one operand is True
# print(True or True)
# print(True or False)
# print(False or False)
# # "not" is the logical NOT, a unary operator that turns True into False and False into True
# print(not True)
# print(not False)
# print(not 3>2)
# age = 1
# if age >= 18:
# print('adult')
# else:
# print('teenager')
#
# PI=3.14159265359
# print('PI=',PI)
# print("10/3=",10/3)
# print("10//3=",10//3)
# print("9/3=",9/3)
# print("9//3=",9//3)
# print("9%3=",9%3)
# print("10%3=",10%3)
## Strings and encodings
# print("囊中羞涩")
# print(ord('囊'))
# print(chr(22218))
# print(ord('A'))
# print(chr(65))
# print("\u4e2d\u6587") # 中文
# print('ABC'.encode('ascii'))
# print(b'ABC'.decode('ascii'))
# print('中文'.encode('gb2312'))
# print(b'\xd6\xd0\xce\xc4'.decode('gb2312'))
# print('中文'.encode('UTF-8'))
# # If the bytes contain only a small number of invalid bytes, pass errors='ignore' to skip them
# print(b'\xe4\xb8\xad\xe6\x96'.decode('UTF-8',errors='ignore'))
# To count how many characters a str contains, use the len() function
# lens = len('ABC')
# print(lens)
# lens = len('中文')
# print(lens)
# lens = len('囊中羞涩')
# print(lens)
## Placeholders
# %d integer
# %f floating-point number
# %s string
# %x hexadecimal integer
# print('Hello, %s' % 'world')
# print('Hi, %s, you have $%d.' % ('Michael', 1000000))
# print('%2d-%02d' % (3, 1))
# print('%.2f' % 3.1415926)
# test
# s1=int(input('小明上学期期末成绩为:'))
# s2=int(input('小明本学期期末成绩为:'))
# r=((s2-s1)/s1*100)
# print('小明本次成绩变化为 %.1f%%'%r)
content = u'\xF0\xBC\x88\x99\xE4\xBB';
content = content.encode("latin1").decode("gbk");
print(content);
content = u'\xF0\xB8\xA4\xA9\xE7\x94';
content = content.encode("latin1").decode("gbk");
print(content);
|
import uuid
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from ..common.models import Article
from ..utils import upload_and_rename
def upload_and_rename_product_detail(instance, filename):
return upload_and_rename(instance.pk, filename)
class Category(models.Model):
id = models.AutoField(primary_key=True)
create_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
update_at = models.DateTimeField(_('修改时间'), auto_now=True)
name = models.CharField(_("名称"), max_length=80, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = _('产品类型')
verbose_name_plural = _('产品类型')
class Product(models.Model):
id = models.AutoField(primary_key=True)
create_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
update_at = models.DateTimeField(_('修改时间'), auto_now=True)
category = models.ForeignKey(Category, verbose_name=_('产品类型'), related_name='products', on_delete=models.SET_NULL, null=True)
name = models.CharField(_("名称"), max_length=80)
short_description = models.TextField(_('简介'), null=True, blank=True)
def __str__(self):
return '{}-{}'.format(self.category, self.name)
class Meta:
verbose_name = _('产品')
verbose_name_plural = _('产品')
class ProductPicture(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
create_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
update_at = models.DateTimeField(_('修改时间'), auto_now=True)
product = models.ForeignKey(Product, verbose_name='产品', related_name='images', on_delete=models.SET_NULL, null=True)
image = models.ImageField(upload_to=upload_and_rename_product_detail, verbose_name=_('图片'))
description = models.CharField(_('描述'), max_length=80, null=True, blank=True)
featured = models.BooleanField(_('是否首页展示'), default=False)
cover = models.BooleanField(_('是否作为封面'), default=False)
class Meta:
verbose_name = _('产品图片')
verbose_name_plural = _('产品图片')
def image_tag(self):
return mark_safe('<img src="%s" width="150" height="150" />' % (self.image.url))
image_tag.short_description = '图片预览'
class ProductDetailedDescription(models.Model):
id = models.AutoField(primary_key=True)
product = models.OneToOneField(Product, on_delete=models.CASCADE)
article = models.OneToOneField(Article, on_delete=models.CASCADE, null=True)
class Meta:
verbose_name = _('产品详情描述')
verbose_name_plural = _('产品详情描述')
|
h = open('Day1/numbers.txt', 'r')
# Reading from the file
content = h.readlines()
foundSum = False
# Iterating through the content of the file
for x in range(0, len(content)):
for y in range(x+1, len(content)):
sum2020 = int(content[x]) + int(content[y])
if sum2020 == 2020:
foundSum = True
print('First number: %d, second number: %d' % (int(content[x]), int(content[y])))
break
if(foundSum):
break
h.close()
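# Alternative sketch (added, illustrative only; not part of the original
# solution): the same pair can be found in a single pass with a set of
# complements.
#
#     seen = set()
#     for line in content:
#         n = int(line)
#         if 2020 - n in seen:
#             print('First number: %d, second number: %d' % (2020 - n, n))
#             break
#         seen.add(n)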
|
#!/usr/bin/python
import sys, re
pattern_reg = re.compile(r'^.+:(\d+):(\d+): (error|warning):')
first_error = -1
msg_error = ""
first_warning = -1
msg_warning = ""
for line in sys.stdin:
line = line.rstrip('\n')
m = re.search(pattern_reg, line)
if (m):
# in the pattern, each set of parentheses represents a "group"
line_number = int(m.group(1))
err = m.group(3)
if err == "error" :
if first_error < 0 or line_number < first_error :
first_error = line_number
msg_error = line
else :
if first_warning < 0 or line_number < first_warning :
first_warning = line_number
msg_warning = line
if first_error >= 0 :
print(msg_error)
elif first_warning >= 0 :
print(msg_warning)
else :
print("")
|
import os
import pandas as pd
from rdkit.Chem import Descriptors
import rdkit.Chem as Chem
import matplotlib.pyplot as plt
import seaborn as sns
def Nrot(row):
""" Get number of rotatble bonds
Parameters
----------
row:
row of pandas.DataFrame containing SMILES field
Returns
-------
N_rot: int
Number of rotatable bonds
"""
m = Chem.MolFromSmiles(row.SMILES)
N_rot = Descriptors.NumRotatableBonds(m)
return N_rot
def heavy_atoms(row):
""" Get number of Heavy atoms
Parameters
----------
row:
row of pandas.DataFrame containing SMILES field
Returns
-------
heavy_atom_count: int
Number of heavy atoms
"""
m = Chem.MolFromSmiles(row.SMILES)
heavy_atom_count = Descriptors.HeavyAtomCount(m)
return heavy_atom_count
def clogP(row):
""" Get number of Heavy atoms
Parameters
----------
row:
row of pandas.DataFrame containing SMILES field
Returns
-------
cLogP: float
        Calculated partition coefficient between n-octanol and water
"""
m = Chem.MolFromSmiles(row.SMILES)
clogp = Descriptors.MolLogP(m)
return clogp
def TPSA(row):
"""Get Total polar surface area
Parameters
----------
row:
row of pandas.DataFrame containing SMILES field
Returns
-------
total_polar_surface_area: float
total polar surface area
"""
m = Chem.MolFromSmiles(row.SMILES)
total_polar_surface_area = Descriptors.TPSA(m)
return total_polar_surface_area
def NDon(row):
"""Get Number of H-Bond Donors
Parameters
----------
row:
row of pandas.DataFrame containing SMILES field
Returns
-------
donors: int
Number of Donors
"""
m = Chem.MolFromSmiles(row.SMILES)
donors = Descriptors.NumHDonors(m)
return donors
def NAcc(row):
"""Get Number of H-Bond Acceptors
Parameters
----------
row:
row of pandas.DataFrame containing SMILES field
Returns
-------
Acceptors: int
Number of Acceptors
"""
m = Chem.MolFromSmiles(row.SMILES)
acceptors = Descriptors.NumHAcceptors(m)
return acceptors
def Fsp3(row):
"""Get Fraction of carbons that are sp3
Parameters
----------
row:
row of pandas.DataFrame containing SMILES field
Returns
-------
FSP3:
Fraction of carbons that are sp3
"""
m = Chem.MolFromSmiles(row.SMILES)
FSP3 = Descriptors.FractionCSP3(m)
return FSP3
def add_properties_to_df(df):
""" Add predicted properties to DataFrame with SMILES
Adds new column for each of:
N_rot: Number of rotatable bonds
HAC: Heavy atom count
        cLogP: Calculated log partition coefficient
TSPA: Total polar surface area
NDON: number of Hbond donors
NAcc: Number of Hbond Acceptors
Fsp3: Fraction of sp3 carbons
"""
df['N_rot'] = df.apply(Nrot, axis=1)
df['HAC'] = df.apply(heavy_atoms, axis=1)
df['cLogP'] = df.apply(clogP, axis =1)
df['TSPA'] = df.apply(TPSA, axis=1)
df['NDon'] = df.apply(NDon, axis=1)
df['NAcc'] = df.apply(NAcc, axis=1)
df['Fsp3'] = df.apply(Fsp3, axis=1)
return df
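# Illustrative follow-up (added): once the columns exist, a rule-of-three
# style fragment filter can be expressed directly on the DataFrame
# (the thresholds here are examples, not part of the original analysis):
#
#     ro3 = df[(df.cLogP <= 3) & (df.NDon <= 3) & (df.NAcc <= 3)
#              & (df.N_rot <= 3)]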
def plot_histograms(df, library_name):
properties = ['N_rot', 'HAC', 'cLogP', 'TSPA', 'NDon', 'NAcc', 'Fsp3']
for prop in properties:
df.plot(y=prop, kind='hist', bins=40)
plt.xlabel('{}'.format(prop))
plt.title('{}'.format(library_name))
plt.savefig('Output/{}_{}.png'.format(prop, library_name), dpi=300)
plt.close()
def plot_multiple_libraries(df_dict):
properties = ['N_rot', 'HAC', 'cLogP', 'TSPA', 'NDon', 'NAcc', 'Fsp3']
for prop in properties:
f, axes = plt.subplots(3, 1, figsize=(7, 7), sharex=True)
sns.distplot(df_dict['FragLite'][prop], kde=False, ax=axes[0],label='FragLite')
axes[0].set_title('FragLite')
sns.distplot(df_dict['DSiP'][prop], kde=False, ax=axes[1], label='DSiP')
axes[1].set_title('DSiP')
sns.distplot(df_dict['Minifrag'][prop], kde=False, ax=axes[2], label='MiniFrag')
axes[2].set_title('MiniFrag')
plt.subplots_adjust(hspace=0.3)
plt.xlabel('{}'.format(prop))
plt.savefig('Output/{}_all.png'.format(prop), dpi=300)
plt.close()
if __name__ == '__main__':
dspi_df = pd.read_excel('DSPI.xlsx')
dspi_df = add_properties_to_df(dspi_df)
fraglite_df = pd.read_excel('ECHO_FragLiteSET1_EG.xlsx')
fraglite_df = add_properties_to_df(fraglite_df)
minifrag_df = pd.read_excel('MiniFrag.xlsx')
minifrag_df = add_properties_to_df(minifrag_df)
os.mkdir("Output")
df_dict = {"DSiP":dspi_df,
"FragLite":fraglite_df,
"Minifrag":minifrag_df}
plot_multiple_libraries(df_dict)
print("DPSI")
print(dspi_df.mean())
print(dspi_df.SMILES.nunique())
print("_-------------------------")
plot_histograms(dspi_df,"DSPI")
dspi_df.to_excel("Output/DSPI_with_properties.xlsx")
print("MiniFrag")
print(minifrag_df.mean())
print(minifrag_df.SMILES.nunique())
print("_-------------------------")
plot_histograms(minifrag_df,"MiniFrag")
print(minifrag_df)
minifrag_df.to_excel("Output/minifrag_with_properties.xlsx")
|
#
# spectral_sequence_class.py
#
import numpy as np
from multiprocessing import Pool
from functools import partial
from ..simplicial_complexes.differentials import complex_differentials
from ..gauss_mod_p.gauss_mod_p import gauss_col_rad, gauss_barcodes
from ..persistence_algebra.barcode_bases import barcode_basis
from .local_chains_class import local_chains
class spectral_sequence(object):
"""Space and methods for Mayer-Vietoris spectral sequences
Parameters
----------
nerve : :obj:`list(Numpy Array)`
Simplicial complex storing the nerve of the covering. This is stored as
a list, where the ith entry contains a :obj:`Numpy Array` storing
all the ith simplices; a simplex for each row.
nerve_point_cloud : :obj:`list(list(Numpy Array))`
Point clouds indexed by nerve of the cover, see
:mod:`permaviss.covers.cubical_cover`
points_IN : :obj:`list(list(Numpy Array))`
Point Identification Numbers (IN) indexed by nerve of the cover, see
:mod:`permaviss.covers.cubical_cover`
max_dim : int
Maximum dimension of simplices.
max_r : float
Maximum persistence radius.
no_pages : int
Number of pages of the spectral sequence
p : int(prime)
The prime number so that our computations are mod p
Attributes
----------
nerve, nerve_point_cloud, points_IN, max_dim, max_r, no_pages, p :
as described above
nerve_differentials : :obj:`list(Numpy Array)`
Differentials of Nerve. Used for computing Cech Complex.
no_rows, no_columns : int, int
Number of rows and columns in each page
nerve_differentials : :obj:`list(Numpy Array)`
List storing the differentials of the Nerve. The ith entry stores the
matrix of the ith differential.
subcomplexes : :obj:`list(list(list(Numpy Array)))`
List storing the simplicial complex on each cover element. For integers
`n_dim`, `k` and `dim` the variable `subcomplexes[n_dim][k][dim]`
stores the `dim`-simplices on the cover indexed by the `k` simplex of
dimension `n_dim` in the nerve.
zero_diff : :obj:`list(list(list(Numpy Array)))`
List storing the vertical differential matrices on the 0 page of the
spectral sequence. For integers `n_dim`, `k` and `dim` the variable
`zero_diff[n_dim][k][dim]` stores the `dim` differential of
the complex on the cover indexed by the `k` simplex of dimension
`n_dim` in the nerve.
cycle_dimensions : :obj:`list(list(list(int)))`
List storing the number of bars on each local persistent homology.
Given two integers `n_dim` and `dim`, the variable
`cycle_dimensions[n_dim][dim]` contains a list where each entry
corresponds to an `n_dim` simplex in the nerve. For each such entry,
we store the number of nontrivial persistent homology classes of
dimension `dim` in the corresponding cover.
Hom : :obj:`list(...(list(barcode_basis)))`
Homology for each page of the spectral sequence. Given three integers
which we denote `n_dim`, `nerv_spx` and `deg` we have that
`Hom[0][n_dim][nerv_spx][deg]` stores a :obj:`barcode_basis` with the
`deg`-persistent homology of the covering indexed by
`nerve[n_dim][nerv_spx]`. All these store the homology on the `0` page
of the spectral sequence. Additionally, for integers `k > 0`, `n_dim`
and `deg`, we store in `Hom[k][n_dim][deg]` the :obj:`barcode_basis`
for the homology on the `(deg, n_dim)` entry in the `k` page of the
spectral sequence.
Im : :obj:`list(...(list(barcode_basis)))`
Image for each page of the spectral sequence. Given three integers
which we denote `n_dim`, `nerv_spx` and `deg` we have that
`Im[0][n_dim][nerv_spx][deg]` stores a :obj:`barcode_basis` for the
image of the `deg+1`-differential of the covering indexed by
`nerve[n_dim][nerv_spx]`. All these store the images on the `0` page
of the spectral sequence. Additionally, for integers `k > 0`, `n_dim`
and `deg`, we store in `Im[k][n_dim][deg]` the :obj:`barcode_basis` for
the image on the `(deg, n_dim)` entry in the `k` page of the spectral
sequence.
PreIm : :obj:`list(...(list(Numpy Array)))`
Preimages for each page of the spectral sequence. Given three integers
which we denote `n_dim`, `nerv_spx` and `deg` we have that
`PreIm[0][n_dim][nerv_spx][deg]` stores a :obj:`Numpy Array` for the
Preimage of the `deg+1`-differential of the covering indexed by
`nerve[n_dim][nerv_spx]`. Additionally, for integers `k > 0`, `n_dim`
and `deg`, we store in `PreIm[k][n_dim][deg]` a :obj:`Numpy Array` for
the preimages of the differential images in the `(deg, n_dim)` entry in
the `k` page of the spectral sequence.
tot_complex_reps : :obj:`list(list(*))`
        The asterisk `*` on the type can be either [] or
:obj:`list(Numpy Array)`. This is used for storing complex
representatives for the cycles.
page_dim_matrix : :obj:`Numpy Array(no_pages+1, max_dim, no_columns)`
Array storing the dimensions of the entries in each page. Notice that
the order in which we store columns and rows differs from all the
previous attributes.
persistent_homology : :obj:`list(barcode_basis)`
List storing the persistent homology generated by the spectral
sequence. The `i` entry contains the `i` dimensional persistent
homology.
order_diagonal_basis : `list`
This intends to store the original order of `persistent_homology`
before applying the standard order.
extensions : :obj:`list(list(list(Numpy Array)))`
Nested lists, where the first two indices are for the column and row.
The last index indicates the corresponding extension matrix.
Notes
-----
The indexing on the 0 page is different from that of the next pages. This
is because we do not want to store all the 0 page information on the same
place.
"""
def __init__(self, nerve, nerve_point_cloud, points_IN, max_dim,
max_r, no_pages, p):
"""Construction method
"""
# dimensions of spectral sequence
self.no_pages = no_pages
self.no_rows = max_dim
self.no_columns = len(nerve)
self.max_r = max_r
self.p = p
local_chains.p = p
# add nerve_point_cloud to spectral_sequence info
self.nerve_point_cloud = nerve_point_cloud
# add points IN to support Cech Differential
self.points_IN = points_IN
# add nerve and compute nerve differentials
self.nerve = nerve
# count number of simplices in nerve
self.nerve_spx_number = []
self.nerve_spx_number.append(self.nerve[0])
for nerve_simplices in nerve[1:self.no_columns]:
self.nerve_spx_number.append(np.size(nerve_simplices, 0))
# end for
self.nerve_differentials = complex_differentials(nerve, p)
# list containing barcode bases for Hom, Im and PreIm
# Hom and Im go through all pages, whereas
# PreIm is only contained in the 0 page
self.Hom = [[]]
self.Im = [[]]
self.PreIm = [[]]
self.subcomplexes = []
self.zero_diff = []
self.cycle_dimensions = []
self.first_page_barcodes = []
# vectors that translate local indices to global
self.tot_complex_reps = []
# higher page representatives
self.Hom_reps = [[]]
self.Im_reps = [[]]
# store extension matrices
self.extensions = []
for n_dim in range(len(nerve)):
self.Hom[0].append([])
self.Im[0].append([])
self.PreIm[0].append([])
self.subcomplexes.append([])
self.zero_diff.append([])
self.cycle_dimensions.append([])
self.first_page_barcodes.append([])
self.tot_complex_reps.append([])
self.extensions.append([])
for deg in range(self.no_rows):
self.first_page_barcodes[n_dim].append([])
self.tot_complex_reps[n_dim].append([])
self.extensions[n_dim].append([])
# make lists to store information in higher pages
for k in range(1, no_pages):
self.Hom.append([])
self.Im.append([])
self.PreIm.append([])
self.Hom_reps.append([])
self.Im_reps.append([])
for n_dim in range(self.no_columns):
self.Hom[k].append([])
self.Im[k].append([])
self.PreIm[k].append([])
self.Hom_reps[k].append([[]])
self.Im_reps[k].append([[]])
for deg in range(self.no_rows):
self.Hom[k][n_dim].append(barcode_basis([]))
self.Im[k][n_dim].append([])
self.PreIm[k][n_dim].append([])
self.Hom_reps[k][n_dim].append([])
self.Im_reps[k][n_dim].append([])
# save space for dimension matrices for all pages
# the order of variables is for printing the spectral sequence
self.page_dim_matrix = np.zeros((no_pages+1, max_dim,
self.no_columns)).astype(int)
# define persistent homology and order of diagonal basis
self.persistent_homology = []
self.order_diagonal_basis = []
###########################################################################
# add content to first page
def add_output_first(self, output, n_dim):
"""Stores the 0 page data of `n_dim` column after it has been computed
in parallel by `multiprocessing.pool`
Parameters
----------
output : :obj:`list`
Result after using `multiprocessing.pool` on
:meth:`..local_persistent_homology`
n_dim : int
Column of `0`-page whose data has been computed.
"""
self.subcomplexes[n_dim] = [it[0] for it in output]
self.zero_diff[n_dim] = [it[1] for it in output]
self.Hom[0][n_dim] = [it[2] for it in output]
self.Im[0][n_dim] = [it[3] for it in output]
self.PreIm[0][n_dim] = [it[4] for it in output]
# check that the level of intersection is not empty.
if len(self.Hom[0][n_dim]) > 0:
for deg in range(self.no_rows):
no_cycles = 0
# cumulative dimensions
self.cycle_dimensions[n_dim].append(
np.zeros(self.nerve_spx_number[n_dim]+1).astype(int))
for k in range(self.nerve_spx_number[n_dim]):
# Generate page dim matrix and local_coordinates info
cycles_in_cover = self.Hom[0][n_dim][k][deg].dim
no_cycles += cycles_in_cover
self.cycle_dimensions[n_dim][deg][k] = no_cycles
# end for
self.page_dim_matrix[1, deg, n_dim] = no_cycles
# put together first page barcodes
if no_cycles == 0:
self.first_page_barcodes[n_dim][deg] = []
else:
self.first_page_barcodes[n_dim][deg] = np.zeros((
no_cycles, 2))
prev = 0
for k in range(self.nerve_spx_number[n_dim]):
# Generate page dim matrix and local_coordinates info
next = self.cycle_dimensions[n_dim][deg][k]
if prev < next:
self.first_page_barcodes[n_dim][deg][
prev:next] = self.Hom[0][n_dim][k][deg].barcode
# end if
prev = next
# end for
# end else
# end for
# end if
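        # Illustrative note (added): cycle_dimensions stores cumulative
        # counts. E.g. if three covers contribute 2, 0 and 1 local classes in
        # degree deg, then cycle_dimensions[n_dim][deg] ends up as
        # [2, 2, 3, 0] and page_dim_matrix[1, deg, n_dim] == 3.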
###########################################################################
# self.first_differential(self, n_dim, deg):
def first_differential(self, n_dim, deg):
""" Compute differential on first page (n_dim, deg) --> (n_dim-1, deg)
Parameters
----------
n_dim, deg : int, int
Differential domain position on first page.
Returns
-------
Betas : np.array
Coefficients of image of first page differentials. The image of
each class from (n_dim, deg) is given as a row.
"""
# handle trivial cases
if self.page_dim_matrix[1, deg, n_dim] == 0:
return np.array([])
# generate chains for sending to cech_diff_and_lift
domain_chains = local_chains(self.nerve_spx_number[n_dim])
# compute array of initial radii
R = np.zeros(self.page_dim_matrix[1, deg, n_dim])
# birth radii and localized_coordinates for classes
prev = 0
for nerve_spx_index, next in enumerate(
self.cycle_dimensions[n_dim][deg][:-1]):
if prev < next:
domain_chains.add_entry(
nerve_spx_index, np.array(range(prev, next)),
(self.Hom[0][n_dim][nerve_spx_index][deg].coordinates).T)
R[prev:next] = self.Hom[0][n_dim][nerve_spx_index][
deg].barcode[:, 0]
prev = next
# end for
# call cech_diff_and_lift
Betas, _ = self.cech_diff_and_lift(n_dim, deg, domain_chains, R)
return Betas
###########################################################################
# self.high_differential(self, n_dim, deg, current_page):
def high_differential(self, n_dim, deg, current_page):
""" Compute differential on `current-page`
(n_dim, deg) --> (n_dim - current_page, deg + current_page - 1).
Parameters
----------
n_dim, deg : int, int
Differential domain position.
Returns
-------
Betas : np.array
Coefficients of image of current_page differentials. The image of
each class from (n_dim, deg) is given as a row.
"""
# handle trivial case
if self.Hom[current_page-1][n_dim][deg].dim == 0:
return np.array([])
# take last total complex entry of Hom reps
chains = self.Hom_reps[current_page-1][n_dim][deg][current_page-1]
Hom_barcode = self.Hom[current_page-1][n_dim][deg].barcode
# codomain position
Sn_dim = n_dim - current_page
Sdeg = deg + current_page - 1
# differential (n_dim, deg) --> (Sn_dim, Sdeg)
Betas, _ = self.cech_diff_and_lift(Sn_dim + 1, Sdeg, chains,
Hom_barcode[:, 0])
Betas, _ = self.lift_to_page(Sn_dim, Sdeg, current_page, Betas,
Hom_barcode)
return Betas
# end high_differential
###########################################################################
# self.lift_to_page(self, n_dim, deg, page, chains):
def lift_to_page(self, n_dim, deg, target_page, Betas, Beta_barcode):
""" Lifts chains in position (n_dim, deg) from page 1 to target_page
Returns Betas and image coordinates.
Parameters
----------
n_dim, deg : int, int
Differential domain position.
target_page : int
Lift classes up to this page.
Betas : np.array
Coordinates of classes on first page.
Betas_barcode : np.array
Barcodes of classes to be lifted.
Returns
-------
Betas.T : np.array
            Coefficients of the classes lifted to `target_page`. Each lifted
            class from (n_dim, deg) is given as a row.
        Gammas.T : np.array
            Coefficients of added differentials of the (target_page - 1) page.
            This is such that the sum of differentials using Gammas,
            plus adding classes using the returned Betas, leads to the
            original Betas.
"""
Betas = Betas.T
# lift up to target_page
for k in range(1, target_page):
Im = self.Im[k][n_dim][deg]
Hom = self.Hom[k][n_dim][deg]
if Hom.dim > 0:
if Im.dim > 0:
Im_Hom = np.append(Im.coordinates, Hom.coordinates, axis=1)
barcode_col = np.append(Im.barcode, Hom.barcode, axis=0)
else:
Im_Hom = Hom.coordinates
barcode_col = Hom.barcode
else:
Betas = np.array([])
Gammas = np.array([])
break
start_index = Im.dim + Hom.dim
# add radii coordinates and radii
barcode_col = np.append(barcode_col, Beta_barcode, axis=0)
A = np.append(Im_Hom, Betas, axis=1)
# barcodes of rows
barcode_row = self.Hom[k][n_dim][deg].prev_basis.barcode
# gaussian reduction on matrix between persistence modules
# order here barcode_row
rows_basis = barcode_basis(barcode_row)
order = rows_basis.sort(send_order=True)
# order row barcodes as well
ordered_barcode_row = barcode_row[order]
A = A[order]
coefficients = gauss_barcodes(
A, ordered_barcode_row, barcode_col, start_index, self.p)
# next page coefficients
Gammas = coefficients[:Im.dim]
Betas = coefficients[Im.dim:]
# end for
return Betas.T, Gammas.T
# end lift_to_page
###########################################################################
# self.cech_diff_and_lift
def cech_diff_and_lift(self, n_dim, deg, start_chains, R):
"""Given chains in position (n_dim, deg), computes horizontal
differential followed by lift by vertical differential.
Procedure:
(1) take chains in position (n_dim, deg)
(2) compute the Cech differential of these chains. We do this in
parallel over the covers in (n_dim-1, deg)
(3) Lift locally.
Steps (2) and (3) are parallelized at the same time.
Parameters
----------
        n_dim, deg : int, int
            Position on the spectral sequence.
        start_chains : :class:`local_chains` object
            Chains in position (n_dim, deg).
        R : :obj:`list`
            Vector of radii at which the lifts are performed.
Returns
-------
        Betas_1_page : :obj:`Numpy Array`
            Coordinates of the images on the first page.
        lift_chains : :class:`local_chains` object
            Local coordinates of the lift by the vertical differential.
"""
# store space for coordinates in first page
Betas_1_page = np.zeros((
len(R), self.page_dim_matrix[1, deg, n_dim-1]
))
# store space for preimages
lift_chains = local_chains(self.nerve_spx_number[n_dim-1])
if len(R) > 0:
partial_cech_diff_and_lift_local = partial(
self.cech_diff_and_lift_local, R, start_chains, n_dim - 1, deg)
# map reduce local cech differential and lifts
workers_pool = Pool()
output = workers_pool.map(
partial_cech_diff_and_lift_local,
range(self.nerve_spx_number[n_dim-1]))
workers_pool.close()
workers_pool.join()
# output = []
# for j in range(self.nerve_spx_number[n_dim-1]):
# output.append(partial_cech_diff_and_lift_local(j))
prev = 0
# store results
for nerve_spx_index, next in enumerate(
self.cycle_dimensions[n_dim-1][deg][:-1]):
if output[nerve_spx_index] is not None:
Betas_1_page[:, prev:next] = output[nerve_spx_index][0]
lift_chains.add_entry(
nerve_spx_index, output[nerve_spx_index][1],
output[nerve_spx_index][2])
prev = next
# end for
return Betas_1_page, lift_chains
# end cech_diff_and_lift
###########################################################################
# self.cech_diff
def cech_diff(self, n_dim, deg, start_chains):
""" Given chains in (n_dim + 1, deg), compute Cech differential.
Parameters
----------
n_dim, deg: int, int
Codomain position in spectral sequence.
chains : :class:`local_chains` object
Chains on (n_dim+1, deg) that are stored as references in chains[0]
and local coordinates as rows in chains[1].
Returns
-------
image_chains : :obj:`Local Coordinates`
Image coordinates of Cech differential.
"""
image_chains = local_chains(self.nerve_spx_number[n_dim])
# CECH DIFFERENTIAL
for nerve_spx_index in range(self.nerve_spx_number[n_dim]):
loc_im_ref, loc_im_coord = self.cech_diff_local(
start_chains, n_dim, deg, nerve_spx_index)
image_chains.add_entry(nerve_spx_index, loc_im_ref, loc_im_coord)
# end for
return image_chains
#######################################################################
# Cech chain plus lift of preimage
def cech_diff_and_lift_local(
self, R, start_chains, n_dim, deg, nerve_spx_index):
""" Takes some chains in position (n_dim+1, deg) and computes Cech diff
followed by a lift by vertical differential. This is done locally at
cover information in (n_dim, deg).
This method is meant to be run in parallel.
Parameters
----------
R : :obj:`list`
Vector of radii
start_chains : :class:`local_chains` object
Chains in position (n_dim + 1, deg)
n_dim, deg, nerve_spx_index : int, int, int
Position in spectral sequence and local index.
Returns
-------
betas_1_page : :obj:`Numpy Array`
Coefficients of lift to 1st page on position (n_dim, deg)
local_lift_references : :obj:`list`
List of local references of lift.
local_lift_coordinates : :obj:`Numpy Array`
Local coordinates of lift.
"""
# if nerve_spx_index==0, then prev=0
prev = self.cycle_dimensions[n_dim][deg][nerve_spx_index-1]
next = self.cycle_dimensions[n_dim][deg][nerve_spx_index]
# if trivial cover skip
if prev == next:
return
# CECH DIFFERENTIAL
        generators, local_coord = self.cech_diff_local(
            start_chains, n_dim, deg, nerve_spx_index)
# if there are no images to compute, return
if len(generators) == 0:
return
# LOCAL LIFT TO FIRST PAGE
        gammas, betas = self.first_page_local_lift(
            n_dim, deg, local_coord, R[generators], nerve_spx_index)
# store first page coefficients
betas_1_page = np.zeros((len(R), next - prev))
betas_1_page[generators] = np.transpose(betas)
# compute vertical preimage and store
preimages = np.matmul(self.PreIm[0][n_dim][nerve_spx_index][deg+1],
gammas).T
# look for indices of nonzero columns
nonzero_idx = np.where(gammas.any(axis=0))[0]
if len(nonzero_idx) > 0:
local_lift_ref = generators[nonzero_idx]
# correct sign
local_lift_coord = -preimages[nonzero_idx] % self.p
else:
local_lift_ref, local_lift_coord = [], []
# end if else
return betas_1_page, local_lift_ref, local_lift_coord
# end cech_diff_and_lift_local
###########################################################################
# self.cech_diff_local
def cech_diff_local(
self, start_chains, n_dim, deg, nerve_spx_index):
""" Local Cech differential, starting from chains in (n_dim + 1, deg).
Parameters
----------
start_chains : :class:`local_chains` object
Chains to compute Cech differential from.
n_dim, deg, nerve_spx_index : int, int, int
Position in spectral sequence and local index.
Returns
-------
local_image_ref : :obj:`list`
List of local references of image.
local_image_coord.T : :obj:`Numpy Array`
Local coordinates of image. Expressions correspond to rows while
local simplices correspond to columns.
"""
coboundary = self.nerve_differentials[n_dim + 1][nerve_spx_index]
# cofaces and coefficients on cech differential
cofaces = np.nonzero(coboundary)[0]
coefficients = coboundary[cofaces]
# indices of generators that are nontrivial by cech diff
generators = start_chains.ref[cofaces[0]]
for coface_index in cofaces[1:]:
generators = np.append(generators, start_chains.ref[
coface_index]).astype(int)
# end for
local_image_ref = np.unique(generators)
# if there are no images to compute, return
if len(local_image_ref) == 0:
return [], []
# size of local complex
if deg == 0:
cpx_size = self.subcomplexes[n_dim][nerve_spx_index][0]
else:
cpx_size = len(self.subcomplexes[n_dim][nerve_spx_index][deg])
local_image_coord = np.zeros((cpx_size, len(local_image_ref)))
# IMAGE OF CECH DIFFERENTIAL #############################
for coface_index, nerve_coeff in zip(cofaces, coefficients):
# check that there are some local coordinates
if len(start_chains.ref[coface_index]) > 0:
# generate boundary matrix
cech_local = self.local_cech_matrix(
n_dim+1, deg, coface_index, nerve_spx_index,
nerve_coeff)
active_generators = np.where(np.in1d(
local_image_ref, start_chains.ref[coface_index])
)[0]
# image of cech complex
local_image_coord[:, active_generators] += np.matmul(
cech_local, start_chains.coord[coface_index].T
)
# end if
# end for
local_image_coord %= self.p
return local_image_ref, local_image_coord.T
###########################################################################
# local boundary matrix
def local_cech_matrix(self, n_dim, deg, nerve_spx_index,
nerve_face_index, nerve_coeff):
"""Returns matrix of Cech differential in (n_dim, deg) restricted
on component (nerve_face_index, nerve_spx_index).
Parameters
----------
n_dim, deg: int, int
Position in spectral sequence.
nerve_spx_index, nerve_face_index : int, int
Local indices in domain and codomain respectively.
nerve_coeff : int
Coefficient in nerve differential determined by the pair
nerve_spx_index and nerve_face_index.
Returns
-------
boundary : :obj:`Numpy Array`
Matrix of size (subcpx[n_dim-1][nerve_face_index][deg],
subcpx[n_dim][nerve_spx_index][deg]) that represents the local
cech differential.
"""
deg_sign = (-1)**deg
if deg == 0:
# save space for boundary matrix
boundary = np.zeros((
self.subcomplexes[n_dim-1][nerve_face_index][deg],
self.subcomplexes[n_dim][nerve_spx_index][deg]))
# inclusions for points
for point_idx in range(
self.subcomplexes[n_dim][nerve_spx_index][0]):
face_point_idx = np.argmax(self.points_IN[
n_dim-1][nerve_face_index] == self.points_IN[n_dim][
nerve_spx_index][point_idx])
boundary[face_point_idx, point_idx] = nerve_coeff * deg_sign
boundary[face_point_idx, point_idx] %= self.p
# end for
else:
# save space for boundary matrix
boundary = np.zeros((
len(self.subcomplexes[n_dim-1][nerve_face_index][deg]),
len(self.subcomplexes[n_dim][nerve_spx_index][deg])))
# inclusions for edges, 2-simplices and higher
# Iterate over nontrivial local simplices in domain
for spx_index, simplex in enumerate(
self.subcomplexes[n_dim][nerve_spx_index][deg]):
# Obtain IN for vertices of simplex
vertices_spx = self.points_IN[n_dim][nerve_spx_index][simplex]
for im_index, im_spx in enumerate(
self.subcomplexes[n_dim-1][
nerve_face_index][deg]):
vertices_face = self.points_IN[n_dim-1][
nerve_face_index][im_spx.astype(int)]
# When the vertices coincide, break the loop
if len(np.intersect1d(
vertices_spx,
vertices_face)) == deg + 1:
boundary[im_index, spx_index] = nerve_coeff * deg_sign
boundary[im_index, spx_index] %= self.p
break
# end if
# end for
# end for
# end else
return boundary
# end local_cech_matrix
###########################################################################
# self.first_page_local_lift
def first_page_local_lift(
self, n_dim, deg, local_coord, lift_radii, nerve_spx_index):
""" Lift to first page on a given open cover.
Parameters
----------
n_dim, deg : int, int
Position on spectral sequence.
local_coord : :obj:`Numpy Array`
Local coordinates to be lifted to first page and vertical
differential. Rows are expressions while columns correspond
to local simplices.
        lift_radii : :obj:`list`
            Values at which we want to lift `local_coord` by the
            vertical differential.
        nerve_spx_index : int
Local index. This function is meant to be parallelized over this.
Returns
-------
gammas : :obj:`Numpy Array`
2D Matrix expressing coefficients of lift. Each expression
corresponds to a column, while image generators correspond to rows.
betas : :obj:`Numpy Array`
2D Matrix expressing coefficients in terms of homology classes on
page 1. Expressions correspond to columns, while homology classes
correspond to rows.
"""
# take care of case when local_coord is a `local_chains` object
if isinstance(local_coord, local_chains):
if len(local_coord.ref[nerve_spx_index]) > 0:
lift_radii = lift_radii[local_coord.ref[nerve_spx_index]]
local_coord = local_coord.coord[nerve_spx_index]
else:
return [], []
# return if nothing to lift
if len(lift_radii) == 0:
return [], []
# R_M : vector of birth radii of columns in M
# distinguish from trivial case where images are zero
if self.Im[0][n_dim][nerve_spx_index][deg].dim > 0 and self.Hom[0][
n_dim][nerve_spx_index][deg].dim > 0:
Im_Hom = np.append(
self.Im[0][n_dim][nerve_spx_index][deg].coordinates,
self.Hom[0][n_dim][nerve_spx_index][deg].coordinates, axis=1)
R_M = self.Im[0][n_dim][nerve_spx_index][deg].barcode[:, 0]
R_M = np.concatenate([
R_M, self.Hom[0][n_dim][nerve_spx_index][deg].barcode[:, 0]],
axis=None)
elif self.Hom[0][n_dim][nerve_spx_index][deg].dim > 0:
Im_Hom = self.Hom[0][n_dim][nerve_spx_index][deg].coordinates
R_M = self.Hom[0][n_dim][nerve_spx_index][deg].barcode[:, 0]
elif self.Im[0][n_dim][nerve_spx_index][deg].dim > 0:
Im_Hom = self.Im[0][n_dim][nerve_spx_index][deg].coordinates
R_M = self.Im[0][n_dim][nerve_spx_index][deg].barcode[:, 0]
else:
return [], []
R_M = np.concatenate([R_M, lift_radii], axis=None)
start_index = np.size(Im_Hom, 1)
# Gaussian elimination of M = (Im | Hom | start_chains (local))
M = np.append(Im_Hom, local_coord.T, axis=1)
_, T = gauss_col_rad(M, R_M, start_index, self.p)
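        # (added note) columns of T record how each appended local chain reduces
        # against the columns of M = (Im | Hom | chains): below, the first
        # Im.dim rows become the image coefficients (gammas) and the following
        # Hom.dim rows the first-page coefficients (betas).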
# look at reductions on generators and correct sign
T = -T[:, start_index:] % self.p
gammas = T[0:self.Im[0][n_dim][nerve_spx_index][deg].dim]
betas = T[self.Im[0][n_dim][nerve_spx_index][deg].dim:start_index]
# return preimage coordinates and beta coordinates
return gammas, betas
# end first_page_local_lift
###########################################################################
# self.first_page_lift
def first_page_lift(self, n_dim, deg, start_chains, R):
"""Given some chains in position (n_dim, deg), lift to first page
accross several covers.
Parameters
----------
        n_dim, deg : int, int
            Position on the spectral sequence.
start_chains : :class:`local_chains` object
Chains in position (n_dim, deg) that we lift to first page.
R : :obj:`list`
Values at which we lift `start_chains`
Returns
-------
Betas_1_page : :obj:`Numpy Array`
Coordinates on first page. Rows correspond to expressions and
columns to homology classes.
        lift_chains : :class:`local_chains` object
            Chains after lifting by the vertical differential.
"""
# store space for preimages
lift_chains = local_chains(self.nerve_spx_number[n_dim])
# coordinates in first page
Betas_1_page = np.zeros((
len(R), self.page_dim_matrix[1, deg, n_dim]))
# return if trivial
if len(R) == 0:
return Betas_1_page, lift_chains
# compute vertical lifts in parallel
partial_first_page_local_lift = partial(
self.first_page_local_lift, n_dim, deg, start_chains, R)
workers_pool = Pool()
output = workers_pool.map(
partial_first_page_local_lift, range(self.nerve_spx_number[n_dim]))
workers_pool.close()
workers_pool.join()
# proceed to store the result
prev = 0
for nerve_spx_index, next in enumerate(
self.cycle_dimensions[n_dim][deg][:-1]):
gammas, betas = output[nerve_spx_index]
# save betas
if len(betas) > 0:
Betas_aux = np.zeros((len(R), next-prev))
Betas_aux[start_chains.ref[nerve_spx_index]] = betas.T
Betas_1_page[:, prev:next] = Betas_aux
# save lifts
if len(gammas) > 0:
lift_chains.add_entry(
nerve_spx_index, start_chains.ref[nerve_spx_index],
(np.matmul(self.PreIm[
0][n_dim][nerve_spx_index][deg+1], gammas)).T)
# end if
prev = next
# end for
return Betas_1_page, lift_chains
###########################################################################
# add higher page contents
def add_output_higher(self, Hom, Im, PreIm, end_n_dim, end_deg,
current_page):
"""Stores higher page data that has been computed along a sequence of
consecutive differentials.
The studied sequence of differentials ends in
`(end_n_dim, end_deg)`
coming from
`(end_n_dim + current_page, end_deg - current_page + 1)`
and continuing until reaching an integer `r > 0` such that either
end_n_dim + r * current_page > self.no_columns
or
end_deg - r * current_page + 1 > 0
Parameters
----------
Hom : :obj:`list(barcode_basis)`
Homology of a sequence of differentials in the spectral sequence.
This is computed using
:mod:`permaviss.persistence_algebra.module_persistence_homology`.
Im : :obj:`list(barcode_basis)`
Images of a sequence of differentials in the spectral sequence.
PreIm : :obj:`list(Numpy Array)`
Preimages of a sequence of differentials in the spectral sequence.
end_n_dim : int
Integer specifying the column position where the sequence of
differentials ends.
end_deg : int
Integer specifying the row position where the sequence of
differentials ends.
current_page : int
Current page of the spectral sequence.
"""
n_dim = end_n_dim
deg = end_deg
for i, h in enumerate(Hom):
self.PreIm[current_page][n_dim][deg] = PreIm[i]
self.Hom[current_page][n_dim][deg] = h
self.page_dim_matrix[current_page+1, deg, n_dim] = h.dim
self.Im[current_page][n_dim][deg] = Im[i]
deg = deg - current_page + 1
n_dim += current_page
###########################################################################
# compute_two_page_representatives
def compute_two_page_representatives(self, n_dim, deg):
""" Computes total complex representatives for second page classes in
position (n_dim, deg).
Resulting representatives are written in self.Hom_reps[1][n_dim][deg]
Parameters
----------
n_dim, deg : int, int
These specify the position on the spectral sequence where we want
to compute and store the second page representatives.
"""
# return if nothing to compute
if self.Hom[1][n_dim][deg].dim == 0:
return
coordinates_hom = self.Hom[1][n_dim][deg].coordinates
rep_chains = [local_chains(self.nerve_spx_number[n_dim])]
prev = 0
# localized_format for first page info
for nerv_idx, next in enumerate(self.cycle_dimensions[
n_dim][deg][:-1]):
if prev < next:
rep_chains[0].add_entry(
nerv_idx, range(self.Hom[1][n_dim][deg].dim),
np.matmul(self.Hom[0][n_dim][
nerv_idx][deg].coordinates, coordinates_hom[prev:next]
).T)
# end if
prev = next
# end for
# add extra entries when possible
if n_dim > 0:
            # birth values of the classes
R = self.Hom[1][n_dim][deg].barcode[:, 0]
# cech_diff_and_lift
betas, lift = self.cech_diff_and_lift(n_dim, deg, rep_chains[0], R)
if np.any(betas):
raise(ValueError)
# vertical lift written on total_complex_reps
rep_chains.append(lift)
# end if
# store on allocated space for second page representatives
self.Hom_reps[1][n_dim][deg] = rep_chains
# end def
###########################################################################
# compute_higher_representatives
def compute_higher_representatives(self, n_dim, deg, current_page):
""" Computes total complex representatives for current_page classes in
position (n_dim, deg).
Resulting representatives written in
`self.Hom_reps[current_page][n_dim][deg]`
Parameters
----------
n_dim, deg, current_page : int, int, int
Position on the spectral sequence and current page.
"""
# handle trivial cases
if self.Hom[current_page][n_dim][deg].dim == 0:
self.Hom_reps[current_page][n_dim][deg] = []
return
# lift image to page
hom_barcode = self.Hom[current_page][n_dim][deg].barcode
hom_sums = (self.Hom[current_page][n_dim][deg].coordinates).T
# create total complex reps up to last entry by using coefficients and
# total complex representatives on previous page
total_complex_reps = []
# compute
for chains in self.Hom_reps[current_page - 1][n_dim][deg]:
total_complex_reps.append(local_chains.sums(chains, hom_sums))
# end for
# (n_dim, deg) --> (Sn_dim, Sdeg)
Sn_dim = n_dim - current_page
Sdeg = deg + current_page - 1
# if differential is trivial, no need to compute cech differential
if Sn_dim < 0:
self.Hom_reps[current_page][n_dim][deg] = total_complex_reps
return
for target_page in range(current_page, 1, -1):
Betas, _ = self.cech_diff_and_lift(
Sn_dim + 1, Sdeg, total_complex_reps[-1], hom_barcode[:, 0])
# go up to target_page and modify total_complex_reps
Betas, Gammas = self.lift_to_page(
Sn_dim, Sdeg, target_page, Betas, hom_barcode)
# modify reps
# from im_coord obtain total cpx chains
Tn_dim = Sn_dim + target_page - 1
Tdeg = Sdeg - target_page + 2
preimage_reps = []
# obtain preimage_coefficients from expressions in -Gammas
preimage_coefficients = np.matmul(
self.PreIm[target_page - 1][Tn_dim][Tdeg], -Gammas.T
)
            # the case target_page == 2 is special due to local coordinates
if np.any(Gammas) and target_page == 2:
prev = 0
for spx_idx, next in enumerate(
self.cycle_dimensions[Sn_dim + 1][Sdeg]):
if prev < next:
local_preimage = (np.matmul(
self.Hom[0][Sn_dim + 1][spx_idx][Sdeg].coordinates,
preimage_coefficients[prev:next])).T
                        # if non-empty, add
if len(total_complex_reps[-1].ref[spx_idx]) > 0:
total_complex_reps[-1].coord[
spx_idx] += local_preimage
else:
total_complex_reps[-1].add_entry(
spx_idx, range(np.size(local_preimage, 0)),
local_preimage)
prev = next
# end for
elif np.any(Gammas):
for chains in self.Hom_reps[target_page][Tn_dim][Tdeg]:
preimage_reps.append(local_chains.sums(
chains, preimage_coefficients))
# add preimage reps to current tot_complex reps
for idx, chains in enumerate(preimage_reps):
total_complex_reps[
current_page - target_page + 1 + idx] += chains
# end for
# end elif
# end for
Betas, lift = self.cech_diff_and_lift(
Sn_dim + 1, Sdeg, total_complex_reps[-1], hom_barcode[:, 0])
# check that there are no problems
if np.any(Betas):
raise RuntimeError
# add vertical lift to reps
total_complex_reps.append(lift)
# store modified total complex reps
self.Hom_reps[current_page][n_dim][deg] = total_complex_reps
# end compute_total_representatives
###########################################################################
# extension
def extension(self, start_n_dim, start_deg):
""" Take information from spectral sequence class, and calculate
extension coefficients for a given position (start_deg, start_n_dim).
"""
death_radii = self.Hom[self.no_pages-1][start_n_dim][
start_deg].barcode[:, 1]
Hom_reps = local_chains.copy_seq(
self.Hom_reps[self.no_pages - 1][start_n_dim][start_deg])
# bars for extension problem of infty page classes
barcode_extension = np.ones((len(death_radii), 2))
barcode_extension[:, 0] = death_radii
barcode_extension[:, 1] *= self.max_r
# if death_radii is equal to max_r, no need to compute ext coefficients
ext_bool = death_radii < self.max_r
# initialize extension matrices as zero matrices
Sdeg = start_deg
Sn_dim = start_n_dim
for chains in Hom_reps:
self.extensions[start_n_dim][start_deg].append(np.zeros((
self.Hom[self.no_pages-1][Sn_dim][Sdeg].dim,
len(death_radii))))
Sdeg += 1
Sn_dim -= 1
# end for
# if there are no extension coefficients to compute, return
if not np.any(ext_bool):
return
# zero out representatives not in ext_bool
for chains in Hom_reps:
for k, local_ref in enumerate(chains.ref):
if len(local_ref) > 0 and np.any(
np.invert(ext_bool[local_ref])):
chains.coord[k][np.invert(ext_bool[local_ref])] *= 0
# end if
# end for
# end for
# COMPUTE EXTENSION COEFFICIENTS
# go through all diagonal
Sdeg = start_deg
Sn_dim = start_n_dim
for idx, chains in enumerate(Hom_reps):
            # lift to infinity page and subtract betas
Betas, _ = self.first_page_lift(Sn_dim, Sdeg, chains,
death_radii)
# go up to target_page
Betas, _ = self.lift_to_page(Sn_dim, Sdeg, self.no_pages, Betas,
barcode_extension)
# STORE EXTENSION COEFFICIENTS
self.extensions[start_n_dim][start_deg][idx] = Betas.T
# MODIFY TOTAL COMPLEX REPS using BETAS
if np.any(Betas):
for ext_deg, Schains in enumerate(
self.Hom_reps[self.no_pages - 1][Sn_dim][Sdeg]):
                    # compute chains using betas and subtract them from the reps
# problem with local sums!!!
local_chains_beta = local_chains.sums(Schains, -Betas)
for k, local_coord in enumerate(
Hom_reps[ext_deg + idx].coord):
local_ref = Hom_reps[ext_deg + idx].ref[k]
if (len(local_ref)) > 0 and (
len(local_chains_beta.ref[k]) > 0):
if not np.array_equal(
local_ref, local_chains_beta.ref[k]):
raise ValueError
Hom_reps[ext_deg + idx].coord[k] = (
local_coord + local_chains_beta.coord[k]
) % self.p
elif len(local_chains_beta.ref[k]) > 0:
Hom_reps[ext_deg + idx].ref[
k] = local_chains_beta.ref[k]
Hom_reps[ext_deg + idx].coord[
k] = local_chains_beta.coord[k]
# end elif
# end for
# end for
# end if
# reduce up to 1st page using gammas
for target_page in range(self.no_pages, 1, -1):
# get coefficients on first page
Betas, _ = self.first_page_lift(Sn_dim, Sdeg, chains,
death_radii)
# go up to target_page
Betas, Gammas = self.lift_to_page(
Sn_dim, Sdeg, target_page, Betas, barcode_extension)
# if lift target_page is nonzero, raise an error
if np.any(Betas):
raise(RuntimeError)
# MODIFY TOTAL COMPLEX REPS using GAMMAS
if np.any(Gammas):
# compute coefficients of Gammas in 1st page
image_classes = np.matmul(
self.Im[target_page-1][Sn_dim][Sdeg].coordinates,
-Gammas.T)
# look for bars that might be already zero
if target_page == 2:
# obtain coefficients for gammas
image_chains = local_chains(
self.nerve_spx_number[Sn_dim])
prev = 0
for nerv_idx, next in enumerate(self.cycle_dimensions[
Sn_dim][Sdeg]):
if prev < next:
image_chains.add_entry(
nerv_idx, range(np.size(Gammas, 0)),
np.matmul(
image_classes[prev:next].T,
self.Hom[0][Sn_dim][nerv_idx][
Sdeg].coordinates.T)
)
prev = next
# end for
else:
image_chains = local_chains.sums(
self.Hom_reps[target_page-2][Sn_dim][Sdeg][0],
image_classes.T
)
# end else
chains += image_chains
# end if
# end for
# lift to first page
Betas, lift_coord = self.first_page_lift(Sn_dim, Sdeg, chains,
death_radii)
# correct sign of lift_coord and trivialise references
lift_coord.minus()
# end for
# if lift to first page is nonzero, raise an error
if np.any(Betas):
raise(RuntimeError)
if Sn_dim > 0:
# compute Cech differential of lift_coord
# and add to current reps
image_chains = self.cech_diff(Sn_dim - 1, Sdeg + 1, lift_coord)
Hom_reps[idx+1] = Hom_reps[idx + 1] + image_chains
# advance reduction position
Sdeg += 1
Sn_dim -= 1
# end for
# end def
# end spectral_sequence class #################################################
|
#!/usr/bin/python
import time, imaplib, getpass, socket
missing_libs = []
# Try to import pynotify
try:
import pynotify
except ImportError:
missing_libs.append('python-notify')
# Try to import gnomekeyring
try:
import gnomekeyring as gk
except ImportError:
missing_libs.append('python-gnomekeyring')
# Print error message and exit if a library is missing
if len(missing_libs) != 0:
print 'You\'re missing libraries!\napt-get install', " ".join(missing_libs)
exit(0)
# Which keyring and key to use to store login information
KEYRING_NAME = 'important'
KEY_NAME = 'Gmail Watcher Login'
# Clear old login entry and create a new one from user input
def new_auth():
# Clear all old entries
try:
for item in gk.find_items_sync(gk.ITEM_GENERIC_SECRET, {'application': KEY_NAME}):
gk.item_delete_sync(KEYRING_NAME, item.item_id)
except gk.NoMatchError: pass
# Read in username and password
user = raw_input('User: ')
passwd = getpass.getpass()
# Save the user as an attribute, set application to make this easier to iterate through
attrs = {'user': user, 'application': KEY_NAME}
gk.item_create_sync(KEYRING_NAME, gk.ITEM_GENERIC_SECRET, KEY_NAME, attrs, passwd, True)
return user,passwd
def start_session():
try:
return imaplib.IMAP4_SSL('imap.gmail.com','993')
except (socket.error, imaplib.IMAP4.error):
return None
def login(user,passwd):
# Start the IMAP session with gmail
print 'Starting SSL IMAP session, could take a minute'
session = start_session()
if session is None:
print 'What is gmail? (your connection or dns may be down)'
print 'Trying until I succeed!'
while session is None:
time.sleep(5)
session = start_session()
logged_in = False
# Try to log in until it succeeds, try new login information if it fails
while not logged_in:
try:
print 'Trying to log in...'
session.login(user,passwd)
print 'Successfully authenticated!'
logged_in = True
except (socket.error, imaplib.IMAP4.error):
print 'Could not authenticate! '
user,passwd = new_auth()
return session
def get_unread(session):
# Get the initial list of unread emails
read_in = False
while not read_in:
try:
session.select()
unread = session.search(None,'UNSEEN')[1][0].split(' ')
read_in = True
except (socket.error, imaplib.IMAP4.error):
print 'Disconnected! Trying to reconnect'
session = login(user,passwd)
return unread, session
if __name__ == "__main__":
# See if previous login information exists in the keyring
try:
info = gk.find_items_sync(gk.ITEM_GENERIC_SECRET, {'application': KEY_NAME})
user = info[0].attributes['user']
passwd = info[0].secret
# If not, read in and store new login information
except gk.NoMatchError:
user,passwd = new_auth()
session = login(user,passwd)
try:
# Get the initial number of unread emails
unread, session = get_unread(session)
count = 0 if unread[0] == '' else len(unread)
pynotify.init('Gmail Watcher')
# Display initial number of unread emails
        if count == 1:
n = pynotify.Notification('Successfully Authenticated!', '1 unread email', 'gmail')
else:
n = pynotify.Notification('Successfully Authenticated!', "%d unread emails" % count, 'gmail')
n.show()
# Main loop: watch for new emails
while True:
prev = unread
# Get a list of unread emails
unread, session = get_unread(session)
new = [email for email in unread if email not in prev]
# For each new email, show the subject and body
if new != [] and new != ['']:
try:
content = session.fetch(','.join(new), '(BODY.PEEK[1] BODY.PEEK[HEADER.FIELDS (SUBJECT)])')
except (socket.error, imaplib.IMAP4.error):
print 'Disconnected! Trying to reconnect'
session = login(user,passwd)
# Repeat this loop
unread = prev
continue
# Display a separate notification for each new email
for i in range(0,len(new)):
subj = content[1][3*i][1].strip()
msg = content[1][3*i+1][1][0:200]
n = pynotify.Notification(subj, msg, 'gmail')
n.show()
time.sleep(5)
except KeyboardInterrupt:
print 'Logging out'
session.logout()
|
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from threading import Thread
import time, sys
import numpy as np
from Config import Config
class ThreadPredictor(Thread):
def __init__(self, server, id):
super(ThreadPredictor, self).__init__()
self.setDaemon(True)
self.id = id
self.server = server
self.exit_flag = False
def run(self):
if Config.LOCAL_QUEUE:
data_queue = self.server.local_prediction_q
else:
data_queue = self.server.prediction_q
total_time = 0
collect_time = 0
predict_time = 0
step = 0
acc_batch_size = 0.0
ids = np.zeros(Config.PREDICTION_BATCH_SIZE, dtype=np.uint16)
episode_ids = np.zeros(Config.PREDICTION_BATCH_SIZE, dtype = np.uint16)
states = np.zeros(
(Config.PREDICTION_BATCH_SIZE, Config.IMAGE_HEIGHT, Config.IMAGE_WIDTH, Config.STACKED_FRAMES),
dtype=np.float32)
while not self.exit_flag:
step += 1
s0 = time.time()
size = 0
states[0], ids[0], episode_ids[0] = data_queue.get()
size = 1
while size < Config.PREDICTION_BATCH_SIZE and not data_queue.empty():
# while size < Config.PREDICTION_BATCH_SIZE:
states[size], ids[size], episode_ids[size] = data_queue.get()
size += 1
s1 = time.time()
batch = states[:size]
p, v = self.server.model.predict_p_and_v(batch)
s2 = time.time()
for i in range(size):
if ids[i] < len(self.server.agents):
self.server.agents[ids[i]].wait_q.put((episode_ids[i], p[i], v[i]))
s3 = time.time()
total_time += s3 - s0
collect_time += s1 - s0
predict_time += s2 - s1
acc_batch_size += size
if self.id == 0 and step % 1000 == 0:
print("[predictor %d] collect: %.1f %.1f%%, predict: %.1f %.1f%%, total: %.1f, batch: %d, local_q: %d, remote_q: %d" %
(step, collect_time, collect_time / total_time * 100,
predict_time, predict_time / total_time * 100,
total_time, acc_batch_size / step, self.server.local_prediction_q.qsize(), self.server.prediction_q.qsize()))
sys.stdout.flush()
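# --- Usage sketch (not part of the original file; the interface below is an
# assumption inferred from this class). ThreadPredictor expects a `server`
# exposing `prediction_q` (and optionally `local_prediction_q`) filled with
# (state, agent_id, episode_id) tuples, `agents` whose elements have a `wait_q`,
# and a `model` with `predict_p_and_v(batch)` returning (policies, values):
#
#   predictors = [ThreadPredictor(server, i) for i in range(num_predictors)]
#   for p in predictors:
#       p.start()
#   ...
#   for p in predictors:
#       p.exit_flag = True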
|
class Student:
def __init__(self,name,years):
self.__name = name
self.__years = years
def set_weight(self,weight):
self.__weight = weight
def get_weight(self):
return self.__weight
stu = Student('Eason',21)
stu.set_weight(125)
print(stu.get_weight())
|
# Read the standard input.
N = int(input())
A = list(map(int, input().split()))
# Keep a count of how many times each number appears, stored in a list.
# The element num_cnt_list[0] is not used.
num_cnt_list = []
for _ in range(N + 1):
    # Initialize each count to 0.
num_cnt_list.append(0)
for i in range(N):
num_cnt_list[A[i]] += 1
    # If any number appears two or more times, print `No`; otherwise print `Yes`.
if num_cnt_list[A[i]] > 1:
print('No')
exit()
print('Yes')
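# An equivalent check (a sketch, not part of the original solution): since the
# task only asks whether all values are distinct, comparing len(A) with the
# size of its set gives the same answer in O(N):
#   N = int(input())
#   A = list(map(int, input().split()))
#   print('Yes' if len(set(A)) == len(A) else 'No')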
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 18:42:52 2019
@author: SMA
"""
### load modules
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
from collections import defaultdict
import missingno
pd.set_option('display.float_format', lambda x: '%.5f' % x)
### load data
os.chdir("C:/Users/SMA/Desktop/DataScienceSpringboard/Capstone1/santander-product-recommendation")
df = pd.read_csv("train_ver2.csv",parse_dates=["fecha_dato","fecha_alta"])
df.columns =["Date","id","EmployeeBank","ResidenceCountry","Sex","Age","first_contract_date","new_customer",
"seniority_time","primary_cust_in_month","last_date_as_primary","cust_type","cust_relation_type",
"residence_index","foreigner_birth","spouse_with_employee","channel_used","deceased","address_type",
"province_code","province_name","activity_index","gross_income","customer_segment","Saving_Account",
"Guarantees","Current_Account","Derivada_Account","Payroll_Account","Junior_Account","MP_Account",
"P_Accouont","PP_Account","St_deposit","MT_deposit","LT_deposit","e_account","Funds","Mortage",
"Pensions","Loans","Taxes","Credit_Card","Securities","Home_Account","Payroll","Pension","Direct_Debit"]
df.info()
### reduce size of sparse matrix
colS = df.loc[:,"Saving_Account":].columns
for k in colS:
df[k] = pd.to_numeric(df[k],downcast="unsigned")
df.Date.value_counts().plot(kind="bar")
df.id.value_counts().size
df.groupby("Date").id.nunique().plot(kind="bar")
df["EmployeeBank"] = df["EmployeeBank"].astype("category")
def cleanString(x):
if isinstance(x,int):
return(x)
elif isinstance(x,str):
vor = x.rsplit(" ")
if "n" in x.lower():
return(np.nan)
else:
return([int(s) for s in vor if s!=""][0])
df.ResidenceCountry.value_counts()
df.groupby("Date").ResidenceCountry.nunique()
df["ResidenceCountry"] = df["ResidenceCountry"].astype("category")
df.Sex.value_counts()
df["Sex"].replace({"V":"W","H":"M"},inplace=True)
df.groupby("Date").Sex.value_counts(normalize=True).plot(kind="bar",stacked=True)
df["Sex"] = df["Sex"].astype("category")
df["Age"] = df["Age"].apply(cleanString)
df.Age.describe()
### replace the date feature with time differences
df.first_contract_date.value_counts()
df.first_contract_date.isna().sum()
delta_t = (df.Date - df.first_contract_date)/np.timedelta64(1,"D")
df["first_contract_date"]= delta_t
df[df["first_contract_date"]<0] = np.nan
df.new_customer.value_counts(normalize=True)
df.groupby("Date").new_customer.value_counts(normalize=True).plot(kind="bar",stacked=True)
df["new_customer"] = df.new_customer.astype("category")
df["seniority_time"]= df.seniority_time.apply(cleanString)
df["seniority_time"] = df.seniority_time.replace({-999999.0:np.nan})
df.primary_cust_in_month = df.primary_cust_in_month.astype("category")
df.last_date_as_primary
df.cust_type.value_counts()
df["cust_type"] = df.cust_type.replace({"P":5}).astype(float)
df["cust_type"] = df.cust_type.astype("category")
df.cust_relation_type.value_counts()
df["cust_relation_type"] = df["cust_relation_type"].replace({"N":np.nan})
df.groupby("Date").cust_relation_type.value_counts()
df["cust_relation_type"] = df["cust_relation_type"].astype("category")
df["residence_index"].value_counts()
df.residence_index.isna().sum()
df["residence_index"]= df.residence_index.replace({"S":"Y"}).astype("category")
df.foreigner_birth.value_counts()
df["foreigner_birth"] = df.foreigner_birth.replace({"S":"Y"}).astype("category")
df.spouse_with_employee.isna().sum()
df["spouse_with_employee"] = df.spouse_with_employee.replace({"S":"Y"}).astype("category")
df.channel_used.value_counts()
df.channel_used.unique()
df["channel_used"] = df.channel_used.astype("category")
df["deceased"].value_counts()
df["deceased"] = df.deceased.replace({"S":"Y"}).astype("category")
df.address_type.value_counts()
df["address_type"] = df.address_type.astype("category")
df["province_code"].value_counts()
df.province_name.value_counts()
df["province_name"] = df.province_name.astype("category")
df.activity_index.value_counts()
df["activity_index"] = df.activity_index.astype("category")
df["customer_segment"].value_counts()
df["customer_segment"].replace({"02 - PARTICULARES":2,"03 - UNIVERSITARIO":3,"01 - TOP":1},inplace=True)
df["customer_segment"] = df["customer_segment"].astype("category")
df.gross_income.describe()
### screen for outliers
out = defaultdict(list)
cols = df.columns[2:19]
for col in cols:
mcol = df[col]
if df.dtypes[col] =="float64":
dev = mcol.mean() + 3*mcol.std()
ixx_to = np.where(mcol>dev)[0].tolist()
out[col].extend([len(ixx_to)/len(mcol),ixx_to])
else:
dev = mcol.value_counts(normalize=True)
ixm = dev[dev< 0.015].index
ixx_to = []
for i in ixm:
ixx_to.extend(np.where(i==mcol)[0])
out[col].extend([len(ixx_to)/len(mcol),ixx_to])
nameS = list();values=list()
for k in out.keys():
if out[k][0]>0:
values.append(out[k][0])
nameS.append(k)
outliers = pd.Series(values,index=nameS)
### Age and gross income are checked manually
### Age: ages above 110 are implausible and young children are unlikely to be responsible for an account
age = df.Age.copy()
age[age >110]= np.nan
age[age <12]= np.nan
### it is very unlikely that anyone has a gross income above 2.5 million
gI = df.gross_income.copy()
gI[gI>2500000] = np.nan
df["Age"]=age
df["gross_income"]=gI
## now calculate target values, i.e. the sparse matrix of products that will be added the next month
### separate the sparse matrix and sort values according to client id and Date
### drop unique id entries: they have no following month in which to add products
sparse = df.loc[:,"Saving_Account":"Direct_Debit"].copy()
sparse["Date"] = df["Date"]
sparse["id"] = df["id"]
su = sparse[sparse.id.duplicated(False)].copy()
su.sort_values(["id","Date"],inplace=True)
su_date_id = su[["Date","id"]]
su.drop(columns= ["Date","id"],inplace=True)
### calculate product migration over the whole matrix; the last entry per customer id must be identified first,
### then a simple boolean comparison marks the products added in the following month
last_one = np.where(su_date_id["id"] != su_date_id["id"].shift(-1))[0]
a = su.to_numpy()
ref = su.to_numpy()
comp = su.shift(-1).to_numpy()
boolS = (ref!= comp) & (comp==1)
new_boolS = np.delete(boolS,last_one,axis=0)
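### illustration (added comment): for one customer with a current row
### ref = [1, 0, 0] and next-month row comp = [1, 1, 0], boolS marks only the
### second product (a 0 -> 1 transition), i.e. a product newly added in the
### following month; rows with no following month for the same id are dropped
### via `last_one`.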
#### get the indices where changes in the product basket occurred and transform them into a sparse target matrix
x,y = np.where(new_boolS ==True)
s1 = su_date_id["id"].to_numpy()
s1 =np.delete(s1,last_one,axis=0)
s2 = su_date_id["Date"].to_numpy()
s2 = np.delete(s2,last_one,axis=0)
data_sp = {"ids":s1[x],"Dates":s2[x],"values_":y.tolist()}
sp_target = pd.DataFrame(data_sp)
sp_target["ik"]= 1
spx = sp_target.pivot_table(index=["Dates","ids"],columns="values_",values="ik")
spx.fillna(0,inplace=True)
### concatenate sparse target matrix with frame
spx.sort_index(level=[0,1],inplace=True)
spx.columns = df.columns[24:] + "_target"
spx.index.names =["Date","id"]
df.set_index(["Date","id"],inplace=True)
df.sort_index(level=[0,1],inplace=True)
nd = pd.concat([df,spx],axis=1,ignore_index=False)
#### fill NA with zero; NAs come from customers with no change in their basket and from unique entries
repl = nd.loc[:,"Saving_Account_target":].copy()
repl.fillna(0,inplace=True)
nd.loc[:,"Saving_Account_target":] = repl.to_numpy()
### generate additional features: a) how many products a client has added compared to the month before
### b) how many products a client has dropped compared to the month before
first_one = np.where(su_date_id["id"]!= su_date_id["id"].shift(1))[0]
compp = su.shift(1).to_numpy()
boolSN = (ref==1) & (compp != ref)
boolSL = (ref==0) & (compp != ref)
boolSN = np.delete(boolSN,first_one,axis=0)
boolSL = np.delete(boolSL,first_one,axis=0)
s_1 = np.delete(su_date_id["id"].to_numpy(),first_one,axis=0)
s_2 = np.delete(su_date_id["Date"].to_numpy(),first_one,axis=0)
switching = {"id":s_1,"Date":s_2,"new_ones":boolSN.sum(axis=1),"leave_ones":boolSL.sum(axis=1)}
switching = pd.DataFrame(switching)
switching.set_index(["Date","id"],inplace=True)
switching.sort_index(level=[0,1],inplace=True)
#### merge and fill missing values with 0
nd = pd.concat([nd,switching],axis=1,ignore_index=False)
ndex = nd[["new_ones","leave_ones"]].copy()
ndex.fillna(0,inplace=True)
nd.loc[:,["new_ones","leave_ones"]] = ndex.to_numpy()
### clean up
for i in nd.loc[:,"Saving_Account_target":].columns:
nd[i] = pd.to_numeric(nd[i],downcast="unsigned")
df = nd.copy()
del(nd)
os.chdir("...")
df.to_pickle("df_targets.pkl")
#### NA visualization
df.loc[:,:"customer_segment"].isna().sum().plot(kind="bar")
### na frequency per rows
df.loc[:,:"customer_segment"].isna().sum(axis=1).hist()
### is there a systematic dependence between NA values?
missingno.heatmap(df.loc[:,:"customer_segment"])
#### first removals:
### spouse_with_employee, last_date_as_primary,
### deceased ones (True) // we cannot predict anything for deceased customers
### province_code // same info as province_name
### drop NA values in the basket as well
df.drop(["spouse_with_employee","last_date_as_primary","province_code"],axis=1,inplace=True)
mask = (df["Payroll"].isna()) | (df["Pension"].isna())
df = df[~mask].copy()
mask = df.deceased=="Y"
df = df[~mask]
df.drop("deceased",inplace=True,axis=1)
cols = ["Sex","Age","seniority_time","cust_type","cust_relation_type","channel_used","province_name",
"gross_income","customer_segment"]
for col in cols:
df[col] = replace_NA_Persons(df,col)
df.isna().sum()[:19]
df.isna().sum(axis=1).value_counts()
####
#### there are 11255 rows whose client features are entirely NA
### moreover, there are 89K rows with 5 NA readings
### it is fairly difficult to justify replacing these values with lookup tables
rem_mask = df.isna().sum(axis=1) >10
df = df[~rem_mask.to_numpy()]
df.isna().sum()[:19]
(df.cust_relation_type.isna() & df.cust_type.isna()).sum()
nacols = ["Sex","Age","seniority_time","cust_type","cust_relation_type","channel_used","province_name","gross_income","customer_segment",
"first_contract_date"]
for ff in nacols:
df[ff] = lookuptable(ff,df)
df.isna().sum()[:20]
## A couple of NA values have not been replaced, possibly due to ambiguities in the lookup frame;
## these rows will be removed (the deceased column was already dropped above since only one value remains)
df.dropna(axis=0,inplace=True)
###### plottings
##### 4 plots are generated per feature: distributions with and without correction, a plot in relation to the target, and the time dependence
### first: the number of targets and a binary Target column are calculated
num_targets = df.loc[:,"Saving_Account_target":"Direct_Debit_target"].copy().sum(axis=1).to_numpy()
df["Number_targets"] =num_targets
num_targets[num_targets > 0] = 1
df["Target"] = num_targets
### due to the high number of labels in channel_used, province_name and ResidenceCountry an overwrite is done,
### i.e. creation of a fake dataset for plotting
def overwrite(x,vals):
if x not in vals:
return ("O")
else:
return(x)
cor_ch = df.channel_used.copy()
cor_ch = cor_ch.apply(overwrite,vals=["KHE","KAT","KFC"]).astype("category")
cor_ch2 = df.province_name.copy()
cor_ch2 = cor_ch2.apply(overwrite,vals=df.province_name.value_counts().iloc[0:6].index.values).astype("category")
cor_ch3 = df.ResidenceCountry.copy()
cor_ch3 = cor_ch3.apply(overwrite,vals=df.ResidenceCountry.value_counts().iloc[0:6].index.values).astype("category")
fake_da = df.copy()
fake_da["province_name"] = cor_ch2.to_numpy()
fake_da["channel_used"] = cor_ch.to_numpy()
fake_da["ResidenceCountry"] = cor_ch3.to_numpy()
cols = df.columns[:17].tolist()
cols.extend(["new_ones","leave_ones"])
mask = (df["Target"] ==1).to_numpy()
#os.chdir("D:/Project_II/plots2")
for col in cols:
colS = fake_da[col].copy()
fig,axS = plt.subplots(2,2,figsize=(10,10),constrained_layout=True)
#fig.tight_layout()
if fake_da.dtypes[col].name =="category":
if col =="province_name":
colS = cor_ch2
if col =="channel_used":
colS = cor_ch
oS =colS.value_counts(normalize=True).reset_index()
sns.barplot(x="index",y=col,data=oS,ax=axS[0,0])
axS[0,0].set_title("Proportions",loc="left")
colS= colS.reset_index(level=1)
colS.drop_duplicates(subset="id",keep="first",inplace=True)
oS = colS[col].value_counts(normalize=True).reset_index()
sns.barplot(x="index",y=col,data=oS,ax=axS[0,1])
axS[0,1].set_title("Proportions with corrections",loc="left")
#
wT = fake_da[mask][col]
nT = fake_da[~mask][col]
bf = pd.concat([wT.value_counts(normalize="True"),nT.value_counts(normalize=True)],axis=1,ignore_index=False)
bf.columns= ["purchase","no_purchase"]
bf.T.plot(kind="bar",stacked=True,ax=axS[1,0])
axS[1,0].set_title("Proportoins separated by target",loc="left")
#
hh = fake_da[col].groupby(level=0).value_counts(normalize=True)
hh.unstack().plot(kind="bar",stacked=True,ax=axS[1,1])
axS[1,1].set_title("Proportions over time",loc="left")
else:
sns.distplot(colS,ax =axS[0,0])
axS[0,0].set_title(label="Histogram",loc="left")
colS = colS.reset_index(level=1)
colS.drop_duplicates("id",inplace=True)
sns.distplot(colS[col],ax=axS[0,1])
axS[0,1].set_title("Histogram with correction",loc="left")
co =fake_da[[col,"Target"]]
sns.boxplot(x="Target",y=col,data=co,ax=axS[1,0])
axS[1,0].set_title("boxplot separate by target",loc="left")
fake_da[col].groupby(level=0).mean().plot(ax=axS[1,1])
axS[1,1].set_title(label="Median over time",loc="left")
fig.suptitle("{} : desciptive graphs".format(col),fontsize=20)
#plt.savefig(col+".png")
### graphing the product basket
product_basket = mm.loc[:,"Saving_Account":"Direct_Debit"]
perc_av = product_basket.sum(axis=0)
perc_av = perc_av/perc_av.sum()
plt.figure(figsize=(10,10))
perc_av.plot(kind="bar")
plt.title("Percentage of products")
plt.figure(figsize=(10,10))
product_basket.sum(axis=1).hist()
plt.title("Frequency of Basket Size")
### Cramer's V statistic and heatmap
from itertools import permutations
import researchpy
cols = df.columns[:19]
perm = permutations(cols,2)
outP = {"indeX":list(),"colS":list(),"valueS":list()}
ii = 0
for hh in list(perm)[67:]:
col1 = mm[hh[0]]
col2 = mm[hh[1]]
if mm.dtypes[hh[0]].name =="float64":
col1 =pd.qcut(col1,5)
if mm.dtypes[hh[1]].name =="float64":
col2 = pd.qcut(col2,5)
a,b = researchpy.crosstab(col1,col2,test="chi-square")
outP["valueS"].append(b.iloc[2,1])
outP["indeX"].append(hh[0])
outP["colS"].append(hh[1])
ii+=1
print(ii)
rouP = pd.DataFrame(outP)
mmK = rouP.pivot_table(index="indeX",columns="colS",values="valueS")
mmK.fillna(1,inplace=True)
mmK.to_pickle("heatmap.pkl")
fig=plt.figure(figsize=(10,10))
sns.heatmap(mmK,cmap="BuPu")
plt.savefig("heatmap_1.png")
## chi-square of each feature against the target over the sample
ok = {"feature":list(),"p-value":list(),"Cramers":list()}
for hh in cols:
col1 = mm[hh]
col2 = mm["Target"]
if mm.dtypes[hh].name =="float64":
col1 =binarize(mm,hh,q=5)
a,b = researchpy.crosstab(col2,col1,test="chi-square")
ok["Cramers"].append(b.iloc[2,1])
ok["p-value"].append(b.iloc[1,1])
ok["feature"].append(hh)
|
class MockSocket(object):
def __init__(self):
self.connected = False
self.timeout = 0
self.ipaddr = None
self.ipport = None
self.buffer = []
self.request = []
def settimeout(self, timeout):
self.timeout = timeout
def connect(self, ipaddrAndipportTup):
self.connected = True
self.ipaddr = ipaddrAndipportTup[0]
self.ipport = ipaddrAndipportTup[1]
def close(self):
self.connected = False
def recv(self, size):
return self.buffer.pop(0)
def sendall(self, data):
self.request.append(data)
def _set_buffer(self, resp_list):
for bs in resp_list:
self.buffer.append(bs)
def clear_buffer(self):
self.buffer = []
def mock_socket(AF_INET, SOCK_STREAM):
return MockSocket()
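# Usage sketch (not part of the original module): MockSocket can stand in for a
# real socket so code under test "connects" without touching the network. The
# fetch_banner helper below is hypothetical, purely for illustration.
if __name__ == '__main__':
    import socket

    def fetch_banner(host, port, sock_factory=socket.socket):
        # Client code that would normally open a TCP connection.
        s = sock_factory(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(5)
        s.connect((host, port))
        s.sendall(b'HELLO\r\n')
        data = s.recv(1024)
        s.close()
        return data

    mock = MockSocket()
    mock._set_buffer([b'banner'])
    # The factory ignores its arguments and hands back the pre-loaded mock.
    assert fetch_banner('example.com', 80, sock_factory=lambda *a: mock) == b'banner'
    assert mock.request == [b'HELLO\r\n'] and not mock.connected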
|
from django.db import models
from product.models import Item
class Cart(models.Model):
code = models.CharField(max_length=10)
ship_value = models.DecimalField(max_digits=6, decimal_places=2)
total = models.DecimalField(max_digits=100, decimal_places=2, default=0.00)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
def __unicode__(self):
        return str(self.id)
class CartItem(models.Model):
item = models.ForeignKey(Item, on_delete=models.CASCADE)
cart = models.ForeignKey(Cart, on_delete=models.CASCADE)
def __unicode__(self):
return "Cart id: %s" % (self.id)
@staticmethod
def add_item_on_cart(item, cart):
CartItem.objects.create(item=item, cart=cart)
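# Usage sketch (assumes an existing Item instance named `item`; names are
# illustrative only):
#   cart = Cart.objects.create(code='ABC123', ship_value=9.90)
#   CartItem.add_item_on_cart(item, cart)
#   cart.cartitem_set.count()  # -> 1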
|
# Use try/except to catch the assertion error
res1 = {'code': 1, 'msg': 'login succeeded'}
res2 = {'code': 0, 'msg': 'login failed'}
try:
    assert res1 == res2
except AssertionError as e:
    print("Test case A1 failed!")
    raise e  # re-raise the exception after handling it
else:
    print("Test case A1 passed!")
|
# -*- coding: utf-8 -*-
"""API endpoints for CRUD operations."""
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils.functional import cached_property
from django.http import Http404
from rest_framework.decorators import list_route
from rest_framework.exceptions import NotAuthenticated
from rest_framework.mixins import UpdateModelMixin
from rest_framework.viewsets import ModelViewSet as BaseModelViewSet
from rest_framework.viewsets import ReadOnlyModelViewSet as BaseROModelViewSet
from drf_cached_instances.mixins import CachedViewMixin as BaseCacheViewMixin
from .cache import Cache
from .history import Changeset
from .mixins import PartialPutMixin
from .models import (
Browser, Feature, Maturity, Reference, Section, Specification, Support,
Version)
from .serializers import (
BrowserSerializer, FeatureSerializer, MaturitySerializer,
ReferenceSerializer, SectionSerializer, SpecificationSerializer,
SupportSerializer, VersionSerializer,
ChangesetSerializer, UserSerializer,
HistoricalBrowserSerializer, HistoricalFeatureSerializer,
HistoricalMaturitySerializer, HistoricalReferenceSerializer,
HistoricalSectionSerializer, HistoricalSpecificationSerializer,
HistoricalSupportSerializer, HistoricalVersionSerializer)
from .view_serializers import (
ViewFeatureListSerializer, ViewFeatureSerializer,
ViewFeatureRowChildrenSerializer)
#
# Base classes
#
class CachedViewMixin(BaseCacheViewMixin):
cache_class = Cache
def perform_create(self, serializer):
kwargs = {}
if getattr(self.request, 'delay_cache', False):
kwargs['_delay_cache'] = True
serializer.save(**kwargs)
def perform_update(self, serializer):
kwargs = {}
if getattr(self.request, 'delay_cache', False):
kwargs['_delay_cache'] = True
serializer.save(**kwargs)
def perform_destroy(self, instance):
if getattr(self.request, 'delay_cache', False):
instance._delay_cache = True
instance.delete()
class FieldsExtraMixin(object):
def initialize_request(self, request, *args, **kwargs):
irequest = super(FieldsExtraMixin, self).initialize_request(
request, *args, **kwargs)
self.request = irequest
irequest.parser_context['fields_extra'] = self.get_fields_extra()
return irequest
def get_renderer_context(self):
context = super(FieldsExtraMixin, self).get_renderer_context()
context['fields_extra'] = self.get_fields_extra()
return context
def get_fields_extra(self):
serializer_cls = self.get_serializer_class()
return serializer_cls.get_fields_extra()
class GroupRouterMixin(object):
"""Extra parameters used by the GroupedRouter."""
lookup_value_regex = r'\d+'
alt_lookup_field = None
alt_lookup_value_regex = None
class ModelViewSet(
PartialPutMixin, CachedViewMixin, FieldsExtraMixin, GroupRouterMixin,
BaseModelViewSet):
"""Base class for ViewSets supporting CRUD operations on models."""
class ReadOnlyModelViewSet(
FieldsExtraMixin, GroupRouterMixin, BaseROModelViewSet):
"""Base class for ViewSets supporting read operations on models."""
class ReadUpdateModelViewSet(
PartialPutMixin, CachedViewMixin, FieldsExtraMixin, UpdateModelMixin,
GroupRouterMixin, BaseROModelViewSet):
"""Base class for ViewSets supporting read and update operations."""
pass
#
# 'Regular' viewsets
#
class BrowserBaseViewSet(ModelViewSet):
queryset = Browser.objects.order_by('id')
serializer_class = BrowserSerializer
class FeatureBaseViewSet(ModelViewSet):
queryset = Feature.objects.order_by('id')
serializer_class = FeatureSerializer
class MaturityBaseViewSet(ModelViewSet):
queryset = Maturity.objects.order_by('id')
serializer_class = MaturitySerializer
class ReferenceBaseViewSet(ModelViewSet):
queryset = Reference.objects.order_by('id')
serializer_class = ReferenceSerializer
class SectionBaseViewSet(ModelViewSet):
queryset = Section.objects.order_by('id')
serializer_class = SectionSerializer
class SpecificationBaseViewSet(ModelViewSet):
queryset = Specification.objects.order_by('id')
serializer_class = SpecificationSerializer
class SupportBaseViewSet(ModelViewSet):
queryset = Support.objects.order_by('id')
serializer_class = SupportSerializer
class VersionBaseViewSet(ModelViewSet):
queryset = Version.objects.order_by('id')
serializer_class = VersionSerializer
#
# Change control viewsets
#
class ChangesetBaseViewSet(ModelViewSet):
queryset = Changeset.objects.order_by('id')
serializer_class = ChangesetSerializer
class UserBaseViewSet(CachedViewMixin, ReadOnlyModelViewSet):
queryset = User.objects.order_by('id')
serializer_class = UserSerializer
@list_route()
def me(self, request, **extra_kwargs):
"""Redirect to the authenticated user's resource."""
if request.user.is_anonymous():
raise NotAuthenticated()
else:
kwargs = {'pk': request.user.pk}
kwargs.update(extra_kwargs)
url = reverse('%s:user-detail' % self.namespace, kwargs=kwargs)
return redirect(url)
#
# Historical object viewsets
#
class HistoricalBrowserBaseViewSet(ReadOnlyModelViewSet):
queryset = Browser.history.model.objects.order_by('id')
serializer_class = HistoricalBrowserSerializer
class HistoricalFeatureBaseViewSet(ReadOnlyModelViewSet):
queryset = Feature.history.model.objects.order_by('id')
serializer_class = HistoricalFeatureSerializer
class HistoricalMaturityBaseViewSet(ReadOnlyModelViewSet):
queryset = Maturity.history.model.objects.order_by('id')
serializer_class = HistoricalMaturitySerializer
class HistoricalReferenceBaseViewSet(ReadOnlyModelViewSet):
queryset = Reference.history.model.objects.order_by('id')
serializer_class = HistoricalReferenceSerializer
class HistoricalSectionBaseViewSet(ReadOnlyModelViewSet):
queryset = Section.history.model.objects.order_by('id')
serializer_class = HistoricalSectionSerializer
class HistoricalSpecificationBaseViewSet(ReadOnlyModelViewSet):
queryset = Specification.history.model.objects.order_by('id')
serializer_class = HistoricalSpecificationSerializer
class HistoricalSupportBaseViewSet(ReadOnlyModelViewSet):
queryset = Support.history.model.objects.order_by('id')
serializer_class = HistoricalSupportSerializer
class HistoricalVersionBaseViewSet(ReadOnlyModelViewSet):
queryset = Version.history.model.objects.order_by('id')
serializer_class = HistoricalVersionSerializer
#
# Views
#
class ViewFeaturesBaseViewSet(ReadUpdateModelViewSet):
queryset = Feature.objects.order_by('id')
format_suffixes = ('api', 'json', 'html')
alt_lookup_field = 'slug'
alt_lookup_value_regex = r'[-a-zA-Z0-9_]+'
def get_serializer_class(self):
"""Return the serializer to use based on action and query."""
if self.action == 'list':
return ViewFeatureListSerializer
else:
if self.include_child_pages:
return ViewFeatureSerializer
else:
return ViewFeatureRowChildrenSerializer
def get_serializer_context(self):
"""Add include_child_pages to context."""
context = super(ViewFeaturesBaseViewSet, self).get_serializer_context()
context['include_child_pages'] = self.include_child_pages
return context
@cached_property
def include_child_pages(self):
"""Return True if the response should include paginated child pages.
        The default is to exclude paginated child pages, and only include row
children that detail the subject feature. This matches the
expectations of most writers - the table on:
/Web/JavaScript/Reference/Global_Objects/Object
should only include a "Basic Support" row, not the 30+ pages under
Object, such as:
/Web/JavaScript/Reference/Global_Objects/Object/toString
However, if they do want a table summarizing the entire page
        hierarchy, they can add a query parameter such as:
?child_pages=1
These (and variants with capital letters) are synonyms for the default
of excluding paginated child pages:
?child_pages=0
?child_pages=false
?child_pages=no
?child_pages=NO
and anything else will include them:
?child_pages
?child_pages=yes
?child_pages=Please%20let%20me%20have%20them
"""
if self.action != 'retrieve':
return True
child_pages = self.request.query_params.get('child_pages', '0')
falsy = ('0', 'false', 'no')
return bool(child_pages.lower() not in falsy)
def alternate_lookup(self, request, slug, **extra_kwargs):
"""Lookup features by slug."""
try:
pk = Feature.objects.only('pk').get(slug=slug).pk
except Feature.DoesNotExist:
raise Http404('No feature has the requested slug.')
kwargs = {'pk': pk}
kwargs.update(extra_kwargs)
url = reverse('%s:viewfeatures-detail' % self.namespace, kwargs=kwargs)
return redirect(url)
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class EigenToolfile(Package):
url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
depends_on('eigen')
def install(self, spec, prefix):
values = {}
values['VER'] = spec['eigen'].version
values['PFX'] = spec['eigen'].prefix
fname = 'eigen3.xml'
contents = str("""
<tool name="eigen" version="${VER}">
<client>
<environment name="EIGEN_BASE" default="${PFX}"/>
<environment name="INCLUDE" default="$$EIGEN_BASE/include/eigen3"/>
</client>
<flags CPPDEFINES="EIGEN_DONT_PARALLELIZE"/>
</tool>
""")
write_scram_toolfile(contents, values, fname, prefix)
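# Note (assumption, not verified here): write_scram_toolfile presumably fills in
# ${VER} and ${PFX} from `values` via template substitution, in which case the
# doubled `$$EIGEN_BASE` above escapes to a literal `$EIGEN_BASE` in the
# generated toolfile.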
|
import json
import logging
import time
from functools import wraps, partial
from flask import Flask, Response, request
from flask_cors import CORS
from requests import RequestException
from api.douban import DoubanAPI
from api.weibo import WeiboAPI
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(module)s - %(levelname)s - %(message)s')
app = Flask(__name__)
CORS(app, supports_credentials=True)
def _is_ok(value):
return (value is not None) and (value is not False)
def _error_resp(err):
return Response(json.dumps({"ok": False, "error": err, "result": None}), mimetype='application/json')
def _ok_resp(result):
return Response(json.dumps({"ok": _is_ok(result), "result": result}), mimetype='application/json')
PARAMS_ERROR_RESP = _error_resp("Invalid parameter.")
def api_wrapper(f=None, *, err_resp=None):
if f is None:
return partial(api_wrapper, err_resp=err_resp)
if err_resp is None:
err_resp = Response('{"ok": false, "error": "some internal error.", "result": []}', mimetype='application/json')
@wraps(f)
def decorated_function(*args, **kwargs):
try:
start = time.time()
resp = f(*args, **kwargs)
cost = (time.time() - start) * 1000
            logging.info('%s cost %.3f milliseconds', f.__name__, cost)
return resp
except Exception:
logging.exception('%s failed.', f.__name__)
return err_resp
return decorated_function
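# Usage sketch for the keyword-only `err_resp` argument (the endpoint below is
# hypothetical, purely for illustration):
#   @app.route('/custom')
#   @api_wrapper(err_resp=PARAMS_ERROR_RESP)
#   def custom_endpoint():
#       ...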
@app.route('/hz')
@api_wrapper
def is_alive():
"""
    Liveness check.
"""
return 'well'
@app.route("/weibo/user/info", methods=["GET"])
@api_wrapper
def weibo_user_info():
data = request.values
uid = data['uid']
raw = data.get("raw", False)
if uid:
try:
result = WeiboAPI.get_user_info(uid, raw)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
@app.route("/weibo/user/following", methods=["GET"])
@api_wrapper
def weibo_user_following():
data = request.values
uid = data['uid']
if uid:
try:
result = WeiboAPI.get_user_following(uid)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
@app.route('/weibo/user/blogs', methods=['GET'])
@api_wrapper
def weibo_user_blogs():
data = request.values
uid = data['uid']
raw = data.get("raw", False)
if uid:
try:
result = WeiboAPI.get_user_blogs(uid, raw)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
@app.route('/weibo/user/likes', methods=['GET'])
@api_wrapper
def weibo_user_likes():
data = request.values
uid = data['uid']
raw = data.get("raw", False)
if uid:
try:
result = WeiboAPI.get_user_likes(uid, raw)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
@app.route('/weibo/blog/detail', methods=['GET'])
@api_wrapper
def weibo_blog_detail():
data = request.values
weibo_id = data['id']
raw = data.get("raw", False)
if weibo_id:
try:
result = WeiboAPI.get_blog_detail(weibo_id, raw)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
@app.route('/weibo/blog/comments', methods=['GET'])
@api_wrapper
def weibo_blog_comments():
data = request.values
weibo_id = data['id']
raw = data.get("raw", False)
if weibo_id:
try:
result = WeiboAPI.get_blog_comments(weibo_id, raw)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
@app.route('/weibo/blog/likes', methods=['GET'])
@api_wrapper
def weibo_blog_likes():
data = request.values
weibo_id = data['id']
raw = data.get("raw", False)
if weibo_id:
try:
result = WeiboAPI.get_blog_likes(weibo_id, raw)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
@app.route('/weibo/blog/reposts', methods=['GET'])
@api_wrapper
def weibo_blog_reposts():
data = request.values
weibo_id = data['id']
raw = data.get("raw", False)
if weibo_id:
try:
result = WeiboAPI.get_blog_reposts(weibo_id, raw)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
@app.route('/douban/user/info', methods=['GET'])
@api_wrapper
def douban_user_info():
data = request.values
uid = data['uid']
if uid:
try:
result = DoubanAPI.get_user_info(uid)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
@app.route('/douban/user/life', methods=['GET'])
@api_wrapper
def douban_user_life():
data = request.values
uid = data['uid']
raw = data.get("raw", False)
if uid:
try:
result = DoubanAPI.get_life_stream(uid, raw=raw)
return _ok_resp(result)
except RequestException as e:
return _error_resp(e.args[0])
return PARAMS_ERROR_RESP
|
__author__ = 'Elisabetta Ronchieri'
import unittest
from tstorm.tests.atomic import atomics
from tstorm.tests import utilities
def ts_dcache_ping(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_dcache_ping',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_storm_ping(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_storm_ping',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_storm_ping_wo(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_storm_ping_wo',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_storm_gtp(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_storm_gtp',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_storm_gtp_wo(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_storm_gtp_wo',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_lcg_ls_unexist_file(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_lcg_ls_unexist_file',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_lcg_ls_unexist_dir(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_lcg_ls_unexist_dir',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_dcache_mkdir(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_dcache_mkdir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_dir',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_dcache_mkdir_exist(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_dcache_mkdir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_mkdir_exist_dir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_dir',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_dcache_rm_dir(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_dcache_mkdir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_mkdir_exist_dir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_dir',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_lcg_ls_dir(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(atomics.AtomicsTest('test_dcache_mkdir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_mkdir_exist_dir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_ls_dir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_dir',conf, ifn, dfn, bifn, uid, lfn))
return s
def ts_lcg_cp_out(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(utilities.UtilitiesTest('test_dd',conf, ifn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_ls_unexist_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_cp_out',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_ls_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_dir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(utilities.UtilitiesTest('test_rm_lf',conf, ifn, bifn, uid, lfn))
return s
def ts_lcg_ls_file(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(utilities.UtilitiesTest('test_dd',conf, ifn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_ls_unexist_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_cp_out',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_ls_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_dir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(utilities.UtilitiesTest('test_rm_lf',conf, ifn, bifn, uid, lfn))
return s
def ts_dcache_rm_file(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(utilities.UtilitiesTest('test_dd',conf, ifn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_ls_unexist_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_cp_out',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_ls_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_dir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(utilities.UtilitiesTest('test_rm_lf',conf, ifn, bifn, uid, lfn))
return s
def ts_dcache_rm_unexist_file(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
#s.addTest(utilities.UtilitiesTest('test_dd',conf, ifn, bifn, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_unexist_file',conf, ifn, dfn, bifn, uid, lfn))
#s.addTest(utilities.UtilitiesTest('test_rm_lf',conf, ifn, bifn, lfn))
return s
def ts_dcache_rm_unexist_dir(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
#s.addTest(utilities.UtilitiesTest('test_dd',conf, ifn, bifn, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_unexist_dir',conf, ifn, dfn, bifn, uid, lfn))
#s.addTest(utilities.UtilitiesTest('test_rm_lf',conf, ifn, bifn, lfn))
return s
def ts_lcg_cp_in(conf, ifn, dfn, bifn, uid, lfn):
s = unittest.TestSuite()
s.addTest(utilities.UtilitiesTest('test_dd',conf, ifn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_ls_unexist_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_cp_out',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_lcg_cp_in',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_file',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(atomics.AtomicsTest('test_dcache_rm_dir',conf, ifn, dfn, bifn, uid, lfn))
s.addTest(utilities.UtilitiesTest('test_rm_lf',conf, ifn, bifn, uid, lfn))
return s
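# Illustrative runner sketch (not part of the original module): each builder
# above returns a unittest.TestSuite, so given concrete values for the
# conf/ifn/dfn/bifn/uid/lfn placeholders it can be executed with the standard
# text runner, e.g.
#
#   import unittest
#   suite = ts_storm_ping(conf, ifn, dfn, bifn, uid, lfn)
#   unittest.TextTestRunner(verbosity=2).run(suite)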
|
from __future__ import division
from collections import defaultdict
from math import *
from random import sample
import csv
from operator import itemgetter
import matplotlib as mpl
import matplotlib.pyplot as plt
import random
class BaseAlgorithm():
#def __init__(self):
# self.update_data()
def update_data(self,a,b,c):
filename = "data3.csv"
"""
self.cities = []
#self.size = len(self.cities)
self.coords = []
with open(filename, 'r') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
self.coords.append([float(row[0]),float(row[1])])
self.cities = range(0,len(self.coords))
"""
self.cities=[]
self.coords=[[-a/2,-a/2],[-a/2,a/2],[a/2,a/2],[a/2,-a/2]]
for i in range(1,int(ceil(a/c))):
self.coords.append([0-a/2,c*i-a/2])
self.coords.append([c*i-a/2,0-a/2])
self.coords.append([c*i-a/2,a/2])
self.coords.append([a/2,c*i-a/2])
self.denum = len(self.coords)
print self.coords,self.denum
#random.shuffle(self.coords)
for i in range(0,int((a/b))):
for j in range(0,int((a/b))):
self.coords.append([-a/2+b/2+(b*j),a/2-b/2-(b*i)])
#print self.coords,len(self.coords)
self.cities=range(0,len(self.coords))
#random.shuffle(self.coords[7:])
print self.coords,len(self.coords)
self.size = len(self.cities)
self.distances = self.compute_distances()
    # NOTE: despite its name, this returns the planar Euclidean distance
    # between the two coordinate pairs (the coords here are not lat/lon).
    def haversine_distance(self, cityA, cityB):
coord1 = self.coords[cityA]
coord2= self.coords[cityB]
a = (coord1[0]-coord2[0])**2+(coord1[1]-coord2[1])**2
c = sqrt(a)
return c
def compute_distances(self):
self.distances = defaultdict(dict)
for cityA in self.cities:
for cityB in self.cities:
if cityB not in self.distances[cityA]:
distance = self.haversine_distance(cityA, cityB)
self.distances[cityA][cityB] = distance
self.distances[cityB][cityA] = distance
return self.distances
# add node k between node i and node j
def add(self, i, j, k):
return self.distances[i][k] + self.distances[k][j] - self.distances[i][j]
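    # Worked example (illustrative only): with distances d[i][k] = 3,
    # d[k][j] = 4 and d[i][j] = 5, inserting k between i and j lengthens the
    # tour by 3 + 4 - 5 = 2, which is exactly the value add(i, j, k) returns.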
class TourConstructionHeuristics(BaseAlgorithm):
    # find the neighbor k closest to the tour, i.e. such that
    # c_ik + c_kj - c_ij is minimized with (i, j) an edge of the tour;
    # add k between the edge (i, j), resulting in a tour with subtour (i, k, j).
    # Used for the cheapest insertion algorithm.
def __init__(self,dist,grid,comm,Q):
self.update_data(dist,grid,comm)
self.Q=Q
self.comm=comm
self.sqr=dist
self.grid=grid
##print self.cities
def closest_neighbor(self, tour, node, in_tour=False, farthest=False):
neighbors = self.distances[node]
##print node
##print neighbors.items()
##print tour
current_dist = [(c, d) for c, d in neighbors.items()
if (c in tour)]
return sorted(current_dist, key=itemgetter(1))[-farthest]
def add_closest_to_tour(self, tours,tourslength,unass,depot_dist):
#print tours,tourslength,depot_dist
best_ratio,best_dist, new_tour = float('inf'),float('inf'), None
##print vehlengths
##print veh
##print tourslength
##print tours
t=1
tour_index = None
city1 = None
c=0.55
d=0.45
for city in unass:
##print city
for tour in tours:
a=abs(self.coords[tour[0]][0]-self.coords[city][0])
b=abs(self.coords[tour[0]][1]-self.coords[city][1])
x=abs(self.coords[tour[-1]][0]-self.coords[city][0])
y=abs(self.coords[tour[-1]][1]-self.coords[city][1])
##print tour
for index in range(len(tour) - 1):
dist = self.add(tour[index], tour[index + 1], city)
#print unass
##print dist
##print vehlengths[tours.index(tour)]
if len(tour)!=2:
ratio = c*dist+d*depot_dist[tours.index(tour)]
else:
ratio = c*(dist+tourslength[tours.index(tour)])+d*depot_dist[tours.index(tour)]
if ratio < best_ratio and (tourslength[tours.index(tour)]+dist)<self.Q and a<=self.comm and b<=self.comm and x<=self.comm and y<=self.comm:
best_dist = dist
best_ratio = ratio
new_tour = tour[:index + 1] + [city] + tour[index + 1:]
tour_index = tours.index(tour)
city1 = city
##print best_dist
##print city1
return best_dist, new_tour, tour_index,city1
##print unass
def samedis(self,tours,tourslength,veh,vehlength):
c=0.5
d=0.5
t=1
for tour1 in tours:
if len(tour1)!=2:
i=1
while (i<len(tour1)-1):
##print(len(tour1))
##print("!@#!")
for j in range(0,t):
if tour1 in veh[j]:
o=j
p=veh[j].index(tour1)
b=vehlength[j]
##print veh ##print b,"s"
best_dist = self.add(tour1[i-1], tour1[i+1], tour1[i])
h=best_dist
best_ratio = c*best_dist + d*(b)
#print best_dist,best_ratio,"sss"
#print "ddd"
##print("!!!!")
for tour in tours:
for j in range(0,t):
if tour in veh[j]:
a=vehlength[j]
w=j
s=veh[j].index(tour)
##print("******")
if tour != tour1 and len(tour)!=2 :
##print a
for index in range(len(tour) - 1):
##print tour
##print index
dist = self.add(tour[index], tour[index + 1], tour1[i])
#print dist
##print dist
ratio = c*dist + d*(a+dist)
if ratio < best_ratio and (tourslength[tours.index(tour)]+dist)<self.Q:
best_dist = dist
w1=w
s1=s
new_tour = tour[:index + 1] + [tour1[i]] + tour[index + 1:]
tour_index = tours.index(tour)
best_ratio = c*best_dist + d*(a+dist)
#print best_dist,"fff"
#print new_tour,best_ratio
#print c*best_dist + d*(b)
if best_ratio < c*h + d*(b):
##print veh
tours[tour_index]=new_tour
#print tours[tour_index]
tourslength[tour_index]+= best_dist
veh[w1][s1]=new_tour
vehlength[w1]+=best_dist
##print veh
tourslength[tours.index(tour1)]-=self.add(tour1[i-1], tour1[i + 1], tour1[i])
##print o,i
##print vehlength[o]
vehlength[o]-=self.add(tour1[i-1], tour1[i + 1], tour1[i])
#print veh
#print tour1
veh[o][p].remove(tour1[i])
#print tour1
#print veh
if (len(tour1)==2):
vehlength[o]-=self.distances[tour1[0]][tour1[1]]
#tour1.remove(tour1[i])
#print veh
else:
i+=1
##print self.distances ##print(i)
return tours, tourslength,veh,vehlength
def perp_dist(self,a,b):
dist = abs(self.coords[a][0]-self.coords[b][0])+abs(self.coords[a][1]-self.coords[b][1])
return dist
def depot_array(self,denum,unass):
depot=[]
x=self.comm
for i in range(0,len(unass)):
depot.append([])
for i in range(0,denum):
for f in range(0,len(unass)):
k=0
for j in unass[f]:
if abs(self.coords[i][0]-self.coords[j][0])<=x and abs(self.coords[i][1]-self.coords[j][1])<=x:
k+=1
print(j,i,"ss",k)
if k==len(unass[f]):
depot[f].append(i)
return depot
def depot_path(self,depot1,depot2,depot3,depot4):
depot_path=[]
depot_insec=[]
depot=[depot1,depot2,depot3,depot4]
for i in range(0,len(depot)-1):
b=float('inf')
for j in range(0,len(depot[i])):
for k in range(0,len(depot[i+1])):
a=self.perp_dist(depot[i][j],depot[i+1][k])
if a==0:
g=depot[i][j]
elif a<b:
b=a
f=depot[i][j]
if i==0:
depot_path.append(g)
depot_insec.append(g)
if i!=0:
depot_path.append(f)
depot_path.append(g)
depot_insec.append(g)
return depot_path,depot_insec
def closest_city(self,unass,city):
best_dist=float('inf')
closest=None
for x in unass:
dist=self.distances[city][x]
#print dist,x
if dist<best_dist:
best_dist=dist
#print "s"
closest=x
return closest,best_dist
def intersection(self,a,b):
for i in a:
for j in b:
if i==j:
x=i
return x
def cheapest_insertion(self):
denum=self.denum
comm=self.comm
dist=self.sqr
grid=self.grid
#c=1
g=None
##print a
#tour = [0,a]
c=[]
d=[]
depot_dist=[]
gr=4
depot_insec=[]
total_len=0
prevcdep=None
same_y=[]
l=len(self.coords)
tourslength=[]
tours=[]
unass2=[]
#tourslength.append(0)
qqq=[]
ppp= self.cities[denum:l]
for i in range(0,int (ceil(dist/comm)**2)):
unass2.append([])
for i in range(0,int(ceil(dist/comm))):
if i%2==0:
for j in range(0,int(ceil(dist/comm))):
qqq.append([i,j])
else:
for j in range(0,int (ceil(dist/comm))):
qqq.append([i,int (ceil(dist/comm))-j-1])
print qqq
for k in self.cities[denum:l]:
a=self.coords[k][0]
b=self.coords[k][1]
#print k,a,b
for i in range(0,int(ceil(dist/comm))):
for j in range(0,int(ceil(dist/comm))):
print k,a,b,comm*i-(dist/2), comm*(i+1)-(dist/2)
if b>=comm*i-(dist/2) and b<=comm*(i+1)-(dist/2):
if a>=comm*j-(dist/2) and a<=comm*(j+1)-(dist/2):
if k in ppp:
#print "s"
unass2[qqq.index([i,j])].append(k)
ppp.remove(k)
print unass2
print ppp
#print unass4,"4"
#unass2=[unass1,unass22,unass3,unass4]
depot=self.depot_array(denum,unass2)
depot1=depot[0]
depot2=depot[1]
depot3=depot[2]
depot4=depot[3]
#depot=[depot1,depot2,depot3,depot4]
print depot
depot_insec.append(self.intersection(depot1,depot2))
depot_insec.append(self.intersection(depot2,depot3))
depot_insec.append(self.intersection(depot3,depot4))
depot_insec.append(self.intersection(depot4,depot1))
dep=[]
for i in depot:
dep.append([])
for j in i:
for k in i:
dep[-1].append([j,k])
tours.append([j,k])
c.append(self.distances[k][depot_insec[depot.index(i)]])
d.append(self.distances[k][depot_insec[depot.index(i)-1]])
tourslength.append(self.distances[j][k])
unass=self.cities[denum:l]
tours2=tours[:]
for i in range(0,len(tours)):
depot_dist.append(min(c[i],d[i]))
f=0
while len(unass)!=0:
length, tour, index, city1 = self.add_closest_to_tour(tours,tourslength,unass,depot_dist)
print tours,depot_dist,index,len(unass)
if f==0 :
for i in depot:
if tour[0] in i and tour[-1] in i:
x=depot_insec[depot.index(i)]
y=depot_insec[depot.index(i)-1]
break
if tour[-1] not in depot_insec:
if c[index]<d[index]:
depot_dist=c[:]
#print c
p=x
g=True
else:
depot_dist=d[:]
p=y
g=False
tours[index]=tour
tourslength[index]+=length
unass.remove(city1)
a=tourslength
o=depot_dist
##print tours
i=0
while i<len(tours):
tour2=tours[i]
##print tour2
i+=1
if (len(tour2)==2):
del depot_dist[tours.index(tour2)]
#print c
tourslength.remove(a[tours.index(tour2)])
tours.remove(tour2)
i-=1
#print c
#print d
if c[index]<d[index]:
p=x
tours.append([tour[-1],p])
depot_dist.append(c[tours2.index(tours[-1])])
tourslength.append(self.distances[tour[-1]][p])
tours.append([tour[-1],tour[-1]])
tourslength.append(self.distances[tour[-1]][tour[-1]])
depot_dist.append(c[tours2.index(tours[-1])])
else:
p=y
tours.append([tour[-1],p])
tourslength.append(self.distances[tour[-1]][p])
depot_dist.append(d[tours2.index(tours[-1])])
tours.append([tour[-1],tour[-1]])
tourslength.append(self.distances[tour[-1]][tour[-1]])
depot_dist.append(d[tours2.index(tours[-1])])
else:
for i in unass2:
if tour[1] in i:
unindex=unass2.index(i)
#print unindex,unass2[unindex]
if unindex==depot_insec.index(tour[-1]):
depot_dist=c[:]
g = True
p=x
else:
depot_dist=d[:]
g = False
p=y
tours[index]=tour
tourslength[index]+=length
unass.remove(city1)
a=tourslength
o=depot_dist
##print tours
i=0
while i<len(tours):
tour2=tours[i]
##print tour2
i+=1
if (len(tour2)==2):
del depot_dist[tours.index(tour2)]
#print c
tourslength.remove(a[tours.index(tour2)])
tours.remove(tour2)
i-=1
"""
if g is True:
p=x
tours.append([tour[-1],p])
depot_dist.append(c[tours2.index(tours[-1])])
tourslength.append(self.distances[tour[-1]][p])
tours.append([tour[-1],tour[-1]])
tourslength.append(self.distances[tour[-1]][tour[-1]])
depot_dist.append(c[tours2.index(tours[-1])])
else:
p=y
tours.append([tour[-1],p])
tourslength.append(self.distances[tour[-1]][p])
depot_dist.append(d[tours2.index(tours[-1])])
tours.append([tour[-1],tour[-1]])
tourslength.append(self.distances[tour[-1]][tour[-1]])
depot_dist.append(d[tours2.index(tours[-1])])
#print depot_dist
"""
if g is True:
for i in range(0,len(depot_dist)):
depot_dist[i]=self.distances[tours[i][-1]][depot_insec[unindex+1]]
j=tour[-1]
for k in depot[unindex+1]:
tours.append([j,k])
tourslength.append(self.distances[j][k])
depot_dist.append(self.distances[k][depot_insec[unindex+1]])
if g is False:
for i in range(0,len(depot_dist)):
depot_dist[i]=self.distances[tours[i][-1]][depot_insec[unindex-2]]
j=tour[-1]
for k in depot[unindex-1]:
tours.append([j,k])
tourslength.append(self.distances[j][k])
depot_dist.append(self.distances[k][depot_insec[unindex-2]])
#print depot_dist,tours,g
if index!=None and f>0 :
#print tours
tours[index]=tour
tourslength[index]+=length
if len(tour)==3:
if tours[-1]!=tour:
tours.insert(index+1,[tour[0],tour[-1]])
depot_dist.insert(index+1,depot_dist[index])
tourslength.insert(index+1,self.distances[tour[0]][tour[-1]])
c=tour[0]
d=tour[-1]
print c,d
for j in depot:
if c in j and d in j :
u=depot.index(j)
break
print depot[u]
for i in depot[u]:
if [c,i] in tours and i!=c:
del tourslength[tours.index([c,i])]
del depot_dist[tours.index([c,i])]
#print vehlengths
tours.remove([c,i])
unass.remove(city1)
jj=0
if index==None:
jj+=1
if jj==2:
break
a=tourslength
o=depot_dist
##print tours
i=0
while i<len(tours):
tour2=tours[i]
##print tour2
i+=1
if (len(tour2)==2):
del depot_dist[tours.index(tour2)]
tourslength.remove(a[tours.index(tour2)])
tours.remove(tour2)
i-=1
if g is True:
for i in tours:
if i[0]!=i[-1]:
b=i[-1]
for h in depot_insec:
if b==h:
q=depot_insec.index(h)
break
f=q+1
if q==3:
f=0
for k in depot[f]:
tours.append([b,k])
tourslength.append(self.distances[k][b])
depot_dist.append(self.distances[k][depot_insec[f]])
if g is False:
for i in tours:
if i[0]!=i[-1]:
b=i[-1]
for h in depot_insec:
if b==h:
q=depot_insec.index(h)
break
for k in depot[q]:
tours.append([b,k])
tourslength.append(self.distances[k][b])
depot_dist.append(self.distances[k][depot_insec[q-1]])
f+=2
for i in tours:
if len(i)==2:
del tourslength[tours.index(i)]
tours.remove(i)
total=0
totaltour=0
for i in range(0,len(tourslength)):
totaltour+=tourslength[i]
total=totaltour
for i in range(0,len(tours)-1):
if tours[i+1][0]==tours[i][-1]:
total+=self.perp_dist(tours[i][0],tours[i][-1])
xx=[]
yy=[]
for i in self.cities:
xx.append(self.coords[i][0]+dist/2)
yy.append(self.coords[i][1]+dist/2)
return tours,tourslength,totaltour,total,depot_dist,xx,yy
def plot (self,tours):
b = ['r','b','g','c']
j=0
for tour in tours:
if len(tour)!=2:
for i in range (0,len(tour)-1):
if i != len(self.coords)-1:
plt.plot([self.coords[tour[i]][0], self.coords[tour[i+1]][0]],[self.coords[tour[i]][1],self.coords[tour[i+1]][1]], b[j])
#plt.show(block=False)
if j<3:
j+=1
else:
j=0
x=[]
y=[]
c=['bs','rs','gs','cs','ms']
for i in range(0,len(self.coords)):
x.append(self.coords[i][0])
y.append(self.coords[i][1])
plt.plot(self.coords[i][0],self.coords[i][1],'rs')
#plt.show()
#r= BaseAlgorithm()
xxx= 'QGC WPL 110\r\n'
import utm
#from math import *
import numpy as np
file = open("mission.txt","r")
a=file.readlines()
file.close()
lat=[]
lon=[]
#xx+=a[2]
if a[1][1]=='\t':
print "s"
j=0
print a
index=None
for k in a:
if a.index(k)!=0:
j=0
lat1='s'
lon1='s'
for i in range (0,len(k)):
if k[i]=='\t':
j+=1
print j
if j==8:
index=i
break
for i in range(index+1,len(k)):
if k[i]=='\t':
index=i
break
lat1+=k[i]
for i in range(index+1,len(k)):
if k[i]=='\t':
#index=i
break
lon1+=k[i]
print k
print index
lat.append(float(lat1[1:]))
lon.append(float(lon1[1:]))
print lat
print lon
e2,n2,aa,bb = utm.from_latlon(lat[1],lon[1])
e1,n1,_,_ = utm.from_latlon(lat[0],lon[0])
angle= atan2(n2-n1,e2-e1)
dist=np.hypot(e2-e1,n2-n1)
def takeoff(lat,lon):
return '\t0\t3\t22\t0\t5\t0\t0\t' + str(lat) +'\t'+ str(lon) + '\t20\t1\r\n'
def waypoint(lat,lon):
return '\t0\t3\t16\t0\t5\t0\t0\t' + str(lat) +'\t'+ str(lon) + '\t20\t1\r\n'
def land(lat,lon):
return '\t0\t3\t21\t0\t5\t0\t0\t' + str(lat) +'\t'+ str(lon) + '\t20\t1\r\n'
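# (Clarifying note: these helpers emit rows in the QGC WPL 110 mission format;
#  commands 22, 16 and 21 are the MAVLink NAV_TAKEOFF, NAV_WAYPOINT and
#  NAV_LAND ids, frame 3 is the relative-altitude frame, and each line uses a
#  fixed altitude of 20 m.)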
def utm1(x,y,e1,n1,angle):
x1=x*cos(angle)-y*sin(angle)
y1=x*sin(angle)+y*cos(angle)
x1+=e1
y1+=n1
#print x1,y1
lat,lon= utm.to_latlon(x1,y1,aa,bb)
return lat,lon
print dist
x= TourConstructionHeuristics(dist=dist,grid=20,comm=dist/2,Q=300)
tours,lengths,totaltour,total,depot_dist,xx,yy=x.cheapest_insertion()
print tours
print lengths
print totaltour
print total
print depot_dist
x.plot(tours)
plt.show()
totaltour=[totaltour]
veh=[tours]
tours1, lengths1,veh1,vehlength1 = x.samedis(tours,lengths,veh,totaltour)
print tours1
print lengths1
print veh1
print vehlength1
b = ['r','b','g','c','m','y']
for i in range(0,1):
x.plot(veh1[i])
plt.show()
tours1, lengths1,veh1,vehlength1 = x.samedis(tours1,lengths1,veh1,vehlength1)
print tours1
print lengths1
print veh1
print vehlength1
b = ['r','b','g','c','m','y']
for i in range(0,1):
x.plot(veh1[i])
plt.show()
k=0
for i in tours1:
for j in range(0,len(i)):
if j==0:
lat,lon=utm1(xx[i[j]],yy[i[j]],e1,n1,angle)
xxx+=str(k)+takeoff(lat,lon)
k+=1
elif j== len(i)-1:
lat,lon=utm1(xx[i[j]],yy[i[j]],e1,n1,angle)
xxx+=str(k)+land(lat,lon)
k+=1
else:
lat,lon=utm1(xx[i[j]],yy[i[j]],e1,n1,angle)
xxx+=str(k)+waypoint(lat,lon)
k+=1
file=open("mission1.txt","w")
file.write(xxx)
file.close()
"""
tours,lengths,total=x.greedy2()
print tours
print lengths
print total
total=[total]
veh=[tours]
x.plot(tours)
plt.show()
tours1, lengths1,veh1,vehlength1 = x.samedis(tours,lengths,veh,total)
print tours1
print lengths1
print veh1
print vehlength1
b = ['r','b','g','c','m','y']
for i in range(0,1):
x.plot(veh1[i])
plt.show()
"""
"""mission planer
qground control
dronecode
for i in range(0,denum):
for j in range(0,denum):
if ([i,j]in tours or [j,i]in tours):
continue
else:
tours.append([i,j])
tourslength.append(self.distances[i][j])
vehlengths.append(0)"""
|
import re as regex
from PyInquirer import style_from_dict, Token, ValidationError, Validator
CUSTOM_STYLE = style_from_dict({
Token.Separator: '#6C6C6C',
Token.QuestionMark: '#FF9D00 bold',
Token.Selected: '#5F819D',
Token.Pointer: '#FF9D00 bold',
Token.Answer: '#5F819D bold',
})
STATES_LIST = ['AL', 'AK', 'AS', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FL',
'GA', 'GU', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA',
'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', 'NY', 'NC', 'ND',
'MP', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT',
'VI', 'VA', 'WA', 'WV', 'WI', 'WY']
class PhoneNumberValidator(Validator):
def validate(self, document):
ok = regex.match(
r'^([01]{1})?[-.\s]?\(?(\d{3})\)?[-.\s]?(\d{3})[-.\s]?(\d{4})\s?((?:#|ext\.?\s?|x\.?\s?){1}(?:\d+)?)?$', document.text)
if not ok:
raise ValidationError(
message='Please enter a valid phone number',
cursor_position=len(document.text)) # Move cursor to end
class EmailValidator(Validator):
def validate(self, document):
ok = "@" in document.text
if not ok:
raise ValidationError(
message='Please enter a valid email',
cursor_position=len(document.text)) # Move cursor to end
class NotBlankValidator(Validator):
def validate(self, document):
ok = len(document.text) > 0
if not ok:
raise ValidationError(
message='Please do not enter a null field.',
cursor_position=len(document.text)) # Move cursor to end
class ZipCodeValidator(Validator):
def validate(self, document):
ok = regex.match(r'^[\d]{5}(?:-[\d]{4})?$', document.text)
if not ok:
raise ValidationError(
                message='Please enter a valid zip code.',
cursor_position=len(document.text)) # Move cursor to end
class StateValidator(Validator):
def validate(self, document):
ok = document.text in STATES_LIST
if not ok:
raise ValidationError(
                message='Please enter a valid two-letter state code.',
cursor_position=len(document.text)) # Move cursor to end
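# Illustrative examples (informal, derived from the regexes above): the phone
# validator accepts inputs such as "555-867-5309" or "(555) 867-5309 x12", and
# the zip validator accepts "12345" or "12345-6789"; anything else raises
# ValidationError with the message defined in the corresponding class.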
LOAD_SETTINGS = [
{
'type': 'confirm',
'name': 'useSaved',
'message': 'Do you have settings saved here?'
},
{
'type': 'input',
'name': 'loadFile',
'message': 'Enter a filename to load settings from:',
'when': lambda answers: answers['useSaved'] == True,
'validate': NotBlankValidator
},
{
'type': 'input',
'name': 'quantity',
'message': 'How many times do you want to run the script?',
'validate': NotBlankValidator
}
]
QUESTIONS = [
{
'type': 'list',
'name': 'cardProvider',
'message': 'Export Privacy, Stripe or your own cards?',
'choices': ['Privacy', 'Stripe', 'Own'],
'filter': lambda val: val.lower()
},
{
'type': 'input',
'name': 'ownImport',
'message': 'What is the filename of the csv import?',
'when': lambda answers: answers['cardProvider'] == 'own'
},
{
'type': 'input',
'name': 'privacyEmail',
'message': 'What is your Privacy email?',
'validate': EmailValidator,
'when': lambda answers: answers['cardProvider'] == 'privacy'
},
{
'type': 'password',
'name': 'privacyPassword',
'message': 'What is your Privacy password?',
'when': lambda answers: answers['cardProvider'] == 'privacy',
'validate': NotBlankValidator
},
{
'type': 'list',
'name': 'privacyUnused',
'message': 'Would you like to get only unused Privacy cards, or all?',
'choices': ['Unused', 'All'],
'filter': lambda val: val.lower(),
'when': lambda answers: answers['cardProvider'] == 'privacy'
},
{
'type': 'input',
'name': 'stripeToken',
'message': 'What is your Stripe secret token?',
'when': lambda answers: answers['cardProvider'] == 'stripe',
'validate': NotBlankValidator
},
{
'type': 'list',
'name': 'stripeNewCards',
'message': 'Would you like to create new Stripe cards, or just get the existing ones?',
'choices': ['New', 'Preexisting'],
'filter': lambda val: val.lower(),
'when': lambda answers: answers['cardProvider'] == 'stripe'
},
{
'type': 'input',
'name': 'stripeCardholderPreexisting',
'message': 'Enter a cardholder id for the cards you want to get. (or leave blank for all)',
'when': lambda answers: answers.get('stripeNewCards', "") == 'preexisting'
},
{
'type': 'input',
'name': 'stripeValue',
'message': 'How many Stripe cards would you like to create?',
'when': lambda answers: answers.get('stripeNewCards', "") == 'new',
'validate': NotBlankValidator
},
{
'type': 'input',
'name': 'stripeCardholder',
'message': 'Enter a cardholder id which you would like to create new cards under. (or leave blank for new cardholders)',
'when': lambda answers: answers.get('stripeNewCards', "") == 'new'
},
{
'type': 'list',
'name': 'export',
'message': 'Which style would you like the cards exported in?',
'choices': ['EzMode', 'Wop', 'Hayha'],
'filter': lambda val: val.lower()
},
{
'type': 'input',
'name': 'wopWebhook',
'message': 'Enter a webhook to use for tasks (or leave blank)',
'when': lambda answers: answers.get('export', "") == 'wop'
},
{
'type': 'input',
'name': 'wopProxy',
'message': 'Enter a proxy to use for tasks (or leave blank)',
'when': lambda answers: answers.get('export', "") == 'wop'
},
{
'type': 'input',
'name': 'hayhaGroupID',
'message': 'Enter a group ID from the Hayha Export',
'when': lambda answers: answers.get('export', "") == 'hayha'
},
{
'type': 'input',
'name': 'email',
'message': 'Enter an email (or catchall with @catchall.com) to use for creating profiles.',
'validate': EmailValidator
},
{
'type': 'input',
'name': 'emailPrefix',
'message': 'Enter an email prefix for your catchall, otherwise leave blank and one will be generated randomly.',
'when': lambda answers: answers['email'][0] == "@"
},
{
'type': 'confirm',
'name': 'addressJig',
'message': 'Do you need address line 1 jigging?',
},
{
'type': 'confirm',
'name': 'addressJig2',
'message': 'Do you need address line 2 jigging?',
},
{
'type': 'confirm',
'name': 'phoneJig',
'message': 'Do you need phone jigging?'
},
{
'type': 'input',
'name': 'firstNames',
'message': 'Enter a comma separated list of first names to use for jigging. (alternatively, enter 1 name for no jigging.)',
'validate': NotBlankValidator
},
{
'type': 'input',
'name': 'lastNames',
'message': 'Enter a comma separated list of last names to use for jigging. (alternatively, enter 1 name for no jigging.)',
'validate': NotBlankValidator
},
{
'type': 'input',
'name': 'addressLine1',
'message': 'Enter line 1 of an address to use for creating profiles.',
'validate': NotBlankValidator
},
{
'type': 'input',
'name': 'addressLine2',
'message': 'Enter line 2 of an address to use for creating profiles. (Leave blank for none)',
},
{
'type': 'input',
'name': 'city',
'message': 'Enter a city to use for profiles.',
'validate': NotBlankValidator
},
{
'type': 'input',
'name': 'state',
'message': 'Enter a state to use for profiles (two letter code)',
'validate': StateValidator
},
{
'type': 'input',
'name': 'zipCode',
'message': 'Enter a zip code to use for profiles.',
'validate': ZipCodeValidator
},
{
'type': 'input',
'name': 'phoneNumber',
'message': 'Enter a phone number to use for profiles.',
'validate': PhoneNumberValidator
}
]
CHECK_SETTINGS = [
{
'type': 'confirm',
'name': 'settingsOkay',
'message': 'Do these settings look good?',
}
]
SAVE_SETTINGS = [
{
'type': 'confirm',
'name': 'saveSettings',
'message': 'Would you like to save these settings for future use?',
},
{
'type': 'input',
'name': 'saveFile',
'message': 'Enter a filename to save settings to:',
'when': lambda answers: answers['saveSettings'] == True
}
]
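# Minimal usage sketch (assumed, not shown in this module): the question lists
# above are meant to be passed to PyInquirer's prompt() together with
# CUSTOM_STYLE, e.g.
#
#   from PyInquirer import prompt
#   answers = prompt(QUESTIONS, style=CUSTOM_STYLE)
#   print(answers)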
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
random.seed(42)
import numpy as np
np.random.seed(42)
import matplotlib.pyplot as plt
import cvxpy as cp
from sklearn_lvq import GmlvqModel, LgmlvqModel
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from plotting import plot_distmat, plot_distmats_boxplot
def encode_labels(y_test, y_pred):
enc = OneHotEncoder(categories="auto")
enc.fit(y_test)
return enc.transform(y_test).toarray(), enc.transform(y_pred).toarray()
def compute_change_in_distmat_gmlvq(model, x_orig, y_target):
# Compute change of metric
Omega = np.dot(model.omega_.T, model.omega_)
o_new = None
o_new_dist = float("inf")
n_dim = x_orig.shape[0]
X = cp.Variable((n_dim, n_dim), PSD=True)
# Search for suitable prototypes
target_prototypes = []
other_prototypes = []
for p, l in zip(model.w_, model.c_w_):
if l == y_target:
target_prototypes.append(p)
else:
other_prototypes.append(p)
# For each target prototype: Construct a mathematical program
for p_i in target_prototypes:
# Build constraints
constraints = []
for p_j in other_prototypes:
constraints.append(-2. * x_orig.T @ X @ (p_i - p_j) + p_i.T @ X @ p_i - p_j.T @ X @ p_j <= 0)
# Build costfunction
f = cp.Minimize(cp.norm1(X - Omega))
#f = cp.Minimize(cp.norm2(X - Omega))
prob = cp.Problem(f, constraints)
# Solve problem
prob.solve(solver=cp.SCS, verbose=False)
Omega_new = X.value
# Validate distance matrix
y_pred = None
min_dist = float("inf")
for p, l in zip(model.w_, model.c_w_):
d = np.dot((p - x_orig), np.dot(Omega_new, (p - x_orig)))
if d < min_dist:
min_dist = d
y_pred = l
if y_pred == y_target:
d = np.linalg.norm(Omega - Omega_new, 1)
if d < o_new_dist:
o_new_dist = d
o_new = Omega_new
if o_new is not None:
return [o_new]
else:
#print("Did not find a counterfactual metric")
return []
def compute_change_in_distmat_lgmlvq(model, x_orig, y_target):
# Compute change of metric
epsilon = 1e-5
Omegas = []
n_dim = x_orig.shape[0]
X = cp.Variable((n_dim, n_dim), PSD=True)
# Search for suitable prototypes
model_omegas = [np.dot(o.T, o) for o in model.omegas_]
target_prototypes = []
target_omegas = []
other_prototypes = []
other_omegas = []
for p, l, o in zip(model.w_, model.c_w_, model_omegas):
if l == y_target:
target_prototypes.append(p)
target_omegas.append(o)
else:
other_prototypes.append(p)
other_omegas.append(o)
# For each target prototype: Construct a mathematical program
for p_i, o_i in zip(target_prototypes, target_omegas):
# Build constraints
constraints = []
for p_j, o_j in zip(other_prototypes, other_omegas):
constraints.append(x_orig @ X @ x_orig - 2. * x_orig.T @ X @ p_i + p_i.T @ X @ p_i - x_orig.T @ o_j @ x_orig + 2. * x_orig.T @ o_j @ p_j - p_j.T @ o_j @ p_j + epsilon <= 0)
# Build costfunction
f = cp.Minimize(cp.norm1(X - o_i))
#f = cp.Minimize(cp.norm2(X - o_i))
prob = cp.Problem(f, constraints)
# Solve problem
prob.solve(solver=cp.MOSEK, verbose=False)
Omega_new = X.value
# Validate distance matrix
y_pred = None
min_dist = float("inf")
for p, l, o in zip(model.w_, model.c_w_, model_omegas):
d = np.dot((p - x_orig), np.dot(o, (p - x_orig)))
if np.array_equal(p, p_i):
d = np.dot((p - x_orig), np.dot(Omega_new, (p - x_orig)))
if d < min_dist:
min_dist = d
y_pred = l
print("Prediction under new distance matrix: {0}".format(y_pred))
if y_pred == y_target:
Omegas.append((Omega_new, o_i))
# Plot matrices
for o_new, o in Omegas:
print(o)
print(o_new)
print("L1-distance to original matrix: {0}".format(np.linalg.norm(o - o_new, 1)))
plot_distmat(o - o_new)
if __name__ == "__main__":
###############################################################################################################################################
# TOY-DATASET: An unimportant feature becomes important!
# Create data set
n_samples = 50
X = np.hstack((np.random.uniform(0, 5, n_samples).reshape(-1, 1), np.array([0 for _ in range(n_samples)]).reshape(-1, 1)))
y = [0 for _ in range(n_samples)]
X = np.vstack((X, np.hstack((np.random.uniform(7, 12, n_samples).reshape(-1, 1), np.array([5 for _ in range(n_samples)]).reshape(-1, 1)))))
y += [1 for _ in range(n_samples)]
y = np.array(y)
from plotting import plot_classification_dataset, export_as_png
plot_classification_dataset(X, y, show=False)
export_as_png("toydata.png")
# Fit model
model = GmlvqModel(prototypes_per_class=1, random_state=4242)
model.fit(X, y)
# Evaluate
y_pred = model.predict(X)
y_, y_pred_ = encode_labels(y.reshape(-1, 1), y_pred.reshape(-1, 1))
print("ROC-AUC: {0}".format(roc_auc_score(y_, y_pred_, average="weighted")))
print("Omega\n{0}".format(np.dot(model.omega_.T, model.omega_)))
print()
# Compute counterfactual metric
x_orig = np.array([10.0, 0])
y_target = 1
Omega_cf = compute_change_in_distmat_gmlvq(model, x_orig, y_target)[0]
print("Omega_cf\n{0}".format(Omega_cf))
plot_distmat(np.abs(np.dot(model.omega_.T, model.omega_)), show=False)
export_as_png("omega.png")
plot_distmat(np.abs(Omega_cf), show=False)
export_as_png("omegacf.png")
#plot_distmats_boxplot(np.abs(np.dot(model.omega_.T, model.omega_)))
#plot_distmats_boxplot(np.abs(Omega_cf))
##################################################################################################################################################
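    # Hedged sketch (not executed here): the local-metric variant is analogous,
    # e.g. model = LgmlvqModel(prototypes_per_class=1, random_state=4242).fit(X, y)
    # followed by compute_change_in_distmat_lgmlvq(model, x_orig, y_target=1);
    # note that that code path solves the SDP with cp.MOSEK, so the MOSEK solver
    # must be installed for cvxpy.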
|
site_configuration = {
'systems': [
{
'name': 'dt',
'descr': 'Dardel test nodes',
            'hostnames': [r'dt\d.pdc.kth.se'],
'modules_system': 'lmod',
'partitions': [
{
'name': 'cpu',
'descr': 'CPU on dt0',
'scheduler': 'squeue',
'launcher': 'srun',
'access': ['-w dt0', '--exclusive'],
'environs': ['PrgEnv-cray'],
'processor': {
'num_cpus': 256,
'num_cpus_per_core': 2,
'num_cpus_per_socket': 128,
'num_sockets': 2
}
},
{
'name': 'gpu',
'descr': 'GPUs on dt2',
'scheduler': 'squeue',
'launcher': 'srun',
'access': ['-w dt2', '--exclusive'],
'environs': ['PrgEnv-cray'],
'modules': ['rocm/rocm', 'craype-accel-amd-gfx908'],
'devices': [
{
'type': 'gpu',
'arch': 'amd',
'num_devices': 2
}
],
'extras': {
'select_device': './rocm_select_gpu_device'
},
'variables': [
['MPICH_GPU_SUPPORT_ENABLED', '1']
]
}
],
'prefix': '/dt1/${USER}/reframe'
},
{
'name': 'github-actions',
'descr': 'Github Actions runner',
'hostnames': ['*'],
'partitions': [
{
'name': 'cpu',
'scheduler': 'local',
'launcher': 'mpirun',
'max_jobs': 1,
'processor': {
'num_cpus': 2,
'num_cpus_per_core': 1,
'num_cpus_per_socket': 2,
'num_sockets': 1
},
'environs': ['default']
}
]
},
],
'environments': [
{
'name': 'PrgEnv-cray',
'modules': ['PrgEnv-cray'],
'cc': 'cc',
'cxx': 'CC',
'ftn': 'ftn',
'target_systems': ['dt']
},
{
'name': 'default',
'cc': 'mpicc',
'cxx': 'mpicxx',
'ftn': 'mpif90'
},
],
'logging': [
{
'handlers': [
{
'type': 'stream',
'name': 'stdout',
'level': 'info',
'format': '%(message)s'
},
{
'type': 'file',
'level': 'debug',
'format': '[%(asctime)s] %(levelname)s: %(check_info)s: %(message)s',
'append': False
}
],
'handlers_perflog': [
{
'type': 'filelog',
'prefix': '%(check_system)s/%(check_partition)s',
'level': 'info',
'format': (
'%(check_job_completion_time)s|reframe %(version)s|'
'%(check_info)s|jobid=%(check_jobid)s|'
'%(check_perf_var)s=%(check_perf_value)s|'
'ref=%(check_perf_ref)s '
'(l=%(check_perf_lower_thres)s, '
'u=%(check_perf_upper_thres)s)|'
'%(check_perf_unit)s'
),
'append': True
}
]
}
],
'general': [
{
'timestamp_dirs': '%FT%T',
}
],
}
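# Usage note (hedged): ReFrame loads this dictionary when pointed at the file
# explicitly, e.g. `reframe -C <path-to-this-file> -c <checkpath> -r`; the
# paths here are placeholders, not part of this configuration.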
|
import random
name_list = input("Names separated by comma.\n").split(", ")
print(random.choice(name_list))
|
import ete3
import sys
import os
sys.path.insert(0, os.path.join("tools", "msa_edition"))
import read_msa
random_alignment_format = "fasta"
def create_random_tree(msa_file, output_file):
global random_alignment_format
msa = read_msa.read_msa(msa_file)
tree = ete3.Tree()
tree.populate(len(msa.get_entries()))
leaves = tree.get_leaves()
index = 0
for entry in msa.get_entries():
leaves[index].add_feature("name", entry[0])
index += 1
tree.write(outfile=output_file, format=1)
def create_random_tree_from_species(species):
tree = ete3.Tree()
tree.populate(len(species))
leaves = tree.get_leaves()
index = 0
for s in species:
leaves[index].name = s
index += 1
return tree
def create_random_tree_from_tree(tree_file, output_file = None):
tree = ete3.Tree(tree_file)
species = tree.get_leaf_names()
random_tree = create_random_tree_from_species(species)
if (None != output_file):
random_tree.write(outfile = output_file)
return random_tree
def create_random_tree_taxa_number(taxa_number):
species = []
for i in range(0, taxa_number):
species.append("hCoV-19/Australia/NSW14/2020|EPI_ISL_" + str(i) + "|2020-03-03")
#species.append("HCOV-19_CHINA_2020012" + str(i) + "_EPI_ISL_" + str(i) )
return create_random_tree_from_species(species)
if (__name__ == "__main__"):
if (len(sys.argv) != 3):
print("Syntax: python create_random_tree.py msa output_random_tree")
sys.exit(1)
msa_file = sys.argv[1]
output_file = sys.argv[2]
create_random_tree(msa_file, output_file)
|
"""
FENICS script for solving the thermal Biot system using mixed elements with monolithic solver
Author: Mats K. Brun
"""
#from fenics import *
#from dolfin.cpp.mesh import *
#from dolfin.cpp.io import *
from dolfin import *
import numpy as np
import sympy as sy
# <editor-fold desc="Parameters">
dim = 2 # spatial dimension
eps = 10.0E-6 # error tolerance
T_final = 1. # final time
number_of_steps = 10 # number of steps
dt = T_final / number_of_steps # time step
alpha = 1. # Biot's coeff
beta = 1. # thermal coeff
c0 = 1. # Biot modulus
a0 = 1. # thermal coeff2
b0 = 1. # coupling coeff
K = 1. # permeability
Th = 1. # th conductivity
lambd = 1. # Lame param 1
mu = 1. # Lame param 2
cr = alpha**2/(mu + lambd)
ar = beta**2/(mu + lambd)
br = alpha*beta/(mu + lambd)
# </editor-fold>
# <editor-fold desc="Exact solutions and RHS">
# Define variables used by sympy
x0, x1, ti = sy.symbols('x[0], x[1], t')
# Exact solutions
pres = ti*x0*(1. - x0)*x1*(1. - x1) # pressure
temp = ti*x0*(1. - x0)*x1*(1. - x1) # temperature
disp1 = ti*x0*(1. - x0)*x1*(1. - x1) # displacement comp 1
disp2 = ti*x0*(1. - x0)*x1*(1. - x1) # displacement comp 2
pres_x = sy.diff(pres, x0)
pres_y = sy.diff(pres, x1)
df1 = - K*pres_x # Darcy flux comp 1
df2 = - K*pres_y # Darcy flux comp 2
temp_x = sy.diff(temp, x0)
temp_y = sy.diff(temp, x1)
hf1 = - Th*temp_x # heat flux comp 1
hf2 = - Th*temp_y # heat flux comp 2
# partial derivatives
disp1_x = sy.diff(disp1, x0)
disp1_y = sy.diff(disp1, x1)
disp1_xx = sy.diff(disp1, x0, x0)
disp1_yy = sy.diff(disp1, x1, x1)
disp1_xy = sy.diff(disp1, x0, x1)
disp2_x = sy.diff(disp2, x0)
disp2_y = sy.diff(disp2, x1)
disp2_xx = sy.diff(disp2, x0, x0)
disp2_yy = sy.diff(disp2, x1, x1)
disp2_xy = sy.diff(disp2, x0, x1)
df1_x = sy.diff(df1, x0)
df2_y = sy.diff(df2, x1)
hf1_x = sy.diff(hf1, x0)
hf2_y = sy.diff(hf2, x1)
# stress
sig11 = 2.*mu*disp1_x + lambd*(disp1_x + disp2_y) - alpha*pres - beta*temp
sig12 = mu*(disp1_y + disp2_x)
sig21 = mu*(disp2_x + disp1_y)
sig22 = 2.*mu*disp2_y + lambd*(disp1_x + disp2_y) - alpha*pres - beta*temp
# right hand sides
F1 = - 2.*mu*(disp1_xx + .5*(disp2_xy + disp1_yy)) \
- lambd*(disp1_xx + disp2_xy) + alpha*pres_x + beta*temp_x
F2 = - 2.*mu*(disp2_yy + .5*(disp1_xy + disp2_xx)) \
- lambd*(disp1_xy + disp2_yy) + alpha*pres_y + beta*temp_y
h = sy.diff(c0*pres - b0*temp + alpha*(disp1_x + disp2_y), ti) + df1_x + df2_y
f = sy.diff(a0*temp - b0*pres + beta*(disp1_x + disp2_y), ti) + hf1_x + hf2_y
# simplify expressions
pres = sy.simplify(pres)
temp = sy.simplify(temp)
disp1 = sy.simplify(disp1)
disp2 = sy.simplify(disp2)
df1 = sy.simplify(df1)
df2 = sy.simplify(df2)
hf1 = sy.simplify(hf1)
hf2 = sy.simplify(hf2)
sig11 = sy.simplify(sig11)
sig12 = sy.simplify(sig12)
sig21 = sy.simplify(sig21)
sig22 = sy.simplify(sig22)
F1 = sy.simplify(F1)
F2 = sy.simplify(F2)
h = sy.simplify(h)
f = sy.simplify(f)
# convert expressions to C++ syntax
pres_cc = sy.printing.ccode(pres)
temp_cc = sy.printing.ccode(temp)
disp1_cc = sy.printing.ccode(disp1)
disp2_cc = sy.printing.ccode(disp2)
df1_cc = sy.printing.ccode(df1)
df2_cc = sy.printing.ccode(df2)
hf1_cc = sy.printing.ccode(hf1)
hf2_cc = sy.printing.ccode(hf2)
sig11_cc = sy.printing.ccode(sig11)
sig12_cc = sy.printing.ccode(sig12)
sig21_cc = sy.printing.ccode(sig21)
sig22_cc = sy.printing.ccode(sig22)
F1_cc = sy.printing.ccode(F1)
F2_cc = sy.printing.ccode(F2)
h_cc = sy.printing.ccode(h)
f_cc = sy.printing.ccode(f)
# print the exact solutions and RHS
print """ Exact solutions as ccode:
p = \t %r
T = \t %r
u1 = \t %r
u2 = \t %r
w1 = \t %r
w2 = \t %r
r1 = \t %r
r2 = \t %r
F1 = \t %r
F2 = \t %r
h = \t %r
f = \t %r
""" % (pres_cc, temp_cc, disp1_cc, disp2_cc, df1_cc, df2_cc, hf1_cc, hf2_cc,
F1_cc, F2_cc, h_cc, f_cc)
# </editor-fold>
# <editor-fold desc="Mesh and function spaces">
# generate unit square mesh
mesh = UnitSquareMesh(4, 4)
mesh_size = mesh.hmax()
# finite element space
DGxDG = VectorElement('DG', mesh.ufl_cell(), 0) # displacement
DG = FiniteElement('DG', mesh.ufl_cell(), 0) # pres and temp
BDMxBDM = VectorElement('BDM', mesh.ufl_cell(), 1) # stress
RT = FiniteElement('RT', mesh.ufl_cell(), 1) # fluxes
# mixed space
X = FunctionSpace(mesh, MixedElement(DGxDG, BDMxBDM, DG, RT, DG, RT, DG))
# exact solutions and RHS
p_ex = Expression(pres_cc, degree=5, t=0)
T_ex = Expression(temp_cc, degree=5, t=0)
u_ex = Expression((disp1_cc, disp2_cc), degree=5, t=0)
w_ex = Expression((df1_cc, df2_cc), degree=5, t=0)
r_ex = Expression((hf1_cc, hf2_cc), degree=5, t=0)
sig_ex = Expression(((sig11_cc, sig12_cc),
(sig21_cc, sig22_cc)), degree=5, t=0)
F = Expression((F1_cc, F2_cc), degree=1, t=0)
h = Expression(h_cc, degree=1, t=0)
f = Expression(f_cc, degree=1, t=0)
#F = Constant((0.0, 1.0))
#h = Constant(1.0)
#f = Constant(1.0)
# </editor-fold>
# <editor-fold desc="BC and IC">
# Define boundary points
def boundary(x, on_boundary):
return on_boundary
# Dirichlet BC for displacement and pressure
#bc_su = DirichletBC(X.sub(0), Constant((0.0, 0.0)), boundary)
#bc_wp = DirichletBC(X.sub(2), Constant(0.0), boundary)
#bc_rt = DirichletBC(X.sub(4), Constant(0.0), boundary)
#bcs = [bc_su, bc_wp, bc_rt]
# trial and test functions
v, tau, q, z, S, y, e = TestFunctions(X)
u, sig, p, w, T, r, x = TrialFunctions(X)
# initial conditions (homogenous) and previous time-step
mf_n = Function(X)
u_n, sig_n, p_n, w_n, T_n, r_n, x_n = split(mf_n)
# </editor-fold>
# <editor-fold desc="Variational form">
# ID matrix
Id = Identity(dim)
# define symmetric gradient
def epsilon(u):
return sym(nabla_grad(u))
#return 0.5*(nabla_grad(u) + nabla_grad(u).T)
# define compliance tensor
def compl(s):
return 1/(2*mu)*(s - lambd/2/(mu + lambd)*tr(s)*Id)
# skew matrix determined by scalar
def skw(r):
return as_matrix([[0, r], [-r, 0]])
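# (Clarifying note: compl() is the compliance map of 2D isotropic elasticity,
#  i.e. the inverse of Hooke's law sigma = 2*mu*eps + lambd*tr(eps)*Id, which
#  gives eps = (1/(2*mu)) * (sigma - lambd/(2*(mu + lambd)) * tr(sigma) * Id);
#  skw(r) represents the rotation Lagrange multiplier as a 2x2 skew matrix.)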
# Constants for use in var form
#dt = Constant(dt)
#alpha = Constant(alpha)
#beta = Constant(beta)
#c0 = Constant(c0)
#a0 = Constant(a0)
#cr = Constant(cr)
#ar = Constant(ar)
#b0 = Constant(b0)
#br = Constant(br)
#K = Constant(K)
#Th = Constant(Th)
#lambd = Constant(lambd)
#mu = Constant(mu)
A1 = inner(compl(sig),tau)*dx + dot(u, div(tau))*dx \
+ cr/(2*alpha)*p*tr(tau)*dx + ar/(2*beta)*T*tr(tau)*dx
A2 = - dot(div(sig), v)*dx
A3 = (c0 + cr)*p*q*dx - (b0 - br)*T*q*dx + cr/(2*alpha)*tr(sig)*q*dx + dt*div(w)*q*dx
A4 = 1/K*dot(w,z)*dx - p*div(z)*dx
A5 = (a0 + ar)*T*S*dx - (b0 - br)*p*S*dx + ar/(2*beta)*tr(sig)*S*dx + dt*div(r)*S*dx
A6 = 1/Th*dot(r,y)*dx - T*div(y)*dx
A7 = inner(skw(x), tau)*dx + inner(sig, skw(e))*dx
L1 = dot(F,v)*dx + h*q*dx + f*S*dx
L2 = (c0 + cr)*p_n*q*dx - (b0 - br)*T_n*q*dx + cr/(2*alpha)*tr(sig_n)*q*dx \
+ (a0 + ar)*T_n*S*dx - (b0 - br)*p_n*S*dx + ar/(2*beta)*tr(sig_n)*S*dx
A = dt*A1 + dt*A2 + A3 + dt*A4 + A5 + dt*A6 + dt*A7
L = dt*L1 + L2
mf = Function(X)
# </editor-fold>
# Create VTK file for saving solution, .pvd or .xdmf
vtkfile_u = File('ThBiot_monolithic/displacement.pvd')
vtkfile_s = File('ThBiot_monolithic/stress.pvd')
vtkfile_p = File('ThBiot_monolithic/pressure.pvd')
vtkfile_w = File('ThBiot_monolithic/darcyFlux.pvd')
vtkfile_T = File('ThBiot_monolithic/temp.pvd')
vtkfile_r = File('ThBiot_monolithic/energyFlux.pvd')
# initialize time
t = 0.0
# start computation
for i in range(number_of_steps):
# update time
t += float(dt)
p_ex.t = t
T_ex.t = t
u_ex.t = t
w_ex.t = t
r_ex.t = t
sig_ex.t = t
F.t = t
h.t = t
f.t = t
# solve linear system
#solve(F == 0, wpu, bcs)
#solve(A == L, mf, bcs)
solve(A == L, mf)
_u_, _sig_, _p_, _w_, _T_, _r_, _x_ = mf.split()
# update previous time step
mf_n.assign(mf)
# Compute errors in L2 norm
p_L2 = errornorm(p_ex, _p_, 'L2')
T_L2 = errornorm(T_ex, _T_, 'L2')
u_L2 = errornorm(u_ex, _u_, 'L2')
w_L2 = errornorm(w_ex, _w_, 'L2')
r_L2 = errornorm(r_ex, _r_, 'L2')
sig_L2 = errornorm(sig_ex, _sig_, 'L2')
# print errors
print """ \n Errors in L2 norm: \n
\t Pressure: \t \t %r \n
\t Temperature: \t \t %r \n
\t Displacement: \t \t %r \n
\t Darcy flux: \t \t %r \n
\t Heat flux: \t \t %r \n
\t Stress: \t \t %r
""" % (p_L2, T_L2, u_L2, w_L2, r_L2, sig_L2)
# save to file
vtkfile_u << _u_, t
vtkfile_s << _sig_, t
vtkfile_p << _p_, t
vtkfile_w << _w_, t
vtkfile_T << _T_, t
vtkfile_r << _r_, t
# print mesh size
print """ \n Mesh size: \n
\t %r \n
Time step: \n
\t %r
""" % (mesh_size, dt)
|
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_protect
import numpy as np
np.set_printoptions(suppress=True)
import pandas as pd
from sklearn.cluster import KMeans
import json
@csrf_protect
def predicted(request):
if request.method == 'POST':
n_clusters = request.GET['n_clusters']
iterate = request.GET['iterate']
tolerance = request.GET['tolerance']
random_state = request.GET['random_state']
trainFile = request.FILES['trainFile']
testFile = request.FILES['testFile']
trainData = pd.read_csv(trainFile).replace(np.nan, 0)
kmeans = KMeans(n_clusters=int(n_clusters), init='random', max_iter=int(iterate), tol=float(tolerance),
random_state=int(random_state)).fit(trainData)
testdata = pd.read_csv(testFile).replace(np.nan, 0)
testdata.index.name = 'id'
predicted = kmeans.predict(testdata)
predicteddf = pd.DataFrame(data=predicted.flatten())
predicteddf.index.name = 'id'
final = pd.merge(predicteddf, testdata, on='id')
column = list(final.columns)
# final.to_csv("E:\\ShivSirProject\\Files\\Predicted.csv")
df = pd.DataFrame(final)
result = df.to_json(orient="values")
parsed = json.loads(result)
json_data = []
json_data.append(column)
data = []
for row in parsed:
data.append(dict(zip(column, row)))
json_data.append(data)
return JsonResponse(json_data, safe=False)
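# Illustrative client sketch (assumptions: the view is routed at /predicted/
# and CSRF is handled or exempted for the test client; neither is shown here):
#
#   import requests
#   url = ("http://localhost:8000/predicted/"
#          "?n_clusters=3&iterate=300&tolerance=1e-4&random_state=0")
#   files = {"trainFile": open("train.csv", "rb"),
#            "testFile": open("test.csv", "rb")}
#   print(requests.post(url, files=files).json())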
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Hanzhiyun'
import functools
def log(*arg):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not arg:
print('Begin call %s():' % func.__name__)
func(*args, **kw)
print('End call %s()!' % func.__name__)
else:
print('%s %s():' % (arg[0], func.__name__))
return func(*args, **kw)
return wrapper
return decorator
@log()
def hello():
print('Hello,World!')
hello()
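# A minimal illustration (added here, not in the original) of the parameterised
# branch: passing a prefix string to log() makes the wrapper print
# "<prefix> <name>():" before delegating to the wrapped function.
@log('Execute')
def goodbye():
    print('Goodbye,World!')
goodbye()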
|
# -*- coding: utf-8 -*-
"""This filters the bookcrossing data
http://www.informatik.uni-freiburg.de/~cziegler/BX/
prerequisite: the data is in the folder /BX-SQL-Dump
excludes all implicit ratings
excludes all users having < 20 ratings
writes the dump to .dat files (same format as in the MovieLens files)
To use this, import the Bookcrossing data set into MySQL first
mysql -u root -p books < BX-Users.sql
mysql -u root -p books < BX-Books.sql
mysql -u root -p books < BX-Book-Ratings.sql
Before doing this, add 'SET autocommit=0;' to the beginning and 'COMMIT;' to the
end of the files to massively speed up importing.
Should this not work, try importing the CSV dump as follows:
LOAD DATA INFILE "C:\\PhD\\...\\BX-SQL-Dump\\test.csv" IGNORE
INTO TABLE `bx-books`
COLUMNS TERMINATED BY ';'
OPTIONALLY ENCLOSED BY '"'
LINES TERMINATED BY '\r\n'
IGNORE 1 LINES
(`ISBN`, `Book-Title`, `Book-Author`, `Year-of-Publication`, @dummy, @dummy, @dummy, @dummy);
"""
from __future__ import division, unicode_literals, print_function
import collections
from BeautifulSoup import BeautifulSoup
import urllib
import HTMLParser
import io
import MySQLdb as mdb
import operator
import os
import pandas as pd
import pdb
from pijnu.library.error import IncompleteParse
import random
import re
import sqlite3
import time
import urllib2
import xml.etree.cElementTree as etree
from mediawiki_parser.preprocessor import make_parser as make_prep_parser
from mediawiki_parser.text import make_parser
def prepare_data():
print('getting ratings...')
user, isbn, rating = [], [], []
with open('BX-SQL-Dump/BX-Book-Ratings.csv') as infile:
for line in infile:
try:
line = line.encode('utf-8', 'ignore')
except UnicodeDecodeError:
# skip ratings with garbage bytes in the ISBN
continue
parts = line.strip().split(';')
parts = [p.strip('"') for p in parts]
if parts[-1] == '0': # TODO test run
continue
user.append(parts[0])
isbn.append(parts[1])
rating.append(parts[2])
# rating.append(1) # TODO test run
df_ratings = pd.DataFrame(data=zip(user, isbn, rating),
columns=['user', 'isbn', 'rating'])
print('getting books...')
isbn, title, author, year = [], [], [], []
with open('BX-SQL-Dump/BX-Books.csv') as infile:
for line in infile:
try:
line = line.rsplit('";"', 4)[0].encode('utf-8', 'ignore')
except UnicodeDecodeError:
continue
parts = line.strip().split('";"')
parts = [p.strip('"') for p in parts]
isbn.append(parts[0])
title.append(parts[1])
author.append(parts[2])
year.append(parts[3])
df_books = pd.DataFrame(data=zip(isbn, title, author, year),
columns=['isbn', 'title', 'author', 'year'])
print('saving...')
df_ratings[['user', 'rating']] = df_ratings[['user', 'rating']].astype(int)
df_books['year'] = df_books['year'].astype(int)
df_ratings.to_pickle('df_ratings.obj')
df_books.to_pickle('df_books.obj')
def eliminate_duplicates():
df_ratings = pd.read_pickle('df_ratings.obj')
df_books = pd.read_pickle('df_books.obj')
print(df_ratings.shape, df_books.shape)
    # eliminate all users with < 5 ratings
agg = df_ratings.groupby('user').count()
users_to_keep = set(agg[agg['isbn'] >= 5].index)
df_ratings = df_ratings[df_ratings['user'].isin(users_to_keep)]
# eliminate all books with 0 ratings
isbns = set(df_ratings['isbn'])
df_books = df_books[df_books['isbn'].isin(isbns)]
df_books.index = range(0, df_books.shape[0])
print(df_ratings.shape, df_books.shape)
# compute Jaccard distances between titles
# df_books = df_books.iloc[:2500]
# titles_original = df_books['title'].tolist()
# authors = df_books['author'].tolist()
# isbns = df_books['isbn'].tolist()
# years = df_books['year'].tolist()
# titles = [frozenset(t.lower().split(' ')) for t in titles_original]
# titles = [t - {'the', 'a', 'an'} for t in titles]
# idx2title = {t: i for t, i in enumerate(titles_original)}
# idx2author = {t: i for t, i in enumerate(authors)}
# idx2isbn = {t: i for t, i in enumerate(isbns)}
# idx2year = {t: i for t, i in enumerate(years)}
#
# merges = collections.defaultdict(list)
# for idx1, t1 in enumerate(titles):
# print('\r', idx1, '/', len(titles), end=' | ')
# for idx2, t2 in enumerate(titles):
# if idx2 >= idx1:
# continue
# jcd = (len(t1 & t2) / len(t1 | t2))
# if jcd >= 0.8:
# if idx2year[idx1] != idx2year[idx2]:
# continue
# merges[idx2isbn[idx1]].append(idx2isbn[idx2])
# if 0.8 <= jcd < 1:
# print('%.2f %d %d\n%s (%s)\n%s (%s)\n' %
# (jcd, idx1, idx2, idx2title[idx1], idx2author[idx1],
# idx2title[idx2], idx2author[idx2]))
# duplicates = [[[k] + v] for k, v in merges.items()]
# print('\nfound %d duplicates' % len(duplicates))
# # merge all books with identical titles and authors
# title_author2isbn = collections.defaultdict(list)
# print('finding duplicates...')
# for ridx, row in df_books.iterrows():
# print('\r', ridx+1, '/', df_books.shape[0], end='')
# key = (row['title'].lower(), row['author'].lower())
# title_author2isbn[key].append(row['isbn'])
# print()
# duplicates = [sorted(v) for v in title_author2isbn.values() if len(v) > 1]
# merge all books with identical titles
# df_books = df_books.iloc[:2500]
title2isbn = collections.defaultdict(list)
print('finding duplicates...')
titles = df_books['title'].tolist()
titles = [re.sub(r'[\(\),!\.\?\-]', '', t.lower()) for t in titles]
titles = [frozenset(t.split(' ')) for t in titles]
stopwords = {'the', 'a', 'an', ' ', 'unabridged', 'paperback', 'hardcover'}
titles = [t - stopwords for t in titles]
for ridx, row in df_books.iterrows():
print('\r', ridx+1, '/', df_books.shape[0], end='')
key = titles[ridx]
title2isbn[key].append(row['isbn'])
print()
duplicates = [sorted(v) for v in title2isbn.values() if len(v) > 1]
print('merging duplicates...')
to_drop = set()
for dsidx, ds in enumerate(duplicates):
print('\r', dsidx+1, '/', len(duplicates), end='')
isbn_keep = ds[0]
for d in ds[1:]:
df_ratings['isbn'].replace(d, isbn_keep, inplace=True)
to_drop.add(d)
print()
df_books = df_books[~df_books['isbn'].isin(to_drop)]
df_ratings.to_pickle('df_ratings_merged.obj')
df_books.to_pickle('df_books_merged.obj')
def condense_data(user_ratings=5, book_ratings=20):
df_ratings = pd.read_pickle('df_ratings_merged.obj')
df_books = pd.read_pickle('df_books_merged.obj')
df_books = df_books[df_books['year'] > 1500]
valid_isbns = set(df_books['isbn'])
df_ratings = df_ratings[df_ratings['isbn'].isin(valid_isbns)]
old_shape = (0, 0)
books_to_keep = 0
while old_shape != df_ratings.shape:
print(df_ratings.shape)
old_shape = df_ratings.shape
agg = df_ratings.groupby('isbn').count()
books_to_keep = set(agg[agg['user'] > book_ratings].index)
agg = df_ratings.groupby('user').count()
users_to_keep = set(agg[agg['isbn'] > user_ratings].index)
df_ratings = df_ratings[df_ratings['isbn'].isin(books_to_keep)]
df_ratings = df_ratings[df_ratings['user'].isin(users_to_keep)]
df_books = df_books[df_books['isbn'].isin(books_to_keep)]
print('%d/%d: found %d books with %d ratings' %
(user_ratings, book_ratings, len(books_to_keep), df_ratings.shape[0]))
df_ratings.to_pickle('df_ratings_condensed.obj')
df_books.to_pickle('df_books_condensed.obj')
def export_data():
df_ratings = pd.read_pickle('df_ratings_condensed.obj')
df_books = pd.read_pickle('df_books_condensed.obj')
with open('books.dat', 'w') as outfile:
for ridx, row in df_books.iterrows():
outfile.write(row['isbn'] + '::' + row['title'] + ' (' +
str(row['year']) + ')::' + row['author'] + '\n')
with open('ratings.dat', 'w') as outfile:
for ridx, row in df_ratings.iterrows():
outfile.write(str(row['user']) + '::' + row['isbn'] + '::')
outfile.write(str(row['rating']) + '\n')
def create_database():
"""set up the database scheme (SQLITE)"""
db_file = '../database_new.db'
try:
os.remove(db_file)
except OSError:
pass
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
label = 'books'
# create item table
if label == 'movies':
pkey = 'id INTEGER PRIMARY KEY, '
else:
pkey = 'id VARCHAR(13) PRIMARY KEY, '
create_stmt = """CREATE TABLE """ + label + """ (""" + \
pkey + \
"""original_title TEXT,
cf_title TEXT,
wp_title TEXT,
wp_text TEXT,
wp_id INT)"""
cursor.execute(create_stmt)
conn.commit()
# create category table
cursor.execute(""" PRAGMA foreign_keys = ON;""")
pkey = 'id INTEGER PRIMARY KEY,'
create_stmt = """CREATE TABLE categories (""" + \
pkey + \
"""name TEXT)"""
cursor.execute(create_stmt)
conn.commit()
# create item-category relation table
pkey = 'id INTEGER PRIMARY KEY, '
if label == 'movies':
item_id = 'item_id INTEGER, '
else:
item_id = 'item_id VARCHAR(13),'
create_stmt = """CREATE TABLE item_cat (""" + \
pkey + \
item_id + \
"""cat_id INTEGER,
FOREIGN KEY(item_id) REFERENCES """ + label + \
"""(id),
FOREIGN KEY (cat_id) REFERENCES categories(id))"""
cursor.execute(create_stmt)
conn.commit()
def prune_database():
df_books = pd.read_pickle('df_books_condensed.obj')
df_ids = set(df_books['isbn'])
db_file = 'database_new_full.db'
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# get items already in the database
stmt = 'SELECT id FROM books ORDER BY id ASC'
cursor.execute(stmt)
response = cursor.fetchall()
db_ids = set([i[0] for i in response])
ids_to_delete = db_ids - (df_ids & db_ids)
for isbn in ids_to_delete:
stmt = 'DELETE FROM books WHERE id=?;'
data = (isbn.strip(),)
cursor.execute(stmt, data)
conn.commit()
def populate_database(wp_text=False):
df_books = pd.read_pickle('df_books_condensed.obj')
db_file = '../database_new.db'
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# get items already in the database
stmt = 'SELECT id, wp_id FROM books ORDER BY id ASC'
cursor.execute(stmt)
response = cursor.fetchall()
db_ids = set([i[0] for i in response])
wp_ids = set([i[1] for i in response])
df_books.index = range(0, df_books.shape[0])
max_counter = -1
for db_id in db_ids:
idx = df_books[df_books['isbn'] == db_id].index.tolist()[0]
if idx > max_counter:
max_counter = idx
counter = max_counter if max_counter > 0 else 0
print('starting at id', counter)
if wp_text:
for ridx, row in df_books.iloc[counter:].iterrows():
counter += 1
print(counter, '/', df_books.shape[0],
row['title'], '|', row['author'])
if row['isbn'] in db_ids:
print(' already in database')
continue
if row['year'] < 1000:
print(' no year present')
continue # year of publication must be present
it = Book(row['title'] + ' (' + str(row['year']) + ')', row['isbn'],
row['author'])
it.generate_title_candidates()
it.get_wiki_texts()
it.select_title()
if it.wp_id in wp_ids:
it.wikipedia_text = ''
print('item already in database')
# if it.wikipedia_text:
# it.categories = it.obtain_categories()
if it.wikipedia_text:
it.write_to_database(db_file)
print('YES -', end='')
wp_ids.add(it.wp_id)
else:
print('NO -', end='')
print(it.wikipedia_title)
it.wikipedia_text = ''
print('----------------')
else:
for ridx, row in df_books.iloc[counter:].iterrows():
print('\r', ridx+1, '/', df_books.shape[0], end='')
stmt = 'INSERT OR REPLACE INTO books' +\
'(id, cf_title, original_title)' +\
'VALUES (?, ?, ?)'
data = (row['isbn'], row['title'],
row['title'] + ' (' + str(row['year']) + ')')
cursor.execute(stmt, data)
if (ridx % 100) == 0:
conn.commit()
conn.commit()
def add_genres():
db_file = os.path.join('..', 'database_new.db')
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# get items already in the database
stmt = '''SELECT id, wp_id, original_title, wp_text
FROM books ORDER BY id ASC'''
cursor.execute(stmt)
response = cursor.fetchall()
df = pd.DataFrame(data=response,
columns=['isbn', 'wp_id', 'original_title', 'wp_text'])
stmt = """SELECT id, name from categories"""
cursor.execute(stmt)
db_cat2id = {c[1]: c[0] for c in cursor.fetchall()}
# get items already in the item_cat database
stmt = 'SELECT item_id FROM item_cat'
cursor.execute(stmt)
response = cursor.fetchall()
categories_present = set(r[0] for r in response)
item_count = df.shape[0]
df = df[~df['isbn'].isin(categories_present)]
for ridx, row in df.iterrows():
print(ridx, '/', item_count, row['original_title'])
if DEBUG:
t = 1
print(' DEBUG')
else:
t = random.randint(2, 10)
print(' sleeping for', t, 'seconds')
time.sleep(t)
url = u'http://www.goodreads.com/search?q=' + row['isbn']
try:
request = urllib2.Request(url)
# choose a random user agent
ua = random.choice(Item.url_headers)
request.add_header('User-agent', ua)
data = Item.url_opener.open(request).read()
data = data.decode('utf-8')
except (urllib2.HTTPError, urllib2.URLError) as e:
print(' !+!+!+!+!+!+!+!+ URLLIB ERROR !+!+!+!+!+!+!+!+')
print(' URLError', e)
pdb.set_trace()
rexes = [
r'bookPageGenreLink"\s*href="[^"]+">([^<]+)',
]
re_cat = re.compile('|'.join(rexes))
cats = [e for e in re.findall(re_cat, data)]
# remove duplicates from e.g., "A > AB" and "A" both being present
cats = list(set(cats))
print(' ', row['original_title'])
print(' ', cats)
if not cats: # mark item to delete from books table
print(' no cats found for', row['isbn'])
with open('books_to_delete.txt', 'a') as outfile:
outfile.write(row['isbn'] + '\n')
else:
            # write categories to database
for c in cats:
if c not in db_cat2id:
# insert category if not yet present
stmt = """INSERT INTO categories(id, name) VALUES (?, ?)"""
i = len(db_cat2id)
data = (i, c)
cursor.execute(stmt, data)
conn.commit()
db_cat2id[c] = i
# insert item-category relation
stmt = """INSERT INTO item_cat(item_id, cat_id) VALUES (?, ?)"""
data = (row['isbn'], db_cat2id[c])
cursor.execute(stmt, data)
conn.commit()
def delete_genreless():
with open('books_to_delete.txt') as infile:
isbns = infile.readlines()
db_file = os.path.join('..', 'database_new.db')
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
for isbn in isbns:
print(isbn)
stmt = 'DELETE FROM books WHERE id=?;'
data = (isbn.strip(),)
cursor.execute(stmt, data)
conn.commit()
def delete_yearless():
isbns = [u'0571197639', u'0349101779', u'0099771519', u'0330312367',
u'0450411435', u'0316639842', u'0099521016', u'0099993805',
u'0330306839', u'0330262130', u'0330267388', u'0451082028',
u'0316095133', u'0006480764', u'0140276904', u'0099478110',
u'0553107003', u'0330282565', u'0553227041', u'0330294008',
u'0330305735', u'0553100777', u'0439078415', u'0002242591',
u'0330330276', u'0099479419', u'0099760118', u'0571173004',
u'0140048332', u'0006548539', u'0330345605', u'0001046438',
u'0099201410', u'0002558122', u'014026583X', u'0006546684',
u'0451110129', u'0099288559', u'0440846536', u'059044168X',
u'0590433180', u'0002243962', u'034068478X', u'0684174693',
u'0440118697', u'0140118365', u'0099268817', u'0099283417',
u'0099750813', u'0445002972', u'0006716652', u'0590479865',
u'0553200674', u'0340128720', u'0425043657', u'0739413317',
u'0340546727', u'0140037896']
db_file = '../database_new.db'
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
for isbn in isbns:
stmt = 'DELETE FROM books WHERE id=?;'
data = (isbn.strip(),)
cursor.execute(stmt, data)
conn.commit()
def add_text():
db_file = os.path.join('..', 'database_new.db')
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# get items already in the database
stmt = '''SELECT id, wp_id, original_title, wp_text
FROM books ORDER BY id ASC'''
cursor.execute(stmt)
response = cursor.fetchall()
df = pd.DataFrame(data=response,
columns=['isbn', 'wp_id', 'original_title', 'wp_text'])
item_count = df.shape[0]
df = df[pd.isnull(df['wp_text'])]
for ridx, row in df.iterrows():
print(ridx+1, '/', item_count, row['original_title'], row['isbn'])
if DEBUG:
t = 1
print(' DEBUG')
else:
t = random.randint(2, 10)
print(' sleeping for', t, 'seconds')
time.sleep(t)
url = u'http://www.goodreads.com/search?q=' + row['isbn']
data = ''
trials = 0
        while not data:
            trials += 1
            try:
request = urllib2.Request(url)
# choose a random user agent
ua = random.choice(Item.url_headers)
request.add_header('User-agent', ua)
data = Item.url_opener.open(request).read()
data = data.decode('utf-8')
except (urllib2.HTTPError, urllib2.URLError) as e:
print(' !+!+!+!+!+!+!+!+ URLLIB ERROR !+!+!+!+!+!+!+!+')
print(' URLError', e)
if trials > 5:
pdb.set_trace()
re_text = r'<div id="descriptionContainer">(.+?)(?:</div>|<a)'
text = re.findall(re_text, data, flags=re.DOTALL)[0]
# remove HTML tags
text = re.sub(r'<[^>]+>', '', text)
text = text.strip('\n ')
text = text.replace('\n', '')
# write to database
stmt = 'UPDATE books SET wp_text = ? WHERE id = ?'
data = (text, row['isbn'])
cursor.execute(stmt, data)
conn.commit()
def add_title_to_text():
db_file = os.path.join('..', 'database_new.db')
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# get items already in the database
stmt = '''SELECT id, wp_id, cf_title, wp_text
FROM books ORDER BY id ASC'''
cursor.execute(stmt)
response = cursor.fetchall()
df = pd.DataFrame(data=response,
columns=['isbn', 'wp_id', 'cf_title', 'wp_text'])
item_count = df.shape[0]
for ridx, row in df.iterrows():
print(ridx+1, '/', item_count, row['cf_title'], row['isbn'])
# write to database
stmt = 'UPDATE books SET wp_text = ? WHERE id = ?'
data = (row['wp_text'] + ' ' + row['cf_title'], row['isbn'])
cursor.execute(stmt, data)
conn.commit()
def delete_textless():
pdb.set_trace()
class Item(object):
# init static members
preprocessor = make_prep_parser({})
parser = make_parser()
html_parser = HTMLParser.HTMLParser()
url_opener = urllib2.build_opener()
with io.open('user_agents.txt', encoding='utf-8-sig') as infile:
url_headers = infile.readlines()
url_headers = [u.strip('"\n') for u in url_headers]
# url_headers = ['Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0',
# 'Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130328 Firefox/21.0',
# 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36',
# 'Mozilla/5.0 (X11; CrOS i686 4319.74.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36',
# 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)',
# 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 3.0.04506.30)',
# 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
# 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; ar) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
# 'Opera/9.80 (Windows NT 6.0; U; pl) Presto/2.10.229 Version/11.62',
# 'Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10']
db_cat2id = {} # holds the list of categories present in the database
def __init__(self, cf_title):
self.cf_title = cf_title
self.original_title = cf_title
self.wikipedia_title = ''
self.title_candidates = {}
self.wikipedia_text = ''
self.id = -1
self.wp_id = -1
self.categories = []
def get_wiki_text(self, title):
"""download a Wikipedia article for a given title
and resolve any redirects"""
t = BeautifulSoup(title, convertEntities=BeautifulSoup.HTML_ENTITIES)
title = t.contents[0]
title = title.replace(' ', '_')
title = urllib.quote(urllib.unquote(title.encode('utf-8')))
if 'disambig' in title:
return '', title
url = 'http://en.wikipedia.org/wiki/Special:Export/' + title
data = None
trials = 0
while True:
try:
trials += 1
request = urllib2.Request(url)
request.add_header('User-agent',
random.choice(Item.url_headers))
data = Item.url_opener.open(request).read()
data = data.decode('utf-8', 'ignore')
data_l = data.lower()
if not '<text' in data_l or '{{disambig}}' in data_l or\
'{{ disambig }}' in data_l or\
'{{ disambiguation }}' in data_l or\
'{{disambiguation}}' in data_l:
data = ''
break
except (urllib2.HTTPError, urllib2.URLError) as e:
print('!+!+!+!+!+!+!+!+ URLLIB ERROR !+!+!+!+!+!+!+!+')
print('URLError', e)
if trials >= 5: # avoid endless repetition
pdb.set_trace()
print(title)
if '#redirect' in data.lower() and len(data) < 5000:
data = Item.html_parser.unescape(data)
data = data[data.find('<text'):]
r_pos = data.lower().find('#redirect')
r_offset = data[r_pos:].find('[[')
close_pos = data[r_pos:].find(']]')
link = data[r_pos + r_offset + 2:r_pos + close_pos]
data, title = self.get_wiki_text(link.encode('utf-8'))
title = urllib.unquote(title.encode('utf-8')).decode('utf-8')
return data, title
def get_wiki_texts(self):
"""download the Wikipedia pages corresponding to the title candidates"""
new_title_candidates = {}
for t in self.title_candidates:
text, title = self.get_wiki_text(t.encode('utf-8'))
new_title_candidates[title] = text
self.title_candidates = new_title_candidates
def strip_text(self, text):
"""strip the Wikipedia article export (XML) from tags and Wiki markup
return only the plain article text
"""
root = etree.fromstring(text.encode('utf-8'))
for child in root[1]:
if 'export-0.10/}id' in child.tag:
self.wp_id = int(child.text)
elif 'export-0.10/}revision' in child.tag:
for child2 in child:
if '/export-0.10/}text' in child2.tag:
text = child2.text
# heuristics to remove parts that are not relevant but hard to parse
rx = re.compile(r'<!--.*?-->', flags=re.DOTALL)
text = re.sub(rx, r'', text)
for headline in ['References', 'External links', 'Further reading']:
text = text.split('==' + headline + '==')[0]
text = text.split('== ' + headline + ' ==')[0]
text = re.sub(r'<ref[^/]*?/>', r'', text)
rx = re.compile(r'<ref[^<]*?/>|<ref.*?</ref>', flags=re.DOTALL)
text = re.sub(rx, r'', text)
rx = re.compile(r'<gallery.*?</gallery>', flags=re.DOTALL)
text = re.sub(rx, r'', text)
text = text.replace('<', '')
text = text.replace('>', '')
# parse the text from Wikipedia markup to plain text
trials = 0
try:
trials += 1
preprocessed_text = Item.preprocessor.parse(text + '\n')
output = Item.parser.parse(preprocessed_text.leaves())
        except (AttributeError, IncompleteParse) as e:
            print('!+!+!+!+!+!+!+!+ PARSER ERROR !+!+!+!+!+!+!+!+')
            print(self.wikipedia_title)
            print(e)
            if trials >= 5:  # avoid endless repetition
                pdb.set_trace()
            output = ''  # fall back to empty text so the caller skips this item
output = unicode(output).replace('Category:', ' Category: ')
output = unicode(output).replace('Template:', ' Template: ')
return output
def select_title(self, relevant_categories):
"""select a title among the candidates,
based on the obtained Wikipedia text.
If several articles exist, choose the one with most relevant categories
"""
def extract_categories(title, relevant_categories):
""" extract all categories from a given article"""
regex = re.compile('\[\[Category:([^#\|\]]+)', flags=re.IGNORECASE)
categories = ' '.join(regex.findall(self.title_candidates[title]))
occurrences = 0
for c in relevant_categories:
occurrences += categories.lower().count(c)
return occurrences
titles = [t for t in self.title_candidates if self.title_candidates[t]]
if len(titles) == 0:
self.wikipedia_title = ''
elif len(titles) == 1:
self.wikipedia_title = titles[0]
elif len(titles) > 1:
categories = {t: extract_categories(t, relevant_categories)
for t in titles}
ranked = sorted(categories.items(),
key=operator.itemgetter(1), reverse=True)
            # the sort already ranks candidates; ties are broken by sort order,
            # so simply take the top-ranked title
            self.wikipedia_title = ranked[0][0]
if self.wikipedia_title:
self.wikipedia_text = self.title_candidates[self.wikipedia_title]
self.wikipedia_text = self.strip_text(self.wikipedia_text)
print('selected', self.wikipedia_title)
def write_to_database(self, table, db_file):
"""write this object to the database"""
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# create row for category, if required
if not Item.db_cat2id:
stmt = """SELECT id, name from categories"""
cursor.execute(stmt)
Item.db_cat2id = {c[1]: c[0] for c in cursor.fetchall()}
for c in self.categories:
if c not in Item.db_cat2id:
# insert category if not yet present
stmt = """INSERT INTO categories(id, name) VALUES (?, ?)"""
i = len(Item.db_cat2id)
data = (i, c)
cursor.execute(stmt, data)
conn.commit()
Item.db_cat2id[c] = i
# insert item-category relation
stmt = """INSERT INTO item_cat(item_id, cat_id) VALUES (?, ?)"""
data = (self.id, Item.db_cat2id[c])
cursor.execute(stmt, data)
conn.commit()
# write item
stmt = """INSERT OR REPLACE INTO """ + table +\
"""(id, wp_id, cf_title, original_title, wp_title, wp_text)
VALUES (?, ?, ?, ?, ?, ?)"""
data = (self.id, self.wp_id, self.cf_title, self.original_title,
self.wikipedia_title, self.wikipedia_text)
cursor.execute(stmt, data)
conn.commit()
class Book(Item):
def __init__(self, cf_title, bid, author):
super(Book, self).__init__(cf_title)
self.id = bid
self.author = author
def generate_title_candidates(self):
""" generate title candidates for books"""
for c in '{}[]\n.':
self.cf_title = self.cf_title.replace(c, '')
self.cf_title = self.cf_title.split(':')[0]
self.cf_title = self.cf_title.split('(')[0]
if len(self.cf_title) > 1:
if self.cf_title[0] != self.cf_title[0].upper() or \
self.cf_title[1] != self.cf_title[1].lower():
self.cf_title = self.cf_title[0].upper() +\
self.cf_title[1:].lower()
ce = BeautifulSoup.HTML_ENTITIES
self.cf_title = BeautifulSoup(self.cf_title, convertEntities=ce)
self.cf_title = self.cf_title.contents[0]
self.cf_title = self.cf_title.replace('reg;', '')
self.cf_title = self.cf_title.replace(';', '')
self.cf_title = self.cf_title.replace('(R)', '')
self.cf_title = self.cf_title.replace('(r)', '')
keys = {self.cf_title.strip()}
# handle prefix/suffix swaps, e.g., "Haine, La"
prefixes = {'The', 'A', 'An', 'La', 'Le', 'Les', 'Die', 'Das', 'Der',
'Ein', 'Il', "L'", 'Lo', 'Le', 'I', 'El', 'Los', 'Las', 'O'}
new_keys = set()
for k in keys:
parts = k.split(' ')
if len(parts) > 1 and parts[0].strip() in prefixes:
new_keys.add(' '.join(parts[1:]))
keys |= new_keys
# add "The" to the beginning, if it is not already there
new_keys = set()
for k in keys:
p = k.split(' ')[0]
if p not in prefixes:
new_keys.add('The ' + k)
keys |= new_keys
        # adapt capitalization to the Wikipedia Manual of Style
# (this is only a heuristic)
new_keys = set()
minuscles = {'a', 'an', 'the', 'and', 'but', 'or', 'nor', 'for',
'yet', 'of', 'to', 'in', 'for', 'on', 'with'}
for k in keys:
parts = k.split(' ')
parts = [p for p in parts if p]
parts_new = [parts[0]]
for p in parts[1:]:
if p.lower() not in minuscles:
parts_new.append(p[0].upper() + p[1:])
else:
parts_new.append(p)
new_keys.add(' '.join(parts_new))
keys |= new_keys
author_last = self.author.rsplit(' ', 1)[-1]
book = [k + ' (' + author_last + ' book)' for k in keys]
booka = [k + ' (book)' for k in keys]
novel = [k + ' (novel)' for k in keys]
novela = [k + ' (' + author_last + ' novel)' for k in keys]
keys.update(set(book), set(novel), set(booka), set(novela))
self.title_candidates = {k: '' for k in keys}
def select_title(self):
""" select the title among the candidates
and check if it's actually a book
"""
super(Book, self).select_title(['books', 'novels', 'plays'])
# sanity check - is this really a relevant article?
if self.wikipedia_text:
regex = re.compile('\[\[Category:([^#\|\]]+)', flags=re.IGNORECASE)
data = self.title_candidates[self.wikipedia_title]
categories = ' '.join(regex.findall(data))
occurrences = categories.lower().count('books')
occurrences += categories.lower().count('novels')
occurrences += categories.lower().count('plays')
occurrences += categories.lower().count('short story')
if not occurrences:
self.wikipedia_text = ''
print('did not pass sanity check')
if not self.author.split()[-1].lower() in self.wikipedia_text.lower():
if DEBUG:
pdb.set_trace()
self.wikipedia_text = ''
print('author not in text')
del self.title_candidates
def obtain_categories(self):
"""scrape book categories from Google"""
# sleep in-between to not get banned for too frequent requests
        if DEBUG:
            t = 1
            print('DEBUG')
        else:
            t = random.randint(10, 19)
        print('sleeping for', t, 'seconds')
time.sleep(t)
title = urllib.quote(urllib.unquote(self.wikipedia_title.encode()))
query = '"' + title.replace('_', '+') + '"+' + 'genre'
url = u"https://www.google.com/search?hl=en&biw=1195&bih=918" +\
u"&sclient=psy-ab&q=" + query + u"&btnG=&oq=&gs_l=&pbx=1"
try:
request = urllib2.Request(url)
# choose a random user agent
ua = random.choice(Item.url_headers)
request.add_header('User-agent', ua)
data = Item.url_opener.open(request).read()
data = data.decode('utf-8')
if self.author.split()[-1].lower() not in data.lower(): # sanity check
self.wikipedia_text = ''
return []
except (urllib2.HTTPError, urllib2.URLError) as e:
print('!+!+!+!+!+!+!+!+ URLLIB ERROR !+!+!+!+!+!+!+!+')
print('URLError', e)
pdb.set_trace()
rexes = [
# r'<span class="kno-a-v">([^</]+)',
# r'<span class="answer_slist_item_title nonrich">([^</]+)',
# r'<span class="answer_slist_item_title">([^</]+)',
r'Genres\s*(?:</span>)?(?:</a>)?:\s*(?:</span>)?\s*<span class="[-\_\sa-zA-Z]+">([^</]+)',
r'Genre</td><td(?:[^</]*)>([^</]+)',
r'Genre</th></tr><td(?:[^</]*)>([^</]+)',
]
re_cat = re.compile('|'.join(rexes))
cats = [e for g in re.findall(re_cat, data) for e in g if e]
# cats = [g for g in re.findall(re_cat, data) if g]
print(self.wikipedia_title)
print(cats)
if DEBUG:
pdb.set_trace()
cats = list(set(cats))
if not cats: # sanity check
self.wikipedia_text = ''
return cats
def write_to_database(self, db_file):
super(Book, self).write_to_database('books', db_file)
DEBUG = False # TODO
if __name__ == '__main__':
from datetime import datetime
start_time = datetime.now()
# prepare_data()
# eliminate_duplicates()
# condense_data(user_ratings=20, book_ratings=5)
# prune_database()
# export_data()
# create_database()
# populate_database(wp_text=False)
# add_genres()
# delete_genreless()
# delete_yearless()
# add_text()
add_title_to_text()
# delete_textless()
end_time = datetime.now()
print('Duration: {}'.format(end_time - start_time))
|
from django.db import models
from datetime import timedelta
class User(models.Model):
name = models.CharField(max_length=150)
def __str__(self):
return self.name
class Blog(models.Model):
title = models.CharField(max_length=150)
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
def created_at(self):
return self.created + timedelta(minutes=45, hours=5)
def updated_at(self):
return self.updated + timedelta(minutes=45, hours=5)
|
from django.urls import path, include
from rest_framework import routers
from bandas.models import *
from webservices.views import *
router = routers.DefaultRouter()
router.register(r'bandas', banda_viewset)
router.register(r'roles', rol_viewset)
router.register(r'integrantes', integrante_viewset)
router.register(r'nacionalidades', nacionalidad_viewset)
urlpatterns = [
path('api/', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
|
# Class definition of a linked list
class Node:
def __init__(self, data, next=None):
self.data=data
self.next=next
# Reverse a linked list
def reverse_linked_list(node):
prev_node = None
curr_node = node
while curr_node != None:
next_node = curr_node.next
curr_node.next = prev_node
prev_node = curr_node
curr_node = next_node
return prev_node
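# A minimal usage sketch (added for illustration, not part of the original snippet):
# build the list 1 -> 2 -> 3, reverse it, and print 3, 2, 1.
if __name__ == "__main__":
    head = Node(1, Node(2, Node(3)))
    head = reverse_linked_list(head)
    while head is not None:
        print(head.data)
        head = head.next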
|
import os
import re
from django.db import models
from Portal.ConfigParser.ConfigParser import ConfigParser
# Create your models here.
configParser = ConfigParser(os.path.dirname(os.path.realpath(__file__))+"/createForm.cfg")
admissionDetailsInfo = configParser.getAdmissionDetails()
class PersonalDetails(models.Model):
personalDetailsInfo = configParser.getPersonalDetailsInfo()
UUID = models.CharField(max_length=255,null=False)
for personalAttr in personalDetailsInfo.listOfPersonalAttr:
for subAttr in personalAttr.ListOfSubAttr:
columnName = personalAttr.PersonalAttrName + "_" + subAttr.SubAttrName
dbType = None
if (subAttr.IsTypeString):
dbType = subAttr.StringConstraints['DBType']
maxLength = int(re.findall(r'\d+', dbType)[0])
locals()[columnName] = models.CharField(max_length=maxLength,null=True)
elif (subAttr.IsTypeInteger):
dbType = subAttr.IntegerConstraints['DBType']
if (dbType.lower() == "smallint" or dbType.lower()=="integer"):
locals()[columnName] = models.IntegerField(null=True, blank=True)
elif (dbType.lower() == "bigint"):
locals()[columnName] = models.BigIntegerField(null=True, blank=True)
elif (subAttr.IsTypeFloat):
dbType = subAttr.FPConstraints['DBType']
locals()[columnName] = models.FloatField(null=True, blank=True)
elif (subAttr.IsTypeDate):
dbType = subAttr.DateConstraints['DBType']
locals()[columnName] = models.DateField(null=True)
elif (subAttr.IsTypeBoolean):
dbType = subAttr.BooleanConstraints['DBType']
locals()[columnName] = models.BooleanField(null=True)
else:
pass
class Meta:
db_table = str(admissionDetailsInfo.AdmissionType + '_' + admissionDetailsInfo.AdmissionDegree + '_' + admissionDetailsInfo.AdmissionMonth + \
'_' + admissionDetailsInfo.AdmissionYear + '_' + 'PersonalDetails').replace(" ", "")
pass
class EducationalQualifications(models.Model):
educationalQualificationsInfo = configParser.getEducationalQualificationsInfo()
UUID = models.CharField(max_length=255,null=False)
for eduAttr in educationalQualificationsInfo.listOfEduAttr:
for subAttr in eduAttr.ListOfSubAttr:
columnName = eduAttr.EduAttrName + "_" + subAttr.SubAttrName
dbType = None
if (subAttr.IsTypeString):
dbType = subAttr.StringConstraints['DBType']
maxLength = int(re.findall(r'\d+', dbType)[0])
locals()[columnName] = models.CharField(max_length=maxLength,null=True)
elif (subAttr.IsTypeInteger):
dbType = subAttr.IntegerConstraints['DBType']
if (dbType.lower() == "smallint" or dbType.lower()=="integer"):
locals()[columnName] = models.IntegerField(null=True, blank=True)
elif (dbType.lower() == "bigint"):
locals()[columnName] = models.BigIntegerField(null=True, blank=True)
elif (subAttr.IsTypeFloat):
dbType = subAttr.FPConstraints['DBType']
locals()[columnName] = models.FloatField(null=True, blank=True)
elif (subAttr.IsTypeDate):
dbType = subAttr.DateConstraints['DBType']
locals()[columnName] = models.DateField(null=True)
elif (subAttr.IsTypeBoolean):
dbType = subAttr.BooleanConstraints['DBType']
locals()[columnName] = models.BooleanField(null=True)
else:
pass
class Meta:
db_table = str(admissionDetailsInfo.AdmissionType + '_' + admissionDetailsInfo.AdmissionDegree + '_' + admissionDetailsInfo.AdmissionMonth + \
'_' + admissionDetailsInfo.AdmissionYear + '_' + 'EducationalQualifications').replace(" ", "")
class WorkExperience(models.Model):
workExperienceInfo = configParser.getWorkExperienceInfo()
UUID = models.CharField(max_length=255,null=False)
for subAttr in workExperienceInfo.ListOfSubAttr:
columnName = subAttr.SubAttrName
dbType = None
if (subAttr.IsTypeString):
dbType = subAttr.StringConstraints['DBType']
maxLength = int(re.findall(r'\d+', dbType)[0])
locals()[columnName] = models.CharField(max_length=maxLength,null=True)
elif (subAttr.IsTypeInteger):
dbType = subAttr.IntegerConstraints['DBType']
if (dbType.lower() == "smallint" or dbType.lower()=="integer"):
locals()[columnName] = models.IntegerField(null=True, blank=True)
elif (dbType.lower() == "bigint"):
locals()[columnName] = models.BigIntegerField(null=True, blank=True)
elif (subAttr.IsTypeFloat):
dbType = subAttr.FPConstraints['DBType']
locals()[columnName] = models.FloatField(null=True, blank=True)
elif (subAttr.IsTypeDate):
dbType = subAttr.DateConstraints['DBType']
locals()[columnName] = models.DateField(null=True)
elif (subAttr.IsTypeBoolean):
dbType = subAttr.BooleanConstraints['DBType']
locals()[columnName] = models.BooleanField(null=True)
else:
pass
class Meta:
db_table = str(admissionDetailsInfo.AdmissionType + '_' + admissionDetailsInfo.AdmissionDegree + '_' + admissionDetailsInfo.AdmissionMonth + \
'_' + admissionDetailsInfo.AdmissionYear + '_' + 'WorkExperience').replace(" ", "")
class Attachments(models.Model):
attachmentsInfo = configParser.getAttachmentsInfo()
UUID = models.CharField(max_length=255,null=False)
for attachmentInfo in attachmentsInfo.listOfAttachmentInfo:
for attachment in attachmentInfo.ListOfAttachment:
columnName = attachmentInfo.AttachmentInfoName + "_" + attachment.AttachmentName
dbType = None
if (attachment.IsTypeFile):
maxLength = 255
locals()[columnName] = models.CharField(max_length=maxLength,null=True)
else:
pass
class Meta:
db_table = str(admissionDetailsInfo.AdmissionType + '_' + admissionDetailsInfo.AdmissionDegree + '_' + admissionDetailsInfo.AdmissionMonth + \
'_' + admissionDetailsInfo.AdmissionYear + '_' + 'Attachments').replace(" ", "")
|
#!/usr/bin/env python
from helpers.string import uppercase, lowercase  # 1) importing specific names, i.e. functions
from helpers import variable # 2) importing a specific module
import helpers # 3) importing the entire package
print(f"Uppercase Letter: {uppercase(variable.name)}")
print(f"Lowercase Letter: {lowercase(variable.name)}")
print(f"From packaged helpers: {helpers.string.lowercase(helpers.variable.name)}")
|
# Python program to find the largest number in a list
numbers = [1, 2, 4, 5, 7, 9, 10, 11, 15, 20]
print(numbers)
largest = max(numbers)
print("The Largest Number is", largest)
|
L = ["alen", "unix", "windows"]
for n in L:
    print("hello,%s!" % n)
|
import socket
# Create a server socket
serverSocket = socket.socket()
print("Server socket created")
# Associate the server socket with the IP and Port
ip = "192.168.0.101"
port = 5000
serverSocket.bind((ip, port))
print("Server socket bound with with ip {} port {}".format(ip, port))
# Make the server listen for incoming connections
serverSocket.listen(5)
# Server incoming connections "one by one"
count = 0
while(True):
(clientConnection, clientAddress) = serverSocket.accept()
count = count + 1
print("Accepted {} connections so far".format(count))
# read from client connection
while(True):
data = clientConnection.recv(1024)
print(data)
        if data != b'':
            msg1 = "Hi Client! Read everything you sent"
            msg1Bytes = str.encode(msg1)
            msg2 = "Now I will close your connection"
            msg2Bytes = str.encode(msg2)
            clientConnection.send(msg1Bytes)
            clientConnection.send(msg2Bytes)
            clientConnection.close()
            print("Connection closed")
            break
        else:
            # an empty result means the client closed the connection; stop reading
            # instead of spinning on recv() forever
            clientConnection.close()
            print("Connection closed by client")
            break
serverSocket.close()
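# A matching client sketch (assuming the same ip/port as the server above); kept as
# comments since it is a separate program:
#   import socket
#   clientSocket = socket.socket()
#   clientSocket.connect(("192.168.0.101", 5000))
#   clientSocket.send(b"Hello server")
#   print(clientSocket.recv(1024))
#   clientSocket.close()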
|
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
# We can extent existing user model to consider team member
from django.contrib.auth.models import AbstractUser
#
#
# class User(AbstractUser):
# phone_number = PhoneNumberField()
# role = models.PositiveSmallIntegerField(choices=ROLE_CHOICES, default=ROLE_REGULAR)
# Considering team member as separate entity from User model
class TeamMember(models.Model):
ROLE_ADMIN = 1
ROLE_REGULAR = 2
ROLE_CHOICES = [
(ROLE_ADMIN, 'admin'),
(ROLE_REGULAR, 'regular')
]
email = models.EmailField(unique=True, null=False, blank=False)
first_name = models.CharField(max_length=50, blank=False)
last_name = models.CharField(max_length=50, blank=False)
phone_number = PhoneNumberField()
role = models.PositiveSmallIntegerField(choices=ROLE_CHOICES, default=ROLE_REGULAR)
|
from django import forms
from .models import Choice, Question
from django.forms import ModelForm, formset_factory
from django.forms.widgets import RadioSelect
class ChoiceForm(ModelForm):
EXCELLENT, GOOD, MEDIUM, BAD = 'EX', 'GO', 'ME', 'BA'
    question_choices = (
        (EXCELLENT, 'عالی'),   # 'Excellent'
        (GOOD, 'خوب'),         # 'Good'
        (MEDIUM, 'متوسط'),     # 'Medium'
        (BAD, 'ضعیف')          # 'Poor'
    )
choice_text = forms.ChoiceField(choices=question_choices, widget=RadioSelect(attrs={"required":"required"}), required=True)
class Meta:
model = Choice
fields = ['choice_text']
ChoiceFormset = formset_factory(ChoiceForm, extra=Question.objects.count())
|
# -*- encoding:utf-8 -*-
'''Search for API endpoints in Android code'''
import os
#androidCode = "D:\\Code\\Android\\app\\src"
class searchAndroidInterface:
def __init__(self, codePath, resPath):
'''CodePath: Android code path;
resPath: save result of search '''
self.androidCodePath = codePath
self.resultPath = resPath
def searchJavaFile(self):
javaFile = []
for root, dirs, files in os.walk(self.androidCodePath):
for file in files:
javaFilePath = os.path.join(root, file)
if os.path.splitext(javaFilePath)[1] in [".java"]:
javaFile.append(javaFilePath)
return javaFile
def readFile(self, java_file):
Final_interface = []
interface_start_keyword = ["String URL_", '"/']
interface_end_keyword = ['";', '")', '",', '" +']
with open(java_file, 'r', encoding="utf-8") as f:
for line in f:
if interface_start_keyword[0] in line:
                    interface = line.split("=")[1].strip().rstrip(";")
# print(interface)
Final_interface.append(interface)
elif interface_start_keyword[1] in line:
getinterface = line.split('"/')[1]
for i in interface_end_keyword:
if i in getinterface:
interface = getinterface.split(i)[0]
Final_interface.append(interface)
break
return Final_interface
def run(self):
result_path = self.resultPath + "\\androidInterfaceRes.txt"
result = []
java_path = self.searchJavaFile()
for i in range(len(java_path)):
print(java_path[i])
get_interface = self.readFile(java_path[i])
if get_interface:
result.append(get_interface)
else:
pass
# new_result = set(result)
print(result)
for j in range(len(result)):
for h in range(len(result[j])):
if result[j][h]:
with open(result_path, 'a', encoding="utf-8") as f:
f.write(result[j][h] + "\n")
print("\n + Result of interface in the code by search save to {}".format(result_path))
# if __name__ == "__main__":
# android = "D:\\Code\\Android\\app\\src"
# res = "D:\\ddd"
# a = searchAndroidInterface(codePath=android, resPath=res)
# a.run()
|
import random
# my_list = []
# length = 0
# with open("sowpods.txt", "r") as open_file:
#
# line = open_file.readline()
# my_list.append(line)
# while line:
# length += 1
# my_list.append(line)
# line = open_file.readline()
with open("sowpods.txt", "r") as open_file:
lines = open_file.readlines()
    index = random.randint(0, len(lines) - 1)
    print(lines[index])
|
import requests
for i in range(1,10):
print(i)
    image_name_dir = 'images/' + str(i) + '.png'
    request_url = 'https://static-nft.pancakeswap.com/mainnet/0x0a8901b0E25DEb55A87524f0cC164E9644020EBA/pancake-squad-' + str(i) + '-1000.png'
    r = requests.get(request_url)
    if r.status_code == 200:
        # reuse the response that was already fetched instead of downloading twice,
        # and only open the file once we know the request succeeded
        with open(image_name_dir, 'wb') as f:
            f.write(r.content)
        print(image_name_dir + ' is successfully downloaded.')
    else:
        print('Error. Image cannot be retrieved.')
|
# -*- coding: utf-8 -*-
import json
from django.test import TestCase
from zipcodes.factories.zipcode import ZipCodeFactory
from zipcodes.models import ZipCode
class ZipCodeResourceTest(TestCase):
def setUp(self):
self.ribeirao = ZipCodeFactory.create(
address='Avenida Presidente Vargas',
neighborhood='Jardim América',
city='Ribeirão Preto',
state='SP',
zip_code='14020260'
)
self.bonfim = ZipCodeFactory.create(
city='Bonfim Paulista (Ribeirão Preto)',
state='SP',
zip_code='14110000'
)
def test_list(self):
response = self.client.get('/zipcodes/')
expected_json = {u"objects": [
{
u"zip_code": u"14110000",
u"address": u"",
u"neighborhood": u"",
u"city": u"Bonfim Paulista (Ribeirão Preto)",
u"state": u"SP",
},
{
u"zip_code": u"14020260",
u"address": u"Avenida Presidente Vargas",
u"neighborhood": u"Jardim América",
u"city": u"Ribeirão Preto",
u"state": u"SP",
}
]}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), expected_json)
def test_list_limited(self):
response = self.client.get('/zipcodes/?limit=1')
expected_json = {u"objects": [
{
u"zip_code": u"14110000",
u"address": u"",
u"neighborhood": u"",
u"city": u"Bonfim Paulista (Ribeirão Preto)",
u"state": u"SP",
}
]}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), expected_json)
self.assertEqual(len(response.json()['objects']), 1)
def test_create(self):
response = self.client.post(
'/zipcodes/',
data=json.dumps({'zip_code': '14093010'}),
content_type='application/json'
)
new_element = ZipCode.objects.all()[0]
self.assertEqual(response.status_code, 201)
self.assertEqual(new_element.zip_code, '14093010')
self.assertEqual(new_element.address, 'Rua Zilda de Souza Rizzi')
self.assertEqual(new_element.neighborhood, 'Jardim Interlagos')
self.assertEqual(new_element.city, u'Ribeirão Preto')
self.assertEqual(new_element.state, 'SP')
def test_create_zip_code_without_address_and_neighborhood(self):
response = self.client.post(
'/zipcodes/',
data=json.dumps({'zip_code': '14150000'}),
content_type='application/json'
)
new_element = ZipCode.objects.all()[0]
self.assertEqual(response.status_code, 201)
self.assertEqual(new_element.zip_code, '14150000')
self.assertEqual(new_element.city, 'Serrana')
self.assertEqual(new_element.state, 'SP')
def test_create_zip_code_already_created(self):
response = self.client.post(
'/zipcodes/',
data=json.dumps({'zip_code': '14020260'}),
content_type='application/json'
)
error_msg = response.json()['error']
self.assertEqual(response.status_code, 400)
self.assertEqual(error_msg, 'Zip code already created')
def test_create_invalid_zip_code(self):
response = self.client.post(
'/zipcodes/',
data=json.dumps({'zip_code': '1402026'}),
content_type='application/json'
)
error_msg = response.json()['error']
self.assertEqual(response.status_code, 400)
self.assertEqual(error_msg, 'Incorrect zip code format')
    def test_delete_existing_zip_code(self):
response = self.client.delete('/zipcodes/14020260/')
self.assertEqual(response.status_code, 204)
def test_delete_non_existent_zip_code(self):
response = self.client.delete('/zipcodes/14093010/')
error_msg = response.json()['error']
self.assertEqual(response.status_code, 404)
self.assertEqual(error_msg, 'ZipCode matching query does not exist.')
def test_detail_ribeirao(self):
response = self.client.get('/zipcodes/14020260/')
expected_json = {
u"zip_code": u"14020260",
u"address": u"Avenida Presidente Vargas",
u"neighborhood": u"Jardim América",
u"city": u"Ribeirão Preto",
u"state": u"SP"
}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), expected_json)
def test_detail_bonfim(self):
response = self.client.get('/zipcodes/14110000/')
expected_json = {
u"zip_code": u"14110000",
u"address": u"",
u"neighborhood": u"",
u"city": u"Bonfim Paulista (Ribeirão Preto)",
u"state": u"SP"
}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), expected_json)
def test_detail_zip_code_invalid(self):
response = self.client.get('/zipcodes/1402026/')
error_msg = response.json()['error']
self.assertEqual(response.status_code, 404)
self.assertEqual(error_msg, 'ZipCode matching query does not exist.')
|
import pandas as pd
import os
import scipy.stats
import numpy as np
import json
import h5py
import tensorflow as tf
import chrombpnet.training.utils.argmanager as argmanager
import chrombpnet.training.utils.losses as losses
import chrombpnet.training.metrics as metrics
import chrombpnet.training.data_generators.initializers as initializers
from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.models import load_model
from numpy import nanmean, nanstd  # modern scipy no longer provides these; numpy has the same functions
def write_predictions_h5py(output_prefix, profile, logcts, coords):
# open h5 file for writing predictions
output_h5_fname = "{}_predictions.h5".format(output_prefix)
h5_file = h5py.File(output_h5_fname, "w")
# create groups
coord_group = h5_file.create_group("coords")
pred_group = h5_file.create_group("predictions")
num_examples=len(coords)
coords_chrom_dset = [str(coords[i][0]) for i in range(num_examples)]
coords_center_dset = [int(coords[i][1]) for i in range(num_examples)]
coords_peak_dset = [int(coords[i][3]) for i in range(num_examples)]
dt = h5py.special_dtype(vlen=str)
# create the "coords" group datasets
coords_chrom_dset = coord_group.create_dataset(
"coords_chrom", data=np.array(coords_chrom_dset, dtype=dt),
dtype=dt, compression="gzip")
coords_start_dset = coord_group.create_dataset(
"coords_center", data=coords_center_dset, dtype=int, compression="gzip")
coords_end_dset = coord_group.create_dataset(
"coords_peak", data=coords_peak_dset, dtype=int, compression="gzip")
# create the "predictions" group datasets
profs_dset = pred_group.create_dataset(
"profs",
data=profile,
dtype=float, compression="gzip")
logcounts_dset = pred_group.create_dataset(
"logcounts", data=logcts,
dtype=float, compression="gzip")
# close hdf5 file
h5_file.close()
def load_model_wrapper(args):
# read .h5 model
custom_objects={"tf": tf, "multinomial_nll":losses.multinomial_nll}
get_custom_objects().update(custom_objects)
model=load_model(args.model_h5, compile=False)
print("got the model")
#model.summary()
return model
def softmax(x, temp=1):
norm_x = x - np.mean(x,axis=1, keepdims=True)
return np.exp(temp*norm_x)/np.sum(np.exp(temp*norm_x), axis=1, keepdims=True)
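# Illustrative example (not part of the original): softmax(np.array([[1., 2., 3.]]))
# is roughly [[0.090, 0.245, 0.665]], and every row sums to 1.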
def predict_on_batch_wrapper(model,test_generator):
num_batches=len(test_generator)
profile_probs_predictions = []
true_counts = []
counts_sum_predictions = []
true_counts_sum = []
coordinates = []
for idx in range(num_batches):
if idx%100==0:
print(str(idx)+'/'+str(num_batches))
X,y,coords=test_generator[idx]
#get the model predictions
preds=model.predict_on_batch(X)
# get counts predictions
true_counts.extend(y[0])
profile_probs_predictions.extend(softmax(preds[0]))
# get profile predictions
true_counts_sum.extend(y[1][:,0])
counts_sum_predictions.extend(preds[1][:,0])
coordinates.extend(coords)
return np.array(true_counts), np.array(profile_probs_predictions), np.array(true_counts_sum), np.array(counts_sum_predictions), np.array(coordinates)
def main(args):
metrics_dictionary = {"counts_metrics":{}, "profile_metrics":{}}
# get model architecture to load - can load .hdf5 and .weights/.arch
model=load_model_wrapper(args)
test_generator = initializers.initialize_generators(args, mode="test", parameters=None, return_coords=True)
true_counts, profile_probs_predictions, true_counts_sum, counts_sum_predictions, coordinates = predict_on_batch_wrapper(model, test_generator)
# generate prediction on test set and store metrics
write_predictions_h5py(args.output_prefix, profile_probs_predictions, counts_sum_predictions, coordinates)
# store regions, their predictions and corresponding pointwise metrics
mnll_pw, mnll_norm, jsd_pw, jsd_norm, jsd_rnd, jsd_rnd_norm, mnll_rnd, mnll_rnd_norm = metrics.profile_metrics(true_counts,profile_probs_predictions)
# including both metrics
if args.peaks != "None" and args.nonpeaks != "None":
spearman_cor, pearson_cor, mse = metrics.counts_metrics(true_counts_sum, counts_sum_predictions,args.output_prefix+"_peaks_and_nonpeaks", "Both peaks and non peaks")
metrics_dictionary["counts_metrics"]["peaks_and_nonpeaks"] = {}
metrics_dictionary["counts_metrics"]["peaks_and_nonpeaks"]["spearmanr"] = spearman_cor
metrics_dictionary["counts_metrics"]["peaks_and_nonpeaks"]["pearsonr"] = pearson_cor
metrics_dictionary["counts_metrics"]["peaks_and_nonpeaks"]["mse"] = mse
metrics_dictionary["profile_metrics"]["peaks_and_nonpeaks"] = {}
metrics_dictionary["profile_metrics"]["peaks_and_nonpeaks"]["median_jsd"] = np.nanmedian(jsd_pw)
metrics_dictionary["profile_metrics"]["peaks_and_nonpeaks"]["median_norm_jsd"] = np.nanmedian(jsd_norm)
metrics.plot_histogram(jsd_pw, jsd_rnd, args.output_prefix+"_peaks_and_nonpeaks", "Both peaks and non peaks")
# including only nonpeak metrics
if args.nonpeaks != "None":
non_peaks_idx = coordinates[:,3] == '0'
spearman_cor, pearson_cor, mse = metrics.counts_metrics(true_counts_sum[non_peaks_idx], counts_sum_predictions[non_peaks_idx],args.output_prefix+"_only_nonpeaks", "Only non peaks")
metrics_dictionary["counts_metrics"]["nonpeaks"] = {}
metrics_dictionary["counts_metrics"]["nonpeaks"]["spearmanr"] = spearman_cor
metrics_dictionary["counts_metrics"]["nonpeaks"]["pearsonr"] = pearson_cor
metrics_dictionary["counts_metrics"]["nonpeaks"]["mse"] = mse
metrics_dictionary["profile_metrics"]["nonpeaks"] = {}
metrics_dictionary["profile_metrics"]["nonpeaks"]["median_jsd"] = np.nanmedian(jsd_pw[non_peaks_idx])
metrics_dictionary["profile_metrics"]["nonpeaks"]["median_norm_jsd"] = np.nanmedian(jsd_norm[non_peaks_idx])
metrics.plot_histogram(jsd_pw[non_peaks_idx], jsd_rnd[non_peaks_idx], args.output_prefix+"_only_nonpeaks", "Only non peaks")
# including only peak metrics
if args.peaks != "None":
peaks_idx = coordinates[:,3] == '1'
spearman_cor, pearson_cor, mse = metrics.counts_metrics(true_counts_sum[peaks_idx], counts_sum_predictions[peaks_idx],args.output_prefix+"_only_peaks", "Only peaks")
metrics_dictionary["counts_metrics"]["peaks"] = {}
metrics_dictionary["counts_metrics"]["peaks"]["spearmanr"] = spearman_cor
metrics_dictionary["counts_metrics"]["peaks"]["pearsonr"] = pearson_cor
metrics_dictionary["counts_metrics"]["peaks"]["mse"] = mse
metrics_dictionary["profile_metrics"]["peaks"] = {}
metrics_dictionary["profile_metrics"]["peaks"]["median_jsd"] = np.nanmedian(jsd_pw[peaks_idx])
metrics_dictionary["profile_metrics"]["peaks"]["median_norm_jsd"] = np.nanmedian(jsd_norm[peaks_idx])
metrics.plot_histogram(jsd_pw[peaks_idx], jsd_rnd[peaks_idx], args.output_prefix+"_only_peaks", "Only peaks")
#ofile = open(args.output_prefix+"_pearson_cor.txt","w")
#ofile.write(str(round(pearson_cor,2)))
#ofile.close()
#ofile = open(args.output_prefix+"_norm_jsd.txt","w")
#ofile.write(str(round(metrics_dictionary["profile_metrics"]["peaks"]["median_norm_jsd"],2)))
#ofile.close()
# store dictionary
with open(args.output_prefix+'_metrics.json', 'w') as fp:
json.dump(metrics_dictionary, fp, indent=4)
if __name__=="__main__":
# read arguments
args=argmanager.fetch_predict_args()
main(args)
|
import os
import sys
import json
import re
import requests
from flask import Flask, request
app = Flask(__name__)
from flaskext.mysql import MySQL
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'XXX'
app.config['MYSQL_DATABASE_PASSWORD'] = 'XXX'
app.config['MYSQL_DATABASE_DB'] = 'XXX'
app.config['MYSQL_DATABASE_HOST'] = 'XXX'
mysql.init_app(app)
def stripUserID(userID):
chars = "1234567890abcdefghijklmnopqrstuvwxyz.!()%,@[]"
newStr = ''
for c in userID:
if c.lower() in chars:
newStr += c
return newStr
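# Illustrative example (not part of the original): characters outside the allowed set
# are dropped, e.g. stripUserID("abc-123_DEF!") -> "abc123DEF!"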
def insertHomework(userID, toAdd):
d = getAllData(userID)
if not d:
return "Error, it wasn't added"
toAdd = stripUserID(toAdd)
if not toAdd:
return "Please enter a description"
currentExtra = d["extra"]
    if currentExtra:
        toAdd = currentExtra + "|" + toAdd
    # otherwise this is the first homework entry and needs no separator
connection = mysql.get_db()
cursor = mysql.get_db().cursor()
query = """UPDATE users SET extra="{0}" where users.userID="{1}" ;""".format(toAdd, stripUserID(userID))
cursor.execute(query)
connection.commit()
return "Homework successfully added"
def removeHomework(userID, toRemoveNumber):
d = getAllData(userID)
try:
toRemoveNumber = int(toRemoveNumber.encode("ascii", "ignore"))
except:
return "Sorry, something went wrong"
if not d:
return "Error, it wasn't removed"
currentExtra = d["extra"]
dLi = currentExtra.split("|")
    if len(dLi) == 0 or currentExtra == "":
        return "You've no homework left. Woop woop"
    if toRemoveNumber - 1 >= len(dLi) or toRemoveNumber - 1 < 0:
return "Sorry, that number doesn't work"
if len(dLi)==1:
toAdd = ""
else:
del dLi[toRemoveNumber-1]
toAdd = "|".join([i for i in dLi])
connection = mysql.get_db()
cursor = mysql.get_db().cursor()
query = """UPDATE users SET extra="{0}" where users.userID="{1}" ;""".format(toAdd, stripUserID(userID))
cursor.execute(query)
connection.commit()
return "Successfully removed"
def formatHomework(extra):
if not extra:
return "No homework. Congrats"
dLi = extra.split("|")
newStr = "\n".join([str(dLi.index(i)+1)+ ": " + i for i in dLi])
return newStr+"\n"
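# Example of the format produced above (hypothetical stored value):
#   formatHomework("maths ex 3|read ch 2") -> "1: maths ex 3\n2: read ch 2\n"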
def getAllData(userID):
try:
cursor = mysql.get_db().cursor()
cursor.execute("SELECT * from users where userID='"+stripUserID(userID)+"';")
returned = cursor.fetchone()
except:
return []
if returned == None:
return []
data = {}
data["UserID"] = userID.encode("ascii", "ignore")
data["email"] = returned[1].encode("ascii", "ignore")
data["gAccess"] = returned[2].encode("ascii", "ignore")
data["timetable"] = returned[3].encode("ascii", "ignore")
data["extra"] = returned[4].encode("ascii", "ignore")
return data
def sendEmail(message):
import smtplib
server = smtplib.SMTP('XXX', 587)
server.starttls()
server.login("XXX", "XXX")
msg = message
server.sendmail("XXX", "XXX", msg)
server.quit()
@app.route('/', methods=['GET', 'POST'])
def welcome():
return "Welcome to Erasmus Technologies, making the world a better place one soul at a time."
@app.route('/listen', methods=['GET'])
def googleCallback():
log(request.args)
return "Heard", 200
@app.route('/webhook', methods=['GET'])
def verify():
log("Webhook called - verify GET")
# when the endpoint is registered as a webhook, it must echo back
# the 'hub.challenge' value it receives in the query arguments
if request.args.get("hub.mode") == "subscribe" and request.args.get("hub.challenge"):
if not request.args.get("hub.verify_token") == "erasmus":
log("Tokens didn't match")
return "Verification token mismatch", 403
log("returning hub.challenge")
return request.args["hub.challenge"], 200
log("didn't have the args")
return "Hello world", 200
@app.route('/webhook', methods=['POST'])
def webhook():
data = request.get_json()
if data["object"] == "page":
for entry in data["entry"]:
for messaging_event in entry["messaging"]:
if messaging_event.get("message"): # someone sent us a message
sender_id = messaging_event["sender"]["id"] # the facebook ID of the person sending you the message
if sender_id == messaging_event["recipient"]["id"]:
break
recipient_id = messaging_event["recipient"]["id"] # the recipient's ID, which should be your page's facebook ID
message_text = messaging_event["message"]["text"] # the message's text
safeUserID = stripUserID(sender_id)
d = getAllData(safeUserID)
#handle user not existing
if d == []:
messageToSend = "Sorry, we don't have any data for you"
else:
if "help" in message_text.lower():
messageToSend = "Hi welcome to ErasmusBot.Enter 'show' to see all you work.\n Enter 'add' followed by homework to add it.\n Enter 'remove', followed by it's number to remove it\nEnter 'timetable' to see what you've got on today\nenter google to see task set on 'google'"
elif "timetable" in message_text.lower():
messageToSend = d["timetable"]
elif "google" in message_text.lower():
messageToSend = d["gAccess"]
elif "email" in message_text.lower():
messageToSend = d["email"]
elif "extra" in message_text.lower():
messageToSend = d["extra"]
elif "add" in message_text.lower():
new = message_text.lower().replace("add homework","")
messageToSend = insertHomework(safeUserID, new)
elif "show" in message_text.lower():
messageToSend = formatHomework(d["extra"])
elif "remove" in message_text.lower():
homeworkNum = re.sub("[^0-9]", "", message_text.lower())
messageToSend = removeHomework(safeUserID, homeworkNum)
else:
messageToSend = "Sorry, unrecognised command"
#Actually sending the message
send_message(sender_id, messageToSend)
if messaging_event.get("delivery"): # delivery confirmation
pass
if messaging_event.get("optin"): # optin confirmation
pass
if messaging_event.get("postback"): # user clicked/tapped "postback" button in earlier message
pass
return "ok", 200
def send_message(recipient_id, message_text):
params = {
"access_token": "XXX"
}
headers = {
"Content-Type": "application/json"
}
data = json.dumps({
"recipient": {
"id": recipient_id
},
"message": {
"text": message_text
}
})
r = requests.post("https://graph.facebook.com/v2.6/me/messages", params=params, headers=headers, data=data)
def log(message):  # simple wrapper that forwards log messages by email
sendEmail(message)
if __name__ == '__main__':
app.run(debug=True)
|
from Interfaz.Window import Window
import os
#class starting the program
class Init():
def __init__(self):
Window()
def __readFile(self, path_file):
        with open(path_file, encoding="utf-8") as f:
            fileContents = f.read()  # Get all the text from the file.
        # Return the file contents
        return fileContents
#method main for init program
if __name__ == "__main__":
Init()
#pass
|
from abc import abstractmethod
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import sklearn
from sklearn.svm import SVR
from sklearn.model_selection import KFold
import xgboost as xgb
import optuna
class BaseModelCV(object):
model_cls = None
def __init__(self, n_trials=300):
self.n_trials = n_trials
def fit(self, X, y):
if isinstance(X, np.ndarray):
X = pd.DataFrame(X)
y = pd.DataFrame(y)
elif isinstance(X, pd.DataFrame):
X = X.reset_index(drop=True)
y = y.reset_index(drop=True)
self.X = X
self.y = y
study = optuna.create_study(direction='maximize')
study.optimize(self, n_trials=self.n_trials)
self.best_trial = study.best_trial
print()
print("Best score:", round(self.best_trial.value, 2))
print("Best params:", self.best_trial.params)
print()
self.best_model = self.model_cls(**self.best_trial.params)
self.best_model.fit(self.X, self.y)
def predict(self, X):
if isinstance(X, pd.Series):
X = pd.DataFrame(X.values.reshape(1, -1))
elif isinstance(X, np.ndarray):
X = pd.DataFrame(X)
return self.best_model.predict(X)
def score(self, X, y):
if isinstance(X, np.ndarray):
X = pd.DataFrame(X)
y = pd.DataFrame(y)
return self.best_model.score(X, y)
def kfold_cv(self, model, splits=5):
scores = []
kf = KFold(n_splits=splits, shuffle=True)
for train_index, test_index in kf.split(self.X):
X_train, X_test = self.X.iloc[train_index], self.X.iloc[test_index]
y_train, y_test = self.y.iloc[train_index], self.y.iloc[test_index]
model.fit(X_train, y_train)
scores.append(model.score(X_test, y_test))
score = np.array(scores).mean()
return score
@abstractmethod
def __call__(self, trial):
raise NotImplementedError()
class SVRCV(BaseModelCV):
model_cls = SVR
def __call__(self, trial):
kernel = trial.suggest_categorical('kernel', ['rbf', 'linear'])
C = trial.suggest_loguniform('C', 1e-2, 1e2)
epsilon = trial.suggest_loguniform('epsilon', 1e-3, 1e1)
gamma = trial.suggest_loguniform('gamma', 1e-3, 1e3)
model = self.model_cls(kernel=kernel, C=C,
epsilon=epsilon, gamma=gamma)
score = self.kfold_cv(model)
return score
class XGBRCV(BaseModelCV):
model_cls = xgb.XGBRegressor
def __call__(self, trial):
booster = trial.suggest_categorical('booster', ['gbtree', 'dart'])
alpha = trial.suggest_loguniform('alpha', 1e-8, 1.0)
max_depth = trial.suggest_int('max_depth', 1, 9)
eta = trial.suggest_loguniform('eta', 1e-8, 1.0)
gamma = trial.suggest_loguniform('gamma', 1e-8, 1.0)
grow_policy = trial.suggest_categorical(
'grow_policy', ['depthwise', 'lossguide'])
if booster == 'gbtree':
model = self.model_cls(silent=1, booster=booster,
alpha=alpha, max_depth=max_depth, eta=eta,
gamma=gamma, grow_policy=grow_policy)
elif booster == 'dart':
sample_type = trial.suggest_categorical('sample_type',
['uniform', 'weighted'])
normalize_type = trial.suggest_categorical('normalize_type',
['tree', 'forest'])
rate_drop = trial.suggest_loguniform('rate_drop', 1e-8, 1.0)
skip_drop = trial.suggest_loguniform('skip_drop', 1e-8, 1.0)
model = self.model_cls(silent=1, booster=booster,
alpha=alpha, max_depth=max_depth, eta=eta,
gamma=gamma, grow_policy=grow_policy,
sample_type=sample_type,
normalize_type=normalize_type,
rate_drop=rate_drop, skip_drop=skip_drop)
score = self.kfold_cv(model)
return score
if __name__ == '__main__':
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
df = pd.read_csv('sample/boston.csv')
y = df['Price']
    X = df.drop(['Price'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
model = SVRCV(n_trials=300)
model.fit(X_train, y_train)
print(model.score(X_test, y_test))
    print(model.predict(X_test[:1]))  # predict on scaled features, consistent with training
|