text stringlengths 8 6.05M |
|---|
import tkinter as tk
import random

# Root window for the calculator ("Szamologep" = Hungarian for calculator).
# NOTE(review): "self" is a misleading name for a module-level Tk root, but
# the widget-setup code below references it, so it is kept.
self = tk.Tk()
self.geometry("200x40")
self.title("Szamologep")

# variables: shared calculator state mutated by the button callbacks.
# BUG FIX: removed the dead `op = 'none'` assignment that was immediately
# overwritten by `op = []`.
temp = []  # numbers entered so far
op = []    # operators entered so far ('+', '-', '*', '/', '=')
def Plus():
    """Store the current entry value and queue a '+' operation."""
    op.append('+')
    current = int(num.get())
    temp.append(current)
    print(op)
    print(temp)
    num.delete(0, tk.END)
def Minus():
    """Store the current entry value and queue a '-' operation."""
    op.append('-')
    current = int(num.get())
    temp.append(current)
    print(op)
    print(temp)
    num.delete(0, tk.END)
def Multi():
    """Store the current entry value and queue a '*' operation."""
    op.append('*')
    current = int(num.get())
    temp.append(current)
    print(op)
    print(temp)
    num.delete(0, tk.END)
def Divide():
    """Store the current entry value and queue a '/' operation."""
    op.append('/')
    current = int(num.get())
    temp.append(current)
    print(op)
    print(temp)
    num.delete(0, tk.END)
def finish():
    """Evaluate the queued expression left-to-right and show the result.

    `temp` holds the operands and `op` the operators between them; the
    current entry is appended as the final operand before evaluation.

    BUG FIXES vs. the original:
    - op[i] was applied to temp[i] starting from c=0, which dropped the
      final operand (e.g. "5 + 3 =" yielded 5); now c starts at temp[0]
      and op[i] is applied to temp[i+1].
    - '/' was queued by Divide() but never evaluated.
    - the `op[count] == '='` branch inside the loop was unreachable (the
      loop condition already excludes '='), and the result was inserted
      without clearing the entry first.
    """
    temp.append(int(num.get()))
    op.append('=')
    print(op)
    print(temp)
    c = temp[0] if temp else 0
    # op has one entry per operand pair plus the trailing '=', which zip drops.
    for operator, operand in zip(op, temp[1:]):
        if operator == '+':
            c += operand
        elif operator == '-':
            c -= operand
        elif operator == '*':
            c *= operand
        elif operator == '/':
            c /= operand
    num.delete(0, tk.END)
    num.insert(0, str(c))
    op.clear()
    temp.clear()
#init widgets
# NOTE(review): the entry has no explicit parent, so it attaches to the
# default root (the Tk instance created above); the buttons pass it explicitly.
num=tk.Entry(width=50)
# Operator buttons; each callback records the entry value and its operator.
plus=tk.Button(self, text="plus", padx=80, pady=50, command=Plus)
minus=tk.Button(self,text="-",padx=80, pady=50,command=Minus)
multi=tk.Button(self,text="*",padx=80, pady=50,command=Multi)
divide=tk.Button(self,text="/",padx=88, pady=50,command=Divide)
equate=tk.Button(self,text="=",padx=80, pady=50,command=finish)
#design: entry on top spanning the grid, operators in two rows below.
num.grid(row=0,column=0,columnspan=3)
plus.grid(row=1,column=0)
minus.grid(row=1,column=1)
multi.grid(row=1,column=2)
divide.grid(row=2,column=0)
equate.grid(row=2,column=1)
self.update()
self.mainloop()
import sys
from rosalind_utility import parse_fasta
from lexf import alpha_combs
def kmer_comp(string, k=4):
    """Return the k-mer composition of a DNA string.

    Counts occurrences of every length-k word over the alphabet ACGT in
    `string`, keyed in lexicographic order (so `.values()` yields the
    standard Rosalind composition vector).

    Args:
        string: DNA string over {A, C, G, T}.
        k: word length (default 4).

    Returns:
        dict mapping each of the 4**k possible k-mers to its count.
    """
    # Replaces the project-local alpha_combs helper with the stdlib
    # equivalent; itertools.product over a sorted alphabet yields the same
    # lexicographic order.
    from itertools import product
    result = {"".join(p): 0 for p in product("ACGT", repeat=k)}
    for i in range(len(string) - k + 1):
        result[string[i:i + k]] += 1
    return result
if __name__ == "__main__":
    # Given: a DNA string s in FASTA format (length at most 100 kbp).
    # Return: the 4-mer composition of s.
    fasta_lines = sys.stdin.read().splitlines()
    dna = next(iter(parse_fasta(fasta_lines).values()))
    counts = kmer_comp(dna)
    print(' '.join(str(c) for c in counts.values()))
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Article(models.Model):
    # A publishable article written by a Django auth user.
    # NOTE(review): related_name='publisher' is an odd name for the reverse
    # set of a user's articles -- confirm callers before renaming.
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='publisher', null=False)
    # Set once when the row is first created (auto_now_add).
    publication_date = models.DateTimeField(auto_now_add=True, null=False)
    headline = models.CharField(max_length=100, null=False)
    content = models.TextField(null=False)
    # Draft/published flag; new articles start unpublished.
    published = models.BooleanField(default=False,)

    def __str__(self):
        # Human-readable representation (admin list, shell, etc.).
        return self.headline
class Like(models.Model):
    # A per-user like record for an article.
    liked_article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='liked_article', null=False)
    liked_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name='liked_user', null=False)
    # Toggle flag rather than row presence; defaults to not-liked.
    like = models.BooleanField(default=False)
class Comment(models.Model):
    # A user comment attached to an article.
    commented_article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='commented_article',
                                          null=False)
    commented_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name='commented_user', null=False)
    comment = models.CharField(max_length=255, null=False)
    # Set once when the comment is created.
    commented_date = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.comment
|
import numpy as np
import matplotlib.pyplot as plt
import pickle
import random
import tractor.sdss as sdss
import astrometry.sdss as asdss
import astrometry.util.fits as aufits
from CelestePy.util.data import make_fits_images
from CelestePy.util.data.get_data import photoobj_to_celestepy_src
from CelestePy.celeste import FitsImage, gen_src_image_with_fluxes
from CelestePy.celeste_src import SrcParams
# Sampler configuration and cached-prior filenames.
ITERATIONS = 10000
STAR_FLUXES_FILE = 'star_fluxes_mog.pkl'
GAL_FLUXES_FILE = 'gal_fluxes_mog.pkl'
GAL_SHAPE_FILE = 'gal_shape_mog.pkl'
# SDSS photometric bands, in the order used throughout this script.
BANDS = ['u', 'g', 'r', 'i', 'z']
STRIPE_82_DATA_DIR = "../../data/stripe_82_dataset/"
import sys, os
# Make the stripe-82 loader importable from the data directory.
sys.path.append(STRIPE_82_DATA_DIR)
from load_stripe82_square import df_from_fits, create_matched_dataset
# When True, proposals use simplified/fixed shape parameters (see below).
TEST = True
def propose_from_gal_prior(star, fluxes_prior, shape_prior,
                           test=False):
    """Propose galaxy parameters for `star` by sampling the galaxy priors.

    Args:
        star: current star-typed source; its location (params.u) is reused.
        fluxes_prior: prior with .sample() over log-fluxes.
        shape_prior: (re, ab, phi) priors over transformed shape parameters.
        test: when True, use small fixed shape values instead of samples.

    Returns:
        SrcParams for a galaxy (a=1) at the star's location.

    NOTE(review): the order of the .sample() calls (fluxes, re, ab, phi)
    affects the RNG stream -- do not reorder.
    """
    re_prior, ab_prior, phi_prior = shape_prior
    fluxes = fluxes_prior.sample()[0]
    # do reverse transformation
    re = re_prior.sample()[0,0]
    ab = ab_prior.sample()[0,0]
    phi = phi_prior.sample()[0,0]
    # propose star-like shape parameters
    if test:
        phi = 0.01
        rho = 0.01
        sigma = 0.01
    else:
        # inverse of the log / logit transforms used when fitting the priors
        sigma = np.exp(re)
        rho = np.exp(ab) / (1 + np.exp(ab))
        phi = np.pi * np.exp(phi) / (1 + np.exp(phi))
    return SrcParams(star.params.u,
                     a = 1,
                     v = star.params.u,
                     theta = 0.5,
                     phi = phi,
                     sigma = sigma,
                     rho = rho,
                     fluxes = np.exp(fluxes))
def propose_from_star_prior(gal, prior, test=False):
    """Propose star parameters for `gal` by sampling the star flux prior.

    Args:
        gal: current galaxy-typed source; its location (params.u) is reused.
        prior: prior with .sample() over log-fluxes.
        test: unused; kept for signature symmetry with the galaxy proposal.

    Returns:
        SrcParams for a star (a=0) at the galaxy's location.
    """
    fluxes = prior.sample()[0]
    return SrcParams(gal.params.u,
                     a = 0,
                     fluxes = np.exp(fluxes))
def calc_src_likelihood(src, images, xlims=None, ylims=None):
    """Poisson log-likelihood of `src` against a list of images.

    When `xlims`/`ylims` are given, the model patch is evaluated on those
    fixed windows (so two sources can be scored on identical pixels);
    otherwise each image chooses its own patch, and the limits actually
    used are returned for reuse.

    Returns:
        (log_likelihood, ylims_used, xlims_used)
    """
    total_ll = 0
    new_xlims, new_ylims = [], []
    for idx, img in enumerate(images):
        if xlims:
            patch, ylim, xlim = src.compute_model_patch(img,
                                                        xlim=xlims[idx],
                                                        ylim=ylims[idx])
        else:
            patch, ylim, xlim = src.compute_model_patch(img)
        new_xlims.append(xlim)
        new_ylims.append(ylim)
        # observed photon counts on the patch vs. model expectation
        observed = img.nelec[ylim[0]:ylim[1], xlim[0]:xlim[1]]
        expected = patch + img.epsilon
        total_ll += np.sum(observed * np.log(expected) - expected)
    return total_ll, new_ylims, new_xlims
def calc_transition_probs(model_star, model_galaxy,
                          star_prior, gal_fluxes_prior, gal_shape_prior,
                          images,
                          to_star=True,
                          test=False):
    """Metropolis-Hastings acceptance probability for a star<->galaxy jump.

    Computes min(1, exp(m * (ll_star - ll_gal + log_prior_ratio))) where
    m = +1 for a galaxy->star proposal and -1 for star->galaxy.

    Args:
        model_star / model_galaxy: the two competing source models.
        star_prior, gal_fluxes_prior: flux priors with .score().
        gal_shape_prior: (sigma, rho, phi) priors scored in transformed space.
        images: images the likelihood is evaluated over.
        to_star: direction of the proposed jump.
        test: when True, the galaxy shape prior term is skipped.
    """
    # sign of the log acceptance ratio
    mult = -1
    if to_star:
        mult = 1
    # use the same patch limits for likelihoods
    star_ell, ylims, xlims = calc_src_likelihood(model_star, images)
    gal_ell, _, _ = calc_src_likelihood(model_galaxy, images, xlims, ylims)
    star = model_star.params
    galaxy = model_galaxy.params
    # log prior ratio of the fluxes under the two models
    prior = star_prior.score([star.fluxes])[0] - gal_fluxes_prior.score([galaxy.fluxes])[0]
    # shape priors are scored in log / logit space (inverse of the proposal
    # transformation in propose_from_gal_prior)
    sigma_prior = gal_shape_prior[0].score([np.log(galaxy.sigma)])[0]
    rho_prior = gal_shape_prior[1].score([np.log(galaxy.rho / (1 - galaxy.rho))])[0]
    phi_prior = gal_shape_prior[2].score([np.log(galaxy.phi / (np.pi - galaxy.phi))])[0]
    shape_prior = sigma_prior + rho_prior + phi_prior
    if not test:
        prior -= shape_prior
    # min(1, exp(...)) computed in log space to avoid overflow
    if mult * (star_ell - gal_ell + prior) > 0:
        return 1
    else:
        return np.exp(mult * (star_ell - gal_ell + prior))
if __name__=="__main__":
    # NOTE(review): the prior loading below is commented out, yet
    # star_prior / gal_fluxes_prior / gal_shape_prior are used further
    # down; removing the `assert False` early-stop would raise NameError.
    #star_file = open(STAR_FLUXES_FILE, 'rb')
    #gal_shape_file = open(GAL_SHAPE_FILE, 'rb')
    #star_prior = pickle.load(star_file)
    #gal_shape_prior = pickle.load(gal_shape_file)
    #if TEST:
    #    gal_fluxes_prior = star_prior
    #else:
    #    gal_fluxes_file = open(GAL_FLUXES_FILE, 'rb')
    #    gal_fluxes_prior = pickle.load(gal_fluxes_file)
    #    gal_fluxes_file.close()
    #star_file.close()
    #gal_shape_file.close()
    # extract a single source
    test_primary_fn = os.path.join(STRIPE_82_DATA_DIR, "square_4263_4.fit")
    test_coadd_fn = os.path.join(STRIPE_82_DATA_DIR, "square_106_4.fit")
    primary_df = df_from_fits(test_primary_fn)
    coadd_df = df_from_fits(test_coadd_fn)
    # create a matched dataset - coadd source (ground truth) to
    # primary sources (baseline)
    primary_matched, coadd_matched, dists = create_matched_dataset(primary_df, coadd_df)
    #################################################
    # look at the breakdown by field
    #################################################
    print "Stripe 82 dataset statistics:"
    field_info = primary_matched[['run', 'camcol', 'field']].drop_duplicates()
    for field in np.sort(field_info.field):
        primary_field_df = primary_matched[primary_matched.field == field]
        # per-field counts by photo type (names imply 6 = star, 3 = galaxy)
        num_stars = np.sum(primary_field_df.type==6)
        num_gals = np.sum(primary_field_df.type==3)
    ########################################################
    # subselect stripe field 672 - get existing sources
    ########################################################
    run, camcol, field = 4263, 4, 367
    idx = np.where(primary_matched.field == field)[0]
    primary_field_df = primary_matched.iloc[idx]
    coadd_field_df = coadd_matched.iloc[idx]
    imgfits = make_fits_images(run, camcol, field)
    # list of images, list of celeste sources
    imgs = [imgfits[b] for b in BANDS]
    import CelestePy.model_sources as models
    reload(models)
    model = models.CelesteGMMPrior()
    model.initialize_sources(photoobj_df = primary_field_df)
    model.add_field(img_dict = imgfits)
    # keep only the two brightest galaxy-typed sources
    bsrcs, bidx = model.get_brightest(object_type='galaxy', num_srcs = 2, return_idx=True)
    model.srcs = bsrcs
    # create a random galaxy
    src = bsrcs[1]
    #src.params.fluxes = np.exp(gal_fluxes_prior.sample()[0])
    src.params.rho = 0.01
    src.params.phi = 0.01
    # transition bookkeeping: proposals vs accepted jumps in each direction
    to_star_propose = 0
    to_star_trans = 0
    to_gal_propose = 0
    to_gal_trans = 0
    # ....
    src.resample_type()
    # NOTE(review): deliberate early stop -- everything below is dead code.
    assert False
    for i in range(ITERATIONS):
        print "iteration", i
        #fig, axarr = plt.subplots(2, 3)
        #model_src.plot(imgs[2], *axarr[0])
        if src.params.a == 1:
            # currently a galaxy: propose jumping to a star
            to_star_propose += 1
            star = propose_from_star_prior(src, star_prior, test=TEST)
            star = models.Source(star, model)
            prob = calc_transition_probs(star, src,
                                         star_prior, gal_fluxes_prior, gal_shape_prior,
                                         imgs, test=TEST)
            print "acceptance prob to star", prob
            #new_model_src.plot(imgs[2], *axarr[1])
            if random.random() < prob:
                src = star
                to_star_trans += 1
                print "transition to star!"
        elif src.params.a == 0:
            # currently a star: propose jumping to a galaxy
            to_gal_propose += 1
            gal = propose_from_gal_prior(src, gal_fluxes_prior, gal_shape_prior, test=TEST)
            gal = models.Source(gal, model)
            prob = calc_transition_probs(src, gal,
                                         star_prior, gal_fluxes_prior, gal_shape_prior,
                                         imgs, to_star=False, test=True)
            print "acceptance prob to galaxy", prob
            #new_model_src.plot(imgs[0], *axarr[1])
            if random.random() < prob:
                src = gal
                to_gal_trans += 1
                print "transition to galaxy!"
        else:
            break
    print "to-galaxy proposals: %d, transitions: %d" % (to_gal_propose, to_gal_trans)
    print "to-star proposals: %d, transitions: %d" % (to_star_propose, to_star_trans)
|
import tensorflow as tf
import numpy as np
class classifier:
    """LSTM sequence classifier over embedded token ids (TF1 graph mode).

    The graph is built at construction time: an embedding lookup feeds a
    peephole LSTM with output dropout; per-timestep outputs are mean-pooled
    over the true sequence length and projected to 2 logits. The many
    `infer_*` methods re-run the trained graph on systematically perturbed
    inputs (word replacement, insertion, swapping, windows) for probing.

    NOTE(review): class name is lowercase (PEP 8 violation) but is kept
    because callers reference it.
    """
    def __init__(self,
                 learning_rate,
                 hidden_size,
                 batch_size,
                 max_time,
                 embeddings,
                 global_step):
        self.batch_size = batch_size
        # Decay the learning rate by 0.8 every 500 global steps.
        self.learning_rate = tf.train.exponential_decay(
            learning_rate, global_step, 500, 0.8, staircase=True)
        self.hidden_size = hidden_size
        self.batch_size = batch_size  # NOTE(review): duplicate of the assignment above
        self.max_time = max_time
        # Embedding matrix is frozen (a constant), not trained.
        self.embeddings = tf.constant(embeddings, name = "emb")
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.global_step = global_step
        tf.summary.scalar('Learning Rate', self.learning_rate)
        self.make()
        self.saver = tf.train.Saver(max_to_keep = 200)

    def make(self):
        """Construct placeholders, the RNN, loss, metrics and summaries."""
        # True per-example lengths; float32 because they divide the pooled sum.
        self.sequence_length = tf.placeholder(tf.float32, shape = (self.batch_size))
        self.inputs = tf.placeholder(tf.int32, shape = (self.batch_size, self.max_time))
        self.embed = tf.nn.embedding_lookup(self.embeddings, self.inputs)
        # One-hot targets over the two classes.
        self.targets = tf.placeholder(tf.int64, shape = (self.batch_size,2))
        self.keep_prob = tf.placeholder(tf.float32, shape = None)
        cell = tf.nn.rnn_cell.LSTMCell(
            num_units = self.hidden_size,
            use_peepholes=True)
        dropout_cell = tf.contrib.rnn.DropoutWrapper(
            cell = cell,
            output_keep_prob = self.keep_prob
        )
        self.output, state = tf.nn.dynamic_rnn(
            cell = dropout_cell,
            inputs = self.embed,
            sequence_length = self.sequence_length,
            initial_state = cell.zero_state(self.batch_size, tf.float32))
        # Mean-pool RNN outputs over time, dividing by each true length.
        self.sum = tf.reduce_sum(self.output, axis = 1)
        self.mean = tf.divide(self.sum, tf.expand_dims(self.sequence_length,1))
        self.logits = tf.contrib.layers.fully_connected(self.mean,2,activation_fn = None)
        #self.logits = self.mean
        self.probability = tf.nn.softmax(self.logits)
        self.decision = tf.argmax(self.probability,axis=1)
        self.actual = tf.argmax(self.targets,axis=1)
        self.probe = self.decision
        self.metric_accuracy,self.update_accuracy = \
            tf.metrics.accuracy(self.actual,self.decision)
        self.xent = tf.nn.softmax_cross_entropy_with_logits(
            labels = self.targets,
            logits = self.logits)
        self.loss = tf.reduce_mean(self.xent)
        # warning: changed logits to probability - may be numerically unstable
        # Gradients of the first example's class probabilities / logit with
        # respect to the embedded inputs; consumed by the infer_* methods.
        self.pos_grad = tf.gradients(self.probability[0][0], self.embed)
        self.neg_grad = tf.gradients(self.probability[0][1], self.embed)
        self.logit_grad = tf.gradients(self.logits[0][0], self.embed)
        self.updates = self.optimizer.minimize(self.loss, global_step = self.global_step)
        tf.summary.scalar('Metric Accuracy', self.update_accuracy)
        tf.summary.scalar('Loss', self.loss)
        self.merged = tf.summary.merge_all()

    def infer_dpg(self,sess,rv):
        """Run one review `rv`: returns decision, probabilities and input gradient."""
        decision, probability, grad = \
            sess.run([self.decision,self.probability,self.pos_grad],
                     feed_dict = \
                     {self.inputs:rv.index_vector,
                      self.targets:rv.targets,
                      self.sequence_length:[rv.length],
                      self.keep_prob:1.0})
        return decision, probability, grad

    def infer_rep_dpg(self,sess,rv,word_index):
        """Replace each position of `rv` in turn with `word_index` and classify.

        Row i of the batch is rv with token i substituted (fill_diagonal).
        """
        index_matrix = np.tile(rv.index_vector,(rv.length,1) )
        np.fill_diagonal(index_matrix,word_index)
        target_matrix = np.tile(rv.targets,(rv.length,1))
        decision, probability = \
            sess.run([self.decision,self.probability],
                     feed_dict = \
                     {self.inputs:index_matrix,
                      self.targets:target_matrix,
                      self.sequence_length:[rv.length]*rv.length,
                      self.keep_prob:1.0})
        return decision, probability

    def infer_batched_prob(self,sess,rv,word_index,per_batch,top_idx):
        """For each position in `top_idx`, try `per_batch` consecutive word ids.

        Fills one batch with rv copies where position top_idx[j] is replaced
        by word_index, word_index+1, ... word_index+per_batch-1.
        """
        num_top = self.batch_size//per_batch
        index_matrix = np.tile(rv.index_vector,(self.batch_size,1))
        target_matrix = np.tile(rv.targets,(self.batch_size,1))
        n = 0
        for idx in top_idx:
            for i in range(per_batch):
                index_matrix[n,idx] = word_index + i
                n = n + 1
        decision, probability, grad = \
            sess.run([self.decision,self.probability,self.pos_grad],
                     feed_dict = \
                     {self.inputs:index_matrix,
                      self.targets:target_matrix,
                      self.sequence_length:[rv.length]*self.batch_size,
                      self.keep_prob:1.0})
        return decision, probability, grad

    # inserts all words at index 'insert_location'
    def infer_insert(self,sess,rv,insert_location,divs,top_k):
        """Split rv into `divs` segments, insert every candidate word id at
        `insert_location` of each segment, and classify the whole batch."""
        per = rv.length // divs
        end = per*divs
        new = per+1
        index_matrix = np.zeros((self.batch_size,new),'int')
        # column-major reshape: one segment per column, then insert a slot
        wim = np.reshape(rv.index_vector[0,0:end],(per,divs),'F')
        wim = np.insert(wim,insert_location,0,axis=0)
        wim = np.reshape(wim,(1,end+divs),'F')
        target_matrix = np.tile(rv.targets,(self.batch_size,1))
        for i in range(divs):
            start = i*top_k; end = (i+1)*top_k
            index_matrix[start:end,:] = np.tile(wim[0,new*i:new*(i+1)],(top_k,1))
        #index_matrix = np.tile(wim,(self.batch_size//divs,1))
        # candidate word ids cycle through 0..batch_size//divs-1 per row
        index_matrix[:,insert_location] = \
            np.arange(self.batch_size) % (self.batch_size//divs)
        d,p,g = sess.run( [self.decision, self.probability, self.pos_grad],
                          feed_dict = \
                          {
                              self.inputs: index_matrix,
                              self.targets: target_matrix,
                              self.sequence_length: [new] * self.batch_size,
                              self.keep_prob: 1.0
                          })
        return d,p,g,index_matrix

    def infer_swap(self,sess,rv,swap_location,divs,top_k):
        """Like infer_insert, but overwrite the word at `swap_location` of each
        segment with random replacement ids; returns the ids used too."""
        per = rv.length // divs
        end = per*divs
        index_matrix = np.zeros((self.batch_size,per),'int')
        wim = np.copy(np.reshape(rv.index_vector[0,0:end],(per,divs),'F'))
        wim[swap_location,:] = 0
        wim = np.reshape(wim,(1,end),'F')
        target_matrix = np.tile(rv.targets,(self.batch_size,1))
        # random vocabulary ids drawn from the first 10000 words
        replacement_vector = np.random.choice(np.arange(10000),size=self.batch_size)
        for i in range(divs):
            start = i*top_k; end = (i+1)*top_k
            index_matrix[start:end,:] = np.tile(wim[0,per*i:per*(i+1)],(top_k,1))
        index_matrix[:,swap_location] = replacement_vector
        #np.arange(self.batch_size) % (self.batch_size//divs)
        d,p,g = sess.run( [self.decision, self.probability, self.pos_grad],
                          feed_dict = \
                          {
                              self.inputs: index_matrix,
                              self.targets: target_matrix,
                              self.sequence_length: [per] * self.batch_size,
                              self.keep_prob: 1.0
                          })
        return d,p,g,index_matrix,replacement_vector

    def infer_window(self,sess,rv,word_index,window_size):
        """Classify a window of `window_size` tokens around `word_index`,
        substituting each of the first 10000 vocabulary ids at that position."""
        w = window_size
        L = rv.length
        n = word_index
        # clamp the window [n1, n2) to lie inside the review
        n1 = n - w//2; n2 = n1 + w
        if n1 >= 0 and n2 <= L:
            i1 = n1; i2 = n2
        elif n1 < 0 and n2 <= L:
            i1 = 0; i2 = w
        elif n1 >=0 and n2 > L:
            i1 = L-w; i2 = L
        else:
            i1 = 0; i2 = rv.length
        index_matrix = np.tile(rv.index_vector[0,i1:i2],(10000,1))
        index_matrix[:,word_index-i1] = np.arange(10000)
        target_matrix = np.tile(rv.targets,(self.batch_size,1))
        d,p,g = sess.run( [self.decision, self.probability, self.pos_grad],
                          feed_dict = \
                          {
                              self.inputs: index_matrix,
                              self.targets: target_matrix,
                              self.sequence_length: [w] * self.batch_size,
                              self.keep_prob: 1.0
                          })
        return d,p,g

    def infer_multi(self,sess,rv,ii,K):
        # K is the list of source word indices to be replaced
        # batch size is fixed at 10,000 with number of
        # destination words = 10,000 / len(K)
        N = self.batch_size // K.size
        index_matrix = np.tile(rv.index_vector[0,:],(self.batch_size,1))
        c = 0
        # block c of N rows varies position k using candidate ids from ii
        for k in list(K):
            index_matrix[c*N:(c+1)*N,k] = ii[np.arange(N),k]
            c = c + 1
        target_matrix = np.tile(rv.targets,(self.batch_size,1))
        d,p,g = sess.run( [self.decision, self.probability, self.pos_grad],
                          feed_dict = \
                          {
                              self.inputs: index_matrix,
                              self.targets: target_matrix,
                              self.sequence_length: [rv.length] * self.batch_size,
                              self.keep_prob: 1.0
                          })
        return d,p,g
|
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import signal
import time
from multiprocessing import Process
from multiprocessing import Queue
from src.daemon import Daemon
from src.daemon import do_command
from src.process_pool import ProcessPool
from src.base_handler import BaseHandler
# PID file and log file live next to this script.
pid_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sample.pid")
log_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sample.log")
class SampleDaemon(Daemon):
    """Example daemon that runs a pool of handler worker processes."""

    def __init__(self):
        # stdout is redirected to the log file by the Daemon base class.
        super(SampleDaemon, self).__init__(pid_path, stdout=log_file)
        self.process_pool = ProcessPool()

    def sigterm(self, signum, frame):
        # SIGTERM handler installed in run(); shuts workers down cleanly.
        self.terminate_process()

    def terminate_process(self):
        self.process_pool.terminate_process()

    def add_handler(self, handler, process_count, **kwargs):
        # Register `process_count` worker processes running `handler`.
        self.process_pool.add_handler(handler, process_count, **kwargs)

    def run(self):
        # Daemon entry point: create the workers, start them, then block
        # until they all exit.
        signal.signal(signal.SIGTERM, self.sigterm)
        self.process_pool.create_process()
        self.process_pool.start_process()
        self.process_pool.join_process()
class SampleHandler1(BaseHandler):
    """Demo worker: reports its pid every five seconds until terminated."""

    def do_work(self):
        while True:
            if self.is_term:
                break
            print("I am SampleHandler1 - pid:", os.getpid())
            time.sleep(5)
class SampleHandler2(BaseHandler):
    """Demo worker: reports its pid every five seconds until terminated."""

    def do_work(self):
        while True:
            if self.is_term:
                break
            print("I am SampleHandler2 - pid", os.getpid())
            time.sleep(5)
if __name__ == '__main__':
    # Build the daemon with two handler types: one worker of the first
    # kind and two of the second.
    daemon = SampleDaemon()
    daemon.add_handler(SampleHandler1, 1)
    daemon.add_handler(SampleHandler2, 2)
    # do_command() parses the command-line verb (start/stop/restart/status).
    operation = do_command()
    if operation == 'start':
        daemon.start()
    elif operation == 'restart':
        daemon.restart()
    elif operation == 'stop':
        daemon.stop()
    elif operation == 'status':
        pid = daemon.get_pid()
        if not pid:
            print("Daemon is not running;)")
        else:
            print("Daemon is running (PID: %d)" % pid)
    else:
        # BUG FIX: typo "Invaild" -> "Invalid".
        print("Invalid command")
    sys.exit(0)
|
from som.interp_type import is_ast_interpreter
from som.primitives.primitives import Primitives
from som.vm.globals import trueObject, falseObject, nilObject
from som.vmobjects.primitive import UnaryPrimitive, BinaryPrimitive, TernaryPrimitive
if is_ast_interpreter():
from som.vmobjects.block_ast import AstBlock as _Block
else:
from som.vmobjects.block_bc import BcBlock as _Block
def _not(_rcvr):
    # On the SOM False class, `not` answers true.
    return trueObject
def _and(_rcvr, _arg):
    # false and: anything is false; the argument is never evaluated.
    return falseObject
def _or_and_if_false(_rcvr, arg):
    # For false, `or:`, `||` and `ifFalse:` all yield the argument:
    # evaluate it if it is a block, otherwise return it as-is.
    if isinstance(arg, _Block):
        block_method = arg.get_method()
        return block_method.invoke_1(arg)
    return arg
def _if_true(_rcvr, _arg):
    # false ifTrue: answers nil without evaluating the block.
    return nilObject
def _if_true_if_false(_rcvr, _true_block, false_block):
    # false ifTrue:ifFalse: ignores the true branch and yields the false
    # branch: evaluate it if it is a block, otherwise return it directly.
    if isinstance(false_block, _Block):
        block_method = false_block.get_method()
        return block_method.invoke_1(false_block)
    return false_block
class FalsePrimitivesBase(Primitives):
    """Primitive operations installed on the SOM `False` class."""

    def install_primitives(self):
        self._install_instance_primitive(UnaryPrimitive("not", _not))
        self._install_instance_primitive(BinaryPrimitive("and:", _and))
        self._install_instance_primitive(BinaryPrimitive("&&", _and))
        self._install_instance_primitive(BinaryPrimitive("or:", _or_and_if_false))
        self._install_instance_primitive(BinaryPrimitive("||", _or_and_if_false))
        # BUG FIX: _if_true was defined in this module but never installed;
        # `false ifTrue: blk` must answer nil without evaluating the block.
        self._install_instance_primitive(BinaryPrimitive("ifTrue:", _if_true))
        self._install_instance_primitive(BinaryPrimitive("ifFalse:", _or_and_if_false))
        self._install_instance_primitive(
            TernaryPrimitive("ifTrue:ifFalse:", _if_true_if_false)
        )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
``__call__`` ๅฎไนไบ obj()็่กไธบใ
ไนๅฐฑๆฏ::
>>> myclass = MyClass()
>>> myclass(*args, *kwargs) # call myclass.__call__(*args, *kwargs) method
...
"""
class FallingDistanceCalculator(object):
    """Callable computing free-fall distance: d = g * t^2 / 2."""

    def __init__(self, g):
        # gravitational acceleration used for every subsequent call
        self.g = g

    def __call__(self, t):
        """Return the distance fallen after `t` seconds."""
        time_squared = t ** 2
        return (self.g * time_squared) / 2
if __name__ == "__main__":
g = 9.8
calculator = FallingDistanceCalculator(g)
seconds = 3.0
dist = calculator(seconds)
assert abs(dist - g * seconds ** 2 / 2) <= 0.001
|
# File_name: lambda_function.py
# Purpose: Lambda function to stop all aws services that cost money in dev and test
# Author: Sรธren Wandrup-Bendixen
# Email: soren.wandrup-Bendixen@cybercom.com
# Created: 2019-07-01
# Called from CloudWatch event rule scheduled : cron( 0 18,19 ? * * * )
# Total time to run per execution is 10 minutes. And is set to run twice a day.
# To be developed
# https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/checklistforunwantedcharges.html
# stop_fargate
# Elastic Beanstalk is designed to ensure that all the resources that you need are running, which means that it automatically relaunches any services that you stop. To avoid this, you must terminate your Elastic Beanstalk environment before you terminate resources that Elastic Beanstalk has created
# Services Started in AWS OpsWorks
# If you use the AWS OpsWorks environment to create AWS resources, you must use AWS OpsWorks to terminate those resources or AWS OpsWorks restarts them. For example, if you use AWS OpsWorks to create an Amazon EC2 instance, but then terminate it by using the Amazon EC2 console, the AWS OpsWorks auto healing feature categorizes the instance as failed and restarts it. For more information, see AWS OpsWorks User Guide.
# stop_cloudfront (stop_stack_set_operation)
# stop_alexaforbusiness
# stop_llearning
# make snapshot of elastic search before delete
# stop_support root account, report if not free
import datetime
print ('Start time: ' + str(datetime.datetime.now()))
import json
import simple_notification
import traceback
import get_list_of_possible_resources
import all_region_names
import stop_autoscaling
import stop_elb
import stop_ecs
import stop_eks
import stop_ec2
import stop_nat_gateway
import stop_sagemaker
import stop_robomaker
import stop_rds
import stop_emr
import stop_dax
import stop_kinesis
import stop_kinesisanalytics
import stop_elasticache
import stop_glue
import stop_elastisearch
import stop_dms
import stop_redshift
import stop_neptune
import stop_batch
import stop_personalize
import stop_shield
import stop_lightsail
import stop_sdb
import stop_dynamodb
import stop_datapipeline
import stop_qldb
import stop_vpc_endpoint
import stop_elastic_ip
import stop_secrets_manager
import stop_cloudwatch
def lambda_handler(event, context):
    """Stop/delete cost-incurring AWS resources across a set of regions.

    event['region_set'] selects the regions to sweep. Each stop_* helper
    appends a description of what it touched to RunningInstances; the
    collected list is sent out via simple_notification and returned.

    Returns:
        dict with "statusCode" (200 on success, 404 on any exception) and
        "body" (JSON list of affected resources, possibly empty string).
    """
    RunningInstances = []
    instanceList = ''
    try:
        # only in play when a list of clients is wanted.
        #get_list_of_possible_resources.fail_with_list('?')
        region_names = all_region_names.get_list('ec2',event['region_set'])
        # region_names = ['eu-west-1'] # for simple one region testing; Ireland
        # region_names = ['us-east-1'] # for simple one region testing; N. Virginia
        # region_names = ['us-west-2'] # for simple one region testing; Oregon
        # region_names = ['eu-north-1'] # for simple one region testing; Stockholm
        # region_names = ['me-south-1'] # for simple one region testing; Bahrain
        # region_names = ['ap-east-1'] # for simple one region testing; Hongkong
        for region_name_ in region_names:
            # RunningInstances.append( str(event['region_set']) + '#' + region_name_ + '#' )
            print (region_name_ + ' time: ' + str(datetime.datetime.now()))
            stop_dynamodb.change_billing_mode('dynamodb', region_name_, RunningInstances)
            # You will still be charged for inactive pipelines. Not a lot but some
            #stop_datapipeline.inactivate_pipelines('datapipeline', region_name_, RunningInstances)
            stop_datapipeline.delete_pipelines('datapipeline', region_name_, RunningInstances)
            stop_qldb.delete_ledgers('qldb', region_name_, RunningInstances)
            stop_autoscaling.suspend_processes('autoscaling', region_name_, RunningInstances)
            stop_batch.disable_job_queues('batch', region_name_, RunningInstances)
            stop_emr.stop_clusters('emr', region_name_, RunningInstances) # Stop EMR before ec2's otherwise the ec2 of emr will be terminated individually
            stop_elb.delete_instances('elb', region_name_, RunningInstances) # Delete load balancers
            stop_elb.delete_instances('elbv2', region_name_, RunningInstances) # Delete load balancers
            stop_ecs.stop_instances('ecs', region_name_, RunningInstances) # Stop: Amazon Elastic Container Service (ECS)
            stop_eks.delete_clusters('eks', region_name_, RunningInstances) # Stop: Amazon Elastic Container Service for Kubernetes CreateOperation
            stop_ec2.stop_instances('ec2', region_name_, RunningInstances)
            stop_nat_gateway.delete_ec2_nat_gateways('ec2', region_name_, RunningInstances) # Stop: Amazon Elastic Compute Cloud NatGateway
            stop_sagemaker.stop_jobs('sagemaker', region_name_, RunningInstances)
            stop_robomaker.stop_jobs('robomaker', region_name_, RunningInstances)
            stop_lightsail.stop_instances('lightsail', region_name_, RunningInstances)
            stop_lightsail.stop_relational_databases('lightsail', region_name_, RunningInstances)
            stop_lightsail.autostart_relational_databases('lightsail', region_name_, RunningInstances)
            stop_rds.stop_instances('rds', region_name_, RunningInstances)
            stop_rds.autostart_instances('rds', region_name_, RunningInstances)
            stop_rds.stop_clusters('rds', region_name_, RunningInstances)
            stop_rds.autostart_clusters('rds', region_name_, RunningInstances)
            stop_rds.stop_clusters('docdb', region_name_, RunningInstances) # stop docdb - same logic as rds cluster
            stop_sdb.delete_domains('sdb', region_name_, RunningInstances) # stops simpleDb (deletes doains) started when performing debug of EMR!
            stop_dax.delete_clusters('dax', region_name_, RunningInstances)
            stop_kinesis.delete_streams('kinesis', region_name_, RunningInstances)
            stop_kinesisanalytics.stop_applications('kinesisanalytics', region_name_, RunningInstances)
            stop_elasticache.delete_clusters('elasticache', region_name_, RunningInstances)
            stop_glue.stop_jobs('glue', region_name_, RunningInstances)
            stop_elastisearch.delete_domains('es', region_name_, RunningInstances) # ElasticSearch
            stop_dms.delete_instances('dms', region_name_, RunningInstances) # Database Migration Service
            # stop_redshift.change_to_smallest('redshift', region_name_, RunningInstances) # As I see it either I have to delete the cluster or turn it into a single-node cluster. Cant just stop it.
            stop_redshift.delete_clusters('redshift', region_name_, RunningInstances) # As I see it either I have to delete the cluster or turn it into a single-node cluster. Cant just stop it.
            stop_neptune.delete_clusters('neptune', region_name_, RunningInstances)
            stop_neptune.delete_instances('neptune', region_name_, RunningInstances)
            stop_personalize.delete_campaigns('personalize', region_name_, RunningInstances)
            stop_vpc_endpoint.delete_vpc_endpoints('ec2', region_name_, RunningInstances)
            # Elastic IPs that are not in use cost 0.01$/hour
            stop_elastic_ip.release_inactive_elastic_ips('ec2', region_name_, RunningInstances)
            # A secret cost 0.4 $/months
            stop_secrets_manager.delete_secrets('secretsmanager', region_name_, RunningInstances)
            #stop_cloudwatch.delete_alarms('cloudwatch', region_name_, RunningInstances)
        # Global services
        stop_shield.delete_advanced('shield', RunningInstances) # WAF shield
        if len(RunningInstances) > 0:
            instanceList = json.dumps(RunningInstances)
            simple_notification.send_info(instanceList)
        statusCode = 200
    except Exception as exception:
        # Best-effort: report what was done so far plus the traceback, then
        # return a failure status instead of re-raising (Lambda-friendly).
        if len(RunningInstances) > 0:
            instanceList = json.dumps(RunningInstances)
            simple_notification.send_info(instanceList + ' - exception ' + traceback.format_exc() )
        #raise exception
        statusCode = 404
    print ('End time: ' + str(datetime.datetime.now()))
    return {
        "statusCode": statusCode,
        "body": instanceList
    }
|
# Read an integer array and a window length k, then report the minimum
# sum over all length-k windows of the array treated as circular.
values = list(map(int, input().split()))
k = int(input())

# Start from the first window, then slide one position at a time,
# wrapping indices around the end of the array.
window_sum = sum(values[0:k])
best = window_sum
for start in range(len(values)):
    #print (S)
    window_sum += values[(start + k) % len(values)] - values[start % len(values)]
    best = min(best, window_sum)
print(best)
#
# Luca Soldaini
# ls988@georgetown.edu
# Platform: OS X
# Language/Environment: python 2.7.8
#
# In accordance with the class policies and Georgetown's Honor Code,
# I certify that, with the exceptions of the class resources and those
# items noted below, I have neither given nor received any assistance
# on this project.
#
# built-in modules
import math
def polynomial_kernel(x, z, d=2, c=0.0):
    """ Polynomial function kernel
    args:
        x (iteratable object): first operand
        z (iteratable object): second operand
        d (int): degree of polynomial
        c (float): slack for polynomial
    return:
        v (float): kernel value (x . z + c) ** d
    """
    dot_product = sum(xi * zi for xi, zi in zip(x, z))
    return (dot_product + c) ** d
def rbf_kernel(x, z, sigma=1.0):
    """ Gaussian Radial basis function kernel
    args:
        x (iteratable object): first operand
        z (iteratable object): second operand
        sigma (float): sigma value for gaussian fuction
    return:
        v (float): exp(-||x - z||^2 / (2 * sigma^2))
    """
    dsq = sum((xi - zi) ** 2 for xi, zi in zip(x, z))
    # BUG FIX: the original `(-dsq) / 2 * (sigma ** 2)` multiplied by
    # sigma**2 instead of dividing by 2*sigma**2 (precedence error);
    # results were only correct for sigma == 1.
    v = math.exp(-dsq / (2 * sigma ** 2))
    return v
|
from __future__ import absolute_import
import os
import tarfile
import zipfile
from distutils.version import StrictVersion # pylint: disable=import-error
import pip
import pkg_resources
import requests
from raincoat.constants import FILE_NOT_FOUND
from raincoat import github_utils
def download_package(package, version, download_dir):
    """Download `package==version` (without dependencies) into `download_dir`.

    Raises:
        ValueError: if pip exits with a non-zero status.

    NOTE(review): `pip.main` was removed from pip's public API in pip 10;
    this only works with an old pinned pip -- confirm the environment.
    """
    full_package = "{}=={}".format(package, version)
    exit_code = pip.main(
        ["download", "--no-deps", "-d", download_dir, full_package])
    if exit_code != 0:
        raise ValueError("Error while fetching {} via pip.".format(
            full_package))
def open_in_wheel(wheel, pathes):
    """Read the given paths out of a wheel (zip) archive.

    Returns a dict mapping each path to its UTF-8 decoded contents, or
    FILE_NOT_FOUND when the archive has no such member.
    """
    sources = {}
    with zipfile.ZipFile(wheel, 'r') as archive:
        for path in pathes:
            try:
                contents = archive.open(path, 'r').read().decode("UTF-8")
            except KeyError:
                contents = FILE_NOT_FOUND
            sources[path] = contents
    return sources
def open_in_tarball(tarball, pathes):
    """Read the given paths out of a source tarball (.tar.gz).

    Paths are interpreted relative to the archive's single top-level
    directory (the usual sdist layout). Returns a dict mapping each path
    to its UTF-8 decoded contents, or FILE_NOT_FOUND for missing members.
    """
    sources = {}
    with tarfile.open(tarball, 'r:gz') as tf:
        # BUG FIX: tf.next() was previously called once per requested path,
        # advancing the archive iterator so that every path after the first
        # was joined to the wrong "top level" member. Resolve it once.
        top_level_dir = tf.next().name
        for path in pathes:
            try:
                handler = tf.extractfile(os.path.join(top_level_dir, path))
                source = handler.read().decode("UTF-8")
            except KeyError:
                source = FILE_NOT_FOUND
            sources[path] = source
    return sources
def open_downloaded(download_path, pathes):
    """Open paths inside the single archive sitting in `download_path`.

    Dispatches on the archive extension: .gz (sdist tarball) or .whl
    (wheel). Assumes the directory contains exactly one file.

    Raises:
        NotImplementedError: for any other archive extension.
    """
    archive_name, = os.listdir(download_path)
    archive_path = os.path.join(download_path, archive_name)
    ext = os.path.splitext(archive_name)[1]
    if ext == ".gz":
        return open_in_tarball(archive_path, pathes)
    elif ext == ".whl":
        return open_in_wheel(archive_path, pathes)
    else:
        # BUG FIX: typo in the error message ("Unrecognize" -> "Unrecognized").
        raise NotImplementedError("Unrecognized archive format {}".format(ext))
def open_installed(installed_path, pathes):
    """Read each path relative to an installed package's location.

    Returns a dict mapping each path to its contents, or FILE_NOT_FOUND
    when the file does not exist.
    """
    sources = {}
    for path in pathes:
        try:
            # BUG FIX: the original `open(...).read()` leaked the file
            # handle; use a context manager so it is always closed.
            with open(os.path.join(installed_path, path)) as handle:
                source = handle.read()
        except IOError:
            source = FILE_NOT_FOUND
        sources[path] = source
    return sources
def get_current_or_latest_version(package):
    """Return (installed, version) for *package*.

    If the package is installed locally, returns (True, installed_version).
    Otherwise queries PyPI and returns (False, latest_stable_version),
    skipping pre-releases and version strings StrictVersion cannot parse.
    """
    try:
        return True, pkg_resources.get_distribution(package).version
    except pkg_resources.DistributionNotFound:
        pass
    # BUG FIX: use HTTPS directly instead of relying on the plain-HTTP
    # endpoint's redirect.
    pypi_url = "https://pypi.python.org/pypi/{}/json".format(package)
    releases = requests.get(pypi_url).json()["releases"]
    versions = []
    for version in releases:
        try:
            parsed_version = StrictVersion(version)
        except ValueError:
            # Not a strict X.Y[.Z] version (dev tags etc.): ignore it.
            continue
        if parsed_version.prerelease:
            continue
        versions.append((parsed_version, version))
    # Highest stable version, returned as originally spelled on PyPI.
    return False, max(versions)[1]
def get_current_path(package):
    """Filesystem location of the installed distribution for *package*."""
    distribution = pkg_resources.get_distribution(package)
    return distribution.location
def get_branch_commit(repo, branch):
    """Return the sha of the tip commit of `branch` in GitHub `repo`.

    Raises an HTTP error via raise_for_status on API failure.
    """
    # This may fail, but so far, I don't really know how.
    url = "https://api.github.com/repos/{}/branches/{}".format(repo, branch)
    with github_utils.get_session() as session:
        response = session.get(url)
        response.raise_for_status()
        payload = response.json()
    return payload["commit"]["sha"]
def download_files_from_repo(repo, commit, files):
    """Fetch raw file contents from a GitHub repo at a given commit.

    Returns a dict mapping each filename to its text, or FILE_NOT_FOUND
    when the file does not exist at that commit.
    """
    template = "https://raw.githubusercontent.com/{}/{}/{}"
    contents = {}
    with github_utils.get_session() as session:
        for filename in files:
            response = session.get(template.format(repo, commit, filename))
            if response.status_code != 200:
                contents[filename] = FILE_NOT_FOUND
            else:
                contents[filename] = response.text
    return contents
|
from typing import List
from typing import Tuple
import numpy as np
import tensorflow as tf
def get_dataset() -> Tuple[np.ndarray, np.ndarray]:
    """Return the four XOR input pairs and their binary targets."""
    inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
    targets = np.array([[0], [1], [1], [0]])
    return inputs, targets
class Model:
    """Skeleton of a hand-rolled training loop; all hooks are stubs."""

    def __init__(
        self,
        optimizer: tf.keras.optimizers.Optimizer,
        loss: tf.keras.losses.Loss,
        metric: tf.keras.metrics.Metric
    ) -> None:
        # Keep the training collaborators for the (not yet written) steps.
        self.optimizer = optimizer
        self.loss = loss
        self.metric = metric

    def _update_weights(self, x: np.ndarray, y: np.ndarray) -> None:
        """Single optimization step — not implemented yet."""
        pass

    def fit(self, x: np.ndarray, y: np.ndarray, epochs: int = 1) -> None:
        """Iterate over epochs; the per-epoch work is not implemented yet."""
        for _ in range(epochs):
            pass

    def predict(self, x: np.ndarray) -> np.ndarray:
        """Forward pass — not implemented yet."""
        pass

    def evaluate(self, x: np.ndarray, y: np.ndarray) -> List[float]:
        """Compute loss/metric over a dataset — not implemented yet."""
        pass
if __name__ == "__main__":
    x, y = get_dataset()
    # Problem dimensions for XOR (unused by the visible code below).
    num_features = 2
    num_targets = 1
    learning_rate = 0.5
    optimizer = tf.optimizers.Adam(learning_rate=learning_rate)
    # NOTE(review): MAE is an unusual loss for a binary task — presumably
    # intentional for this experiment; confirm before relying on it.
    loss = tf.keras.losses.MeanAbsoluteError()
    metric = tf.keras.metrics.BinaryAccuracy()
    model = Model(optimizer, loss, metric)
    model.fit(x, y, epochs=10)
|
from collections import Counter
from game_manager import GameMaster
from unittest import TestCase
import os
# NOTE(review): changing the CWD at import time affects every other module
# loaded in the same process — kept because the game files are resolved
# relative to the parent directory.
os.chdir("../..")
# Move up to the parent directory so that we can access the correct ground files.
class TestGameManager(TestCase):
    def test_tile_usage(self):
        """
        Smoke-test the whole stack: the game master drives every other
        component, so simply run one full game with two computer players
        to its conclusion. Detailed checks on tile doling, termination
        and scoring are still to be written.
        """
        master = GameMaster(computer_count=2)
        master.play_game()
        # TODO: Finish tests of completed games.
|
"""
OCI provides a set of services for Oracle Cloud Infrastructure provider.
"""
from diagrams import Node
class _OCI(Node):
    """Base node for all Oracle Cloud Infrastructure diagram resources.

    Subclasses inherit the provider key, the icon directory, and the
    font colour used when rendering nodes.
    """
    _provider = "oci"
    _icon_dir = "resources/oci"
    fontcolor = "#312D2A"
|
# ์ถ์ฒ : https://github.com/solaris33/deep-learning-tensorflow-book-code/blob/master/Ch06-AutoEncoder/mnist_classification_using_autoencoder_and_softmax_classifier_v2.py
# MNIST ์ซ์ ๋ถ๋ฅ๋ฅผ ์ํ Autoencoder+Softmax ๋ถ๋ฅ๊ธฐ ์์
import tensorflow as tf
# Download the MNIST dataset.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Convert the images to float32.
x_train, x_test = x_train.astype('float32'), x_test.astype('float32')
# Flatten the 28*28 images into 784-dimensional vectors.
x_train, x_test = x_train.reshape([-1, 784]), x_test.reshape([-1, 784])
# Normalize pixel values from [0, 255] into [0, 1].
x_train, x_test = x_train / 255., x_test / 255.
# Hyperparameters.
learning_rate_RMSProp = 0.02
learning_rate_GradientDescent = 0.5
num_epochs = 100  # number of training epochs
batch_size = 256
display_step = 1  # log progress every this many epochs
input_size = 784  # MNIST input dimension (28*28 images)
hidden1_size = 128  # units in the first hidden layer
hidden2_size = 64  # units in the second hidden layer
# Use the tf.data API to shuffle the data and serve it in batches.
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.shuffle(60000).batch(batch_size)
# Autoencoder model definition.
class AutoEncoder(object):
    """Sigmoid autoencoder: 784 -> 128 -> 64 encoder with mirrored decoder.

    Attribute names (Wh_1 ... bo) are load-bearing: the training steps
    collect trainable variables via vars(model).values().
    """

    def __init__(self):
        # Encoder parameters: 784 -> 128 -> 64.
        self.Wh_1 = tf.Variable(tf.random.normal([input_size, hidden1_size]))
        self.bh_1 = tf.Variable(tf.random.normal([hidden1_size]))
        self.Wh_2 = tf.Variable(tf.random.normal([hidden1_size, hidden2_size]))
        self.bh_2 = tf.Variable(tf.random.normal([hidden2_size]))
        # Decoder parameters: 64 -> 128 -> 784.
        self.Wh_3 = tf.Variable(tf.random.normal([hidden2_size, hidden1_size]))
        self.bh_3 = tf.Variable(tf.random.normal([hidden1_size]))
        self.Wo = tf.Variable(tf.random.normal([hidden1_size, input_size]))
        self.bo = tf.Variable(tf.random.normal([input_size]))

    def __call__(self, x):
        """Return (reconstruction, 64-d latent code) for input batch *x*."""
        encoded_1 = tf.nn.sigmoid(tf.matmul(x, self.Wh_1) + self.bh_1)
        encoded_2 = tf.nn.sigmoid(tf.matmul(encoded_1, self.Wh_2) + self.bh_2)
        decoded_1 = tf.nn.sigmoid(tf.matmul(encoded_2, self.Wh_3) + self.bh_3)
        reconstruction = tf.nn.sigmoid(tf.matmul(decoded_1, self.Wo) + self.bo)
        return reconstruction, encoded_2
# Softmax classifier definition.
class SoftmaxClassifier(object):
    """Linear softmax layer over the autoencoder's 64-d latent features."""

    def __init__(self):
        # Input is the 64-d compressed feature vector, not the raw
        # 784-pixel MNIST image.
        self.W_softmax = tf.Variable(tf.zeros([hidden2_size, 10]))
        self.b_softmax = tf.Variable(tf.zeros([10]))

    def __call__(self, x):
        """Return class probabilities for a batch of latent features."""
        logits = tf.matmul(x, self.W_softmax) + self.b_softmax
        return tf.nn.softmax(logits)
# MSE (mean squared error) loss.
@tf.function
def pretraining_mse_loss(y_pred, y_true):
    """Mean squared reconstruction error between prediction and target."""
    squared_error = tf.square(y_true - y_pred)
    return tf.reduce_mean(squared_error)
# Cross-entropy loss.
@tf.function
def finetuning_cross_entropy_loss(y_pred_softmax, y):
    """Mean categorical cross-entropy of softmax output vs. one-hot labels."""
    per_example = -tf.reduce_sum(y * tf.math.log(y_pred_softmax), axis=[1])
    return tf.reduce_mean(per_example)
# 1. Pre-training: optimizer and train step for reconstructing MNIST inputs.
pretraining_optimizer = tf.optimizers.RMSprop(learning_rate_RMSProp, epsilon=1e-10)
@tf.function
def pretraining_train_step(autoencoder_model, x):
    """One RMSProp step minimising reconstruction error on batch *x*."""
    # The reconstruction target is the input itself.
    y_true = x
    with tf.GradientTape() as tape:
        y_pred, _ = autoencoder_model(x)
        pretraining_loss = pretraining_mse_loss(y_pred, y_true)
    # Collect the variables once so gradients and variables stay aligned.
    trainable = list(vars(autoencoder_model).values())
    gradients = tape.gradient(pretraining_loss, trainable)
    pretraining_optimizer.apply_gradients(zip(gradients, trainable))
# 2. Fine-tuning: optimizer and train step for classifying MNIST digits.
finetuning_optimizer = tf.optimizers.SGD(learning_rate_GradientDescent)
@tf.function
def finetuning_train_step(autoencoder_model, softmax_classifier_model, x, y):
    """One SGD step on the encoder + softmax weights for batch (*x*, *y*)."""
    with tf.GradientTape() as tape:
        y_pred, extracted_features = autoencoder_model(x)
        y_pred_softmax = softmax_classifier_model(extracted_features)
        finetuning_loss = finetuning_cross_entropy_loss(y_pred_softmax, y)
    # Only the encoder half of the autoencoder is fine-tuned, together
    # with the softmax layer's parameters.
    trainable = [autoencoder_model.Wh_1, autoencoder_model.bh_1,
                 autoencoder_model.Wh_2, autoencoder_model.bh_2]
    trainable += list(vars(softmax_classifier_model).values())
    gradients = tape.gradient(finetuning_loss, trainable)
    finetuning_optimizer.apply_gradients(zip(gradients, trainable))
# ๋ชจ๋ธ์ ์ ํ๋๋ฅผ ์ถ๋ ฅํ๋ ํจ์๋ฅผ ์ ์ํฉ๋๋ค.
@tf.function
def compute_accuracy(y_pred_softmax, y):
    """Fraction of samples whose argmax prediction matches the one-hot label."""
    hits = tf.equal(tf.argmax(y_pred_softmax, 1), tf.argmax(y, 1))
    return tf.reduce_mean(tf.cast(hits, tf.float32))
# Instantiate the autoencoder model.
AutoEncoder_model = AutoEncoder()
# Instantiate the softmax classifier (it consumes the autoencoder's
# compressed 64-d features).
SoftmaxClassifier_model = SoftmaxClassifier()
# Step 1: pre-train the autoencoder to reconstruct MNIST inputs.
for epoch in range(num_epochs):
    # Run one optimization step per batch.
    # NOTE(review): the second call recomputes a forward pass just to log
    # the loss of the last batch of the epoch — wasteful but harmless.
    for batch_x, _ in train_data:
        _, pretraining_loss_print = pretraining_train_step(AutoEncoder_model, batch_x), pretraining_mse_loss(AutoEncoder_model(batch_x)[0], batch_x)
    # Log progress every display_step epochs.
    if epoch % display_step == 0:
        print("๋ฐ๋ณต(Epoch): %d, Pre-Training ์์ค ํจ์(pretraining_loss): %f" % ((epoch + 1), pretraining_loss_print))
print("Step 1 : MNIST ๋ฐ์ดํฐ ์ฌ๊ตฌ์ถ์ ์ํ ์คํ ์ธ์ฝ๋ ์ต์ ํ ์๋ฃ(Pre-Training)")
# Step 2: fine-tune the encoder + softmax classifier for digit classification.
for epoch in range(num_epochs + 100):
    # Run one optimization step per batch.
    for batch_x, batch_y in train_data:
        batch_y = tf.one_hot(batch_y, depth=10)
        _, finetuning_loss_print = finetuning_train_step(AutoEncoder_model, SoftmaxClassifier_model, batch_x, batch_y), finetuning_cross_entropy_loss(SoftmaxClassifier_model(AutoEncoder_model(batch_x)[1]), batch_y)
    # Log progress every display_step epochs.
    if epoch % display_step == 0:
        print("๋ฐ๋ณต(Epoch): %d, Fine-tuning ์์ค ํจ์(finetuning_loss): %f" % ((epoch + 1), finetuning_loss_print))
print("Step 2 : MNIST ๋ฐ์ดํฐ ๋ถ๋ฅ๋ฅผ ์ํ ์คํ ์ธ์ฝ๋+Softmax ๋ถ๋ฅ๊ธฐ ์ต์ ํ ์๋ฃ(Fine-Tuning)")
# Report test-set accuracy of the autoencoder + softmax pipeline.
print("์ ํ๋(์คํ ์ธ์ฝ๋+Softmax ๋ถ๋ฅ๊ธฐ): %f" % compute_accuracy(SoftmaxClassifier_model(AutoEncoder_model(x_test)[1]), tf.one_hot(y_test, depth=10)))  # accuracy: ~96%
'''
๊ฒฐ๊ณผ:
๋ฐ๋ณต(Epoch): 1, Pre-Training ์์ค ํจ์(pretraining_loss): 0.053507
๋ฐ๋ณต(Epoch): 2, Pre-Training ์์ค ํจ์(pretraining_loss): 0.048763
๋ฐ๋ณต(Epoch): 3, Pre-Training ์์ค ํจ์(pretraining_loss): 0.035184
๋ฐ๋ณต(Epoch): 4, Pre-Training ์์ค ํจ์(pretraining_loss): 0.036432
๋ฐ๋ณต(Epoch): 5, Pre-Training ์์ค ํจ์(pretraining_loss): 0.030125
๋ฐ๋ณต(Epoch): 6, Pre-Training ์์ค ํจ์(pretraining_loss): 0.034440
๋ฐ๋ณต(Epoch): 7, Pre-Training ์์ค ํจ์(pretraining_loss): 0.028495
๋ฐ๋ณต(Epoch): 8, Pre-Training ์์ค ํจ์(pretraining_loss): 0.028204
๋ฐ๋ณต(Epoch): 9, Pre-Training ์์ค ํจ์(pretraining_loss): 0.027567
๋ฐ๋ณต(Epoch): 10, Pre-Training ์์ค ํจ์(pretraining_loss): 0.027792
๋ฐ๋ณต(Epoch): 11, Pre-Training ์์ค ํจ์(pretraining_loss): 0.026380
๋ฐ๋ณต(Epoch): 12, Pre-Training ์์ค ํจ์(pretraining_loss): 0.029198
๋ฐ๋ณต(Epoch): 13, Pre-Training ์์ค ํจ์(pretraining_loss): 0.028753
๋ฐ๋ณต(Epoch): 14, Pre-Training ์์ค ํจ์(pretraining_loss): 0.026498
๋ฐ๋ณต(Epoch): 15, Pre-Training ์์ค ํจ์(pretraining_loss): 0.023731
๋ฐ๋ณต(Epoch): 16, Pre-Training ์์ค ํจ์(pretraining_loss): 0.022695
๋ฐ๋ณต(Epoch): 17, Pre-Training ์์ค ํจ์(pretraining_loss): 0.027665
๋ฐ๋ณต(Epoch): 18, Pre-Training ์์ค ํจ์(pretraining_loss): 0.027225
๋ฐ๋ณต(Epoch): 19, Pre-Training ์์ค ํจ์(pretraining_loss): 0.026682
๋ฐ๋ณต(Epoch): 20, Pre-Training ์์ค ํจ์(pretraining_loss): 0.021913
๋ฐ๋ณต(Epoch): 21, Pre-Training ์์ค ํจ์(pretraining_loss): 0.024350
๋ฐ๋ณต(Epoch): 22, Pre-Training ์์ค ํจ์(pretraining_loss): 0.023734
๋ฐ๋ณต(Epoch): 23, Pre-Training ์์ค ํจ์(pretraining_loss): 0.022152
๋ฐ๋ณต(Epoch): 24, Pre-Training ์์ค ํจ์(pretraining_loss): 0.025833
๋ฐ๋ณต(Epoch): 25, Pre-Training ์์ค ํจ์(pretraining_loss): 0.020808
๋ฐ๋ณต(Epoch): 26, Pre-Training ์์ค ํจ์(pretraining_loss): 0.021201
๋ฐ๋ณต(Epoch): 27, Pre-Training ์์ค ํจ์(pretraining_loss): 0.021918
๋ฐ๋ณต(Epoch): 28, Pre-Training ์์ค ํจ์(pretraining_loss): 0.022194
๋ฐ๋ณต(Epoch): 29, Pre-Training ์์ค ํจ์(pretraining_loss): 0.020438
๋ฐ๋ณต(Epoch): 30, Pre-Training ์์ค ํจ์(pretraining_loss): 0.022512
๋ฐ๋ณต(Epoch): 31, Pre-Training ์์ค ํจ์(pretraining_loss): 0.021349
๋ฐ๋ณต(Epoch): 32, Pre-Training ์์ค ํจ์(pretraining_loss): 0.021471
๋ฐ๋ณต(Epoch): 33, Pre-Training ์์ค ํจ์(pretraining_loss): 0.017022
๋ฐ๋ณต(Epoch): 34, Pre-Training ์์ค ํจ์(pretraining_loss): 0.021992
๋ฐ๋ณต(Epoch): 35, Pre-Training ์์ค ํจ์(pretraining_loss): 0.021022
๋ฐ๋ณต(Epoch): 36, Pre-Training ์์ค ํจ์(pretraining_loss): 0.020439
๋ฐ๋ณต(Epoch): 37, Pre-Training ์์ค ํจ์(pretraining_loss): 0.021232
๋ฐ๋ณต(Epoch): 38, Pre-Training ์์ค ํจ์(pretraining_loss): 0.017217
๋ฐ๋ณต(Epoch): 39, Pre-Training ์์ค ํจ์(pretraining_loss): 0.019105
๋ฐ๋ณต(Epoch): 40, Pre-Training ์์ค ํจ์(pretraining_loss): 0.018256
๋ฐ๋ณต(Epoch): 41, Pre-Training ์์ค ํจ์(pretraining_loss): 0.017459
๋ฐ๋ณต(Epoch): 42, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016606
๋ฐ๋ณต(Epoch): 43, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016887
๋ฐ๋ณต(Epoch): 44, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016546
๋ฐ๋ณต(Epoch): 45, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016624
๋ฐ๋ณต(Epoch): 46, Pre-Training ์์ค ํจ์(pretraining_loss): 0.015461
๋ฐ๋ณต(Epoch): 47, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016206
๋ฐ๋ณต(Epoch): 48, Pre-Training ์์ค ํจ์(pretraining_loss): 0.014506
๋ฐ๋ณต(Epoch): 49, Pre-Training ์์ค ํจ์(pretraining_loss): 0.017969
๋ฐ๋ณต(Epoch): 50, Pre-Training ์์ค ํจ์(pretraining_loss): 0.015498
๋ฐ๋ณต(Epoch): 51, Pre-Training ์์ค ํจ์(pretraining_loss): 0.014712
๋ฐ๋ณต(Epoch): 52, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016367
๋ฐ๋ณต(Epoch): 53, Pre-Training ์์ค ํจ์(pretraining_loss): 0.017104
๋ฐ๋ณต(Epoch): 54, Pre-Training ์์ค ํจ์(pretraining_loss): 0.015905
๋ฐ๋ณต(Epoch): 55, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016148
๋ฐ๋ณต(Epoch): 56, Pre-Training ์์ค ํจ์(pretraining_loss): 0.018872
๋ฐ๋ณต(Epoch): 57, Pre-Training ์์ค ํจ์(pretraining_loss): 0.014031
๋ฐ๋ณต(Epoch): 58, Pre-Training ์์ค ํจ์(pretraining_loss): 0.015222
๋ฐ๋ณต(Epoch): 59, Pre-Training ์์ค ํจ์(pretraining_loss): 0.015184
๋ฐ๋ณต(Epoch): 60, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012931
๋ฐ๋ณต(Epoch): 61, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016481
๋ฐ๋ณต(Epoch): 62, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012996
๋ฐ๋ณต(Epoch): 63, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012893
๋ฐ๋ณต(Epoch): 64, Pre-Training ์์ค ํจ์(pretraining_loss): 0.014411
๋ฐ๋ณต(Epoch): 65, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016494
๋ฐ๋ณต(Epoch): 66, Pre-Training ์์ค ํจ์(pretraining_loss): 0.016149
๋ฐ๋ณต(Epoch): 67, Pre-Training ์์ค ํจ์(pretraining_loss): 0.014902
๋ฐ๋ณต(Epoch): 68, Pre-Training ์์ค ํจ์(pretraining_loss): 0.014104
๋ฐ๋ณต(Epoch): 69, Pre-Training ์์ค ํจ์(pretraining_loss): 0.015446
๋ฐ๋ณต(Epoch): 70, Pre-Training ์์ค ํจ์(pretraining_loss): 0.013116
๋ฐ๋ณต(Epoch): 71, Pre-Training ์์ค ํจ์(pretraining_loss): 0.014651
๋ฐ๋ณต(Epoch): 72, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012222
๋ฐ๋ณต(Epoch): 73, Pre-Training ์์ค ํจ์(pretraining_loss): 0.011534
๋ฐ๋ณต(Epoch): 74, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012763
๋ฐ๋ณต(Epoch): 75, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012654
๋ฐ๋ณต(Epoch): 76, Pre-Training ์์ค ํจ์(pretraining_loss): 0.011128
๋ฐ๋ณต(Epoch): 77, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012303
๋ฐ๋ณต(Epoch): 78, Pre-Training ์์ค ํจ์(pretraining_loss): 0.010430
๋ฐ๋ณต(Epoch): 79, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012946
๋ฐ๋ณต(Epoch): 80, Pre-Training ์์ค ํจ์(pretraining_loss): 0.013003
๋ฐ๋ณต(Epoch): 81, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012357
๋ฐ๋ณต(Epoch): 82, Pre-Training ์์ค ํจ์(pretraining_loss): 0.011040
๋ฐ๋ณต(Epoch): 83, Pre-Training ์์ค ํจ์(pretraining_loss): 0.011767
๋ฐ๋ณต(Epoch): 84, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012648
๋ฐ๋ณต(Epoch): 85, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012419
๋ฐ๋ณต(Epoch): 86, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012792
๋ฐ๋ณต(Epoch): 87, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012415
๋ฐ๋ณต(Epoch): 88, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012451
๋ฐ๋ณต(Epoch): 89, Pre-Training ์์ค ํจ์(pretraining_loss): 0.014441
๋ฐ๋ณต(Epoch): 90, Pre-Training ์์ค ํจ์(pretraining_loss): 0.013401
๋ฐ๋ณต(Epoch): 91, Pre-Training ์์ค ํจ์(pretraining_loss): 0.013060
๋ฐ๋ณต(Epoch): 92, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012423
๋ฐ๋ณต(Epoch): 93, Pre-Training ์์ค ํจ์(pretraining_loss): 0.011332
๋ฐ๋ณต(Epoch): 94, Pre-Training ์์ค ํจ์(pretraining_loss): 0.010706
๋ฐ๋ณต(Epoch): 95, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012547
๋ฐ๋ณต(Epoch): 96, Pre-Training ์์ค ํจ์(pretraining_loss): 0.009669
๋ฐ๋ณต(Epoch): 97, Pre-Training ์์ค ํจ์(pretraining_loss): 0.013293
๋ฐ๋ณต(Epoch): 98, Pre-Training ์์ค ํจ์(pretraining_loss): 0.010423
๋ฐ๋ณต(Epoch): 99, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012039
๋ฐ๋ณต(Epoch): 100, Pre-Training ์์ค ํจ์(pretraining_loss): 0.012134
Step 1 : MNIST ๋ฐ์ดํฐ ์ฌ๊ตฌ์ถ์ ์ํ ์คํ ์ธ์ฝ๋ ์ต์ ํ ์๋ฃ(Pre-Training)
๋ฐ๋ณต(Epoch): 1, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.325304
๋ฐ๋ณต(Epoch): 2, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.276397
๋ฐ๋ณต(Epoch): 3, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.382290
๋ฐ๋ณต(Epoch): 4, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.216175
๋ฐ๋ณต(Epoch): 5, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.318119
๋ฐ๋ณต(Epoch): 6, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.270805
๋ฐ๋ณต(Epoch): 7, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.240315
๋ฐ๋ณต(Epoch): 8, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.227046
๋ฐ๋ณต(Epoch): 9, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.367014
๋ฐ๋ณต(Epoch): 10, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.234238
๋ฐ๋ณต(Epoch): 11, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.214215
๋ฐ๋ณต(Epoch): 12, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.098432
๋ฐ๋ณต(Epoch): 13, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.129280
๋ฐ๋ณต(Epoch): 14, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.183790
๋ฐ๋ณต(Epoch): 15, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.121607
๋ฐ๋ณต(Epoch): 16, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.210740
๋ฐ๋ณต(Epoch): 17, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.193307
๋ฐ๋ณต(Epoch): 18, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.121441
๋ฐ๋ณต(Epoch): 19, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.094763
๋ฐ๋ณต(Epoch): 20, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.106741
๋ฐ๋ณต(Epoch): 21, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.146501
๋ฐ๋ณต(Epoch): 22, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.069261
๋ฐ๋ณต(Epoch): 23, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.093902
๋ฐ๋ณต(Epoch): 24, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.256915
๋ฐ๋ณต(Epoch): 25, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.189629
๋ฐ๋ณต(Epoch): 26, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.056574
๋ฐ๋ณต(Epoch): 27, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.071725
๋ฐ๋ณต(Epoch): 28, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.135041
๋ฐ๋ณต(Epoch): 29, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.147878
๋ฐ๋ณต(Epoch): 30, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.107991
๋ฐ๋ณต(Epoch): 31, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.123838
๋ฐ๋ณต(Epoch): 32, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.111915
๋ฐ๋ณต(Epoch): 33, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.040454
๋ฐ๋ณต(Epoch): 34, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.096574
๋ฐ๋ณต(Epoch): 35, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.029988
๋ฐ๋ณต(Epoch): 36, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.063732
๋ฐ๋ณต(Epoch): 37, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.108929
๋ฐ๋ณต(Epoch): 38, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.064120
๋ฐ๋ณต(Epoch): 39, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.099476
๋ฐ๋ณต(Epoch): 40, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.073152
๋ฐ๋ณต(Epoch): 41, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.060018
๋ฐ๋ณต(Epoch): 42, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.183250
๋ฐ๋ณต(Epoch): 43, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.058556
๋ฐ๋ณต(Epoch): 44, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.070945
๋ฐ๋ณต(Epoch): 45, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.040676
๋ฐ๋ณต(Epoch): 46, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.043457
๋ฐ๋ณต(Epoch): 47, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.078201
๋ฐ๋ณต(Epoch): 48, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.052978
๋ฐ๋ณต(Epoch): 49, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.043417
๋ฐ๋ณต(Epoch): 50, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.034806
๋ฐ๋ณต(Epoch): 51, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.030281
๋ฐ๋ณต(Epoch): 52, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.049490
๋ฐ๋ณต(Epoch): 53, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.043214
๋ฐ๋ณต(Epoch): 54, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.056933
๋ฐ๋ณต(Epoch): 55, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.045052
๋ฐ๋ณต(Epoch): 56, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.115746
๋ฐ๋ณต(Epoch): 57, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.032351
๋ฐ๋ณต(Epoch): 58, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.032108
๋ฐ๋ณต(Epoch): 59, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.053930
๋ฐ๋ณต(Epoch): 60, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.024719
๋ฐ๋ณต(Epoch): 61, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.024479
๋ฐ๋ณต(Epoch): 62, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.055692
๋ฐ๋ณต(Epoch): 63, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.056284
๋ฐ๋ณต(Epoch): 64, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.067199
๋ฐ๋ณต(Epoch): 65, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.038257
๋ฐ๋ณต(Epoch): 66, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.051612
๋ฐ๋ณต(Epoch): 67, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.063251
๋ฐ๋ณต(Epoch): 68, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.035636
๋ฐ๋ณต(Epoch): 69, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.039104
๋ฐ๋ณต(Epoch): 70, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.083766
๋ฐ๋ณต(Epoch): 71, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.057188
๋ฐ๋ณต(Epoch): 72, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.044378
๋ฐ๋ณต(Epoch): 73, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.063846
๋ฐ๋ณต(Epoch): 74, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.023192
๋ฐ๋ณต(Epoch): 75, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.022278
๋ฐ๋ณต(Epoch): 76, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.023742
๋ฐ๋ณต(Epoch): 77, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.030204
๋ฐ๋ณต(Epoch): 78, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.061614
๋ฐ๋ณต(Epoch): 79, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.027759
๋ฐ๋ณต(Epoch): 80, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.043625
๋ฐ๋ณต(Epoch): 81, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.037367
๋ฐ๋ณต(Epoch): 82, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.045685
๋ฐ๋ณต(Epoch): 83, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.058006
๋ฐ๋ณต(Epoch): 84, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.043170
๋ฐ๋ณต(Epoch): 85, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.037628
๋ฐ๋ณต(Epoch): 86, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.018788
๋ฐ๋ณต(Epoch): 87, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.028692
๋ฐ๋ณต(Epoch): 88, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.098316
๋ฐ๋ณต(Epoch): 89, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.053149
๋ฐ๋ณต(Epoch): 90, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.037437
๋ฐ๋ณต(Epoch): 91, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.014776
๋ฐ๋ณต(Epoch): 92, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.040170
๋ฐ๋ณต(Epoch): 93, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.036712
๋ฐ๋ณต(Epoch): 94, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.027032
๋ฐ๋ณต(Epoch): 95, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.050907
๋ฐ๋ณต(Epoch): 96, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.031556
๋ฐ๋ณต(Epoch): 97, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.019665
๋ฐ๋ณต(Epoch): 98, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.031778
๋ฐ๋ณต(Epoch): 99, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.042951
๋ฐ๋ณต(Epoch): 100, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.030048
๋ฐ๋ณต(Epoch): 101, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.009764
๋ฐ๋ณต(Epoch): 102, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.036724
๋ฐ๋ณต(Epoch): 103, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.021788
๋ฐ๋ณต(Epoch): 104, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.028175
๋ฐ๋ณต(Epoch): 105, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.057793
๋ฐ๋ณต(Epoch): 106, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.009467
๋ฐ๋ณต(Epoch): 107, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.010038
๋ฐ๋ณต(Epoch): 108, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.015010
๋ฐ๋ณต(Epoch): 109, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.049014
๋ฐ๋ณต(Epoch): 110, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.019715
๋ฐ๋ณต(Epoch): 111, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.036950
๋ฐ๋ณต(Epoch): 112, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.023561
๋ฐ๋ณต(Epoch): 113, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.050550
๋ฐ๋ณต(Epoch): 114, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.021130
๋ฐ๋ณต(Epoch): 115, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.047228
๋ฐ๋ณต(Epoch): 116, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.023643
๋ฐ๋ณต(Epoch): 117, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.027961
๋ฐ๋ณต(Epoch): 118, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.014484
๋ฐ๋ณต(Epoch): 119, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.014494
๋ฐ๋ณต(Epoch): 120, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.016496
๋ฐ๋ณต(Epoch): 121, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.062865
๋ฐ๋ณต(Epoch): 122, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.054801
๋ฐ๋ณต(Epoch): 123, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.031489
๋ฐ๋ณต(Epoch): 124, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.010628
๋ฐ๋ณต(Epoch): 125, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.023272
๋ฐ๋ณต(Epoch): 126, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.004501
๋ฐ๋ณต(Epoch): 127, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.022569
๋ฐ๋ณต(Epoch): 128, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.020074
๋ฐ๋ณต(Epoch): 129, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.022864
๋ฐ๋ณต(Epoch): 130, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.013387
๋ฐ๋ณต(Epoch): 131, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.014080
๋ฐ๋ณต(Epoch): 132, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.029491
๋ฐ๋ณต(Epoch): 133, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.027683
๋ฐ๋ณต(Epoch): 134, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.009023
๋ฐ๋ณต(Epoch): 135, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.024363
๋ฐ๋ณต(Epoch): 136, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.021771
๋ฐ๋ณต(Epoch): 137, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.018908
๋ฐ๋ณต(Epoch): 138, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.010672
๋ฐ๋ณต(Epoch): 139, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.008865
๋ฐ๋ณต(Epoch): 140, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.016291
๋ฐ๋ณต(Epoch): 141, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.020377
๋ฐ๋ณต(Epoch): 142, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.021067
๋ฐ๋ณต(Epoch): 143, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.018236
๋ฐ๋ณต(Epoch): 144, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.024964
๋ฐ๋ณต(Epoch): 145, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.014431
๋ฐ๋ณต(Epoch): 146, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.023570
๋ฐ๋ณต(Epoch): 147, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.012575
๋ฐ๋ณต(Epoch): 148, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.011586
๋ฐ๋ณต(Epoch): 149, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.029435
๋ฐ๋ณต(Epoch): 150, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.010515
๋ฐ๋ณต(Epoch): 151, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.019928
๋ฐ๋ณต(Epoch): 152, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.023447
๋ฐ๋ณต(Epoch): 153, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.017635
๋ฐ๋ณต(Epoch): 154, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.014962
๋ฐ๋ณต(Epoch): 155, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.027726
๋ฐ๋ณต(Epoch): 156, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.019980
๋ฐ๋ณต(Epoch): 157, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.008557
๋ฐ๋ณต(Epoch): 158, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.011651
๋ฐ๋ณต(Epoch): 159, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.007926
๋ฐ๋ณต(Epoch): 160, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.015225
๋ฐ๋ณต(Epoch): 161, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.012383
๋ฐ๋ณต(Epoch): 162, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.013008
๋ฐ๋ณต(Epoch): 163, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.017309
๋ฐ๋ณต(Epoch): 164, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.034151
๋ฐ๋ณต(Epoch): 165, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.013219
๋ฐ๋ณต(Epoch): 166, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.021021
๋ฐ๋ณต(Epoch): 167, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.008926
๋ฐ๋ณต(Epoch): 168, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.035952
๋ฐ๋ณต(Epoch): 169, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.008305
๋ฐ๋ณต(Epoch): 170, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.010765
๋ฐ๋ณต(Epoch): 171, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.013321
๋ฐ๋ณต(Epoch): 172, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.019369
๋ฐ๋ณต(Epoch): 173, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.033441
๋ฐ๋ณต(Epoch): 174, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.012628
๋ฐ๋ณต(Epoch): 175, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.022365
๋ฐ๋ณต(Epoch): 176, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.006675
๋ฐ๋ณต(Epoch): 177, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.011718
๋ฐ๋ณต(Epoch): 178, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.012873
๋ฐ๋ณต(Epoch): 179, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.012914
๋ฐ๋ณต(Epoch): 180, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.005970
๋ฐ๋ณต(Epoch): 181, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.017133
๋ฐ๋ณต(Epoch): 182, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.013680
๋ฐ๋ณต(Epoch): 183, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.020669
๋ฐ๋ณต(Epoch): 184, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.008793
๋ฐ๋ณต(Epoch): 185, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.023708
๋ฐ๋ณต(Epoch): 186, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.007231
๋ฐ๋ณต(Epoch): 187, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.011926
๋ฐ๋ณต(Epoch): 188, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.007000
๋ฐ๋ณต(Epoch): 189, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.012216
๋ฐ๋ณต(Epoch): 190, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.011874
๋ฐ๋ณต(Epoch): 191, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.011675
๋ฐ๋ณต(Epoch): 192, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.014625
๋ฐ๋ณต(Epoch): 193, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.014802
๋ฐ๋ณต(Epoch): 194, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.005100
๋ฐ๋ณต(Epoch): 195, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.011093
๋ฐ๋ณต(Epoch): 196, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.016482
๋ฐ๋ณต(Epoch): 197, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.006218
๋ฐ๋ณต(Epoch): 198, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.022026
๋ฐ๋ณต(Epoch): 199, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.019975
๋ฐ๋ณต(Epoch): 200, Fine-tuning ์์ค ํจ์(finetuning_loss): 0.009672
Step 2 : MNIST ๋ฐ์ดํฐ ๋ถ๋ฅ๋ฅผ ์ํ ์คํ ์ธ์ฝ๋+Softmax ๋ถ๋ฅ๊ธฐ ์ต์ ํ ์๋ฃ(Fine-Tuning)
์ ํ๋(์คํ ์ธ์ฝ๋+Softmax ๋ถ๋ฅ๊ธฐ): 0.962000
''' |
# Basic commands for webrepl
from sensor.sensor import PIN_LIGHT
from sensor.main import LOOP
from besp import WDT
import machine
import os
SERVICE = 'gate-sensor'
# Stop the watchdog while the cmd module is loaded so a manual webrepl
# session is not interrupted by a watchdog reset.
print('Stopping watchdog process...')
WDT.deinit()
print('Commands: reboot, cd_sensor, cd_root, ls, stop, cat_error, rm_error, sunlight')
# Get current reading from light sensor
def sunlight():
    """Print the current raw ADC reading of the light sensor."""
    reading = PIN_LIGHT.read()
    print(reading)
# Reboot ESP
def reboot():
    """Hard-reset the ESP board immediately."""
    machine.reset()
# Change current dir to sensor module
def cd_sensor():
    """Switch the working directory to the sensor module."""
    os.chdir('sensor')
# Change current dir to root
def cd_root():
    """Switch the working directory back to the filesystem root."""
    os.chdir('/')
# List current dir, avoids another import
def ls():
    """Print the names of entries in the current directory."""
    entries = os.listdir()
    print(entries)
# Stop application event loop
def stop():
    """Stop the sensor application's event loop, if one is running."""
    if not LOOP:
        return
    LOOP.stop()
# Print error from file
def cat_error():
    """Print the contents of error.log, or a notice if it is absent.

    Uses a context manager so the file handle is closed even when
    read() or print() raises (the original leaked the handle on error).
    """
    try:
        with open('error.log', 'r') as f:
            print(f.read())
    except OSError:
        print('No error file.')
# Remove error file
def rm_error():
    """Delete error.log if present, reporting the outcome either way."""
    try:
        os.remove('error.log')
    except OSError:
        print('No error file.')
    else:
        print('Error file removed.')
from django.db import models
from django.contrib.auth.models import User
from apps.trivia.models import Trivia
import ast
# Create your models here.
class PracticeResult(models.Model):
    """One user's attempt at a trivia practice session and its scoring."""
    trivia = models.ForeignKey(Trivia,on_delete=models.CASCADE)
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    start_time = models.DateTimeField(blank=True, null=True)
    modified_at = models.DateTimeField(blank=True, null=True)
    time_taken = models.IntegerField(blank=False, null=False, default=0)
    answers = models.TextField(blank=False, null=False, default="{}")
    positive_score = models.IntegerField(blank=False, null=False, default=0)
    negative_score = models.IntegerField(blank=False, null=False, default=0)
    stars = models.IntegerField(blank=False, null=False, default=0)

    def __str__(self):
        """Identify the result by owner and trivia code."""
        return '_'.join((self.user.username, self.trivia.code))

    def calculate_score(self, questions_set=None):
        """Accumulate positive/negative scores from the stored answers.

        Returns None without changes once the attempt has been submitted.
        When *questions_set* is falsy, the trivia's full question set is used.
        """
        if self.submitted():
            return None
        if not questions_set:
            questions_set = self.trivia.question_set.all()
        # The answers field holds a Python-literal dict keyed by question id.
        given = ast.literal_eval(self.answers)
        for question in questions_set:
            if question.id not in given:
                continue
            chosen = given[question.id]["opt_id"]
            if chosen == 0:  # 0 marks "no option selected"
                continue
            if question.correct_answer == chosen:
                self.positive_score += question.positive_score
            else:
                self.negative_score += question.negative_score
        #self.total_score = self.positive_score - self.negative_score

    def get_score(self):
        """Net score: positives minus accumulated penalties."""
        return self.positive_score - self.negative_score

    def get_timetaken_string(self):
        """Format time_taken (seconds) as H:M:S without zero padding."""
        remaining = self.time_taken
        hrs, remaining = divmod(remaining, 3600)
        mins, secs = divmod(remaining, 60)
        return "%d:%d:%d" % (hrs, mins, secs)

    def submitted(self):
        """Truthy once any elapsed time has been recorded for the attempt."""
        return self.time_taken
|
from join import Join as join
from joindocuments import joindocuments
import pandas as pd
from oddratio import OddRatio as ratio
from topwords import topwords
from ngrams import ngrams
from truncatedsvd import SVDf
from PCAC import pcaf
import arff
import numpy as np
#joinoddratios
def domain(document, crossvalidationundersampling,ArffL,A ):
    """Build text features (top words, bigrams, odds ratios, SVD) for the
    labeled tweets in <document>.csv and dump training/test splits as
    CSV and ARFF files.

    document -- csv path (without extension) holding the labeled tweets
    crossvalidationundersampling -- output directory for the csv artifacts
    ArffL -- output directory for the arff artifacts
    A -- split parameter forwarded to gettrainingandtestp; also embedded
         in the output file names
    """
    df1=pd.read_csv(document+'.csv' )
    joinc=joindocuments(df1,df1)
    # Top 150 terms and bigrams extracted from the cleaned tweet text.
    top = topwords(df1,'Clean tweet',150)
    main_domain = join(df1,'Clean tweet')
    bigrams=ngrams(df1,'Clean tweet')
    print 'bigrams'
    print bigrams.bigrams
    main_domain.joinall(bigrams.bigrams,2)
    main_domain.joinall(top.top,1)
    print main_domain.df
    main_domain.df.to_csv(str(crossvalidationundersampling) + '\prueba.csv',index=False)
    # Odds ratios of the top words against the label column 'L'.
    ratiov=ratio(main_domain.df,'L')
    ratios=ratiov.getoddratios(top.top)
    print 'ratios'
    print ratios
    ds=list(ratios.keys())
    print ds
    oddradiojoin=join(df1,'Clean tweet')
    oddradiojoin.joinall(ds,1)
    # Drop raw-text columns before modelling; ignore them if already absent.
    cols=['tweet','url','Clean tweet']
    try:
        for x in cols:
            del oddradiojoin.df[x]
    except:
        pass
    #training, test=joinc.gettrainingandtestp(oddradiojoin.df)
    print 'matrix of elements to reduce'
    #########################################################
    # SVD
    X= oddradiojoin.df[ds]
    array=SVDf(X)
    #######################################
    #PCA
    #arraypca=pcaf(X)
    #array=arraypca.join(array)
    ########################################
    #oddradiojoin.df =oddradiojoin.df.join(array)
    #oddradiojoin.df.to_csv('prueba.csv',index=False)
    #print array
    #print array.shape
    # Balance the classes, then carve out training/test partitions.
    undersampleddf1,undersampleddf2=joinc.undersampling(oddradiojoin.df)
    print 'undersample '
    print undersampleddf1.shape
    training, test=joinc.gettrainingandtestp(undersampleddf1)
    training, training2=joinc.gettrainingandtestp(training,A)
    # Normalize stringified booleans into real booleans.
    training=training.replace(['True','False'], [True,False])
    test=test.replace(['True','False'], [True,False])
    A=str(A)
    oddradiojoin.df.to_csv(str(crossvalidationundersampling) + '\crossvalidation'+A+'.csv',index=False)
    undersampleddf1.to_csv(str(crossvalidationundersampling) +'\undersampling'+A+'.csv',index=False)
    headers_names=list(training.columns.values)
    # Keep the test columns aligned with training before export.
    test = test[headers_names]
    test.to_csv(str(crossvalidationundersampling) + r'\test'+A+'.csv',index=False)
    training.to_csv(str(crossvalidationundersampling) +r'\training'+A+'.csv',index=False)
    TRAINING=training.as_matrix(columns=None)
    print 'training'
    print training.shape
    arff.dump(ArffL +r'\training'+A+'.arff',TRAINING, relation="whatever", names=headers_names)
    TEST=test.as_matrix(columns=None)
    arff.dump(ArffL +r'\test'+A+'.arff',TEST, relation="whatever", names=headers_names)
#domain('documents\csv\divorce\divorce',r'documents\csv\divorce',r'documents\Arff\divorce',1)
#domain('documents\csv\pregnancy\GOOD LABELING 170620151',r'documents\csv\pregnancy',r'documents\Arff\pregnancy',1)#1 indicates the value of A
# Run the pipeline on the "drunk" dataset; other corpora kept above for reference.
domain('documents\csv\drunk\drunk labeling 1300',r'documents\csv\drunk',r'documents\Arff\drunk',1)
|
from machine import Pin,PWM,ADC,DAC
from time import sleep
from _thread import start_new_thread as thread
import json,network,urequests
# Wi-Fi credentials for the station (client) interface.
ssid='exceed16_8'
pwd='12345678'
station=network.WLAN(network.STA_IF)
station.active(True)
# Shared state published to the backend by mynetwork().
statBuzzer = 'off'
statDoor = 'close'
# Pin assignments (ESP32):
infra = Pin(26, Pin.OUT)   # beam emitter (presumably IR/laser — confirm wiring)
ldr = ADC(Pin(32))         # light sensor used to read the beam
R = Pin(21, Pin.OUT)       # RGB LED channels
G = Pin(19, Pin.OUT)
B = Pin(18, Pin.OUT)
SW = Pin(27, Pin.IN)       # alarm acknowledge switch (read as active low)
switch1=Pin(25,Pin.IN)     # door toggle switch (read as active low)
servo = PWM(Pin(22),freq=50,duty=77)  # door servo, 50 Hz
count=0  # door-toggle presses; parity encodes the door state
def door():
    """Servo door controller; runs forever in its own thread.

    ``switch1`` presses increment ``count``; even parity keeps the door
    closed (duty 120), odd parity opens it (duty 65).  The state is
    published through the global ``statDoor``.
    """
    global servo
    global statDoor
    statDoor='close'
    servo.duty(120)  # start closed
    sleep(0.5)
    global count
    count=0
    while(1):
        if switch1.value()==0:
            count+=1
            print(switch1.value())
            #print(count)
        # NOTE(review): no debounce — holding the switch increments
        # ``count`` once per loop pass.
        if count%2 == 0 :#or statDoor=='close':
            servo.duty(120)
            #print(switch1.value())
            sleep(1)
            statDoor='close'
        elif count%2 == 1 :#or statDoor=='open':
            servo.duty(65)
            sleep(1)
            statDoor='open'
        sleep(0.5)
def laser():
    """Light-barrier alarm loop.

    Turns the emitter on and, when the LDR reading exceeds 4000 while
    the emitter is on (trip condition — assumes the sensor faces the
    emitter, confirm wiring), sounds a buzzer and blinks the LED until
    the acknowledge switch is pressed or the door is open (odd count).
    """
    global statBuzzer
    #statBuzzer = 'off'
    while(1):
        infra.value(1)
        sleep(2)
        if ldr.read() > 4000 and infra.value() == 1:
            while(1):
                # NOTE(review): a new PWM object is constructed on
                # every pass of this loop; constructing it once before
                # the loop would suffice.
                buzzer = PWM(Pin(2))
                buzzer.freq(20)
                #statBuzzer = 'on'
                # Alternate red/blue while the alarm sounds.
                R.value(1)
                G.value(0)
                B.value(0)
                sleep(0.5)
                R.value(0)
                G.value(0)
                B.value(1)
                sleep(0.5)
                #print(SW.value())
                if SW.value()==0 or count%2==1:
                    # Acknowledged (or door opened): silence and clear.
                    buzzer.deinit()
                    #statBuzzer = 'off'
                    R.value(0)
                    G.value(0)
                    B.value(0)
                    break
                sleep(0.2)
        if SW.value()==0 or count%2==1:
            # Disarm the barrier and leave the loop entirely.
            infra.value(0)
            break
def onofflight():
    """Manual buzzer/LED test: one SW press starts the blink loop,
    a second press stops it.

    Currently unused — its ``thread`` launch at the bottom of the file
    is commented out.
    """
    SWon = False
    while(1):
        # NOTE(review): PWM object re-created every outer iteration.
        buzzer = PWM(Pin(2))
        if SW.value() == 0 and SWon == False:
            SWon = True
            while(1):
                buzzer.freq(20)
                R.value(1)
                G.value(0)
                B.value(0)
                sleep(0.5)
                R.value(0)
                G.value(0)
                B.value(1)
                sleep(0.5)
                if SW.value() == 0:
                    break
            buzzer.deinit()
            R.value(0)
            G.value(0)
            B.value(0)
            SWon = False
def onofflaser():
    """Arm the light barrier only while the door is closed (even
    ``count``); ``laser()`` blocks for as long as the alarm runs."""
    while(1):
        if count%2 == 0 :
            laser()
        sleep(0.5)
def mynetwork():
    """POST door/buzzer state to the backend roughly once per second,
    reconnecting Wi-Fi whenever the link drops.

    NOTE(review): ``statBuzzer`` is never updated anywhere (the writes
    in ``laser()`` are commented out), so the reported buzzer state is
    always 'off' — confirm intended.
    """
    while(1):
        url = 'https://exceed.superposition.pknn.dev/data/eight'
        data = {"door":statDoor,"buzzer":statBuzzer}
        headers = {'content-type':'application/json'}
        # Block until the station is associated.
        while not station.isconnected():
            station.connect(ssid,pwd)
            print('Connecting ...')
            sleep(1)
        if station.isconnected():
            print('Connected')
        js = json.dumps({'data':data})
        r = urequests.post(url,data=js,headers=headers)
        results = r.json()
        print(results)
        # Read back the server's current state (debug output only).
        r = urequests.get(url).json()
        print(r)
        sleep(1)
# Launch the door controller, barrier watchdog and network reporter as
# background threads; onofflight() is deliberately disabled.
thread(door,())
thread(onofflaser,())
thread(mynetwork,())
#thread(onofflight,())
|
from django.apps import AppConfig
class ArticlesConfig(AppConfig):
    """Django AppConfig for the zhihu articles app."""
    name = 'zhihu.articles'
    # NOTE(review): this label looks mojibake-garbled; it presumably
    # should read the Chinese word for "articles" — confirm the file's
    # encoding before changing it.
    verbose_name = 'ๆ็ซ '
    def ready(self):
        # Best effort: register signal handlers if the module exists.
        try:
            import zhihu.articles.signals
        except ImportError:
            pass
|
class Solution:
    def minSubArrayLen(self, s, nums):
        """Length of the shortest contiguous subarray of ``nums`` whose
        sum is >= ``s``, or 0 when no such subarray exists.

        Sliding window, O(n) time / O(1) extra space.  (The original
        recomputed ``sum(nums[:j])`` inside a loop, which made the
        initial scan O(n^2).)  Assumes non-negative numbers, as the
        original did.

        :type s: int
        :type nums: List[int]
        :rtype: int
        """
        best = 0          # 0 doubles as "not found"
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            # Shrink from the left while the window still meets the
            # target; every satisfying window is a candidate.
            while window_sum >= s:
                length = right - left + 1
                if best == 0 or length < best:
                    best = length
                window_sum -= nums[left]
                left += 1
        return best
# except this solution, you can sort the nums and from the biggest element side to find the min length
|
# coding:utf-8
import json
from cassandra.query import dict_factory
import crawler
from datacluster import DataCluster
__author__ = 'Baxter'
# Defaults used when the caller does not override keyspace/table.
DEFAULT_KEYSPACE = 'bucket'
DEFAULT_TABLE = 'cache'
class Query:
    """Base holder for a Solr query body plus transport details.

    ``handler`` names the transport ('cql' or 'http'); subclasses
    extend :meth:`parse_body` with transport-specific fields.
    """

    # Class-level default, kept for backward compatibility with any
    # code that reads ``Query.body`` directly.
    body = {
        'q': '*:*',
        'fq': [],
        'start': 0,
    }
    def __init__(self, handler, username=None, password=None):
        self.handler = handler
        self.username = username
        self.password = password
        # Fix: give every instance its own body dict.  The original
        # mutated the shared class-level dict, so query state (e.g. a
        # previously set 'facet' or 'rows') leaked between instances.
        self.body = {'q': '*:*', 'fq': [], 'start': 0}
    def parse_body(self, kwargs):
        """Populate the body from the caller's keyword arguments."""
        self.body['q'] = kwargs.get('q', '*:*')
        self.body['fq'] = kwargs.get('fq', [])
        self.body['start'] = kwargs.get('start', 0)
        if kwargs.get('facet'):
            self.body['facet'] = kwargs.get('facet')
class CQLQuery(Query):
    """Query variant executed through the Cassandra (CQL) driver."""
    def __init__(self):
        # CQL transport needs no HTTP credentials.
        Query.__init__(self, 'cql')
    def parse_body(self, kwargs):
        """Populate the body and return it for CQL serialisation."""
        Query.parse_body(self, kwargs)
        return self.body
class HttpQuery(Query):
    """Query variant issued against Solr's HTTP select endpoint."""

    def __init__(self, username=None, password=None):
        # The HTTP transport may carry basic-auth credentials.
        Query.__init__(self, 'http', username, password)

    def parse_body(self, kwargs):
        """Populate the body; the HTTP API additionally takes a row
        limit (default 10)."""
        Query.parse_body(self, kwargs)
        row_limit = kwargs.get('rows', 10)
        self.body['rows'] = row_limit
        return self.body
def solr_query(query, **config):
    """Return a callable that executes ``query`` against DSE Solr.

    Python 2 module (print statements).

    query -- a CQLQuery or HttpQuery instance; selects the transport.
    config -- optional 'keyspace', 'table' and 'field_list' overrides.

    The returned callable accepts the keyword arguments understood by
    ``Query.parse_body`` (q, fq, start, facet) plus 'rows' and, for the
    CQL path, 'row_factory'.
    """
    class CQLQueryHandler:
        # Executes the query through the Cassandra driver using the
        # DSE ``solr_query`` pseudo-column.
        def __init__(self, body, limit):
            self.keyspace = config.get('keyspace', DEFAULT_KEYSPACE)
            self.table = config.get('table', DEFAULT_TABLE)
            t = config.get('field_list', ('*',))
            # Accept either a single column string or an iterable.
            self.fl = t if isinstance(t, str) else ",".join(t)
            self.body = body
            self.limit = limit
        def get_cql(self):
            # count(*) queries get no LIMIT clause.
            return "SELECT %(fl)s FROM %(keyspace)s.%(table)s WHERE solr_query='%(query_body)s' %(limit)s;" % {
                'keyspace': self.keyspace,
                'table': self.table,
                'fl': self.fl,
                'query_body': json.dumps(self.body),
                'limit': '' if 'count(*)' in self.fl else 'LIMIT %d' % self.limit
            }
        def execute(self, row_factory):
            session = DataCluster().get_session(self.keyspace)
            session.row_factory = row_factory
            return session.execute(self.get_cql())
    class HttpQueryHandler:
        # Executes the query over Solr's HTTP select endpoint.
        def __init__(self, body, node=DataCluster.nodes[0], port=8983):
            self.body = body
            self.base_url = "http://%(node)s:%(port)s/solr/%(keyspace)s.%(table)s" % {
                'node': node,
                'port': str(port),
                'keyspace': config.get('keyspace', DEFAULT_KEYSPACE),
                'table': config.get('table', DEFAULT_TABLE)
            }
        def execute(self, username=None, password=None):
            url = self.base_url + '/select?' + self.parse_body()
            print url
            return json.loads(crawler.open_url(url, username=username, password=password))
        def parse_facet(self):
            # Flatten the 'facet' entry into (key, value) query pairs;
            # presumably it is an iterable of pairs — confirm callers.
            l = [('indent', 'true')]
            for k, v in self.body.get('facet'):
                if isinstance(v, str):
                    l.append((k, v))
                elif isinstance(v, list):
                    l += [(k, tmp) for tmp in v]
            return l
        def parse_body(self):
            # Serialise the body dict into a URL query string.
            l = [('wt', 'json')]
            for k, v in self.body.items():
                if k == 'facet':
                    l.append(('facet', 'true'))
                    l += self.parse_facet()
                    continue
                if isinstance(v, str):
                    l.append((k, v))
                elif isinstance(v, list):
                    l += [(k, tmp) for tmp in v]
                elif isinstance(v, int):
                    l.append((k, str(v)))
            # NOTE(review): spaces are replaced with '%2A' (the escape
            # for '*'), not '%20' — looks wrong; confirm intended.
            return '&'.join(['='.join(item) for item in l]).replace(':', '%3A').replace(' ', '%2A')
    def __call(**kwargs):
        # Dispatch on the query type supplied to solr_query().
        if isinstance(query, HttpQuery):
            return HttpQueryHandler(query.parse_body(kwargs)).execute(username=query.username, password=query.password)
        else:
            return CQLQueryHandler(query.parse_body(kwargs),
                                   limit=kwargs.get('rows', 300)).execute(kwargs.get('row_factory', dict_factory))
    return __call
if __name__ == '__main__':
    # Smoke test against the 'hermes.orders' core: once via the CQL
    # transport, once via HTTP.  (Python 2 print statements.)
    query = solr_query(query=CQLQuery(), keyspace='hermes', table='orders')
    result_set = query(q='status:FAILED')
    for item in result_set:
        print type(item), item
    query = solr_query(query=HttpQuery(), keyspace='hermes', table='orders')
    result_set = query(q='status:SUCCESS', rows=20)
    print result_set
    # for item in result_set:
    #     print type(item), item
    # print query_body(q='*:*')
import sedate
from datetime import date, datetime
from freezegun import freeze_time
from morepath import Identity
from onegov.core.security import Public
from onegov.user import User
from onegov.user import UserCollection
from onegov.user import UserGroup
from onegov.user import UserGroupCollection
from onegov.wtfs.collections import MunicipalityCollection
from onegov.wtfs.collections import PaymentTypeCollection
from onegov.wtfs.collections import ScanJobCollection
from onegov.wtfs.models import DailyList
from onegov.wtfs.models import DailyListBoxes
from onegov.wtfs.models import DailyListBoxesAndForms
from onegov.wtfs.models import Invoice
from onegov.wtfs.models import Municipality
from onegov.wtfs.models import Notification
from onegov.wtfs.models import PaymentType
from onegov.wtfs.models import PickupDate
from onegov.wtfs.models import Report
from onegov.wtfs.models import ReportBoxes
from onegov.wtfs.models import ReportBoxesAndForms
from onegov.wtfs.models import ReportFormsByMunicipality
from onegov.wtfs.models import ScanJob
from onegov.wtfs.models import UserManual
from onegov.wtfs.security import AddModel
from onegov.wtfs.security import AddModelUnrestricted
from onegov.wtfs.security import DeleteModel
from onegov.wtfs.security import EditModel
from onegov.wtfs.security import EditModelUnrestricted
from onegov.wtfs.security import ViewModel
from onegov.wtfs.security import ViewModelUnrestricted
from uuid import uuid4
def permits_by_app(app, user, model, permission):
    """Ask the application whether ``user`` holds ``permission`` on
    ``model``, building the morepath identity from the user record."""
    group = user.group_id.hex if user.group_id else ''
    identity = Identity(
        userid=user.username,
        groupid=group,
        role=user.role,
        application_id=app.application_id,
    )
    return app._permits(identity, model, permission)
def test_permissions(wtfs_app, wtfs_password):
    """Exhaustive role/municipality permission matrix.

    Asserts the same matrix as the original hand-written version, but
    through a small ``check`` helper: for each user/model pair, the
    permissions in ``granted`` (plus ``Public``) must be permitted and
    every other permission denied.
    """
    session = wtfs_app.session()

    def permits(user, model, permission):
        return permits_by_app(wtfs_app, user, model, permission)

    # All non-public permissions the app defines.
    restricted = (
        AddModel, AddModelUnrestricted,
        EditModel, EditModelUnrestricted,
        DeleteModel,
        ViewModel, ViewModelUnrestricted,
    )
    everything = set(restricted)
    nothing = set()

    def check(users, models, granted):
        # Public is always granted; everything else must match exactly.
        for user in users:
            for model in models:
                assert permits(user, model, Public)
                for permission in restricted:
                    if permission in granted:
                        assert permits(user, model, permission)
                    else:
                        assert not permits(user, model, permission)

    # Remove existing users and group
    session.query(User).filter_by(realname='Editor').one().group_id = None
    session.query(User).filter_by(realname='Member').one().group_id = None
    session.query(Municipality).delete()

    # Add new municipalities, a pickup date and scan job for A, and a
    # full admin/editor/member user set for municipalities A and B.
    session.add(Municipality(
        name='Municipality',
        bfs_number=0,
    ))
    municipality_a_id = uuid4()
    session.add(Municipality(
        id=municipality_a_id,
        name='Municipality A',
        bfs_number=10,
    ))
    session.add(PickupDate(
        date=date.today(),
        municipality_id=municipality_a_id,
    ))
    session.add(ScanJob(
        type='normal',
        municipality_id=municipality_a_id,
        delivery_number=1,
        dispatch_date=date(2019, 1, 1))
    )
    session.add(User(
        realname='Admin A',
        username='admin-a@example.org',
        password_hash=wtfs_password,
        role='admin',
        group_id=municipality_a_id
    ))
    session.add(User(
        realname='Editor A',
        username='editor-a@example.org',
        password_hash=wtfs_password,
        role='editor',
        group_id=municipality_a_id
    ))
    session.add(User(
        realname='Member A',
        username='member-a@example.org',
        password_hash=wtfs_password,
        role='member',
        group_id=municipality_a_id
    ))
    municipality_b_id = uuid4()
    session.add(Municipality(
        id=municipality_b_id,
        name='Municipality B',
        bfs_number=20,
    ))
    session.add(User(
        realname='Admin B',
        username='admin-b@example.org',
        password_hash=wtfs_password,
        role='admin',
        group_id=municipality_b_id
    ))
    session.add(User(
        realname='Editor B',
        username='editor-b@example.org',
        password_hash=wtfs_password,
        role='editor',
        group_id=municipality_b_id
    ))
    session.add(User(
        realname='Member B',
        username='member-b@example.org',
        password_hash=wtfs_password,
        role='member',
        group_id=municipality_b_id
    ))

    query = session.query
    admin = query(User).filter_by(realname='Admin').one()
    admin_a = query(User).filter_by(realname='Admin A').one()
    admin_b = query(User).filter_by(realname='Admin B').one()
    editor = query(User).filter_by(realname='Editor').one()
    editor_a = query(User).filter_by(realname='Editor A').one()
    editor_b = query(User).filter_by(realname='Editor B').one()
    member = query(User).filter_by(realname='Member').one()
    member_a = query(User).filter_by(realname='Member A').one()
    member_b = query(User).filter_by(realname='Member B').one()
    group = query(UserGroup).filter_by(name='Municipality').one()
    group_a = query(UserGroup).filter_by(name='Municipality A').one()
    group_b = query(UserGroup).filter_by(name='Municipality B').one()
    municipality = query(Municipality).filter_by(name='Municipality').one()
    municipality_a = query(Municipality).filter_by(name='Municipality A').one()
    municipality_b = query(Municipality).filter_by(name='Municipality B').one()
    scan_job = query(ScanJob).one()

    admins = (admin, admin_a, admin_b)
    editors = (editor, editor_a, editor_b)
    members = (member, member_a, member_b)

    # General: admins may do anything, everyone else only Public.
    model = object()
    check(admins, (model,), everything)
    check(editors + members, (model,), nothing)

    # UserGroupCollection / MunicipalityCollection
    collections = (
        MunicipalityCollection(session),
        UserGroupCollection(session),
    )
    check(admins, collections, everything)
    check(editors + members, collections, nothing)

    # UserGroup / Municipality: admins may not delete the A/B entries
    # (presumably because they still have users/data attached), only
    # the empty one.
    check(
        admins,
        (group_a, group_b, municipality_a, municipality_b),
        everything - {DeleteModel}
    )
    check(admins, (group, municipality), everything)
    check(
        editors + members,
        (group_a, group_b, group,
         municipality_a, municipality_b, municipality),
        nothing
    )

    # UserCollection: editors may add and view, members nothing.
    model = UserCollection(session)
    check(admins, (model,), everything)
    check(editors, (model,), {AddModel, ViewModel})
    check(members, (model,), nothing)

    # User: admins manage everyone but may not delete themselves.
    for user in admins:
        for target in admins:
            if user == target:
                check((user,), (target,), everything - {DeleteModel})
            else:
                check((user,), (target,), everything)
    check(admins, editors + members, everything)

    # Editors: may edit/view themselves; an editor with a municipality
    # may also edit, view and delete that municipality's member.
    check(
        (editor,),
        (admin, admin_a, admin_b, editor_a, editor_b,
         member, member_a, member_b),
        nothing
    )
    check((editor,), (editor,), {EditModel, ViewModel})

    check(
        (editor_a,),
        (admin, admin_a, admin_b, editor, editor_b, member, member_b),
        nothing
    )
    check((editor_a,), (editor_a,), {EditModel, ViewModel})
    check((editor_a,), (member_a,), {EditModel, DeleteModel, ViewModel})

    check(
        (editor_b,),
        (admin, admin_a, admin_b, editor, editor_a, member, member_a),
        nothing
    )
    check((editor_b,), (editor_b,), {EditModel, ViewModel})
    check((editor_b,), (member_b,), {EditModel, DeleteModel, ViewModel})

    # Members may not manage any user at all.
    check(members, admins + editors + members, nothing)

    # ScanJobCollection: municipality users may add and view.
    model = ScanJobCollection(session)
    check(admins, (model,), everything)
    check((editor_a, editor_b, member_a, member_b), (model,),
          {AddModel, ViewModel})
    check((editor, member), (model,), nothing)

    # ScanJob: belongs to municipality A.
    check(admins, (scan_job,), everything)
    check((editor, editor_b, member, member_b), (scan_job,), nothing)
    check((editor_a, member_a), (scan_job,), {EditModel, ViewModel})

    # DailyList: only the plain member may view.
    daily_lists = (DailyList(), DailyListBoxes(session))
    check(admins, daily_lists, everything)
    check((editor, editor_a, editor_b, member_a, member_b),
          daily_lists, nothing)
    check((member,), daily_lists, {ViewModel})

    model = DailyListBoxesAndForms(session)
    check(admins, (model,), everything)
    check(editors + members, (model,), nothing)

    # Report
    reports = (
        Report(session),
        ReportBoxes(session),
        ReportBoxesAndForms(session),
        ReportFormsByMunicipality(session),
    )
    check(admins, reports, everything)
    check(editors + members, reports, nothing)

    # Notification: everyone may view.
    model = Notification()
    check(admins, (model,), everything)
    check(editors + members, (model,), {ViewModel})

    # Invoice
    model = Invoice(session)
    check(admins, (model,), everything)
    check(editors + members, (model,), nothing)

    # PaymentType
    model = PaymentType()
    check(admins, (model,), everything)
    check(editors + members, (model,), nothing)

    # PaymentTypeCollection
    model = PaymentTypeCollection(session)
    check(admins, (model,), everything)
    check(editors + members, (model,), nothing)

    # UserManual: everyone may view.
    model = UserManual(wtfs_app)
    check(admins, (model,), everything)
    check(editors + members, (model,), {ViewModel})
def test_editor_delete_day_before(wtfs_app, wtfs_password):
    """A scan job may be deleted by its municipality's editor only up to
    17:00 on the day before the dispatch date; admins may always delete,
    members and users of other municipalities never may.
    """
    session = wtfs_app.session()

    def permits(user, model, permission):
        # Evaluate the permission through the real application policy.
        return permits_by_app(wtfs_app, user, model, permission)
    # Remove existing users and group
    session.query(User).delete()
    session.query(Municipality).delete()
    # Add two towns
    foo = uuid4()
    session.add(Municipality(
        id=foo,
        name='Foo',
        bfs_number=1,
    ))
    session.add(PickupDate(
        date=date.today(),
        municipality_id=foo,
    ))
    bar = uuid4()
    session.add(Municipality(
        id=bar,
        name='Bar',
        # NOTE(review): same bfs_number as 'Foo' -- looks like a copy/paste
        # leftover; confirm whether bfs_number is meant to be unique here.
        bfs_number=1,
    ))
    session.add(PickupDate(
        date=date.today(),
        municipality_id=bar,
    ))
    # add a single scan job to foo
    session.add(ScanJob(
        type='normal',
        municipality_id=foo,
        delivery_number=1,
        dispatch_date=date(2019, 1, 1))
    )
    # an admin with access to all of it
    session.add(User(
        username='admin@example.org',
        password_hash=wtfs_password,
        role='admin'
    ))
    # an editor with access to foo
    session.add(User(
        username='foo-editor@example.org',
        password_hash=wtfs_password,
        role='editor',
        group_id=foo
    ))
    # a member with access to foo
    session.add(User(
        username='foo-member@example.org',
        password_hash=wtfs_password,
        role='member',
        group_id=foo
    ))
    # an editor with access to bar
    session.add(User(
        username='bar-editor@example.org',
        password_hash=wtfs_password,
        role='editor',
        group_id=bar
    ))
    # a member with access to bar
    session.add(User(
        username='bar-member@example.org',
        password_hash=wtfs_password,
        role='member',
        group_id=bar
    ))
    session.flush()  # assign primary keys so the users can be queried below

    def fetch_user(username):
        # Convenience lookup; .one() raises if the user is missing.
        return session.query(User).filter_by(username=username).one()
    job = session.query(ScanJob).one()
    admin = fetch_user('admin@example.org')
    foo_editor = fetch_user('foo-editor@example.org')
    foo_member = fetch_user('foo-member@example.org')
    bar_editor = fetch_user('bar-editor@example.org')
    bar_member = fetch_user('bar-member@example.org')
    # The job dispatches on 2019-01-01; the cut-off is 17:00 local time on
    # the previous day (2018-12-31, Europe/Zurich).
    dt = sedate.replace_timezone(datetime(2018, 12, 31), 'Europe/Zurich')
    with freeze_time(dt.replace(hour=17, minute=0)):
        # At exactly 17:00 the owning municipality's editor may still delete.
        assert permits(admin, job, DeleteModel)
        assert permits(foo_editor, job, DeleteModel)
        assert not permits(foo_member, job, DeleteModel)
        assert not permits(bar_editor, job, DeleteModel)
        assert not permits(bar_member, job, DeleteModel)
    with freeze_time(dt.replace(hour=17, minute=1)):
        # One minute past the cut-off only the admin may delete.
        assert permits(admin, job, DeleteModel)
        assert not permits(foo_editor, job, DeleteModel)
        assert not permits(foo_member, job, DeleteModel)
        assert not permits(bar_editor, job, DeleteModel)
        assert not permits(bar_member, job, DeleteModel)
|
# -*- mode: python ; coding: utf-8 -*-
from PyInstaller.building.build_main import (
Analysis,
PYZ,
EXE,
COLLECT,
)
import os
import platform
import re
from typing import (
List,
Iterable,
Tuple
)
global SPEC
def project_path() -> str:
    """Absolute path of the project root (two levels above the spec file)."""
    return os.path.realpath(os.path.join(SPEC, os.pardir, os.pardir))
def enumerate_modules(path: str) -> Iterable[str]:
    """Yield the dotted module name of every ``.py``/``.pyx`` file under *path*.

    A directory containing ``__init__.py`` is reported as the package itself;
    every other matching source file is reported as ``package.module``.
    """
    suffix_re = re.compile(r"\.(py|pyx)$")
    root: str = os.path.realpath(path)
    # Number of path components to drop so names start at the last component
    # of *path* (the top-level package).
    skip: int = len(root.split(os.sep)) - 1
    for current_dir, _subdirs, files in os.walk(root):
        package: str = ".".join(current_dir.split(os.sep)[skip:])
        for name in files:
            if name == "__init__.py":
                yield package
            elif suffix_re.search(name):
                yield f"{package}.{suffix_re.sub('', name)}"
def enumerate_data_files(path: str, pattern: str) -> Iterable[Tuple[str, str]]:
    """Yield ``(source_file, destination_dir)`` pairs for every file under
    *path* whose full source path matches the regex *pattern*.

    The destination mirrors the tree relative to *path*'s parent and is
    joined with forward slashes, as PyInstaller's ``datas`` entries expect.
    """
    matcher = re.compile(pattern)
    root: str = os.path.realpath(path)
    skip: int = len(root.split(os.sep)) - 1
    for current_dir, _subdirs, files in os.walk(root):
        destination: str = "/".join(current_dir.split(os.sep)[skip:])
        for name in files:
            source: str = os.path.join(current_dir, name)
            if matcher.search(source) is not None:
                yield source, destination
if "SPEC" in globals():
system_type: str = platform.system()
block_cipher = None
hidden_imports: List[str] = list(enumerate_modules(os.path.join(project_path(), "hummingbot")))
hidden_imports.extend([
"aiokafka",
"pkg_resources.py2_warn",
])
import _strptime
datas: List[Tuple[str, str]] = list(enumerate_data_files(
os.path.join(project_path(), "hummingbot"),
r"(.+\.json|(?:\/|\\)VERSION|templates(?:\/|\\).+\.yml)$"
))
datas.extend([(_strptime.__file__, ".")])
datas.extend([(os.path.join(project_path(), "bin/path_util.py"), ".")])
binaries: List[Tuple[str, str]] = []
if system_type == "Windows":
import coincurve
binaries.extend([(os.path.realpath(os.path.join(coincurve.__file__, "../libsecp256k1.dll")), "coincurve")])
datas.extend([(os.path.realpath(os.path.join(project_path(), "redist/VC_redist.x64.exe")), "redist")])
a = Analysis([os.path.join(project_path(), "bin/bot")],
pathex=[project_path()],
binaries=binaries,
datas=datas,
hiddenimports=hidden_imports,
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='bot',
icon="hummingbot.ico",
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True)
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='bot')
|
import random, player, game_parser
class Game:
    """Holds the set of connected players and dispatches their input to a parser."""

    def __init__(self):
        self.players = {}                           # player key -> player.Player
        self.default_parser = game_parser.Parser()  # shared fallback parser

    # player methods
    def create_player(self, key):
        """Factory for a new Player bound to *key*."""
        return player.Player(key)

    def add_player(self):
        """Create and register a player under a fresh, unused random key."""
        player_key = self.generate_player_key()
        while player_key in self.players:
            player_key = self.generate_player_key()
        new_player = self.create_player(player_key)
        self.players[player_key] = new_player

    def generate_player_key(self):
        """Random candidate key; collisions are resolved by add_player."""
        return random.randint(0, 1000)

    def remove_player(self, leaving_player):
        """Drop the player; returns the removed Player, or False if unknown."""
        return self.players.pop(leaving_player.key, False)

    # game methods
    def parse_string(self, player_input):
        """Tokenize *player_input* and hand the tokens to the default parser.

        Bug fix: the original chained ``.split(" ").append("EOI")`` -- since
        ``list.append`` returns None, the parser was always handed None
        instead of the token list.
        """
        tokens = str(player_input).lower().split(" ")
        tokens.append("EOI")  # end-of-input sentinel expected by the parser
        self.default_parser.parse_input(tokens)
|
from .gaussian import gaussian_filter
from .mean_filter import mean_filter
from .sharpening_filter import sharpening_filter |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sqlalchemy
from twisted.trial import unittest
from twisted.internet import defer
from buildbot.db import pool
class DBThreadPool(unittest.TestCase):
    """Exercises buildbot.db.pool.DBThreadPool against an in-memory SQLite DB."""

    def setUp(self):
        self.engine = sqlalchemy.create_engine('sqlite://')
        # A single worker thread guarantees successive operations share one
        # connection (test_persistence_across_invocations relies on this).
        self.engine.optimal_thread_pool_size = 1
        self.pool = pool.DBThreadPool(self.engine)

    def test_do(self):
        # pool.do(...) runs the callable in a worker thread with a connection
        # and returns a Deferred firing with the callable's result.
        def add(conn, addend1, addend2):
            rp = conn.execute("SELECT %d + %d" % (addend1, addend2))
            return rp.scalar()
        d = self.pool.do(add, 10, 11)
        def check(res):
            self.assertEqual(res, 21)
        d.addCallback(check)
        return d

    def test_do_error(self):
        # Invalid SQL must surface as an errback, not as a result.
        def fail(conn):
            rp = conn.execute("EAT COOKIES")
            return rp.scalar()
        d = self.pool.do(fail)
        def eb(f):
            pass  # expected path: the failure is consumed here
        def cb(r):
            self.fail("no exception propagated")
        d.addCallbacks(cb, eb)
        return d

    def test_do_exception(self):
        # A Python exception raised by the callable must errback as well.
        def raise_something(conn):
            raise RuntimeError("oh noes")
        d = self.pool.do(raise_something)
        def eb(f):
            f.trap(RuntimeError) # make sure it gets the *right* exception
            pass
        def cb(r):
            self.fail("no exception propagated")
        d.addCallbacks(cb, eb)
        return d

    def test_do_with_engine(self):
        # do_with_engine passes the Engine itself instead of a Connection.
        def add(engine, addend1, addend2):
            rp = engine.execute("SELECT %d + %d" % (addend1, addend2))
            return rp.scalar()
        d = self.pool.do_with_engine(add, 10, 11)
        def check(res):
            self.assertEqual(res, 21)
        d.addCallback(check)
        return d

    def test_do_with_engine_exception(self):
        # Same error propagation contract as test_do_error, engine variant.
        def fail(engine):
            rp = engine.execute("EAT COOKIES")
            return rp.scalar()
        d = self.pool.do_with_engine(fail)
        def eb(f):
            pass
        def cb(r):
            self.fail("no exception propagated")
        d.addCallbacks(cb, eb)
        return d

    def test_persistence_across_invocations(self):
        # NOTE: this assumes that both methods are called with the same
        # connection; if they run in parallel threads then it is not valid to
        # assume that the database engine will have finalized the first
        # transaction (and thus created the table) by the time the second
        # transaction runs. This is why we set optimal_thread_pool_size in
        # setUp.
        d = defer.succeed(None)
        def create_table(engine):
            engine.execute("CREATE TABLE tmp ( a integer )")
        d.addCallback( lambda r : self.pool.do_with_engine(create_table))
        def insert_into_table(engine):
            engine.execute("INSERT INTO tmp values ( 1 )")
        d.addCallback( lambda r : self.pool.do_with_engine(insert_into_table))
        return d
|
import re
def check(passCheck):
    """Validate a password made of 5-10 groups, each one alphanumeric
    character optionally surrounded by a single special character.

    Prints "true"/"false" for backward compatibility and returns the
    boolean result.
    """
    # Bug fix: re.match only anchors at the START of the string, so any
    # over-long password whose first 5-10 groups matched (e.g. 11+ plain
    # characters) was wrongly accepted. re.fullmatch anchors both ends.
    match = re.fullmatch(r"([!@#$%^&*()\-+[\]'\";?/<>,\.=\\\|`~]?[a-zA-Z0-9][!@#$%^&*()\-+[\]'\";?/<>,\.=\\\|`~]?){5,10}", passCheck)
    if match:
        print("true")
        return True
    print("false")
    return False
passW = input("Insert Password: ")
check(passW) |
import requests
from bs4 import BeautifulSoup
# Logs in to class.likelion.net with a requests Session, then scrapes each
# member page's university, name and completion percentage.
# NOTE(review): the Korean literals below appear mojibake-garbled in this
# copy of the file; they are runtime strings and are preserved byte-for-byte.
#id, pw input์ผ๋ก ๋ฐ๊ธฐ
yourid=input("์์ด๋๋ฅผ ์
๋ ฅํ์ธ์ : ")
yourpwd=input("๋น๋ฐ๋ฒํธ๋ฅผ ์
๋ ฅํ์ธ์ : ")
#๋ฆฌํ์คํธ์์ ์ธ์
๊ฐ์ฒด๋ฅผ ์์ฑ
session=requests.Session()
#ํด๋น url์ GET ๋ฐฉ์์ผ๋ก ์ฌ์ดํธ ์คํ(์ธ์
: ๋ธ๋ผ์ฐ์ ธ์์ ์นํ์ด์ง๋ฅผ ์ฌ๋ ๊ฒ)
r=session.get("http://class.likelion.net/users/sign_in")
#๋์ผ session์์ BeautifulSoup ์ค์
html=BeautifulSoup(r.text,"html.parser")
# Grab the hidden CSRF token required by the login form, then POST it along
# with the credentials.
#๋ก๊ทธ์ธ์ ํ์ํ hidden input์ธ token๊ณผ params ๋ฐ์ดํฐ ๊ฐ์ ธ์ค๊ธฐ
token=html.input.next_sibling["value"]
params={'user[email]':yourid ,'user[password]':yourpwd,'authenticity_token':token}
#๋ก๊ทธ์ธ url์ POST ๋ฐฉ์์ผ๋ก ์ฌ์ดํธ ์คํ
r=session.post("http://class.likelion.net/users/sign_in",params)
# First page range. NOTE(review): the try wraps the whole for-loop, so a
# single missing profile (AttributeError) aborts the remainder of the range;
# moving the try inside the loop would let the scrape continue.
try:
    for n in range(1,1229):
        r=session.get("http://class.likelion.net/home/mypage/{0}".format(n))
        html=BeautifulSoup(r.text,"html.parser")
        for luniv in html.find("div", {"class":"user-profile"}).span.stripped_strings:
            print(luniv)
        for lname in html.find("div", {"class":"user-profile"}).a.stripped_strings:
            print(str(n)+" "+lname)
        for lpercent in html.find("p", {"class":"percent"}).stripped_strings:
            print(lpercent)
        print("---------------")
except AttributeError:
    print(str(n)+": ์กด์ฌํ์ง ์๋ ๋ฒํธ์
๋๋ค")
# Second page range, same handling as above.
try:
    for n in range(1229,1417):
        r=session.get("http://class.likelion.net/home/mypage/{0}".format(n))
        html=BeautifulSoup(r.text,"html.parser")
        for luniv in html.find("div", {"class":"user-profile"}).span.stripped_strings:
            print(luniv)
        for lname in html.find("div", {"class":"user-profile"}).a.stripped_strings:
            print(str(n)+" "+lname)
        for lpercent in html.find("p", {"class":"percent"}).stripped_strings:
            print(lpercent)
        print("---------------")
except AttributeError:
    print(str(n)+": ์กด์ฌํ์ง ์๋ ๋ฒํธ์
๋๋ค")
    pass
#try, except๋ก ์๋ฌ ์์ธ์ฒ๋ฆฌํ๊ธฐ
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from hummingbot.client.hummingbot_application import HummingbotApplication
class PaperTradeCommand:
    """Mixin for HummingbotApplication providing the ``paper_trade`` command."""

    def paper_trade(self,  # type: HummingbotApplication
                    ):
        # Delegates to the generic config command for the paper-trade flag.
        self.config("paper_trade_enabled")
|
import numpy as np
from sklearn.utils import class_weight
from .building_step.build_step import build_model
from .testing_step.testing_step import test_model
from .fitting_step.fitting_step import fit_model
def train_model(training_data, optimizer="adam", learning_rate=0.001, loss="binary_crossentropy", num_epo=20, model_type="pooling", class_weights=None, early_stopping=False):
    """
    Train a model with pre-processed data.
    @type training_data: {train: DataFrameIterator, validation: DataFrameIterator, test: DataFrameIterator}
    @param training_data: All the data needed for training
    @type optimizer: string
    @param optimizer: The optimizer used to train the model
    @default "adam"
    @type learning_rate: float
    @param learning_rate: The learning rate of the optimizer
    @default 0.001
    @type loss: string
    @param loss: The loss used to train the model
    @default "binary_crossentropy"
    @type num_epo: int
    @param num_epo: Number of epochs for training phase
    @default 20
    @type model_type: string
    @param model_type: The type of the model we want to train (e.g. pooling, flatten...)
    @default "pooling"
    @type class_weights: None | (float, float) | float | "balanced"
    @param class_weights: Class weights. Either None for equal weights, (pos_class_weight, neg_class_weight), pos_class_weight (then neg_class_weight = 1-pos_class_weight) or balanced for automatic computation
    @default None
    @type early_stopping: boolean
    @param early_stopping: Stop training if validation loss doesn't change
    @default False
    @rtype: {model: Keras model, test_predictions: numpy array, test_metrics: dict of metrics, history: training history}
    @return: The model, the predictions, the metrics and the history
    """
    train_generator = training_data["train"]
    validation_generator = training_data["validation"]
    test_generator = training_data["test"]
    (img_h, img_w) = train_generator.target_size
    # Normalize the user-supplied class_weights into the shape the chosen
    # loss expects: a (pos, neg) tuple for the weighted loss, or a
    # sklearn-style weight array otherwise.
    formatted_class_weights = class_weights
    if isinstance(class_weights, float):
        formatted_class_weights = (class_weights, 1 - class_weights)
    elif class_weights == "balanced":
        if loss == "weighted_binary_crossentropy":
            # NOTE(review): list.count() requires .classes to be a list; if
            # the generator exposes a numpy array this raises -- confirm.
            positive_class_weight = train_generator.classes.count(
                0) / len(train_generator.classes)
            formatted_class_weights = (
                positive_class_weight, 1 - positive_class_weight)
        else:
            formatted_class_weights = class_weight.compute_class_weight(
                'balanced', classes=np.unique(train_generator.classes), y=train_generator.classes)
    elif class_weights is None and loss == "weighted_binary_crossentropy":
        # Bug fix: was ``class_weights == None``. With an ndarray input that
        # comparison is elementwise and its truth value is ambiguous;
        # identity (``is None``) is the correct check.
        formatted_class_weights = (0.5, 0.5)
    print("Computed class weights: {}".format(formatted_class_weights))
    # First we build the model
    model = build_model(img_h, img_w, optimizer,
                        learning_rate, loss, formatted_class_weights, model_type)
    # Then we train it. A weighted loss already embeds the weights, so Keras
    # must not apply them a second time through fit().
    global_class_weights = None if loss == "weighted_binary_crossentropy" else formatted_class_weights
    model, history = fit_model(model, train_generator, validation_generator,
                               num_epo, global_class_weights, early_stopping)
    # Finally we test it
    results_test = test_model(model, test_generator)
    return {"model": model, "test_predictions": results_test[0], "test_metrics": {"accuracy": results_test[1]}, "history": history}
|
def short_story(idx=0):
    """Print the rhyme starting at recursion depth *idx*.

    Iterative rewrite of the original self-recursive version: the verses are
    printed once per value of idx up to and including 100 (so 101 times when
    called with the default idx=0), exactly as before.
    """
    while True:
        print("ะฃ ะฟะพะฟะฐ ะฑัะปะฐ ัะพะฑะฐะบะฐ, ะพะฝ ะตะต ะปัะฑะธะป.")
        print("ะะฝะฐ ััะตะปะฐ ะบััะพะบ ะผััะฐ, ะพะฝ ะตะต ัะฑะธะป,")
        print("ะ ะทะตะผะปั ะทะฐะบะพะฟะฐะป ะธ ะฝะฐะดะฟะธัั ะฝะฐะฟะธัะฐะป:")
        if idx >= 100:
            break
        idx += 1
|
import vkquick as vq
import typing
from src.config import complete_sticker, error_sticker
from src.filters.error_handler import ErrorHandler
from src.misc import app
from src.database.base import location
@app.command("+ะดั", invalid_argument_config=ErrorHandler())
async def friend_add(ctx: vq.NewMessage, user: vq.User):
try:
method = await ctx.api.friends.add(user_id=user.id)
await ctx.edit(f"{complete_sticker} ะัะฟะพะปะฝะตะฝะธะต...")
if method == 1:
await ctx.edit(f"{complete_sticker} ะะฐัะฒะบะฐ ะฒ ะดััะทัั ะพัะฟัะฐะฒะปะตะฝะฐ ะฟะพะปัะทะพะฒะฐัะตะปั {user:@[fullname]} ะพัะฟัะฐะฒะปะตะฝะฐ.")
elif method == 2:
await ctx.edit(f"{complete_sticker} ะะฐัะฒะบะฐ ะฝะฐ ะดะพะฑะฐะฒะปะตะฝะธะต ะฒ ะดััะทัั ะพั {user:@[fullname} ะพะดะพะฑัะตะฝะฐ.")
elif method == 4:
await ctx.edit(f"{error_sticker} ะะพะฒัะพัะฝะฐั ะพัะฟัะฐะฒะบะฐ ะทะฐัะฒะบะธ.")
except vq.APIError[vq.CODE_174_FRIENDS_ADD_YOURSELF]:
await ctx.edit(f"{error_sticker} ะะตะฒะพะทะผะพะถะฝะพ ะดะพะฑะฐะฒะธัั ะฒ ะดััะทัั ัะฐะผะพะณะพ ัะตะฑั.")
@app.command("-ะดั", invalid_argument_config=ErrorHandler())
async def friend_delete(ctx: vq.NewMessage, user: vq.User):
method = await ctx.api.friends.delete(user_id=user.id)
await ctx.edit(f"{complete_sticker} ะัะฟะพะปะฝะตะฝะธะต...")
if method['success']:
await ctx.edit(f"{complete_sticker} {user:@[fullname]} ัะดะฐะปะตะฝ ะธะท ัะฟะธัะบะฐ ะดััะทะตะน.")
elif method['out_request_deleted']:
await ctx.edit(f"{complete_sticker} ะัะผะตะฝะตะฝะฐ ะธัั
ะพะดััะฐั ะทะฐัะฒะบะฐ ะฒ ะดััะทัั ะพั ะฟะพะปัะทะพะฒะฐัะตะปั {user:@[fullname]}")
elif method['in_request_deleted']:
await ctx.edit(f"{complete_sticker} ะัะบะปะพะฝะตะฝะฐ ะฒั
ะพะดััะฐั ะทะฐัะฒะบะฐ ะฒ ะดััะทัั ะพั ะฟะพะปัะทะพะฒะฐัะตะปั {user:@[fullname]}")
@app.command("ะธะด", invalid_argument_config=ErrorHandler())
async def revolve_user(ctx: vq.NewMessage, user: vq.User):
await ctx.edit(f"{complete_sticker} ะะนะดะธ ะฟะพะปัะทะพะฒะฐัะตะปั {user.fullname}: [id{user.id}|{user.id}]")
@app.command("ะฒะปั", invalid_argument_config=ErrorHandler())
async def send_message(ctx: vq.NewMessage, user: vq.User, *, text: str):
await ctx.api.messages.send(
user_id=user.id,
random_id=0,
message=text
)
await ctx.edit(f"{complete_sticker} ะกะพะพะฑัะตะฝะธะต ะฑัะปะพ ะพัะฟัะฐะฒะปะตะฝะพ ะฟะพะปัะทะพะฒะฐัะตะปั : {user:@[fullname]}")
@app.command("+ะปะฐะนะบ", invalid_argument_config=ErrorHandler())
async def likes_add(ctx: vq.NewMessage, user: vq.User[typing.Literal["photo_id"]]):
photo_id = user.fields["photo_id"].split("_")[1]
count_likes = await ctx.api.likes.add(type='photo', owner_id=user.id, item_id=photo_id)
await ctx.edit(
f"{complete_sticker} ะะฐะนะบ ะฝะฐ ะฐะฒะฐัะฐัะบั ะฟะพะปัะทะพะฒะฐัะตะปั {user:@[fullname]} ะพัะพัะผะปะตะฝ!\n"
f"โ ะกัะฐะปะพ ะปะฐะนะบะพะฒ: {count_likes['likes']}")
@app.command("-ะปะฐะนะบ", invalid_argument_config=ErrorHandler())
async def likes_delete(ctx: vq.NewMessage, user: vq.User[typing.Literal["photo_id"]]):
photo_id = user.fields["photo_id"].split("_")[1]
count_likes = await ctx.api.likes.delete(type='photo', owner_id=user.id, item_id=photo_id)
await ctx.edit(
f"{complete_sticker} ะะฐะนะบ ะฝะฐ ะฐะฒะฐัะฐัะบั ะฟะพะปัะทะพะฒะฐัะตะปั {user:@[fullname]} ัะฑัะฐะฝ!\n"
f"โ ะกัะฐะปะพ ะปะฐะนะบะพะฒ: {count_likes['likes']}")
@app.command("ะดะธะฐะปะพะณะธ", invalid_argument_config=ErrorHandler())
async def dialog_get(ctx: vq.NewMessage):
dialogs = await ctx.api.messages.getConversations()
await ctx.edit(f"{complete_sticker} ะะพะปะธัะตััะฒะพ ะฒะฐัะธั
ะดะธะฐะปะพะณะพะฒ: {dialogs['count']}")
@app.command("ัะฐั", invalid_argument_config=ErrorHandler())
async def get_chat(ctx: vq.NewMessage):
chat = await ctx.api.messages.getChat(chat_id=ctx.msg.chat_id)
await ctx.edit(f"โ ะะฝัะพัะผะฐัะธั ะพ ัะฐัะต\n"
f"๐ก ะะฐะทะฒะฐะฝะธะต ัะฐัะฐ : {chat['title']}\n"
f"{complete_sticker} ะะพะปะธัะตััะฒะพ ััะฐััะฝะธะบะพะฒ : {chat['members_count']}\n"
f"โ ะะนะดะธ ัะฐัะฐ : {chat['id']}")
@app.command("ะบะธะบ")
async def chat(ctx: vq.NewMessage, user: vq.User):
try:
method = await ctx.api.messages.removeChatUser(chat_id=ctx.msg.chat_id, user_id=user.id)
await ctx.edit( f"{complete_sticker} ะัะบะปััะตะฝะธะต")
if method == 1:
await ctx.edit( f"{complete_sticker} ะะพะปัะทะพะฒะฐัะตะปั {user:@[fullname]} ะธัะบะปััะตะฝ ะธะท ะฑะตัะตะดั.")
except vq.APIError[vq.CODE_925_MESSAGES_CHAT_NOT_ADMIN]:
await ctx.edit( f"{error_sticker} ะะตั ะดะพัััะฟะฐ.")
except vq.APIError[vq.CODE_935_MESSAGES_CHAT_USER_NOT_IN_CHAT]:
await ctx.edit( f"{error_sticker} ะะพะปัะทะพะฒะฐัะตะปั ะฝะตัั ะฒ ะฑะตัะตะดะต.")
except vq.APIError[vq.CODE_945_MESSAGES_CHAT_DISABLED]:
await ctx.edit( f"{error_sticker} MESSAGES_CHAT_DISABLED")
except vq.APIError[vq.CODE_946_MESSAGES_CHAT_UNSUPPORTED]:
await ctx.edit( f"{error_sticker} MESSAGES_CHAT_UNSUPPORTED")
except vq.APIError[vq.CODE_15_ACCESS]:
await ctx.edit( f"{error_sticker} ะะตั ะดะพัััะฟะฐ.")
@app.command("ะดะพะฑะฐะฒะธัั")
async def chat(ctx: vq.NewMessage , user: vq.User):
try:
method = await ctx.api.messages.addChatUser(
chat_id=ctx.msg.chat_id,
user_id=user.id
)
await ctx.edit(f"{complete_sticker} ะะพะฑะฐะฒะปัั ะฟะพะปัะทะพะฒะฐัะตะปั {user:@[fullname]}")
if method == 1:
await ctx.edit(f"{complete_sticker} โ
ะะพะปัะทะพะฒะฐัะตะปั {user:@[fullname]} ะดะพะฑะฐะฒะปะตะฝ.")
except vq.APIError[vq.CODE_925_MESSAGES_CHAT_NOT_ADMIN]:
await ctx.edit(f"{error_sticker} You are not admin of this chat")
except vq.APIError[vq.CODE_932_MESSAGES_GROUP_PEER_ACCESS]:
await ctx.edit(f"{error_sticker} Your community can't interact with this peer")
except vq.APIError[vq.CODE_947_MESSAGES_MEMBER_ACCESS_TO_GROUP_DENIED]:
await ctx.edit(f"{error_sticker} Can't add user to chat, because user has no access to group")
except vq.APIError[vq.CODE_15_ACCESS]:
await ctx.edit(f"{error_sticker} Access denied: can't add this user")
@app.command("ัะตะฟะพัั")
async def report(ctx: vq.NewMessage , user: vq.User):
method = await ctx.api.users.report(
user_id=user.id,
type='spam'
)
await ctx.edit("โ
ะัะฟะพะปะฝะตะฝะธะต...")
if method == 1:
await ctx.edit(f"{complete_sticker} ะะฐะปะพะฑะฐ ะฝะฐ ะฟะพะปัะทะพะฒะฐัะตะปั {user:@[fullname]} ะฑัะปะฐ ััะฟะตัะฝะพ ะพัะฟัะฐะฒะปะตะฝะฐ ะฝะฐ ะผะพะดะตัะฐัะธั.")
|
from datetime import date
import boundaries
# Register Statistics Canada's 2011 federal electoral district boundary set
# with the represent-boundaries loader; shapefile attributes FEDENAME/FEDUID
# provide the human name and the stable identifier.
boundaries.register('Federal electoral districts',
    domain='Canada',
    last_updated=date(2011, 11, 28),
    name_func=boundaries.clean_attr('FEDENAME'),
    id_func=boundaries.attr('FEDUID'),
    slug_func=boundaries.attr('FEDUID'),
    authority='Her Majesty the Queen in Right of Canada',
    source_url='http://data.gc.ca/data/en/dataset/48f10fb9-78a2-43a9-92ab-354c28d30674',
    licence_url='http://data.gc.ca/eng/open-government-licence-canada',
    data_url='http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/files-fichiers/gfed000a11a_e.zip',
    encoding='iso-8859-1',
    metadata={'geographic_code': '01'},
)
|
import argparse
import random
import os
from itertools import islice
# Command-line interface. All three flags are required; note that parsing
# happens at import time -- the __main__ guard below only gates the calls.
parser = argparse.ArgumentParser(description='Shuffle training pairs')  # fixed typo: was "paris"
parser.add_argument('-p', '--pair_fp', help='pair file', required=True)
parser.add_argument('-d', '--dist_fp', help='dist file', required=True)
parser.add_argument('-s', '--seed', help='random seed', required=True, type=int)
args = parser.parse_args()
def shuffle_dist(fp, seed):
    """Shuffle the lines of *fp* deterministically with *seed*.

    Prints the line count and writes the result to ``<stem>_shuffle<ext>``
    next to the input file.
    """
    with open(fp) as src:
        lines = src.readlines()
    print(len(lines))
    random.seed(seed)
    random.shuffle(lines)
    stem, ext = os.path.splitext(fp)
    with open(stem + '_shuffle' + ext, 'w') as dst:
        dst.writelines(lines)
def shuffle_pair(fp, seed):
    """Shuffle *fp* in 4-line records, deterministically with *seed*.

    Records keep their internal line order; only the record order changes.
    Prints the record count and writes ``<stem>_shuffle<ext>``.
    """
    with open(fp) as src:
        lines = src.readlines()
    # Group consecutive lines into records of four; a trailing partial
    # record is kept as-is (same as the original islice-based chunking).
    records = [''.join(lines[i:i + 4]) for i in range(0, len(lines), 4)]
    print(len(records))
    random.seed(seed)
    random.shuffle(records)
    stem, ext = os.path.splitext(fp)
    with open(stem + '_shuffle' + ext, 'w') as dst:
        dst.writelines(records)
if __name__ == "__main__":
shuffle_dist(args.dist_fp, args.seed)
shuffle_pair(args.pair_fp, args.seed)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/1/6 5:31 ไธๅ
# @Author : Anson
# @Contact : 1047053860@qq.com
# @Software: PyCharm
# @content :
import execjs
# NOTE(review): the return value of execjs.get() is discarded -- presumably
# meant to select/verify a JS runtime; confirm whether this call is needed.
execjs.get()
# NOTE(review): hard-coded credentials committed to source -- move them to
# environment variables or a local config file.
user = "1047053860@qq.com"
password = "woainizhongguo135"
# Compile the site's login script and encrypt the password the same way the
# browser-side JavaScript would before submitting the login form.
with open('./openlaw_login.js', 'r', encoding='utf-8') as f:
    login_js = execjs.compile(f.read())
keyEncrypt_password = login_js.call('keyEncrypt', password)
print(keyEncrypt_password)
|
class ReverseWordsInString:
    """String-reversal exercises (Leetcode 541, 557 and 151)."""

    def reverseSubstring(self, s, k):
        """
        Reverse the first k characters for every 2k characters
        counting from the start of the string. (Leetcode 541. Reverse String II)
        Example:
        Input: s = "abcdefg", k = 2
        Output: "bacdfeg"
        Parameters:
        s(str): input string.
        k(int): length of substring.
        Returns:
        res_str(str): reversed string.
        """
        # Walk the string in k-sized chunks; even-indexed chunks (including
        # a short trailing one) are reversed, odd-indexed chunks are kept.
        pieces = []
        for index, start in enumerate(range(0, len(s), k)):
            chunk = s[start:start + k]
            pieces.append(chunk[::-1] if index % 2 == 0 else chunk)
        return "".join(pieces)

    def reverseCharactersInWords(self, s):
        """
        Reverse the order of characters in each word within a sentence while still preserving
        whitespace and initial word order. (Leetcode 557. Reverse Words in a String III)
        Example:
        Input: "Let's take LeetCode contest"
        Output: "s'teL ekat edoCteeL tsetnoc"
        Parameters:
        s(str): input string
        Return:
        res_str(str): output string
        """
        # split(" ") keeps empty strings for consecutive/leading/trailing
        # spaces, so re-joining with " " preserves the whitespace exactly.
        return " ".join(word[::-1] for word in s.split(" "))

    def reverseWordInString(self, s):
        """
        Reverse the word orders in a string. (Leetcode 151. Reverse Words in a String)
        Example:
        Input: "the sky is blue"
        Output: "blue is sky the"
        Leading/trailing spaces are trimmed and runs of spaces collapse to a
        single separator in the result.
        Parameters:
        s(str): input sentence as a string.
        Return:
        res_str(str): sentence with reversed word order as input.
        """
        # corner case
        if not s:
            return s
        # Trim the edges, split on single spaces and drop the empty pieces
        # produced by repeated spaces, then emit the words back-to-front.
        words = [word for word in s.strip().split(" ") if word]
        return " ".join(reversed(words))
class ReverseWordsInCharArray:
    """
    Reverse array of string - can happen in place.
    """
    def reverseStringHelper(self, s):
        """
        Reverse the input character array in place. (Leetcode 344. Reverse String)
        Example:
        s = ["h","e","l","l","o"]
        s = ['o', 'l', 'l', 'e', 'h']
        Parameters:
        s(list): mutable sequence of characters to reverse.
        Returns:
        None for non-empty input (the reversal happens in place); an empty
        input is returned unchanged, mirroring the original behavior.
        """
        if not s:
            return s
        # Classic two-pointer swap from both ends toward the middle.
        lo, hi = 0, len(s) - 1
        while lo < hi:
            s[lo], s[hi] = s[hi], s[lo]
            lo, hi = lo + 1, hi - 1
        return
if __name__ == "__main__":
# 541. Reverse String II
str_arr = [("abcde", 2), ("abcdef", 3), ("abcdefg", 4)]
# print(ReverseWordsInString.reverseSubstring.__doc__)
for s, k in str_arr:
print("Input string: " + s + "\nk: " + str(k))
print("Result: " + ReverseWordsInString().reverseSubstring(s, k))
print("------------------")
# 557. Reverse Words in a String III
str_arr2 = ["Let's take LeetCode contest", "Let's take LeetCode contest "]
for s in str_arr2:
print("Input string: " + s)
print("Result: " + ReverseWordsInString().reverseCharactersInWords(s))
print("------------------")
# 151. Reverse Words in a String
str_arr3 = ["the sky is blue", " hello world! ", "a good example"]
for s in str_arr3:
print("Input string: " + s)
print("Result: " + ReverseWordsInString().reverseWordInString(s))
print("------------------")
input_str = "the sky is blue"
input_arr = ["h","e","l","l","o"]
ReverseWordsInCharArray().reverseStringHelper(input_arr)
print(input_arr)
|
"""
Test a few of the features of pyccl upon which we rely, but which might change in
future release of pyccl.
"""
import pytest
import pyccl
# Both sets of cosmological parameters are silly, but they are sufficient to initialize
# a pyccl.Cosmology object.
@pytest.fixture(name="cosmo_params_1")
def fixture_cosmo_params_1():
return {"Omega_c": 0.0, "Omega_b": 0.0, "h": 1.0, "A_s": 0.0, "n_s": 0.0}
@pytest.fixture(name="cosmo_params_2")
def fixture_cosmo_params_2():
return {"Omega_c": 0.0, "Omega_b": 0.0, "h": 1.0, "A_s": 0.0, "n_s": 0.25}
def test_alias_of_cosmology_hashes_equal(cosmo_params_1):
x = pyccl.Cosmology(**cosmo_params_1)
y = x
assert x == y
assert hash(x) == hash(y)
def test_unequal_cosmologies_hash_unequal(cosmo_params_1, cosmo_params_2):
x = pyccl.Cosmology(**cosmo_params_1)
y = pyccl.Cosmology(**cosmo_params_2)
assert x != y
assert hash(x) != hash(y)
def test_equal_cosmologies_hash_equal(cosmo_params_1):
# This test verifies expected (but not really desired) behavior. Two
# Cosmology variables only have equal hashes when they are in fact they
# are aliases for the same object. They also test as equal only if they
# are aliases for the same object.
x = pyccl.Cosmology(**cosmo_params_1)
y = pyccl.Cosmology(**cosmo_params_1)
# This behavior will change in future versions of pyccl,
# making this test always pass. TODO: update this test when
# pyccl is updated.
assert (x != y and hash(x) != hash(y)) or (x == y and hash(x) == hash(y))
|
# -*- coding: utf-8 -*-
__author__ = "Kjersti Rustad Kvisberg & Ida Lunde Naalsund"
__email__ = "kjkv@nmbu.no, idna@nmbu.no"
import numpy
import matplotlib.pyplot as plt
import cv2
import os
# Define global variables
h_bar = 1.055E-34
m = 9.109E-31
def psi(pos, pos_0, E, sigma, x_start_step, V_0):
    """ Time-independent wave function for wave packet.

    A Gaussian envelope of width sigma centred at pos_0, modulated by a
    plane-wave phase whose local wave number follows the step potential.
    Uses the module-level constants h_bar and m.
    """
    wave_number = numpy.sqrt(2 * m * (E - V(pos, x_start_step, V_0))) / h_bar
    normalization = 1 / (numpy.pi ** (1 / 4) * numpy.sqrt(sigma))
    envelope = numpy.exp(-0.5 * ((pos - pos_0) ** 2) / (sigma ** 2))
    phase = numpy.exp(1j * wave_number * (pos - pos_0))
    return normalization * envelope * phase
def V(x, x_start_step, V_0):
    """Step potential: V_0 at and beyond x_start_step, zero before it."""
    return V_0 if x >= x_start_step else 0
def reflection_coef(x_values, phi_values):
    """Calculates reflection coefficient for wave packet.

    Integrates |phi|^2 over the left half of the grid (before the step).

    Bug fix: numpy.trapz's signature is ``trapz(y, x)`` -- the original call
    passed the coordinates as y and the integrand as x, so it integrated the
    wrong quantity. (In NumPy 2.0 ``trapz`` was renamed ``trapezoid``.)
    """
    half = int(len(x_values) / 2)
    density = numpy.conj(phi_values[:half]) * phi_values[:half]
    return abs(numpy.trapz(density, x_values[:half]))
def transmission_coef(x_values, phi_values):
    """Calculates transmission coefficient for wave packet.

    Integrates |phi|^2 over the right half of the grid (beyond the step).

    Bug fix: numpy.trapz's signature is ``trapz(y, x)`` -- the original call
    passed the coordinates as y and the integrand as x, so it integrated the
    wrong quantity. (In NumPy 2.0 ``trapz`` was renamed ``trapezoid``.)
    """
    half = int(len(x_values) / 2)
    density = numpy.conj(phi_values[half:]) * phi_values[half:]
    return abs(numpy.trapz(density, x_values[half:]))
if __name__ == '__main__':
    # Initialize constants and arrays
    sigma = 1E-8                 # packet width [m]
    E = 0.2 * 1.602E-19          # particle energy [J] (0.2 eV)
    V_0 = 0.16 * 1.602E-19       # step height [J] (0.16 eV)
    x_start_step = 100E-9        # where the potential step begins [m]
    x_0 = 50E-9                  # initial packet centre [m]
    L = 200E-9                   # simulation box length [m]
    delta_pos = 1.5E-10          # spatial grid spacing [m]
    delta_t = 2.25E-19           # time step [s]
    time_steps = 2E6
    plot_step = 5000             # save a frame every plot_step iterations
    x_values = numpy.arange(0, L, delta_pos)
    psi_values = numpy.array(
        [psi(pos, x_0, E, sigma, x_start_step, V_0) for pos in x_values])
    # Hard-wall boundary conditions at both box edges.
    psi_values[0] = 0
    psi_values[-1] = 0
    V_values = numpy.array([V(pos, x_start_step, V_0) for pos in x_values])
    a = delta_t / (1j * h_bar)
    b = - (h_bar ** 2) / (2 * m)
    counter = 0
    img_num = 0
    for time in range(int(time_steps)):
        # Build the wave packet
        # Second spatial derivative via the three-point stencil; padding
        # keeps the array length constant at the boundaries.
        sec_deriv_psi = (numpy.pad(psi_values[1:], (0, 1), 'constant',
                                   constant_values=0)
                         + numpy.pad(psi_values[:-1], (1, 0), 'constant',
                                     constant_values=0) - 2 * psi_values) \
            / delta_pos ** 2
        # Explicit Euler step of the time-dependent Schroedinger equation.
        phi_values = psi_values + a * (b * sec_deriv_psi + V_values *
                                       psi_values)
        # Define wave packet at boundaries
        phi_values[-1] = 0
        phi_values[0] = 0
        # Plot the wave packet
        if counter % plot_step == 0:
            fig = plt.figure()
            plt.plot(x_values, (phi_values * numpy.conj(phi_values)))
            plt.title("Propagation of wave packet")
            plt.xlabel("x [m]")
            plt.ylabel("Probability density")
            fig.savefig(f'img{img_num:03d}.png')
            plt.close(fig)
            plt.show()
            img_num += 1
        # Plot first and last image
        if counter == 0:
            fig = plt.figure()
            plt.plot(x_values, (phi_values * numpy.conj(phi_values)))
            plt.title("Propagation of wave packet for t = 0")
            plt.xlabel("x [m]")
            plt.ylabel("Probability density")
            plt.show()
        if counter == 1995000:
            fig = plt.figure()
            plt.plot(x_values, (phi_values * numpy.conj(phi_values)))
            plt.title("Propagation of wave packet for time step 2E6")
            plt.xlabel("x [m]")
            plt.ylabel("Probability density")
            plt.show()
        psi_values = phi_values
        counter += 1
    # Calculate reflection and transmission coefficients
    # of propagated wave packet
    R = reflection_coef(x_values, phi_values)
    T = transmission_coef(x_values, phi_values)
    print(f"The reflection coefficient is: {R}")
    print(f"The transmission coefficient is: {T}")
    print(f"The total probability is: {R + T}")
    # Create video
    # NOTE(review): hard-coded absolute Windows path breaks on any other
    # machine; consider deriving it from __file__ or a CLI argument.
    image_folder = 'C:/Users/Bruker/Documents/Programmering/Oblig_FYS245'
    video_name = 'Wave_packet_propagation.avi'
    images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
    frame = cv2.imread(os.path.join(image_folder, images[0]))
    height, width, layers = frame.shape
    video = cv2.VideoWriter(video_name, 0, 1, (width, height))
    for image in images:
        video.write(cv2.imread(os.path.join(image_folder, image)))
    video.release()
    cv2.destroyAllWindows()
|
from django import forms
from django.utils import timezone
from .models import Post, Comment, Tag
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class PostForm(forms.ModelForm):
    """Create/edit form for Post with a crispy-forms 'Save' submit button."""
    class Meta:
        model = Post
        # Publication fields are managed elsewhere; only content fields are exposed.
        fields = ('title', 'text', 'cover', 'attachment', 'tags', )
    # Shared crispy-forms helper (class-level: one instance for all forms).
    helper = FormHelper()
    helper.form_method = 'POST'
    helper.add_input(Submit('submit', 'Save'))
class CommentForm(forms.ModelForm):
    """Create form for Comment with a crispy-forms 'Send' submit button."""
    class Meta:
        model = Comment
        fields = ('author', 'text', )
    # Shared crispy-forms helper (class-level: one instance for all forms).
    helper = FormHelper()
    helper.form_method = 'POST'
    helper.add_input(Submit('submit', 'Send'))
class TagForm(forms.ModelForm):
    """Create/edit form for Tag, letting the user attach published posts.

    Bug fix: the original evaluated the ``posts`` choices at import time
    (module load), so posts published after the process started never
    appeared, and importing the module without a ready database crashed.
    Choices are now refreshed per form instance in ``__init__``.
    """
    posts = forms.MultipleChoiceField(
        required=False,
        choices=[],  # filled lazily in __init__
    )

    class Meta:
        model = Tag
        fields = ('name', 'posts', )

    helper = FormHelper()
    helper.form_method = 'POST'
    helper.add_input(Submit('submit', 'Save'))

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Query published posts at instantiation time so the list is current.
        self.fields['posts'].choices = [
            (post.pk, post.title)
            for post in Post.objects.filter(published_date__lte=timezone.now())
        ]
|
#------------------------- Imports -------------------------------
import pandas as pd
import dash
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.express as px
import plotly.graph_objects as go
from wordcloud import WordCloud, STOPWORDS
from nltk.corpus import stopwords
import webbrowser
from threading import Timer
from layouts import (
header,
footer,
piechart,
check_review,
wordcloud_bargraph
)
from layouts.check_review import load_model
#----------------------------------------------- Global Variables --------------------------------------------
# Dash application instance; suppress_callback_exceptions because callbacks
# reference components created by the layout modules.
app = dash.Dash(__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}], suppress_callback_exceptions = True)
# Pre-computed word/frequency table used by the bar-graph callback.
df2 = pd.read_csv('data/words_frequency.csv', encoding='utf-8')
# Port used both by run_server and by open_browser().
port = 8050
#----------------------------------------------- Defining App Layout ------------------------------------------
def create_app_ui():
    """Assemble and return the full Dash page layout.

    Sections, top to bottom: header, title banner, pie-chart analysis,
    interactive review checker, word cloud / bar graph, footer.
    """
    app_layout = html.Div([
        header.create_layout(app),
        html.Div([
            html.H1(['Sentiment Analysis of Customer Reviews on ',
                html.A(['Etsy.com'], href='https://www.etsy.com', target='_blank', id='etsy-link')],
                id='main-title'),
        ], id='top'),
        html.H3(['Pie Chart Analysis'], id='piechart', className='subtitle'),
        html.Div([
            piechart.create_layout(app)
        ], id='piechart-main-div'),
        html.Div([
            html.H3('Check Sentiment Of Your Review', id='check-review', className='subtitle'),
            check_review.create_layout(app)
        ], id='check-review-main-div'),
        html.Div([
            html.H3('Most Frequent Words From Etsy.com Reviews', id='wordcloud', className='subtitle'),
            # NOTE(review): unlike the other sections this layout takes no app
            # argument — confirm that is intended in layouts/wordcloud_bargraph.
            wordcloud_bargraph.create_layout()
        ]),
        footer.create_layout(app)
    ], id='main_app_layout', className='app-layout-div')
    return app_layout
#----------------------------------------------- Defining Functions ---------------------------------------------------
def open_browser():
    """Open the default web browser at the locally served Dash app."""
    webbrowser.open_new(f"http://localhost:{port}")
def black_color_func(word, font_size, position, orientation, random_state=None, **kwargs):
    """WordCloud color function: paint every word near-black, ignoring all word attributes."""
    # Fixed hue/saturation/lightness string understood by PIL.
    return "hsl(0,100%, 1%)"
#----------------------------------------------- App Callbacks ---------------------------------------------------
@app.callback(
    Output(component_id='review-result', component_property='children'),
    [
        Input(component_id='textarea-click-button', component_property='n_clicks')
    ],
    [
        State('text-area-box', 'value')
    ])
def update_text_review(n_clicks, textarea_value):
    """Classify the text typed in the textarea once the button is clicked.

    Bug fix: Dash fires this callback on page load with ``n_clicks=None``;
    ``None > 0`` raises TypeError on Python 3, so test truthiness instead.
    """
    print('Data type of textarea_value: ', str(type(textarea_value)))
    print('Value of textarea_value: ', str(textarea_value))
    print('Data type of n_clicks for textbox section: ', str(type(n_clicks)))
    print('Value of n_clicks for textbox section: ', str(n_clicks))
    if not n_clicks:  # covers both None (initial call) and 0
        return ''
    response = check_review.check_review(textarea_value)
    if response[0] == 0:
        result = html.H4('Sentiment: Review is NEGATIVE!', style={'color':'red'}, className='result')
    elif response[0] == 1:
        result = html.H4('Sentiment: Review is POSITIVE!', style={'color':'green'}, className='result')
    else:
        result = ' Result Unknown'
    return result
@app.callback(Output(component_id='dropdown-review-result', component_property='children'),
    [
        Input(component_id='review-dropdown', component_property='value'),
        Input(component_id='dropdown-click-button', component_property='n_clicks')
    ],
    )
def review_dropdown_function(review_dropdown, n_clicks):
    """Classify the review selected in the dropdown once the button is clicked.

    Bug fix: ``n_clicks`` is None until the first click (Dash initial call);
    ``None > 0`` raises TypeError on Python 3, so test truthiness instead.
    """
    print('Data type of review_dropdown: ', str(type(review_dropdown)))
    print('Value of review_dropdown: ', str(review_dropdown))
    print('Data type of n_clicks for review_dropdown section: ', str(type(n_clicks)))
    print('Value of n_clicks for review_dropdown section: ', str(n_clicks))
    if not n_clicks:  # covers both None (initial call) and 0
        return ''
    response = check_review.check_review(review_dropdown)
    if response[0] == 0:
        result = html.H4('Sentiment: Review is NEGATIVE!', style={'color':'red'}, className='result')
    elif response[0] == 1:
        result = html.H4('Sentiment: Review is POSITIVE!', style={'color':'green'}, className='result')
    else:
        result = 'Result Unknown'
    return result
@app.callback(
    Output(component_id='bar-chart', component_property='figure'),
    [
        Input(component_id='bargraph-dropdown', component_property='value')
    ],
)
def update_bargraph(no_of_words):
    """Render a bar chart of the *no_of_words* most frequent review words."""
    print('Number of words: ', no_of_words)
    print(type(no_of_words))
    # Take the first N rows of the pre-sorted frequency table.
    top_words = df2.iloc[0:no_of_words, :]
    figure = px.bar(top_words, x='words', y='frequency')
    figure.update_traces(hovertemplate='Word:%{x}<br>Repeated: %{y} times<extra></extra>')
    figure.update_layout(title = {'font': {'family': 'Playfair Display', 'size': 26}, 'pad': {'l': 380}, 'text': 'Words v/s Frequency Chart'})
    return figure
@app.callback(
    Output(component_id='wordcloud-figure', component_property='figure'),
    [
        Input(component_id='wordcloud-dropdown', component_property='value')
    ]
)
def wordcloud(words_number):
    """Build a word-cloud figure from the pre-computed word frequencies.

    Bug fix: the original passed the *nltk.corpus.stopwords* module object as
    the ``stopwords=`` argument; WordCloud expects a collection of words, so
    the STOPWORDS set already imported from the wordcloud package is used.
    (With generate_from_frequencies() the stopword filter is advisory anyway,
    since the counts are precomputed.)  The local also no longer shadows this
    function's own name.
    """
    dff = pd.read_csv('data/words_frequency.csv')
    # NOTE(review): font path is '../data/...' while the CSV above is read from
    # 'data/...' — the working directories look inconsistent; confirm.
    cloud = WordCloud(font_path = '../data/arial.ttf', width=3000, height=2000,
                      background_color ='white',
                      stopwords = STOPWORDS,
                      min_font_size = 10, max_words = words_number).generate_from_frequencies(dff.set_index('words')['frequency'].to_dict())
    # Recolor every word near-black for readability.
    cloud.recolor(color_func=black_color_func)
    fig_wordcloud = px.imshow(cloud, template='seaborn', title='WordCloud of Etsy.com Reviews')
    fig_wordcloud.update_layout(margin=dict(l=20, r=20, t=60, b=20), title_font_size=26, title_font_family='Playfair Display', hovermode=False)
    fig_wordcloud.update_xaxes(visible=False)
    fig_wordcloud.update_yaxes(visible=False)
    return fig_wordcloud
#----------------------------------------------- Defining main function ----------------------------------------
def main():
    """Load the ML model, build the layout, open the browser and serve the app.

    Fixes: uses the module-level ``port`` in run_server (it was hard-coded
    to 8050, duplicating the global that open_browser already reads) and
    drops the dead local ``project_name`` assignment.
    """
    print('Starting the project...')
    load_model()
    global app
    app.title = 'Sentiment Analysis With Insights.'
    app.layout = create_app_ui()
    # run_server blocks, so open the browser from a timer fired 1s later.
    Timer(1, open_browser).start()
    app.run_server(port=port)
    # Server loop has exited: release the app reference.
    app = None
    print('Ending the project.')
#-------------------------------------------- Calling main function --------------------------------------------
if __name__ == '__main__':  # script entry point
    main()
|
# Modular-exponentiation demo: 3**(3**99) mod 10000, plus float truncation.
# (Original header comment was mojibake-garbled; rewritten.)
e = pow(3, pow(3, 99), 10000)
q = int(12.3434)
print(e)
print(q)
|
# @Title: Permutations II (LeetCode 47)
# @Author: 2464512446@qq.com
# @Date: 2019-11-29 17:57:08
# @Runtime: 44 ms
# @Memory: 11.5 MB
class Solution(object):
    """LeetCode 47 — all unique permutations of a list that may contain duplicates."""

    def permuteUnique(self, nums):
        """Return every distinct permutation of nums.

        Fix: the original's multi-byte comments were split across lines
        without a leading '#', which is a syntax error; they are rewritten
        in English.  The algorithm itself is unchanged.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        if len(nums) == 0:
            return []
        used = [False] * len(nums)
        res = []
        # Sort first so equal values sit next to each other; the DFS below
        # relies on this ordering to skip duplicate branches.
        nums.sort()
        self.__dfs(nums, 0, [], used, res)
        return res

    def __dfs(self, nums, index, pre, used, res):
        # One recursion level per position of the permutation being built.
        if index == len(nums):
            res.append(pre[:])  # copy: pre keeps mutating during backtracking
            return
        for i in range(len(nums)):
            # Pruning: when nums[i] equals its left neighbour and that
            # neighbour is not used on the current path, the identical
            # sub-tree was already explored — skip to avoid duplicates.
            if i > 0 and nums[i] == nums[i - 1] and not used[i - 1]:
                continue
            if not used[i]:
                used[i] = True
                pre.append(nums[i])
                self.__dfs(nums, index + 1, pre, used, res)
                # Backtrack.
                used[i] = False
                pre.pop()
|
'''
IC input shaping sens plots
'''
import warnings
warnings.simplefilter("ignore", UserWarning)
# Import the necessary python library modules
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import minimize
import os
import sys
import pdb
# Add my local path to the relevant modules list
sys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')
# Import my python modules
import InputShaping as shaping
import Boom_Crane as BC
import Generate_Plots as genplt
import si_phase_freq as si
# Use lab plot style
plt.style.use('Crawlab')
# Sweep of normalized initial-condition phase values (degrees, 0..360 in 5° steps).
normalized_phase = np.arange(0.,365.,5.)
normalized_amplitude = 0.8
# Warm-start shapers carried between optimizer iterations (set later in the sweep).
previous_pos_shaper = None
previous_neg_shaper = None
# Ensure that the folder we want to save to exists
if not os.path.exists('Figures/{}/Amp_{}'.format(sys.argv[0],normalized_amplitude)):
    os.makedirs('Figures/{}/Amp_{}'.format(sys.argv[0],normalized_amplitude))
# Result files: one row per phase value, plus the shaper impulses themselves.
data = open('Figures/{}/Amp_{}/Data_Single.txt'.format(sys.argv[0],normalized_amplitude),'w')
data.write('Duration Phase Amplitude Robust \n')
shapers = open('Figures/{}/Amp_{}/Shapers_Single.txt'.format(sys.argv[0],normalized_amplitude),'w')
shapers.write('Input Shaper \n')
# define constants
DEG_TO_RAD = np.pi / 180
G = 9.81
Boom=0.81
Cable=0.35
Amax=174.0
Vmax=17.4
Luff_vals = np.array([30.,50.])
Tmax=5.0
Tstep=0.01
normalized_amp=0
normphase=0
Startt=np.array([0.])
# Build the crane model; returns limits, lengths, timing grid and initial state.
p = BC.init_crane( Boom,
                   Cable,
                   Amax,
                   Vmax,
                   Luff_vals,
                   Tmax,
                   Tstep,
                   normalized_amp,
                   normphase,
                   Startt=Startt
                   )
[Amax,Vmax], l, r, StartTime, t_step,t,X0,Distance = p
# Zero the first two states to get the zero-initial-condition baseline.
U0 = np.array(X0)
U0[0] = 0.
U0[1] = 0.
# Array of initial luff angles
length_values = np.arange(1.0,0.05,-.01)
gamma_fin = Vmax*t[-1]*1.5/DEG_TO_RAD
# Initialize empty arrays
unshaped_amp = np.zeros([len(length_values)])
unshaped_response = np.zeros([len(t),len(U0)])
# Iterate across each initial condition
for i in np.arange(0,len(length_values)):
    # Force the luff angle and initial conditions to be the appropriate values
    l_iter = length_values[i]
    # Approximate natural frequency
    omega = np.sqrt((G - r * Vmax**2 * np.sin(U0[2]))/ l)
    # Pack the relevant values based on the initial condition
    p = [[Amax,Vmax], l_iter, r, np.array([0.]), t_step,t,U0,np.array([0.])]
    # Find the amplitude of an unshaped response to a step input given the initial conditions
    unshaped_response = BC.response(p,['Unshaped Accel'])
    #unshaped_response[:,0] /= DEG_TO_RAD
    # Sample the response half a period after the step.
    t_peak = np.pi / (omega)
    t_peak_step = np.round(t_peak/t_step).astype(int)
    # Determine the unshaped amplitude
    unshaped_amp[i] = omega * np.sqrt(unshaped_response[t_peak_step,0]**2 + (unshaped_response[t_peak_step,1]/omega)**2)
# Natural frequencies (rad/s) matching each cable length in the sweep.
omega_n_values = np.sqrt(9.81 / length_values)
p = [[Amax,Vmax], l, r, StartTime, t_step,t,X0,Distance]
amp_freq,amp_amp,amp_phase,amp_offset = BC.get_impulse_amp(p)
# Bundle everything the robustness() function below needs.
amp_args = [amp_freq,amp_amp,amp_phase,amp_offset, unshaped_amp,omega_n_values]
def robustness(x, f_min, f_max, zeta, p, amp_args, vtol, omega_n):
    """Estimate the normalized robustness bandwidth of an input shaper.

    Sweeps 50 frequencies in [f_min, f_max] (Hz), evaluates the residual
    vibration of shaper ``x`` (rows of [time, amplitude] impulses) including
    the initial-condition contribution, and returns the width of the band in
    which vibration stays below ``vtol``, normalized by ``omega_n`` (rad/s).
    Returns 0 when no such band exists.

    Fixes vs. original: ``is None`` instead of ``== None``, the unused
    ``num_impulses`` local and a dead ``elif ...: continue`` branch removed.
    Assumes Distance[0] != 0 — TODO confirm with callers.
    """
    amp_freq, amp_amp, amp_phase, amp_offset, unshaped_amp, omega_n_values = amp_args
    [Amax, Vmax], l, r, StartTime, t_step, t, X0, Distance = p
    # Direction of travel (+1/-1).
    sign = np.abs(Distance[0]) / Distance[0]
    x = np.asarray(x)
    freq_min = None
    freq_max = None
    # Unshaped impulse amplitude interpolated at a given damped frequency.
    impulse_amp = lambda w: np.interp(w, omega_n_values, unshaped_amp)
    num_points = 50
    vib = np.zeros(num_points,)
    for ii, freq in enumerate(np.linspace(f_min * (2*np.pi), f_max * (2*np.pi), num_points)):
        wd = freq * np.sqrt(1 - zeta**2)
        cos_term = np.sum(x[:,1] * impulse_amp(wd) * np.exp(zeta*freq*x[:,0]) * np.cos(wd*x[:,0]))
        sin_term = np.sum(x[:,1] * impulse_amp(wd) * np.exp(zeta*freq*x[:,0]) * np.sin(wd*x[:,0]))
        # Calculate the phase angle of the initial conditions
        phase = -np.arctan2((np.sqrt(1 - zeta**2) * X0[0]), X0[1]/(freq) + zeta * X0[0])
        # Shifts the approximate phase angle to account for non-instantaneous acceleration
        phase_shift = (0.5*Vmax/Amax) * freq
        phase = phase - phase_shift
        # Amplitude contributed by the non-zero initial conditions.
        null_amp = freq * np.sqrt(X0[0]**2 + (X0[1]/freq)**2)
        cos_term += sign * null_amp * np.exp(zeta*phase) * np.cos(phase * np.sqrt(1 - zeta**2))
        sin_term += sign * null_amp * np.exp(zeta*phase) * np.sin(phase * np.sqrt(1 - zeta**2))
        vib[ii] = np.exp(-zeta * freq * x[-1,0]) * np.sqrt((cos_term)**2 + (sin_term)**2)
        # Normalize by the unshaped-plus-IC amplitude at this frequency.
        vib[ii] /= np.sqrt(
            ( impulse_amp(wd) * sign
            + sign * null_amp * np.exp(zeta*phase) * np.cos(phase))**2
            + (sign * null_amp * np.exp(zeta*phase) * np.sin(phase))**2
        )
        # Track the first and last frequencies with vibration under tolerance.
        if vib[ii] < vtol and freq_min is None:
            freq_min = freq
        elif vib[ii] < vtol and freq_min is not None:
            freq_max = freq
    if freq_min is None or freq_max is None:
        robust = 0
    else:
        robust = (freq_max - freq_min) / omega_n
    return robust
previous_shaper = None
# Sweep all normalized phase values: design an SI shaper for each and record
# its duration and robustness bandwidth to the open result files.
for i in np.arange(0,len(normalized_phase)):
    print('Phase = {}'.format(normalized_phase[i]))
    # define constants
    DEG_TO_RAD = np.pi / 180
    G = 9.81
    Boom=0.81
    Cable=0.35
    Amax=174.0
    Vmax=17.4
    Luff_vals = np.array([30.,50.])
    Tmax=5.0
    Tstep=0.01
    normalized_amp=normalized_amplitude
    normphase=normalized_phase[i]
    Startt=np.array([0.])
    # Rebuild the crane model with this iteration's phase value.
    p = BC.init_crane( Boom,
                       Cable,
                       Amax,
                       Vmax,
                       Luff_vals,
                       Tmax,
                       Tstep,
                       normalized_amp,
                       normphase,
                       Startt=Startt
                       )
    [Amax,Vmax], l, r, StartTime, t_step,t,X0,Distance = p
    # Pendulum-mode natural frequency in Hz for this cable length.
    omega = np.sqrt(G/l) / (2*np.pi)
    # Solve for the SI shaper (warm-started from previous_pos_shaper).
    res,shaper = si.si(0,1.,previous_pos_shaper,omega,omega, 0.,0.00,0.01,p,amp_args,iterating=True)
    previous_shaper = shaper
    # Robustness over ±10% of the design frequency.
    robust = robustness(shaper, omega - .1*omega, omega + .1*omega, 0., p, amp_args,0.05,omega * 2 * np.pi)
    # Shaper duration normalized by the vibration period tau.
    tau = 1 / omega
    shaper_duration = shaper[-1,0] / tau
    data.write('{} {} {} {}\n'.format(
        shaper_duration,
        normalized_phase[i],normalized_amplitude,
        robust))
    shapers.write('{}\n'.format(np.hstack((shaper[:,1], shaper[:,0]))))
    # Plot all of the relevant response values
    ################################################
    ################################################
    '''
    # Determine the folder where the plots will be saved
    folder = 'Figures/{}/{}_Hz'.format(
        sys.argv[0],
        np.round(np.sqrt(G / l) / (2*np.pi),2)
    )
    frequency,si_robustness = genplt.ic_sensplot(shaper,0.1*omega,5*omega,omega,0.0,p)
    freq_small,si_small = genplt.ic_sensplot(shaper,0.5*omega,1.5*omega,omega,0.0,p)
    vtol_small = np.ones_like(si_small[:,0]) * 5
    genplt.compare_responses(frequency / omega,
        si_robustness[:,0],'SI-IC, I=0.1',
        name_append='Sens {} {}'.format(normalized_phase[i],normalized_amplitude[ii]),
        xlabel=r'Normalized Frequency $\frac{\omega}{\omega_n}$',ylabel='Percent Vibration',
        folder=folder,grid=False,save_data=False,ncol=1
        )
    genplt.compare_responses(freq_small / omega,
        si_small[:,0],'SI-IC, I=0.1',
        name_append='Sens_Near {} {}'.format(normalized_phase[i],normalized_amplitude[ii]),
        xlabel=r'Normalized Frequency $\frac{\omega}{\omega_n}$',ylabel='Percent Vibration',
        folder=folder,grid=False,save_data=False,ncol=1
        )
    '''
# Flush and close the result files.
data.close()
shapers.close() |
from .ml_models.SVC import OffloadSVM
from .ml_models.PCA import OffloadPCA
from .ml_models.KMeans import OffloadKMeans
from .ml_models.GaussianNB import OffloadGNB
from .ml_models.LinearRegression import OffloadLR
from .ml_models.Perceptron import OffloadPerceptron
from .ml_models.LogisticRegression import OffloadLogit
from .ml_models.LinearDiscriminantAnalysis import OffloadLDA
from .ml_models.PassiveAggressiveClassifier import OffloadPA
from .ml_models.QuadraticDiscriminantAnalysis import OffloadQDA
class Offload:
    """Validate a trained scikit-learn model and dispatch it to the matching
    micro-learn offloader, which can emit Arduino inference code.

    Refactors vs. original: ``__getstate__()`` is called once instead of
    twice, the fitted-attribute lookup and offloader dispatch use mappings
    instead of long index-based elif chains, and ``export_to_arduino``
    closes its file via a context manager.
    """

    supported_algorithms = ["LinearDiscriminantAnalysis", "QuadraticDiscriminantAnalysis", "GaussianNB", "SVC", "LinearSVC", "Perceptron", "LinearRegression", "LogisticRegression", "PassiveAggressiveClassifier", "KMeans", "PCA"]

    def __init__(self, model, optional=None):
        # optional: a fitted StandardScaler applied before inference.
        self.optional = optional
        self.check_model_validity(model)
        self.model = model
        self.algorithm = self.get_algorithm(model)
        self.offloader = self.get_offloader()

    def get_algorithm(self, model):
        """Algorithm name parsed from the estimator repr, e.g. 'SVC(C=1.0)' -> 'SVC'."""
        return model.__repr__().split('(')[0]

    def is_algorithm_supported(self, model):
        """True when *model* looks like a scikit-learn estimator of a supported type."""
        try:
            state = model.__getstate__()
        except AttributeError:
            return False
        # Fitted sklearn estimators carry their version in the pickled state.
        if "_sklearn_version" not in state:
            return False
        return self.get_algorithm(model) in self.supported_algorithms

    def is_model_trained(self, model):
        """Heuristic fit-check: look for an attribute that .fit() sets."""
        fitted_attr = {
            "StandardScaler": "n_samples_seen_",
            "LinearRegression": "singular_",
            "KMeans": "cluster_centers_",
            "PCA": "n_components_",
        }.get(self.get_algorithm(model), "classes_")
        return fitted_attr in model.__dict__

    def is_model_binary(self, model):
        """Non-classifiers always pass; classifiers must have exactly 2 classes."""
        if self.get_algorithm(model) in ("LinearRegression", "KMeans", "PCA"):
            return True
        return len(model.__dict__["classes_"]) == 2

    def get_offloader(self):
        """Instantiate the offloader class matching ``self.algorithm``."""
        # SVC/LinearSVC and PCA additionally receive the optional scaler.
        if self.algorithm in ("SVC", "LinearSVC"):
            return OffloadSVM(self.model, self.optional)
        if self.algorithm == "PCA":
            return OffloadPCA(self.model, self.optional)
        dispatch = {
            "LinearDiscriminantAnalysis": OffloadLDA,
            "QuadraticDiscriminantAnalysis": OffloadQDA,
            "GaussianNB": OffloadGNB,
            "Perceptron": OffloadPerceptron,
            "LinearRegression": OffloadLR,
            "LogisticRegression": OffloadLogit,
            "PassiveAggressiveClassifier": OffloadPA,
            "KMeans": OffloadKMeans,
        }
        # check_model_validity() guarantees the algorithm is supported.
        return dispatch[self.algorithm](self.model)

    def check_model_validity(self, model):
        """Raise TypeError unless *model* is a supported, fitted, binary estimator
        (and the optional second argument, when given, is a fitted StandardScaler)."""
        if not self.is_algorithm_supported(model):
            raise TypeError("Input ML model not supported! Only LDA, QDA, GNB, LR, Logit, SVM, PA, KMeans, PCA and Perceptron of scikit-learn are supported.")
        if not self.is_model_trained(model):
            raise TypeError("Input ML model not trained on a dataset! First .fit() on a dataset and then offload.")
        if not self.is_model_binary(model):
            raise TypeError("Input ML model trained on a multiclass dataset! Only binary-class models are supported.")
        if self.optional:
            if self.get_algorithm(self.optional) != "StandardScaler":
                raise TypeError("Only StandardScaler is supported as the second argument.")
            elif not self.is_model_trained(self.optional):
                raise TypeError("First fit StandardScaler on the training dataset and then offload.")

    def get_params(self):
        """Forward the extracted inference parameters from the offloader."""
        return self.offloader.get_params()

    def export_to_arduino(self, path):
        """Append the generated Arduino sketch for this model to *path*."""
        preamble = "//This code was autogenerated using micro-learn.\n//Use the dummy variable array 'data' to interact with the ML inference.\n\n"
        code = preamble + self.offloader.get_arduino_code()
        with open(path, 'a') as f:
            f.write(code)
|
from flask_script import Manager
from bookstore import app, db, Author, Book
manager = Manager(app)
# reset the database and create two artists
@manager.command
def deploy():
    """Drop and recreate all tables, then seed three authors and three books.

    Bug fix: the original did ``db.session.add(jk)`` — ``jk`` was never
    defined (NameError), so the Jane Austen row was never committed; the
    variable is actually named ``jane``.
    """
    db.drop_all()
    db.create_all()
    jane = Author(name='Jane Austen', intro='Jane Austen was an English novelist known primarily for her six major novels, got the British landed gentry at the end of the 18th century.')
    yh = Author(name='Yu Hua', intro='Yu Hua is a Chinese author, born April 3, 1960 in Hangzhou, Zhejiang province.')
    scott = Author(name='F. Scott Fitzgerald', intro='an American novelist and short story writer, whose works illustrate the Jazz Age')
    book1=Book(name='Pride and Prejudice', year='1813', author_id=1, summary='A romance novel by Jane Austen, first published in 1813. The story charts the emotional development of the protagonist, Elizabeth Bennet, who learns the error of making hasty judgments and comes to appreciate the difference between the superficial and the essential. The comedy of the writing lies in the depiction of manners, education, marriage, and money in the British Regency.')
    book2=Book(name='To Live', year='1993', author_id=2, summary='To Live is one of the most representative work by Yu Hua. The story begins with the narrator traveling through the countryside to collect folk songs and local legends and starting to hear a old peasant talking about his experiences, which encompass many significant historical events in China including the Great Leap Forward, Three-anti and Five-anti Campaigns and Cultural Revolution. ')
    # NOTE(review): year='1922' is the novel's setting; it was published in 1925 — confirm intent.
    book5=Book(name='The Great Gatsby', year='1922', author_id=3, summary='The Great Gatsby is a 1925 novel written by American author F. Scott Fitzgerald that follows a cast of characters living in the fictional town of West Egg on prosperous Long Island in the summer of 1922. ')
    db.session.add(yh)
    db.session.add(jane)  # was: db.session.add(jk) — undefined name
    db.session.add(scott)
    db.session.add(book1)
    db.session.add(book2)
    db.session.add(book5)
    db.session.commit()
if __name__ == "__main__":  # entry point: run the Flask-Script CLI (e.g. the deploy command)
    manager.run()
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Device(models.Model):
    """A user-owned device; its Position rows (see related_name='positions') track it."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)  # owner; devices die with the user
    uuid = models.CharField(max_length=255)  # device identifier — plain CharField, not validated as a UUID here
    name = models.CharField(max_length=255)  # human-readable label
class Position(models.Model):
device = models.ForeignKey(Device, related_name='positions', on_delete=models.CASCADE)
lat = models.FloatField()
long = models.FloatField()
timestamp = models.FloatField() |
from itertools import product
import random
class ButtonState(object):
    """Cell marker values used on the board."""
    X = "X"
    O = "O"
    empty = "0"


class GameStatus(object):
    """Possible outcomes reported by TicTacToe.get_game_status()."""
    Won = 'W'
    Loss = 'L'
    InProgress = 'I'


class TicTacToe():
    """A 3x3 tic-tac-toe board with win detection."""

    def __init__(self):
        # 3x3 grid initialised to the 'empty' marker.
        self.grid = [[ButtonState.empty for i in range(3)] for j in range(3)]

    def input_to_grid(self, value, row, column):
        """Place *value* at (row, column) and return the whole grid."""
        self.grid[row][column] = value
        return self.grid

    def divide(self, i, j):
        """Return i / j, or None when j is zero.

        Bug fix: the original divided self.x / self.y — attributes that are
        never defined — so every call raised an uncaught AttributeError.
        """
        try:
            return i / j
        except ZeroDivisionError:
            return None

    def get_game_status(self, user):
        """Return GameStatus.Won if *user*'s marker fills any row, column or
        diagonal, otherwise GameStatus.InProgress.

        Bug fixes vs. original: any marker can be checked (only "X" was
        handled); the diagonal test counted all five cells where (i + j) is
        even, reporting false wins; and a non-winning board now returns
        GameStatus.InProgress instead of falling through to None.
        """
        lines = []
        for i in range(3):
            lines.append([self.grid[i][j] for j in range(3)])   # row i
            lines.append([self.grid[j][i] for j in range(3)])   # column i
        lines.append([self.grid[k][k] for k in range(3)])       # main diagonal
        lines.append([self.grid[k][2 - k] for k in range(3)])   # anti-diagonal
        for line in lines:
            if all(cell == user for cell in line):
                return GameStatus.Won
        return GameStatus.InProgress
# Quick manual smoke test of the TicTacToe class (calls left commented out).
a = TicTacToe()
# # a.input_to_grid("X",0,0)
# # a.input_to_grid("X",1,0)
# # a.input_to_grid("X",2,0)
# print(a.grid)
# a.get_game_status(ButtonState.X)
|
import random
import pprint
import time
import sys # sys.setrecursionlimit() | We will use it to avoid RecursionError for quick sort
def spaceUp(n):
    """Print n blank lines — a simple console-spacing helper."""
    print("\n" * n, end="")
class QuickSort(object):
    """Quick sort with three pivot strategies: 'standard' (last element,
    Lomuto partition), 'random', and 'medianOfThree'.

    Bug fixes vs. original:
    - medianOfThree located the median with arr.index(), which returns the
      FIRST occurrence in the whole list: with duplicate values, or on any
      sub-array, the returned index could lie outside [low, high] and
      corrupt the partition.  It also tested len(arr) instead of the
      sub-array size.
    - partitionStrategy's two-pointer scan never advanced when both scan
      positions held values equal to the pivot, so arrays containing
      duplicates of the pivot hung in an infinite loop.  The chosen pivot
      is now swapped to arr[high] and the proven Lomuto partition reused.
    """

    def __init__(self, arr):
        self.arr = arr

    def medianOfThree(self, arr, low, high):
        """Return the index in [low, high] holding the median of
        arr[low], arr[mid], arr[high]; fall back to a random index
        when the sub-array has fewer than three elements."""
        if high - low + 1 >= 3:
            mid = (low + high) // 2
            # Order the three candidate indices by their values; take the middle.
            candidates = sorted([low, mid, high], key=lambda idx: arr[idx])
            return candidates[1]
        return random.randint(low, high)

    def partitionStandard(self, arr, low, high):
        """Lomuto partition of arr[low..high] with arr[high] as the pivot.
        Returns the final pivot index."""
        pivot = arr[high]
        # i = last index of the '< pivot' region; j scans the sub-array.
        i, j = low - 1, low
        while j < high:
            if arr[j] < pivot:
                i += 1
                arr[i], arr[j] = arr[j], arr[i]
            j += 1
        # Place the pivot just after the '< pivot' region.
        arr[i + 1], arr[high] = arr[high], arr[i + 1]
        return i + 1

    def partitionStrategy(self, arr, low, high, strategy):
        """Partition arr[low..high] after selecting the pivot by *strategy*
        ('random' or 'medianOfThree').  Returns the final pivot index."""
        if strategy == 'random':
            pivotIndex = random.randint(low, high)
        elif strategy == 'medianOfThree':
            pivotIndex = self.medianOfThree(arr, low, high)
        # Move the chosen pivot out of the way, then reuse the Lomuto scheme
        # (terminates correctly even with duplicates of the pivot value).
        arr[pivotIndex], arr[high] = arr[high], arr[pivotIndex]
        return self.partitionStandard(arr, low, high)

    def quickSort(self, arr, low, high, strategy):
        """Recursively sort arr[low..high] in place using *strategy*."""
        if low < high:
            if strategy == "standard":
                split = self.partitionStandard(arr, low, high)
            else:
                split = self.partitionStrategy(arr, low, high, strategy)
            self.quickSort(arr, split + 1, high, strategy)
            self.quickSort(arr, low, split - 1, strategy)
def analyseBehaviorAndRuntime():
    """Interactively benchmark the three quick-sort strategies.

    Prompts for either a literal array or an element count n (the array is
    then range(1, n+1) reversed, i.e. fully descending), sorts one copy with
    each strategy, and prints the wall-clock time per strategy; optionally
    prints the arrays before/after sorting.

    NOTE(review): user input is evaluated with eval(), which executes
    arbitrary expressions — acceptable for a local exercise, unsafe for
    anything exposed to untrusted input.
    """
    # Arrays to analyse
    array0 = None # Standard strategy
    array1 = None # Random pivot strategy
    array2 = None # The median of three strategy
    while True:
        # Ask the user if he wants to give an array or the amount of elements that will be completly unsorted ( [::-1] )
        print("1. Give an array that you want to sort")
        print("2. Give a number of elements that will be completely unsorted in an array")
        userChoice = input("> ")
        try:
            # Check user input
            userChoice = eval(userChoice)
            if userChoice not in list(range(1, 3)):
                raise Exception
            else:
                if userChoice == 1:
                    # Given array
                    while True:
                        userGivenArray = input("Write here the array that you want to sort with all different strategies ( e. g : [3, 2, 1] ) -- > ")
                        try:
                            # Check user input for the given array
                            userGivenArray = eval(userGivenArray)
                            # Try to see if the ".append()" method is in the userGivenArray, if that is the case then we know the user gave us a list as input
                            if userGivenArray.append:
                                # The user gave us a list, so all the arrays ( 0 1 and 2 ) will be this list
                                array0 = userGivenArray.copy()
                                array1 = userGivenArray.copy()
                                array2 = userGivenArray.copy()
                                break
                            raise Exception
                        except Exception:
                            print("The input must be an array ( e. g : [3, 2, 1] )")
                            spaceUp(2)
                            continue
                elif userChoice == 2:
                    # Amount of items
                    while True:
                        amountOfItems = input("Write here the amount of items that you want your array to have that will be completely unsorted -- > ")
                        try:
                            # Check user input
                            amountOfItems = int(amountOfItems)
                            if amountOfItems <= 0:
                                raise Exception
                            else:
                                # Fully descending array: worst case for the standard strategy.
                                array0 = list(range(1, amountOfItems + 1, 1))[::-1]
                                array1 = array0.copy()
                                array2 = array0.copy()
                                break
                        except Exception:
                            print("Your amount of items must be a positive integer bigger than 0")
                            spaceUp(2)
                            continue
            break
        except Exception:
            print("Choose something between 1 and 2")
            spaceUp(2)
    # See if the user wants to see the sorted arrays
    seeArrays = None
    while True:
        userInput = input("Do you want to see the sorted arrays ( y | n ) -- > ")
        if userInput.lower() == "y" or userInput.lower() == "yes":
            seeArrays = True
        elif userInput.lower() == "n" or userInput.lower() == "no":
            seeArrays = False
        else:
            print("Choose something between y ( yes ) or n ( no )")
            spaceUp(2)
            continue
        break
    spaceUp(5)
    # Create the quick sort objects
    QuickSort_StandardAlgorithm = QuickSort(array0)
    QuickSort_RandomPivot_Strategy = QuickSort(array1)
    QuickSort_TheMedianOfThree_Strategy = QuickSort(array2)
    sys.setrecursionlimit(pow(len(array0), 2)) # Set the recursion limit to another number in order to avoid RecursionError for quick sort
    if seeArrays:
        # Verbose mode: print each array before and after sorting.
        print("***** STANDARD ALGORITHM *****")
        spaceUp(3)
        print("Array before quick sort -- > ")
        pprint.pprint(array0, indent = 20)
        startTime_QuickSort_StandardAlgorithm = time.perf_counter()
        QuickSort_StandardAlgorithm.quickSort(array0, 0, len(array0) - 1, 'standard')
        timeNeeded_QuickSort_StandardAlgorithm = time.perf_counter() - startTime_QuickSort_StandardAlgorithm
        print("Array after quick sort -- > ")
        pprint.pprint(array0, indent = 20)
        spaceUp(3)
        print("***** STANDARD ALGORITHM *****")
        spaceUp(5)
        print("***** RANDOM PIVOT STRATEGY *****")
        spaceUp(3)
        print("Array before quick sort -- > ")
        pprint.pprint(array1, indent = 20)
        startTime_QuickSort_RandomPivotStrategy = time.perf_counter()
        QuickSort_RandomPivot_Strategy.quickSort(array1, 0, len(array1) - 1, 'random')
        timeNeeded_QuickSort_RandomPivot_Strategy = time.perf_counter() - startTime_QuickSort_RandomPivotStrategy
        print("Array after quick sort -- > ")
        pprint.pprint(array1, indent = 20)
        spaceUp(3)
        print("***** RANDOM PIVOT STRATEGY *****")
        spaceUp(5)
        print("***** THE MEDIAN OF THREE STRATEGY *****")
        spaceUp(3)
        print("Array before quick sort -- >")
        pprint.pprint(array2, indent = 20)
        startTime_QuickSort_TheMedianOfThreeStrategy = time.perf_counter()
        QuickSort_TheMedianOfThree_Strategy.quickSort(array2, 0, len(array2) - 1, 'medianOfThree')
        timeNeeded_QuickSort_TheMedianOfThree_Strategy = time.perf_counter() - startTime_QuickSort_TheMedianOfThreeStrategy
        print("Array after quick sort -- > ")
        pprint.pprint(array2, indent = 20)
        spaceUp(3)
        print("***** THE MEDIAN OF THREE STRATEGY *****")
        spaceUp(15)
        print("Runtime behavior for : ")
        print(" ~ Standard algorithm -- > {0}".format(timeNeeded_QuickSort_StandardAlgorithm))
        print(" ~ Random pivot strategy -- > {0}".format(timeNeeded_QuickSort_RandomPivot_Strategy))
        print(" ~ The Median Of Three strategy -- > {0}".format(timeNeeded_QuickSort_TheMedianOfThree_Strategy))
    elif not seeArrays:
        # Quiet mode: only time the three sorts and report the durations.
        startTime_QuickSort_StandardAlgorithm = time.perf_counter()
        QuickSort_StandardAlgorithm.quickSort(array0, 0, len(array0) - 1, 'standard')
        timeNeeded_QuickSort_StandardAlgorithm = time.perf_counter() - startTime_QuickSort_StandardAlgorithm
        startTime_QuickSort_RandomPivot_Strategy = time.perf_counter()
        QuickSort_RandomPivot_Strategy.quickSort(array1, 0, len(array1) - 1, 'random')
        timeNeeded_QuickSort_RandomPivot_Strategy = time.perf_counter() - startTime_QuickSort_RandomPivot_Strategy
        startTime_QuickSort_TheMedianOfThree_Strategy = time.perf_counter()
        QuickSort_TheMedianOfThree_Strategy.quickSort(array2, 0, len(array2) - 1, 'medianOfThree')
        timeNeeded_QuickSort_TheMedianOfThree_Strategy = time.perf_counter() - startTime_QuickSort_TheMedianOfThree_Strategy
        print("Runtime behavior for : ")
        print(" ~ Standard algorithm -- > {0}".format(timeNeeded_QuickSort_StandardAlgorithm))
        print(" ~ Random pivot strategy -- > {0}".format(timeNeeded_QuickSort_RandomPivot_Strategy))
        print(" ~ The Median Of Three strategy -- > {0}".format(timeNeeded_QuickSort_TheMedianOfThree_Strategy))
# Top-level menu loop: run the benchmark repeatedly until the user exits.
# NOTE(review): eval() on raw user input — unsafe outside a local exercise.
while True:
    spaceUp(10)
    print("Choose something : ")
    print("1. Analyse runtime and behavior for the quick sort strategies")
    print("2. Exit")
    userChoice = input("> ")
    try:
        # Check user input
        userChoice = eval(userChoice)
        if userChoice not in list(range(1, 3)):
            raise Exception
        if userChoice == 1:
            spaceUp(10)
            analyseBehaviorAndRuntime()
            spaceUp(5)
        elif userChoice == 2:
            break
    except Exception as _e_:
        print("Error -- > {0}".format(str(_e_)))
        print("Choose something between 1 and 2")
continue |
# import modules
import csv
import pandas as pd
# csv delimiter to be used, shared by csv_reader and csv_writer
csv_delimiter = ','


def csv_reader(file_path):
    """Read a delimiter-separated file with no header row into a 2-D numpy array."""
    return pd.read_csv(
        file_path, delimiter=csv_delimiter, header=None).values


def csv_writer(contents, file_name):
    """Write an iterable of rows to *file_name*, quoting every field.

    Bug fix: the file is now opened with ``newline=''`` as the csv module
    requires; without it the writer's '\\r\\n' record terminator gets
    translated again on Windows, producing a blank line between records.
    """
    with open(file_name, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=csv_delimiter,
                            quoting=csv.QUOTE_ALL)
        for content in contents:
            writer.writerow(content)
|
# CSS selectors, presumably for UI-test page objects — names suggest page
# regions and dialog overlays; confirm against the pages under test.
UPPER_PAGE_ELEMENT = '.b-lay-notifications'  # element near the top of the page
BOTTOM_PAGE_ELEMENT = '.b-footer'  # page footer
PRELOADER_SPINNER = '[data-cid="spinner"]'  # loading spinner
DIALOG_MASK_H = '.h-dialog-mask'  # dialog overlay mask ('h-' variant)
DIALOG_MASK_JS = '.js-dialog-mask'  # dialog overlay mask ('js-' variant)
NEW_DOCUMENT_YELLOW_ROW = '.h-row_blink_yellow'  # row highlighted (blinking yellow) for a new document
|
from __future__ import print_function
import numpy as np
from sklearn.externals import joblib
from sklearn import preprocessing
import tensorflow as tf
import os
import imageio
from skimage.transform import resize
import re
import timeit
from ops import *
from helpers import (tokenize, selectFrequentAnswers, get_tokens, final_tokens,
encode_questions, loadGloveModel, getQuesVecFromModel)
import sys
temp = '*'*10
print(temp)
# Filename template for COCO images: (split_dir, split, image_id).
imdir = '%s/COCO_%s_%012d.jpg'


def _read_lines(path):
    """Return the UTF-8-decoded lines of a text file.

    The original open(...).read().decode(...) chains never closed their
    file handles; this helper closes each file deterministically.
    (Python 2: .decode('utf8') turns the raw str into unicode.)
    """
    with open(path, 'r') as f:
        return f.read().decode('utf8').splitlines()


questions_train = _read_lines('data/preprocessed/questions_train2014.txt')
questions_lengths_train = _read_lines('data/preprocessed/questions_lengths_train2014.txt')
answers_train = _read_lines('data/preprocessed/answers_train2014_modal.txt')
images_train = _read_lines('data/preprocessed/images_train2014.txt')
images_train_path = _read_lines('data/preprocessed/images_train2014_path.txt')
answers_train_all = _read_lines('data/preprocessed/answers_train2014_all.txt')
#get_unique_images_train()
# Keep only QA pairs whose answer is among the `max_answers` most frequent.
max_answers = 1000
questions_train, questions_lengths_train, answers_train, images_train, \
    images_train_path, answers_train_all = selectFrequentAnswers(
        questions_train,
        questions_lengths_train,
        answers_train,
        images_train,
        images_train_path,
        answers_train_all,
        max_answers)
print('ques_train, size = {}, sample = {}'.format(len(questions_train), questions_train[0]))
print('ques_lengths, size = {}, sample = {}'.format(len(questions_lengths_train), questions_lengths_train[0]))
print('ans_train, size = {}, sample = {}'.format(len(answers_train), answers_train[0]))
print('imag_train, size = {}, sample = {}'.format(len(images_train), images_train[0]))
print('imag_train_path, size = {}, sample = {}'.format(len(images_train_path), images_train_path[0]))
print('ans_train_all, size = {}, sample = {}'.format(len(answers_train_all), answers_train_all[0]))
print(temp)
print(temp)
questions_val = _read_lines('data/preprocessed/questions_val2014.txt')
questions_lengths_val = _read_lines('data/preprocessed/questions_lengths_val2014.txt')
answers_val = _read_lines('data/preprocessed/answers_val2014_modal.txt')
images_val = _read_lines('data/preprocessed/images_val2014_all.txt')
images_val_path = _read_lines('data/preprocessed/images_val2014_path.txt')
answers_val_all = _read_lines('data/preprocessed/answers_val2014_all.txt')
# "sampel" typo in the original log message fixed below.
print('ques_val, size = {}, sample = {}'.format(len(questions_val), questions_val[0]))
print('ques_lengths_val, size = {}, sample = {}'.format(len(questions_lengths_val), questions_lengths_val[0]))
print('ans_val, size = {}, sample = {}'.format(len(answers_val), answers_val[0]))
print('imag_val, size = {}, sample = {}'.format(len(images_val), images_val[0]))
print('imag_val_path, size = {}, sample = {}'.format(len(images_val_path), images_val_path[0]))
print('ans_val_all, size = {}, sample = {}'.format(len(answers_val_all), answers_val_all[0]))
print(temp)
print(temp)
# Tokenize the raw question strings once; the GloVe embedding path below
# consumes these token lists.
ques_tokens_train = get_tokens(questions_train)
ques_tokens_val = get_tokens(questions_val)
# NOTE(review): the block below is a disabled vocabulary/UNK-encoding path,
# kept verbatim for reference; the script currently uses GloVe vectors instead.
'''
counts = {}
count_thr = 5
for i, tokens in enumerate(ques_tokens_train):#change to train
for token in tokens:
counts[token] = counts.get(token, 0) + 1
cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
print('top words and their counts:')
print('\n'.join(map(str,cw[:20])))
# print some stats
total_words = sum(counts.itervalues())
print('total words:', total_words)
bad_words = [w for w,n in counts.iteritems() if n <= count_thr]
vocab = [w for w,n in counts.iteritems() if n > count_thr]
bad_count = sum(counts[w] for w in bad_words)
print('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts)))
print('number of words in vocab would be %d' % (len(vocab), ))
print('number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words))
print('inserting the special UNK token')
vocab.append('UNK')
vocab_file = 'data/preprocessed/vocab_list.txt'.format(count_thr)
with open(vocab_file, 'w') as f:
for word in vocab:
f.write((word + '\n').encode('utf8'))
itow = {i+1:w for i,w in enumerate(vocab)}
wtoi = {w:i+1 for i,w in enumerate(vocab)}
ques_tokens_train_final = final_tokens(ques_tokens_train, counts, count_thr)
print('Sample train question tokens ==> {}'.format(ques_tokens_train[0]))
print('Total number of train questions ==> {}'.format(len(ques_tokens_train)))
ques_tokens_val_final = final_tokens(ques_tokens_val, counts, count_thr)
print('Sample validation question tokens ==> {}'.format(ques_tokens_val[0]))
print('Total number of validation questions ==> {}'.format(len(ques_tokens_val)))
ques_array_train = encode_questions(ques_tokens_train_final, wtoi, 25)
ques_array_val = encode_questions(ques_tokens_val_final, wtoi, 25)
print('Encoded train questions array shape ==> {}'.format(ques_array_train.shape))
print('Encoded validation questions array shape ==> {}'.format(ques_array_val.shape))
print(temp)
'''
# Embed questions with pretrained GloVe vectors.
model = loadGloveModel()
ques_vec_train = getQuesVecFromModel(ques_tokens_train, model)
ques_vec_val = getQuesVecFromModel(ques_tokens_val, model)
print('{} Creating labels for training answers {}'.format('*'*10, '*'*10))
print('Number of training answers ==> {}'.format(len(answers_train)))
print('A sample training answer ==> {}'.format(answers_train[5]))
# Integer-encode the answer strings; the fitted encoder is persisted so a
# later inference step can map predicted class ids back to answer text.
labelencoder = preprocessing.LabelEncoder()
labelencoder.fit(answers_train)
nb_classes = len(list(labelencoder.classes_))
ans_array_train = labelencoder.transform(answers_train)
joblib.dump(labelencoder,'data/labelencoder.pkl')
print('{} Done creating labels for training answers {}'.format('*'*10, '*'*10))
print('')
img_shape = [256, 256, 3]  # all images are resized to 256x256 RGB
print('{} Writing tfrecord file for validation data {}'.format(temp, temp))
N = len(images_val_path)
out_filepath = 'data/val_data.tfrecords'
# Remove a stale output file from a previous run, if any.
if os.path.exists(out_filepath):
    os.unlink(out_filepath)
out_file = tf.python_io.TFRecordWriter(out_filepath)
start = timeit.default_timer()
for i in range(N):
    img_path = os.path.join('/fs/project/PAS1315/VQA/Images/', images_val_path[i])
    img = imageio.imread(img_path)
    # Greyscale images are replicated across 3 channels to keep a fixed shape.
    if len(img.shape) == 2:
        print('Image {} is an greyscale image. Converted to RGB. Image shape ==> {}'.format(i+1, img.shape))
        img = np.stack([img, img, img], axis = 2)
    img = resize(img, img_shape[:2], order = 3)  # order-3 (cubic) spline resize
    img = img.astype(np.float32)
    ques = ques_vec_val[i].astype(np.float32)
    #ques = ques_array_val[i]
    #ques = ques.astype(np.int32)
    #ques_len = questions_lengths_val[i]
    #ques_len = np.array(int(ques_len)).astype(np.int32)
    # Validation records keep all answers and the raw question string so
    # evaluation can be done downstream.
    ans_all = answers_val_all[i].encode('utf8')
    ques_str = questions_val[i].encode('utf8')
    #write_tfrecords_val(out_file, [img, ques, ques_len, ans_all, ques_str],
    #['img', 'ques', 'ques_len', 'ans_all', 'ques_str'])
    write_tfrecords_val(out_file, [img, ques, ans_all, ques_str],
                        ['img', 'ques', 'ans_all', 'ques_str'])
    print('{}/{} written.'.format(i+1, N), end = '\r')
    sys.stdout.flush()
out_file.close()
end = timeit.default_timer()
print('{} Done writing tfrecord file for validation data. Time = {:.2f} s. {}'.format(temp, end - start, temp))
print('')
print('{} Writing tfrecord file for training data {}'.format(temp, temp))
N = len(images_train_path)
out_filepath = 'data/train_data.tfrecords'
# Remove a stale output file from a previous run, if any.
if os.path.exists(out_filepath):
    os.unlink(out_filepath)
out_file = tf.python_io.TFRecordWriter(out_filepath)
start = timeit.default_timer()
# Running sums used to compute the per-pixel mean/std of the training images.
sum_stats = np.zeros(img_shape)
sum_2_stats = np.zeros(img_shape)
count = 0
for i in range(N):
    img_path = os.path.join('/fs/project/PAS1315/VQA/Images/', images_train_path[i])
    img = imageio.imread(img_path)
    if len(img.shape) == 2:
        print('Image {} is a greyscale image. Converted to RGB. Image shape ==> {}'.format(i+1, img.shape))
        img = np.stack([img, img, img], axis = 2)
    img = resize(img, img_shape[:2], order = 3)  # order-3 (cubic) spline resize
    img = img.astype(np.float32)
    sum_stats += img
    sum_2_stats += img*img
    count += 1
    ques = ques_vec_train[i].astype(np.float32)
    #ques = ques_array_train[i]
    #ques = ques.astype(np.int32)
    #ques_len = questions_lengths_train[i]
    #ques_len = np.array(int(ques_len)).astype(np.int32)
    ans = ans_array_train[i]
    ans = np.array(ans).astype(np.int32)
    #write_tfrecords(out_file, [img, ques, ques_len, ans], ['img', 'ques', 'ques_len', 'ans'])
    write_tfrecords(out_file, [img, ques, ans], ['img', 'ques', 'ans'])
    print('{}/{} written.'.format(i+1, N), end = '\r')
    sys.stdout.flush()
# Finalize per-pixel statistics: E[x] and sqrt(E[x^2] - E[x]^2).
img_mean = sum_stats / float(count)
img_std = np.sqrt((sum_2_stats / float(count)) - (img_mean ** 2))
train_stats_path = 'data/train_stats.npz'
np.savez(train_stats_path, img_mean = img_mean, img_std = img_std)
out_file.close()
end = timeit.default_timer()
print('{} Done writing tfrecord file for Training data. Time = {:.2f} s. {}'.format(temp, end - start, temp))
print('')
|
from functions import my_functions
# Demo call into the local helper package.
my_functions.hello("s")
|
'''
numbers=[1,2,3,4,5]
sum = 0
for i in numbers:
sum=sum+i
print sum
'''
'''
sentence="now is the time for all the good people to come to aid"
count=0
for letter in sentence:
if letter =="a" or letter =="e" or letter =="i" or letter =="o" or letter =="u":
count=count +1
print("No of vowels :"+str(count))
'''
'''
numbers=[1,2,3,4,5,6,7,8,9,10]
for i in range(0,len(numbers),2):
print numbers[i]
'''
'''
numbers=(1,2,3,4,5,6,7,8,9,10)
for i in range(0,len(numbers),2):
print numbers[i]
'''
'''
words=("now","is", "the","time")
max=0
for i in range(0,len(words)):
if len(words[i])>max:
max=i
print ("longest word is " + words[max])
'''
'''
maps={"ashwani":"1","nandini":"2"}
#print (maps.keys())
for i in maps.keys():
print (i + " extension is :" +maps[i])
'''
#use stdout to remove new line characters at end of output of for loop in 2.7
'''
import sys
print sys.version
sys.stdout.flush()
#from __future__ import print_function
for i in open('grades.txt'):
sys.stdout.write(i)
'''
# Read an integer from stdin and print its factorial as "<n>!= <n!>".
print("Enter a number:")
num = int(input())
fact = 1
for factor in range(1, num + 1):
    fact *= factor
print(str(num) + "!= " + str(fact))
|
# Time complexity O(n**2)
class Solution:
def twoSum(self, nums: List[int], target: int):
for i in range(0, len(nums)):
a = nums[i]
for j in range(i+1, len(nums)):
b = nums[j]
if a+b == target:
return [i, j]
# Time complexity O(n)
class Solution:
    # @return a tuple, (index1, index2) -- indices are 1-based
    def twoSum(self, num, target):
        """One-pass hash lookup: remember, for each value seen, the index at
        which its complement was needed; O(n) time, O(n) space.
        Returns (-1, -1) when no pair sums to target."""
        needed = {}
        for idx, value in enumerate(num):
            if value in needed:
                return needed[value], idx + 1
            needed[target - value] = idx + 1
        return -1, -1
return -1, -1 |
import simplejson as json
from api.v1.endpoints.workout_day import CustomWorkoutDaySerializer
from api.v1.endpoints.workout_day import WorkoutDaySerializer
from api.views import ProfileAuthedAPIView
from db_models.models.custom_workout_day import CustomWorkoutDay
from db_models.models.custom_workout_day import CustomWorkoutDayException
from db_models.models.custom_workout_log import CustomWorkoutLog
from db_models.models.custom_workout_program import CustomWorkoutProgram
from db_models.models.workout_log import WorkoutLog
from db_models.models.workout_program import WorkoutProgram
from django.db import transaction
from django.shortcuts import get_object_or_404
from rest_framework import serializers
from rest_framework import status
from rest_framework.response import Response
from utils.models import get_model_for_profile
from utils.query import get_query_switches
class WorkoutProgramSerializer(serializers.ModelSerializer):
    """Read/write serializer exposing a workout program's basic fields."""

    class Meta:
        model = WorkoutProgram
        fields = (
            'id',
            'name',
            'length',
            'description',
        )
def _process_days_string(program, days_string):
try:
workout_days = json.loads(days_string)
except json.scanner.JSONDecodeError:
raise CustomWorkoutDayException('Invalid JSON.')
for workout_day in workout_days:
workout_day['workout_program'] = program.pk
return workout_days
class WorkoutProgramsView(ProfileAuthedAPIView):
    """List workout programs and create custom ones for the authed profile."""

    def get(self, request):
        """Get a list of workout programs

        #### Query Parameters
        * default (optional)
        * custom (optional)

        `?default` will return all default programs.
        `?custom` will return all custom programs available to the user.
        You can `&` parameters together.
        No parameters is same as having all parameters.

        #### Sample Response
        ```
        {
            "default": [
                {
                    "id": 1,
                    "name": "StrongLifts 5x5",
                    "length": 30,
                    "description": "Hello."
                }
            ],
            "custom": [
                {
                    "id": 1,
                    "name": "Gary's Custom",
                    "length": 15,
                    "description": "World."
                }
            ]
        }
        ```
        """
        response_dict = {}
        # With no query parameters, both program sets are returned.
        query_switches = get_query_switches(
            request.query_params,
            ['default', 'custom'],
            all_true_on_none=True,
        )
        if 'default' in query_switches:
            response_dict['default'] = WorkoutProgramSerializer(
                WorkoutProgram.objects.all(),
                many=True,
            ).data
        if 'custom' in query_switches:
            # Only the requesting profile's own custom programs.
            response_dict['custom'] = WorkoutProgramSerializer(
                request.profile.customworkoutprogram_set.all(),
                many=True,
            ).data
        return Response(response_dict)

    def post(self, request):
        """Create a custom workout program

        #### Body Parameters
        * name: string
        * description: string (optional)
        * days: json string (optional)

        ##### Days format
        ```
        [
            {
                "week": 1,
                "day": 1,
                "exercise": 0,
                "sets": 5,
                "reps": 5,
                "weight": 45
            },
            ...
        ]
        ```

        #### Sample Response
        ```
        {
            "program": {
                "id": 1,
                "name": "StrongLifts 5x5",
                "length": 30,
                "description": "Hello"
            },
            "weeks": {
                "1": {
                    "1": [
                        {
                            "id": 0,
                            "exercise": 0,
                            "sets": 5,
                            "reps": 5,
                            "weight": 45,
                        },
                        ...
                    ],
                    ...
                },
                ...
            }
        }
        ```
        """
        if not (
            request.data
            and request.data.get('name')
            and isinstance(request.data['name'], str)
        ):
            return Response(
                {'name': 'Must be a non-empty string.'},
                status=status.HTTP_400_BAD_REQUEST,
            )
        try:
            # Create the program and its days atomically: a bad days payload
            # rolls the new program row back as well.
            with transaction.atomic():
                program = CustomWorkoutProgram.objects.create(
                    profile=request.profile,
                    name=request.data['name'],
                    length=0,
                    description=request.data.get('description') or '',
                )
                days_string = request.data.get('days')
                if days_string:
                    days = _process_days_string(program, days_string)
                    serializer = CustomWorkoutDaySerializer(
                        data=days,
                        many=True,
                    )
                    if serializer.is_valid():
                        serializer.save()
                        program.length = len(days)
                        program.save()
                    else:
                        raise CustomWorkoutDayException(serializer.errors)
        except CustomWorkoutDayException as e:
            return Response(
                {'days': e.errors}, status=status.HTTP_400_BAD_REQUEST,
            )
        return Response(
            WorkoutProgramView.get_workout_program(
                program,
                program.customworkoutday_set.all(),
            ), status=status.HTTP_201_CREATED,
        )
class WorkoutProgramView(ProfileAuthedAPIView):
    """Retrieve, update and delete a single workout program."""

    @staticmethod
    def get_workout_program(program, workout_days, profile=None, default=None):
        """Serialize a program and group its day rows as weeks[week][day].

        When `profile` is given, each exercise dict also gets a 'log' key
        holding that profile's recorded reps (or None).
        """
        weeks = {}
        for workout_day in workout_days:
            if workout_day.week not in weeks:
                weeks[workout_day.week] = {}
            if workout_day.day not in weeks[workout_day.week]:
                weeks[workout_day.week][workout_day.day] = []
            weeks[workout_day.week][workout_day.day].append(
                WorkoutDaySerializer(workout_day).data,
            )
        if profile:
            for week in weeks.values():
                for days in week.values():
                    for i, day in enumerate(days):
                        day['log'] = None
                        try:
                            # Default programs log to WorkoutLog, custom ones
                            # to CustomWorkoutLog.
                            if default:
                                day['log'] = WorkoutLog.objects.get(
                                    profile=profile,
                                    workout_day=day['id'],
                                ).reps
                            else:
                                day['log'] = CustomWorkoutLog.objects.get(
                                    profile=profile,
                                    workout_day=day['id'],
                                ).reps
                        except (
                            WorkoutLog.DoesNotExist,
                            CustomWorkoutLog.DoesNotExist,
                        ):
                            pass  # no log recorded; 'log' stays None
                        days[i] = day
        return {
            'program': WorkoutProgramSerializer(program).data,
            'weeks': weeks,
        }

    def get(self, request, pk):
        """Get a specific workout program's details

        #### Query Parameters
        * default (or custom)
        * custom (or default)

        `?default` will return a default program of given id.
        `?custom` will return a custom program of given id, if
        available to the user; 404 otherwise.
        `default` takes precedence over `custom`.

        #### Sample Response
        ```
        {
            "program": {
                "id": 1,
                "name": "StrongLifts 5x5",
                "length": 30,
                "description": "Hello"
            },
            "weeks": {
                "1": {
                    "1": [
                        {
                            "id": 0,
                            "exercise": 0,
                            "sets": 5,
                            "reps": 5,
                            "weight": 45,
                            "log": "5,5,5,4,3"
                        },
                        {
                            "id": 1,
                            "exercise": 1,
                            "sets": 5,
                            "reps": 5,
                            "weight": 45,
                            "log": null
                        },
                        ...
                    ],
                    ...
                },
                ...
            }
        }
        ```
        """
        query_switches = get_query_switches(
            request.query_params,
            ['default', 'custom'],
            raise_on_none=True,
        )
        if 'default' in query_switches:
            program = get_object_or_404(WorkoutProgram, pk=pk)
            days = program.workoutday_set.all()
        else:
            # 404s when the custom program does not belong to this profile.
            program = get_model_for_profile(
                CustomWorkoutProgram,
                request.profile,
                pk=pk,
            )
            days = program.customworkoutday_set.all()
        return Response(
            WorkoutProgramView.get_workout_program(
                program,
                days,
                profile=request.profile,
                default=('default' in query_switches),
            ),
        )

    def patch(self, request, pk):
        """Update a custom workout program

        #### Body Parameters
        * name: string (optional)
        * description: string (optional)
        * days: json string (optional)

        ##### Days format
        ```
        [
            {
                "week": 1,
                "day": 1,
                "exercise": 0,
                "sets": 5,
                "reps": 5,
                "weight": 45
            },
            {
                "week": 1,
                "day": 2,
                "exercise": 0,
                "delete": true
            },
            ...
        ]
        ```
        Delete a day's exercise with `"delete":true`

        #### Sample Response
        ```
        {
            "program": {
                "id": 4,
                "name": "asd",
                "length": 2,
                "description": ""
            },
            "weeks": {
                "1": {
                    "1": [
                        {
                            "id": 4,
                            "exercise": 0,
                            "sets": 56,
                            "reps": 5,
                            "weight": 45,
                            "log": null
                        }
                    ]
                }
            }
        }
        ```
        """
        program = get_model_for_profile(
            CustomWorkoutProgram,
            request.profile,
            pk=pk,
        )
        if request.data:
            name = request.data.get('name')
            if name is not None and name != program.name:
                if not (name and isinstance(name, str)):
                    return Response(
                        {'name': 'Must be a non-empty string.'},
                        status=status.HTTP_400_BAD_REQUEST,
                    )
                program.name = name
            if 'description' in request.data:
                program.description = request.data['description'] or ''
            program.full_clean()
            program.save()
            days_string = request.data.get('days')
            if days_string:
                try:
                    days = _process_days_string(program, days_string)
                    # Upsert/delete all day rows atomically.
                    with transaction.atomic():
                        for day_data in days:
                            try:
                                # TODO: Slow.
                                day_set = program.customworkoutday_set
                                existing_day = day_set.get(
                                    week=day_data['week'],
                                    day=day_data['day'],
                                    exercise=day_data['exercise'],
                                )
                            except CustomWorkoutDay.DoesNotExist:
                                existing_day = None
                            if day_data.get('delete') is True:
                                if existing_day:
                                    existing_day.delete()
                                continue
                            else:
                                serializer = CustomWorkoutDaySerializer(
                                    existing_day,
                                    data=day_data,
                                )
                                if serializer.is_valid():
                                    serializer.save()
                                else:
                                    raise CustomWorkoutDayException(
                                        serializer.errors,
                                    )
                except CustomWorkoutDayException as e:
                    return Response(
                        {'days': e.errors}, status=status.HTTP_400_BAD_REQUEST,
                    )
        return Response(
            WorkoutProgramView.get_workout_program(
                program,
                program.customworkoutday_set.all(),
                profile=request.profile,
                default=False,
            ),
        )

    def delete(self, request, pk):
        """Delete a custom workout program
        """
        program = get_model_for_profile(
            CustomWorkoutProgram,
            request.profile,
            pk=pk,
        )
        program.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
#!/usr/bin/env python
'''
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
'''


def square_sum_difference(n):
    """Return (1+2+...+n)**2 - (1**2+2**2+...+n**2) using closed forms.

    sum(1..n) = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6, so this is
    O(1) instead of the original O(n) loop.
    """
    total = n * (n + 1) // 2
    square_sum = n * (n + 1) * (2 * n + 1) // 6
    return total * total - square_sum


# The original used Python-2-only syntax (xrange, print statement); this
# version runs on Python 3 as well.
print('The difference between the sum of the squares of the first one hundred natural numbers and the square of the sum is', square_sum_difference(100))
# 25164150
|
import sys
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def main():
    """Simulate per-bunch gain curves with zero common signal, then plot
    the pairwise correlation matrix (null-hypothesis model) and the curves."""
    npoints = 150      # samples per gain curve
    sigsize = 0.000    # amplitude of the shared "signal" (zero: pure noise)
    noise = 0.0005     # per-bunch gaussian noise sigma
    bunches = 8
    signal = np.random.normal(0.0, sigsize, size=npoints)
    # Row 0 holds the mean over all bunches; rows 1..bunches are single bunches.
    g_arrs = np.zeros([bunches+1, npoints])
    for i in range(1, bunches+1):
        g_arrs[i] = np.random.normal(1.0, noise, size=npoints) + signal
    g_arrs[0] = g_arrs[1:].mean(axis=0)
    # Pearson correlation of every curve pair: dot of demeaned curves,
    # normalized by n and both standard deviations.
    m_corr = np.zeros([bunches+1, bunches+1])
    for i in range(bunches+1):
        for j in range(bunches+1):
            m_corr[i, j] = np.dot(g_arrs[i] - g_arrs[i].mean(), g_arrs[j] - g_arrs[j].mean())
            m_corr[i, j] /= g_arrs[i].shape[0]
            m_corr[i, j] /= (g_arrs[i].std() * g_arrs[j].std())
    plt.clf()
    sns.heatmap(m_corr)
    plt.title('Model of Bunch Correlations with No Signal')
    plt.savefig('gain_model_corr.pdf')
    plt.clf()
    plt.plot(g_arrs.T)
    plt.savefig('gain_model_curv.png')


if __name__ == '__main__':
    sys.exit(main())
|
#title : mergesort.py
#description : Sorting a set of numbers stored inside an array using merge sort algorithm.
#author : Ramadhi Irawan
#date : 2014-10-07
#version : 0.2
#usage : python mergesort.py
#notes : Information about merge sort: http://en.wikipedia.org/wiki/Merge_sort
#python_version : 2.7.x
class Mergesort:
    """Sort a list of comparable items with recursive merge sort.

    Fixes over the original:
    * the docstring claimed "Fibonacci algorithm", which was wrong;
    * the merge drain step popped from a list while iterating it, which
      dropped and duplicated elements whenever three or more items remained
      on one side (e.g. sort([0, 1, 2, 3, 4]) returned [0, 1, 2, 4, 4]).
    """

    def __init__(self, name):
        self.name = name  # display label only; not used by the algorithm

    def sort(self, array):
        """Return a new sorted list (stable); `array` itself is not modified."""
        if len(array) < 2:
            return array
        mid = int(len(array) / 2)
        upper = self.sort(array[:mid])
        lower = self.sort(array[mid:])
        result = []
        i = j = 0
        # Standard two-pointer merge: no mutation while iterating, O(n).
        while i < len(upper) and j < len(lower):
            if upper[i] > lower[j]:
                result.append(lower[j])
                j += 1
            else:
                result.append(upper[i])
                i += 1
        # At most one of these is non-empty; append the leftover run intact.
        result.extend(upper[i:])
        result.extend(lower[j:])
        return result
import random

# Demo (Python 2 print statements): sort 45 random values from range(512).
my_array = random.sample(range(512), 45)
print "Your array before sorting: ", my_array
my_result = Mergesort("Merge Sort")
# NOTE(review): "You array" typo in the output string below.
print "You array after sorting: ", my_result.sort(my_array)
# Insertion sort, explained simply
# Input: list a
# Output: a new sorted list
def find_ins_idx(r, v):
    """Return the index in the sorted list r where v should be inserted."""
    for idx, current in enumerate(r):
        if v < current:
            return idx
    # v is >= every element: insert at the end.
    return len(r)
def ins_sort(a):
    """Return a new sorted list built by insertion.

    Note: consumes (empties) the input list a, exactly like the original.
    """
    sorted_items = []
    while a:
        item = a.pop(0)
        sorted_items.insert(find_ins_idx(sorted_items, item), item)
    return sorted_items
# Demo: prints the sorted copy (note: ins_sort empties its argument).
d=[2,4,5,1,3]
print(ins_sort(d))
# Insertion sort (in-place)
# Input: list a
# Output: none (sorts the given list a in place)
def ins_sort1(a):
    """Sort list a in place with insertion sort; returns None."""
    for pos in range(1, len(a)):
        current = a[pos]
        hole = pos - 1
        # Shift larger elements right until the slot for `current` opens up.
        while hole >= 0 and a[hole] > current:
            a[hole + 1] = a[hole]
            hole -= 1
        a[hole + 1] = current
# Demo: the in-place variant sorts d itself.
d=[2,4,5,1,3]
ins_sort1(d)
print(d)
#O(n^2)
from Pages.ContentPages.BasePage import Page
from selenium.webdriver.common.by import By
class GalleryItemPreviewPage(Page):
    """Page object for the gallery item preview screen."""

    def __init__(self, driver):
        self.driver = driver
        # Locator table: every accessor below resolves its element from here.
        self.locators = {
            'info_message': {'by': By.XPATH, 'value': '//body[1]/div[3]/div[1]/div[1]/div[2]'},
            'view_button': {'by': By.XPATH, 'value': 'id("block-tabs")/ul[1]/li[1]'},
            'edit_button': {'by': By.XPATH, 'value': 'id("block-tabs")/ul[1]/li[2]'},
            'delete_button': {'by': By.XPATH, 'value': 'id("block-tabs")/ul[1]/li[3]'},
            'devel_button': {'by': By.XPATH, 'value': 'id("block-tabs")/ul[1]/li[4]'},
            'translate_button': {'by': By.XPATH, 'value': 'id("block-tabs")/ul[1]/li[5]'},
            'confirm_delete': {'by': By.XPATH, 'value': 'id("edit-submit")'},
        }

    def _find(self, key):
        # Shared element lookup used by all public accessors.
        return self.driver.find_element(**self.locators[key])

    def get_info_message(self):
        return self._find('info_message')

    def view_button(self):
        return self._find('view_button')

    def edit_button(self):
        return self._find('edit_button')

    def delete_button(self):
        return self._find('delete_button')

    def confirm_delete_button(self):
        return self._find('confirm_delete')
from .token import Token
class Utterance:
    """An utterance: raw text, its tokens, and optional speaker/offsets.

    Tokens are stored as an immutable tuple and back-linked to this
    utterance via their set_utterance() hook.
    """

    def __init__(self, text, tokens, speaker=None, offsets=None):
        self._text = text
        self._tokens = tuple(tokens)
        self._speaker = speaker
        self._offsets = None
        if offsets is not None:
            well_formed = isinstance(offsets, (list, tuple)) and len(offsets) == 2
            if not well_formed:
                raise ValueError("offsets must be an iterable of length 2.")
            self._offsets = tuple(offsets)
        # Back-link every token to its owning utterance.
        for token in self.tokens:
            token.set_utterance(self)

    @property
    def tokens(self):
        return self._tokens

    @property
    def text(self):
        return self._text

    @property
    def speaker(self):
        return self._speaker

    @property
    def offsets(self):
        return self._offsets

    def __str__(self):
        import textwrap
        joined_tokens = " ".join([str(tok) for tok in self.tokens])
        lines = [
            "Utterance: {",
            textwrap.fill(" text: {}".format(self.text),
                          subsequent_indent=" "),
            textwrap.fill(" tokens: [{}]".format(joined_tokens),
                          subsequent_indent=" "),
            "speaker: {}".format(self.speaker),
        ]
        if self.offsets:
            lines.append(" offset: ({}, {})".format(*self.offsets))
        lines.append("}")
        return "\n".join(lines)
|
import hilo, parser_generator, grammar_parser, sys, time
from distorm3 import Decode, Decode32Bits
from di3_interp import interpret
# create a parser for output from distorm3
di3_out_grammar_file = './di3_out.grm'
di3parser = parser_generator.makeParser(grammar_parser.parse(open(di3_out_grammar_file).read()))
# Module-global call-depth counter shared with the trace callbacks in Process.
callCounter = 1 # can only trace one function at a time (scoping issues with Python 2.7)
#
# Represents an open and attached process.
# Provides some operations:
# memory derefencing (derefence)
# making anonymous functions for remote calls (remoteFunction)
# Requires (kind of) that detach() is called before exiting
# Static methods:
# getRunningProcesses( )
# Returns a list of running processes
#
class Process:
    """Wraps an open, attached target process (via the hilo extension).

    Offers memory read/write, remote function calls, and call tracing.
    detach() should be called before exiting (see header comment above).
    """

    def __init__( self, name = None, pid = 0 ):
        self.pid = pid # process id
        self.hProc = None # handle to the (open)
        self.open = False
        # Attach immediately when a name or pid was supplied.
        if ( name != None or pid != 0 ):
            self.attach( name )

    def derefInt( self, address, size ):
        # Factory for an integer view over target-process memory.
        return DerefInt( self, address, size )

    def attach( self, name ):
        """Attach to a process by image name (or the pid set in __init__)."""
        if ( self.open ):
            raise Exception()
        if name != None:
            self.pid = hilo.getPid( name )
        self.hProc = hilo.openProcess( self.pid )
        self.open = True

    def detach( self ):
        """Close the process handle and reset attachment state."""
        if ( self.open ):
            hilo.closeHandle( self.hProc )
            self.pid = 0
            self.hProc = None
            self.open = False

    def writeInt( self, address, length, val ):
        # All memory ops are silent no-ops when not attached.
        if ( self.open ):
            hilo.writeInt( self.hProc, address, length, val )

    def writeMemory( self, address, byteArray ):
        if ( self.open ):
            hilo.writeMemory( self.hProc, address, len(byteArray), byteArray )

    def readMemory( self, address, length ):
        if ( self.open ):
            return hilo.readMemory( self.hProc, address, length )

    def readInt( self, address, length ):
        if ( self.open ):
            return hilo.readInt( self.hProc, address, length )

    def isAttached( self ):
        return self.open

    def genCallbackTrace( self, code, grabAmount = 10 ):
        """Build a per-instruction debugger callback that wraps `code`."""
        def shouldContinueDebugging( curInstr ):
            # Track call depth via the module-global counter: tracing stops
            # when the traced call returns (counter reaches 0).
            global callCounter
            if curInstr[2][0:3].lower() == "ret":
                callCounter -= 1
            elif curInstr[2][0:4].lower() == "call":
                callCounter += 1
            return callCounter
        # code is another function that takes an instruction and context and instruction counter
        # callbacks take: instr. address, context, and an instruction counter
        def fn( address, context, instrCounter ):
            # grab the code
            instructions = []
            hexString = ""
            grabbedAmount = grabAmount
            # guess the instruction length
            while len(instructions) <= 1:
                hexString = str( self.readMemory( address, grabbedAmount ) ).encode('hex')
                instructions = Decode( address, hexString.decode('hex'), Decode32Bits )
                grabbedAmount += 5
            curInstr = instructions[0] # use current instruction
            # determine whether to continue or not
            continueVal = shouldContinueDebugging( curInstr )
            if continueVal != 0:
                code( curInstr, context, instrCounter ) # call our code if we continue
            return continueVal
        return fn

    def remoteFunction( self, address, argFormat ):
        """Return an anonymous callable that runs the code at `address` in
        the target process on a fresh remote thread, marshalling arguments
        according to `argFormat` (an ArgFormat)."""
        if ( self.open == False):
            raise Exception()
        def fn( args, tf = 0):
            hThread = hilo.makeRemoteThread( self.hProc, tf )
            cContext = hilo.getThreadContext( hThread )
            # place exitthread arg onto stack (to avoid segfaulting)
            cContext.Esp -= 4
            self.writeInt( cContext.Esp, 4, 0 )
            # Write a relative JMP (opcode 0xE9) to `address` in a code cave.
            codeCave = hilo.virtualAlloc( self.hProc, 32 )
            myCodecave = codeCave
            self.writeInt( codeCave, 1, 0xE9 )
            self.writeInt( codeCave + 1, 4, address - 5 - codeCave )
            # set next instruction to execute
            cContext.Eip = codeCave
            # modify context based on argObject
            for reg in argFormat.regActions:
                setattr( cContext, reg[0].upper() + reg[1:], args[argFormat.regActions[reg]] )
            for (length,name) in argFormat.stackActions:
                cContext.Esp -= length
                self.writeInt( cContext.Esp, length, args[name] )
            # push a return address onto the stack (ExitThread)
            cContext.Esp -= 4
            self.writeInt( cContext.Esp, 4, hilo.getDefaultReturnAddress() )
            hilo.setThreadContext( hThread, cContext )
            hilo.resumeAndWait( hThread )
            hilo.virtualFree( self.hProc, 32, codeCave )
            hilo.closeHandle( hThread )
        return fn

    def determineNextInstruction(self, instr, context):
        # Continue while execution has not reached the default return address.
        continueVal = (context.Eip != hilo.getDefaultReturnAddress())
        return continueVal

    def traceIntoCall( self, remoteFunction, argFormat, argsToFunc, outputFileName, address = 0 ):
        # note, when address is not 0, remoteFunction simply waits until tracing is done, else it should be a call to a remote function
        # this behaviour is taken care of within this function
        global callCounter
        callCounter = 1
        traceStarted = False
        outputFile = open(outputFileName, "w")
        # Emit Prolog facts describing the initial machine state.
        outputFile.write('get(I,A,V) :- put(I,A,V);C is I-1,get(C,A,V).\n')
        outputFile.write('put(0,esp,0).\n')
        outputFile.write('put(-1,X,undef(X)).\n\n')
        for reg in argFormat.regActions:
            regVal = argFormat.regActions[reg]
            outputFile.write('put(0,%s,%s).\n' % (reg,str(regVal)))
        esp = 4
        # Walk the stack arguments bottom-up, then restore their order.
        argFormat.stackActions.reverse()
        for (length,name) in argFormat.stackActions:
            outputFile.write('put(0,deref(%d),%s).\n' % (esp,name))
            esp += length
        argFormat.stackActions.reverse()
        def reformatInstr( instrObj ):
            # Normalize a distorm3 instruction string for the di3 parser:
            # collapse spaces in the prefix before the first operand.
            strInstr = instrObj[2].lower()
            startInd = strInstr.find(',')
            if startInd == -1:
                startInd += len(strInstr)
            while strInstr[startInd] != ' ' and startInd >= 0:
                startInd -= 1
            if startInd > 0:
                save = strInstr[startInd:]
                rep = strInstr[0:startInd].replace(' ', '')
                strInstr = rep + save
            return strInstr
        def analysisFun( instr, context, instrCounter ):
            # Parse each traced instruction and log its interpretation.
            #traceStarted = True
            #print instr[2]
            refStr = reformatInstr(instr)
            #print refStr
            ast = di3parser.parse(refStr)
            #print str(ast)
            #outputFile.write( str(instrCounter) + "\t " + hex(instr[0]) + "\t " + str(ast) + "\n" )
            outputFile.write( str(interpret( ast, instrCounter )) + "\n" )
        hilo.traceIntoCall( self.hProc, self.genCallbackTrace( analysisFun ), remoteFunction, argsToFunc, address )
        outputFile.close( )

    @staticmethod
    def getRunningProcesses( ):
        """Return a dict mapping image name -> list of pids."""
        listProcesses = hilo.getProcesses( ) # list of tuples (image,pid)
        retDict = {}
        for (imageName,pid) in listProcesses: # build dictionary of lists
            if imageName in retDict:
                retDict[imageName] += [pid]
            else:
                retDict[imageName] = [pid]
        return retDict
#
# Defines a way to move argument values in Python to a context for a thread in C++
# Returns calling object to enable call chaining.
#
class ArgFormat:
    """Describes how Python argument values map onto a remote thread context.

    stackActions: list of (length, argName) tuples pushed onto the stack.
    regActions:   mapping of register name -> argument name.
    Every mutator returns self so calls can be chained.
    """

    def __init__( self ):
        self.stackActions = []
        self.regActions = {}

    def setReg( self, regName, argName ):
        """Bind a register to a named argument."""
        self.regActions[regName] = argName
        return self

    def pushVal( self, length, argName ):
        # Identical to push(); kept for backward compatibility.
        self.stackActions.append( (length, argName) )
        return self

    def push( self, length, argName ):
        """Queue a named argument of `length` bytes for a stack push."""
        self.stackActions.append( (length, argName) )
        return self
class DerefInt:
    """An integer slot of a given size at an address in a target process.

    Reads and writes are delegated to the owning Process object.
    """

    def __init__( self, process, address, size ):
        self.process = process
        self.address = address
        self.size = size

    def write( self, val ):
        """Store `val` into the slot in the target process."""
        self.process.writeInt( self.address, self.size, val )

    def read( self ):
        """Fetch the slot's current value from the target process."""
        return self.process.readInt( self.address, self.size )
|
from flask import Flask
from flask_socketio import SocketIO, emit, send
from .routes import routes, session
from .config import Config
# Serve the static frontend and wire up the chat websocket events.
app = Flask(__name__, static_folder='public')
app.config.from_object(Config)
app.register_blueprint(routes)
server = SocketIO(app)


@server.on('message', namespace='/')
def on_my_event(res):
    # Relay every incoming chat message to all connected clients.
    emit('sendmessage', res, broadcast=True)
    print("received message: %s" %str(res))


@server.on('connect')
def on_connect():
    print('new client')
    # Fall back to 'none' when the session has no nickname yet.
    nick = session.get('nickname') or 'none'
    emit('joined', {'user': nick}, broadcast=True)


@server.on('disconnect')
def on_disconnect():
    print('bye bye client')
|
class Solution(object):
    def divide(self, dividend, divisor):
        """Integer division truncated toward zero, without the '/' operator.

        Doubles the divisor until it would overshoot, then recurses on the
        remainder; the result is clamped to the signed 32-bit maximum.
        :type dividend: int
        :type divisor: int
        :rtype: int
        """
        same_sign = (dividend >= 0) == (divisor >= 0)
        sign = 1 if same_sign else -1
        numerator = abs(dividend)
        denominator = abs(divisor)
        if numerator < denominator:
            return 0
        if numerator == denominator:
            return sign
        if denominator == 1:
            # Clamp to the 32-bit range expected by the problem statement.
            signed = numerator if sign > 0 else -numerator
            return min(2 ** 31 - 1, signed)
        quotient = 1
        doubled = denominator
        while numerator > doubled + doubled:
            doubled += doubled
            quotient += quotient
        return sign * (quotient + self.divide(numerator - doubled, abs(divisor)))
import os
import csv
from glob import glob
import torch
import pandas as pd
class TabularTrans:
    """Column-wise min-max normalizer for 2D tensors.

    transform() remembers per-column min/max (separately for x and y) so
    inverse() can map normalized values back to the original scale.
    """

    def __init__(self):
        self.x_max, self.x_min = 0, 0
        self.y_max, self.y_min = 0, 0

    def transform(self, origin, is_x=True):
        """Scale each column of a 2D tensor into [0, 1].

        original 2D torch data
        """
        hi = torch.max(origin, dim=0)[0]
        lo = torch.min(origin, dim=0)[0]
        if is_x:
            self.x_max, self.x_min = hi, lo
        else:
            self.y_max, self.y_min = hi, lo
        return (origin - lo) / (hi - lo)

    def inverse(self, transed, is_x=True):
        """Map normalized values back to the scale recorded by transform()."""
        if not isinstance(transed, torch.Tensor):
            transed = torch.Tensor(transed)
        lo, hi = (self.x_min, self.x_max) if is_x else (self.y_min, self.y_max)
        return transed * (hi - lo) + lo
class CatalogDataset(torch.utils.data.Dataset):
    # Tabular dataset built from catalog CSV files. Features and targets are
    # min-max scaled; the scaler is kept as self.trans so predictions can be
    # inverse-transformed by callers.
    def __init__(self, src_dir="../catalog_data", feature_col=["coat"], targets=["V"], files=["d5_soku_lbyD_less3.csv"]):
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here only as long as callers never mutate them.
        self.features = []
        self.targets = []
        for f in files:
            df = pd.read_csv(os.path.join(src_dir, f))
            # Rows with any NaN are dropped before extraction.
            df = df.dropna()
            self.features.extend(df[feature_col].values)
            self.targets.extend(df[targets].values)
        #print(self.features[:2])
        #print(self.targets[:2])
        self.features = torch.Tensor(self.features)
        self.targets = torch.Tensor(self.targets)
        #print(self.features[:2])
        #print(self.targets[:2])
        # One shared scaler: x bounds from features, y bounds from targets.
        self.trans = TabularTrans()
        self.features = self.trans.transform(self.features)
        self.targets = self.trans.transform(self.targets, is_x=False)
        print("dataset shape x: {}, y: {}".format(self.features.shape, self.targets.shape))
        #print(self.features[:2])
        #print(self.targets[:2])
    def __len__(self):
        return len(self.features)
    def __getitem__(self, idx):
        # Returns one (feature, target) pair of scaled tensors.
        x = self.features[idx]
        y = self.targets[idx]
        return x, y
from injector.models import Technology
from injector.serializers import *
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
class DataInjector(APIView):
"""
List all snippets, or create a new snippet.
"""
def get(self, request, format=None):
snippets = Technology.objects.all()
serializer = TechnologySerializer(snippets, many=True)
return Response(serializer.data)
def post(self, request, format=None):
"""
eventSerializer = EventSerializer()
eventStorySerializer = EventStorySerializer()
guidSerializer = GuidSerializer()
pageSerializer = PageSerializer()
referrerSerializer = ReferrerSerializer()
technologySerializer = TechnologySerializer()
"""
#return Response(request.data)
#serializer = TechnologySerializer(data=request.data)
"""
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
"""
eventSerializer = EventSerializer(data=request.data)
if eventSerializer.is_valid():
eventSerializer.save()
return Response(eventSerializer.data, status=status.HTTP_201_CREATED)
return Response(eventSerializer.errors, status=status.HTTP_400_BAD_REQUEST)
"""
eventSerializer = TempSerializer(data=request.data)
if eventSerializer.is_valid():
eventSerializer.save()
return Response(eventSerializer.data, status=status.HTTP_201_CREATED)
return Response(eventSerializer.errors, status=status.HTTP_400_BAD_REQUEST)
""" |
#import sys
#input = sys.stdin.readline
from math import log2
def main():
    # Reads N from stdin and prints the winner ("Takahashi" or "Aoki").
    # M = floor(log2(N)); x walks M levels down an implicit binary tree,
    # alternating the 2*x / 2*x+1 child by depth parity (the parity pattern
    # flips depending on whether M is even). NOTE(review): the winner rule
    # is the contest's game analysis -- kept exactly as written.
    N = int( input())
    M = int( log2(N))
    x = 1
    for i in range(M):
        if M%2 == 0:
            if i%2 == 1:
                x = 2*x
            else:
                x = 2*x+1
        else:
            if i%2 == 0:
                x = 2*x
            else:
                x = 2*x+1
    if M%2 == 0:
        if x <= N:
            print("Aoki")
        else:
            print("Takahashi")
    else:
        if x <= N:
            print("Takahashi")
        else:
            print("Aoki")
if __name__ == '__main__':
    main()
|
from mongo import *
from changeset import Revisioned
class Classifier(Revisioned):
    # Revisioned record for one classifier entry; attribute access proxies
    # into the underlying dict via dictproperty (star-imported from mongo).
    @property
    def context(self):
        # The revision context is the owning scheme.
        return self.get('scheme')
    id = dictproperty('_id')
    name = dictproperty('name')
    taxonomy = dictproperty('taxonomy')
    level = dictproperty('level')
    label = dictproperty('label')
    description = dictproperty('notes')
    parent = dictproperty('parent')
    # Fields that must always be supplied when filtering classifiers.
    required_filters = ("taxonomy", "name")
|
import os
import pygame
class SnakeImage:
    """Class containing proper image of snake: in ball or without ball"""
    def __init__(self):
        """Loads new snake in ball"""
        # Resolve assets relative to this module's directory so the game
        # works regardless of the current working directory.
        self.folder = os.path.dirname(os.path.realpath(__file__))
        self.image = pygame.image.load(os.path.join(self.folder, "snakeBall.PNG"))
    def change_ball_into_snake(self):
        """Changes image with big ball to snake"""
        self.image = pygame.image.load(os.path.join(self.folder, "waz.PNG"))
|
#!usr/bin/env python
#-*- coding:utf-8 _*-
"""
@author:yaoli
@file: hello-pyqt.py
@time: 2018/08/02
"""
import untitled
from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
if __name__=='__main__':
    # Standard PyQt5 bootstrap: mount the designer-generated UI (untitled.py)
    # onto a QMainWindow and hand control to the Qt event loop.
    app = QApplication(sys.argv)
    MainWindow = QMainWindow()
    ui = untitled.Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
__author__ = 'natalie'
# Configuration template: the None entries below are placeholders that must
# be filled in before the application can start (see the guard at the end).
config = dict(jira_server=None,
              jira_user=None,
              jira_pass=None,
              jira_default_project=None,
              jira_default_issue_type='Bug',
              jira_default_labels=['fire', ],
              slack_token=None,
              loglevel=None,
              logformat=None,
              logfile=None
              )
# Fail fast when any mandatory credential is still unset. Generator instead
# of building a throwaway list inside any().
if any(config.get(key) is None for key in ['jira_server', 'jira_user', 'jira_pass', 'slack_token']):
    raise Exception('You should update config.py')
|
#!/usr/bin/python3.9
import textwrap
from pylib import iter_headerguards
# Generate vendorization/push_headerguards.hh: for every header guard macro,
# emit a push_macro/undef/re-define stanza, all wrapped in UIT_VENDORIZE_EMP,
# so vendorized (UIT_-prefixed) headers are included exactly once.
content = "#ifdef UIT_VENDORIZE_EMP\n"
for headerguard in sorted(iter_headerguards()):
    content += textwrap.dedent(f"""\
#pragma push_macro("{ headerguard }")
#undef {headerguard}
#ifdef UIT_{headerguard}_HEADERGUARD
#define {headerguard}
#endif
""")
    content += "\n"
content += "#endif // #ifdef UIT_VENDORIZE_EMP"
content += "\n"
# Plain string literal: the former f-prefix had no placeholders (F541).
with open("vendorization/push_headerguards.hh", "w") as f:
    f.write(content)
|
import fastzy
import time
import Levenshtein
searcher = fastzy.Searcher(
    file_path='500mb',
    separator='.',
)
# Time the indexed fuzzy search over the 500mb fixture file.
start = time.perf_counter()
results = searcher.search(
    pattern='text',
    max_distance=1,
)
end = time.perf_counter()
print(f'fastzy took: {end - start} seconds, found {len(results)}')
# Baseline: linear scan applying Levenshtein to the first '.'-separated field.
start = time.perf_counter()
with open('500mb') as lines_file:
    results = []
    for line in lines_file:
        # NOTE(review): the unpack assumes exactly one '.' per line; a second
        # dot raises ValueError -- confirm the input format guarantees this.
        prefix, postfix = line.split('.')
        if Levenshtein.distance(prefix, 'text') <= 1:
            results.append(line)
end = time.perf_counter()
print(f'Levenshtein took: {end - start} seconds, found {len(results)}')
|
#! /usr/bin/env python3
# Core imports
import time
import sys
import ev3dev.ev3 as ev3
import ev3dev.core as ev3core
def open_food_container(food_container,direction):
    # Swing the container motor open, pause briefly, then close it again to
    # release a portion of food.
    # NOTE(review): the closing move uses speed_sp=-300 for 'right' but +300
    # for 'left' -- confirm the sign difference matters on the hardware.
    if direction == 'left':
        food_container.run_to_rel_pos(position_sp=80,speed_sp=300)
        time.sleep(0.2)
        food_container.run_to_rel_pos(position_sp=-80,speed_sp=300)
    if direction == 'right':
        food_container.run_to_rel_pos(position_sp=80,speed_sp=300)
        time.sleep(0.2)
        food_container.run_to_rel_pos(position_sp=-80,speed_sp=-300)
def open_chute_hatch(chute_hatch):
    # Open the hatch, hold it for half a second, then drive it shut on a timer.
    chute_hatch.run_to_rel_pos(position_sp=80,speed_sp=300)
    time.sleep(0.5)
    chute_hatch.run_timed(speed_sp=-200,time_sp=700)
def rotate_chute(chute,direction):
    # Rotate the delivery chute toward bowl '1' or '2' with timed moves,
    # pausing before and after so the mechanism settles.
    time.sleep(1.0)
    if direction == '1':
        chute.run_timed(speed_sp=-200,time_sp=250)
        time.sleep(2.0)
    if direction == '2':
        chute.run_timed(speed_sp=200, time_sp=200)
        time.sleep(2.0)
    time.sleep(1.0)
def return_chute(chute,direction):
    # Drive the chute back toward its rest position (mirror of rotate_chute:
    # opposite speed sign per bowl).
    time.sleep(1.0)
    if direction == '1':
        chute.run_timed(speed_sp=200,time_sp=200)
        time.sleep(2.0)
    if direction == '2':
        chute.run_timed(speed_sp=-200,time_sp=200)
        time.sleep(2.0)
if __name__ == '__main__':
    # argv[1] selects the target bowl ('1' or '2').
    bowl = sys.argv[1]
    # Motor port assignments for the feeder rig.
    right_food_container = ev3.LargeMotor('outA')
    left_food_container = ev3.LargeMotor('outD')
    weighing_chamber = ev3.LargeMotor('outC')
    chute = ev3.LargeMotor('outB')
    # Dispense from both containers, drop through the weighing chamber,
    # deliver to the selected bowl, and park the chute again.
    open_food_container(right_food_container, 'right')
    open_food_container(left_food_container,'left')
    open_chute_hatch(weighing_chamber)
    rotate_chute(chute,bowl)
    return_chute(chute,bowl)
|
import spacy
import nltk
import os
import key_parser as kp
nltk.download('stopwords')
en_stop = set([word.lower() for word in nltk.corpus.stopwords.words('english')])
en = spacy.load('en')
def clean_text(text, min_length=1):
    """Lowercase *text*, drop stopwords and short tokens, then lemmatize,
    discarding spaCy's '-PRON-' pronoun placeholder."""
    lowered = text.lower()
    kept = [w for w in lowered.split(" ")
            if len(w) > min_length and w not in en_stop]
    doc = en(" ".join(kept))
    return " ".join(tok.lemma_ for tok in doc if tok.lemma_ != '-PRON-')
def get_text(path):
    """Walk *path*, clean each file's text and attach its key metadata.

    Returns {filename: {'txt': cleaned text, 'metadata': kp.key entry}}.
    """
    res = {}
    for subdir, dirs, files in os.walk(path):
        for f in files:
            f_name = subdir + os.sep + f
            with open(f_name, 'r') as fi:
                txt = clean_text(" ".join(fi.readlines()))
                # f[:-5] strips a 5-character suffix to form the metadata key
                # (presumably a '.json'-like extension) -- TODO confirm every
                # file in the tree carries that extension.
                key_metadata = kp.key[f[:-5]]
                res[f] = {
                    'txt': txt,
                    'metadata': key_metadata
                }
    return res
|
#!/usr/bin/python
##
# Copyright 2010-2017 JetBrains s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# (lldb) command script import llvmDebugInfoC/src/scripts/konan_lldb.py
# (lldb) p kotlin_variable
#
import lldb
import struct
# Summary text shown for references that fail validation.
NULL = 'null'
def lldb_val_to_ptr(lldb_val):
    """Render an SBValue's address as a C ObjHeader-pointer cast expression."""
    return '((struct ObjHeader *) {:#x})'.format(lldb_val.GetValueAsUnsigned())
def evaluate(expr):
    # Evaluate a C expression in the currently selected lldb target and
    # return the resulting SBValue.
    return lldb.debugger.GetSelectedTarget().EvaluateExpression(expr, lldb.SBExpressionOptions())
def is_instance_of(addr, typeinfo):
    # Ask the Kotlin runtime whether *addr* is an instance of *typeinfo*.
    return evaluate("(bool)IsInstance({}, {})".format(addr, typeinfo)).GetValue() == "true"
def is_string(value):
    # True when the object is a Kotlin String.
    return is_instance_of(lldb_val_to_ptr(value), "theStringTypeInfo")
def is_array(value):
    # True when the debuggee reports the object as an array.
    return int(evaluate("(int)Konan_DebugIsArray({})".format(lldb_val_to_ptr(value))).GetValue()) == 1
def check_type_info(addr):
    """Sanity-check that *addr* points at a plausible object by testing the
    TypeInfo self-reference, including the case where the object's TypeInfo
    points at a meta-object."""
    result = evaluate("**(void ***){0} == ***(void****){0}".format(addr))
    return result.IsValid() and result.GetValue() == "true"
#
# Some kind of forward declaration: filled in by __lldb_init_module with the
# provider constructors, keyed by 'string' / 'array' / 'object'.
__FACTORY = {}
def kotlin_object_type_summary(lldb_val, internal_dict):
    """Hook that is run by lldb to display a Kotlin object."""
    # Raw value string used whenever we cannot pretty-print.
    fallback = lldb_val.GetValue()
    if str(lldb_val.type) != "struct ObjHeader *":
        return fallback
    # Invalid / null references render as the NULL sentinel.
    if not check_type_info(fallback):
        return NULL
    ptr = lldb_val_to_ptr(lldb_val)
    if ptr is None:
        return fallback
    return select_provider(lldb_val).to_string()
def select_provider(lldb_val):
    """Pick the synthetic-provider factory matching the value's runtime kind:
    string, array, or plain object."""
    if is_string(lldb_val):
        return __FACTORY['string'](lldb_val)
    if is_array(lldb_val):
        return __FACTORY['array'](lldb_val)
    return __FACTORY['object'](lldb_val)
class KonanHelperProvider(lldb.SBSyntheticValueProvider):
    """Shared plumbing for the Kotlin synthetic providers: caches the target
    and process, the object-pointer expression, and per-field-type readers.

    NOTE(review): uses the Python 2 ``long`` builtin (lldb's embedded
    Python 2 runtime); these calls would need ``int`` under Python 3.
    """
    def __init__(self, valobj):
        self._target = lldb.debugger.GetSelectedTarget()
        self._process = self._target.GetProcess()
        self._valobj = valobj
        self._ptr = lldb_val_to_ptr(self._valobj)
        # Strings render themselves; no child bookkeeping needed.
        if is_string(valobj):
            return
        self._children_count = 0
        self._children = []
        # Maps Konan_DebugGetFieldType ids to readers producing display values.
        self._type_conversion = {0: lambda address, _: "<invalid>{:#x}".format(address),
                                 1: lambda address, _: kotlin_object_type_summary(evaluate("(*(struct ObjHeader **){:#x})".format(address)), {}),
                                 2: lambda address, error: self.__read_memory(address, "<c", 1, error),
                                 3: lambda address, error: self.__read_memory(address, "<h", 2, error),
                                 4: lambda address, error: self.__read_memory(address, "<i", 4, error),
                                 5: lambda address, error: self.__read_memory(address, "<q", 8, error),
                                 6: lambda address, error: self.__read_memory(address, "<f", 4, error),
                                 7: lambda address, error: self.__read_memory(address, "<d", 8, error),
                                 8: lambda address, _: "(void *){:#x}".format(address),
                                 # TODO: or 1?
                                 9: lambda address, error: self.__read_memory(address, "<?", 4, error)}
        # SBType objects indexed by the same field-type ids as above.
        self._types = [
            valobj.GetType().GetBasicType(lldb.eBasicTypeVoid).GetPointerType(),
            valobj.GetType(),
            valobj.GetType().GetBasicType(lldb.eBasicTypeChar),
            valobj.GetType().GetBasicType(lldb.eBasicTypeShort),
            valobj.GetType().GetBasicType(lldb.eBasicTypeInt),
            valobj.GetType().GetBasicType(lldb.eBasicTypeLongLong),
            valobj.GetType().GetBasicType(lldb.eBasicTypeFloat),
            valobj.GetType().GetBasicType(lldb.eBasicTypeDouble),
            valobj.GetType().GetBasicType(lldb.eBasicTypeVoid).GetPointerType(),
            valobj.GetType().GetBasicType(lldb.eBasicTypeBool)
        ]
    def update(self):
        # Refresh the child count from the debuggee.
        self._children_count = int(evaluate("(int)Konan_DebugGetFieldCount({})".format(self._ptr)).GetValue())
    def _read_string(self, expr, error):
        # Evaluate *expr* to a pointer and read a C string (max 0x1000 bytes).
        return self._process.ReadCStringFromMemory(long(evaluate(expr).GetValue(), 0), 0x1000, error)
    def _read_value(self, index, error):
        # Read field *index* using the reader matching its runtime type id.
        value_type = evaluate("(int)Konan_DebugGetFieldType({}, {})".format(self._ptr, index)).GetValue()
        address = long(evaluate("(void *)Konan_DebugGetFieldAddress({}, {})".format(self._ptr, index)).GetValue(), 0)
        return self._type_conversion[int(value_type)](address, error)
    def __read_memory(self, address, fmt, size, error):
        # Raw read from target memory, unpacked with struct format *fmt*.
        content = self._process.ReadMemory(address, size, error)
        return struct.unpack(fmt, content)[0] if error.Success() else "error: {:#x}".format(address)
    def _read_type(self, index):
        # SBType of field *index*, resolved through the _types table.
        return self._types[int(evaluate("(int)Konan_DebugGetFieldType({}, {})".format(self._ptr, index)).GetValue())]
class KonanStringSyntheticProvider(KonanHelperProvider):
    """Renders a Kotlin String by copying its UTF-8 bytes out of the debuggee
    via the Konan debug buffer; exposes no synthetic children."""
    def __init__(self, valobj):
        super(KonanStringSyntheticProvider, self).__init__(valobj)
        fallback = valobj.GetValue()
        buff_len = evaluate(
            '(int)Konan_DebugObjectToUtf8Array({}, (char *)Konan_DebugBuffer(), (int)Konan_DebugBufferSize());'.format(self._ptr)
        ).unsigned
        if not buff_len:
            # Conversion failed; fall back to the raw value string.
            self._representation = fallback
            return
        buff_addr = evaluate("(char *)Konan_DebugBuffer()").unsigned
        error = lldb.SBError()
        # NOTE(review): `long` is the Python 2 builtin (lldb's embedded py2).
        s = self._process.ReadCStringFromMemory(long(buff_addr), int(buff_len), error)
        self._representation = s if error.Success() else fallback
    def update(self):
        pass
    def num_children(self):
        return 0
    def has_children(self):
        return False
    def get_child_index(self, _):
        return None
    def get_child_at_index(self, _):
        return None
    def to_string(self):
        return self._representation
class KonanObjectSyntheticProvider(KonanHelperProvider):
    """Synthetic children provider for plain Kotlin objects: the children are
    the object's fields, discovered through the Konan_Debug* runtime helpers."""
    def __init__(self, valobj):
        super(KonanObjectSyntheticProvider, self).__init__(valobj)
        self.update()
    def update(self):
        super(KonanObjectSyntheticProvider, self).update()
        error = lldb.SBError()
        # Collect field names; stops adding entries once a read fails.
        self._children = [
            self._read_string("(const char *)Konan_DebugGetFieldName({}, (int){})".format(self._ptr, i), error) for i in
            range(0, self._children_count) if error.Success()]
        return True
    def num_children(self):
        return self._children_count
    def has_children(self):
        return self._children_count > 0
    def get_child_index(self, name):
        # -1 tells lldb there is no child with this name.
        if not name in self._children:
            return -1
        return self._children.index(name)
    def get_child_at_index(self, index):
        if index < 0 or index >= self._children_count:
            return None
        error = lldb.SBError()
        type = self._read_type(index)
        # Fixed: the expression was "(long){})".format(...), which appended an
        # unbalanced ')' to the C expression and broke the evaluation.
        base = evaluate("(long){}".format(self._ptr)).unsigned
        address = evaluate("(long)Konan_DebugGetFieldAddress({}, (int){})".format(self._ptr, index)).unsigned
        # Child is materialized at its byte offset from the object base.
        child = self._valobj.CreateChildAtOffset(self._children[index], address - base, type)
        child.SetSyntheticChildrenGenerated(True)
        return child if error.Success() else None
    # TODO: fix cyclic structures stringification.
    def to_string(self):
        error = lldb.SBError()
        return dict([(self._children[i], self._read_value(i, error)) for i in range(0, self._children_count)])
class KonanArraySyntheticProvider(KonanHelperProvider):
    """Presents a Kotlin array: children are its elements, addressed by index."""
    def __init__(self, valobj):
        super(KonanArraySyntheticProvider, self).__init__(valobj)
        if self._ptr is None:
            return
        valobj.SetSyntheticChildrenGenerated(True)
        self.update()
    def update(self):
        super(KonanArraySyntheticProvider, self).update()
        # Children are simply the element indices.
        self._children = [x for x in range(0, self.num_children())]
        return True
    def num_children(self):
        return self._children_count
    def has_children(self):
        return self._children_count > 0
    def get_child_index(self, name):
        # lldb passes the child "name" (the index) as a string.
        index = int(name)
        return index if (0 <= index < self._children_count) else -1
    def get_child_at_index(self, index):
        if index < 0 or index >= self._children_count:
            return None
        error = lldb.SBError()
        return self._read_value(index, error) if error.Success() else None
    def to_string(self):
        error = lldb.SBError()
        return [self._read_value(i, error) for i in range(0, self._children_count)]
class KonanProxyTypeProvider:
    """Facade registered with lldb; forwards every call to the provider
    matching the object's runtime kind (string / array / object)."""
    def __init__(self, valobj, _):
        fallback = int(valobj.GetValue(), 0)
        # Leave _proxy unset when the pointer fails the TypeInfo sanity check;
        # any later attribute access would then raise AttributeError.
        if not check_type_info(fallback):
            return
        self._proxy = select_provider(valobj)
        self.update()
    def __getattr__(self, item):
        return getattr(self._proxy, item)
def __lldb_init_module(debugger, _):
    # Entry point lldb calls on `command script import`: wire up the provider
    # factories, then register the summary and synthetic providers for
    # "ObjHeader *" under the Kotlin category.
    __FACTORY['object'] = lambda x: KonanObjectSyntheticProvider(x)
    __FACTORY['array'] = lambda x: KonanArraySyntheticProvider(x)
    __FACTORY['string'] = lambda x: KonanStringSyntheticProvider(x)
    debugger.HandleCommand('\
type summary add \
--no-value \
--expand \
--python-function konan_lldb.kotlin_object_type_summary \
"ObjHeader *" \
--category Kotlin\
')
    debugger.HandleCommand('\
type synthetic add \
--python-class konan_lldb.KonanProxyTypeProvider\
"ObjHeader *" \
--category Kotlin\
')
    debugger.HandleCommand('type category enable Kotlin')
|
"""
RGB To Hex Conversion
The rgb function is incomplete. Complete it so that passing in RGB decimal values will result
in a hexadecimal representation being returned. Valid decimal values for RGB are 0 - 255.
Any values that fall out of that range must be rounded to the closest valid value.
Note: Your answer should always be 6 characters long, the shorthand with 3 will not work here.
The following are examples of expected output values:
rgb(255, 255, 255) # returns FFFFFF
rgb(255, 255, 300) # returns FFFFFF
rgb(0,0,0) # returns 000000
rgb(148, 0, 211) # returns 9400D3
"""
def getVal(r):
    """Clamp *r* to [0, 255] and return it as a two-character uppercase hex
    byte, e.g. 5 -> '05', 148 -> '94', 300 -> 'FF'."""
    if r <= 0:
        return "00"
    if r >= 255:
        return "FF"
    # Fixed: hex(r)[-2:] kept the 'x' for r < 16 ('0x5' -> 'x5'), and the old
    # .upper().replace('x', '0') uppercased first, so replace() never matched
    # the now-uppercase 'X' and values below 16 came out as e.g. 'X5'.
    return "{:02X}".format(r)
def rgb(r, g, b):
    """Concatenate the hex bytes of the three channels using getVal()."""
    return "".join(getVal(channel) for channel in (r, g, b))
def getVal(r):
    """Clamp *r* into the valid RGB range [0, 255].

    Fixed: the original line was invalid syntax
    (``return 0 if r <=0 else r if r >=255 255``); the intended
    conditional-expression clamp is restored below.
    NOTE(review): this redefinition shadows the hex-string getVal above and
    returns an int, so rgb() misbehaves while both live in one module --
    confirm which variant should survive.
    """
    return 0 if r <= 0 else 255 if r >= 255 else r
def rgb2(r,g,b):
    """Format the three channels as lowercase two-digit hex (no clamping)."""
    parts = [format(channel, '02x') for channel in (r, g, b)]
    return "".join(parts)
def limit(num):
    """Clamp *num* to the inclusive range [0, 255]."""
    return max(0, min(num, 255))
# def rgb(r, g, b):
#     return "{:02X}{:02X}{:02X}".format(limit(r), limit(g), limit(b))
# Ad-hoc smoke checks; per the docstring the expected outputs are
# 9400D3, FFFFFF and 010203.
print(rgb(148, 0, 211))
print(rgb(255, 255, 255))
#print(rgb(255, 255, 300))
print(rgb(1,2,3))
from datetime import date
from freezegun import freeze_time
from onegov.ballot import ElectionCompound
from onegov.ballot import ElectionCompoundPart
from onegov.ballot import ElectionResult
from onegov.ballot import PartyResult
from onegov.ballot import ProporzElection
from onegov.election_day.layouts import ElectionCompoundPartLayout
from tests.onegov.election_day.common import DummyRequest
from unittest.mock import Mock
def test_election_compound_part_layout_general(session):
    """Exercises ElectionCompoundPartLayout for a 'superregion'/'Region 1'
    part: tab set and titles, result/party-result visibility flags, main view
    selection (including hidden tabs), SVG path/link/name and table links."""
    date_ = date(2011, 1, 1)
    election = ProporzElection(
        title="election",
        domain='region',
        domain_segment='Allschwil',
        domain_supersegment='Region 1',
        date=date_
    )
    session.add(election)
    session.add(ElectionCompound(title="e", domain='canton', date=date_))
    session.flush()
    compound = session.query(ElectionCompound).one()
    part = ElectionCompoundPart(compound, 'superregion', 'Region 1')
    request = DummyRequest()
    layout = ElectionCompoundPartLayout(part, request)
    assert layout.all_tabs == (
        'districts',
        'candidates',
        'party-strengths',
        'statistics',
    )
    assert layout.title() == ''
    assert layout.title('undefined') == ''
    assert layout.title('districts') == '__districts'
    assert layout.title('candidates') == 'Elected candidates'
    assert layout.title('party-strengths') == 'Party strengths'
    assert layout.title('statistics') == 'Election statistics'
    assert layout.main_view == 'ElectionCompoundPart/districts'
    assert layout.majorz is False
    assert layout.proporz is True
    assert layout.has_party_results is False
    assert layout.tab_visible('statistics') is False
    # test results
    compound.elections = [election]
    election.results.append(
        ElectionResult(
            name='1',
            entity_id=1,
            counted=True,
            eligible_voters=500,
        )
    )
    layout = ElectionCompoundPartLayout(part, DummyRequest())
    assert layout.has_results
    # test party results
    compound.party_results.append(
        PartyResult(
            domain='superregion',
            domain_segment='Region 1',
            year=2017,
            number_of_mandates=0,
            votes=10,
            total_votes=100,
            name_translations={'de_CH': 'A'},
            party_id='1'
        )
    )
    layout = ElectionCompoundPartLayout(part, DummyRequest())
    assert layout.has_party_results is True
    # test main view
    layout = ElectionCompoundPartLayout(part, request)
    assert layout.main_view == 'ElectionCompoundPart/districts'
    # show_party_strengths alone does not change the main view ...
    compound.show_party_strengths = True
    layout = ElectionCompoundPartLayout(part, request)
    assert layout.main_view == 'ElectionCompoundPart/districts'
    # ... the horizontal variant does ...
    compound.horizontal_party_strengths = True
    layout = ElectionCompoundPartLayout(part, request)
    assert layout.main_view == 'ElectionCompoundPart/party-strengths'
    # ... unless the principal hides the tab.
    request.app.principal.hidden_tabs = {'elections-part': ['party-strengths']}
    layout = ElectionCompoundPartLayout(part, request)
    assert layout.hide_tab('party-strengths') is True
    assert layout.main_view == 'ElectionCompoundPart/districts'
    # test file paths
    with freeze_time("2014-01-01 12:00"):
        compound = ElectionCompound(
            title="ElectionCompound",
            domain='canton',
            date=date(2011, 1, 1),
        )
        session.add(compound)
        session.flush()
        part = ElectionCompoundPart(compound, 'superregion', 'Region 1')
        # hs: content hash in the svg filename; ts: frozen-time timestamp.
        hs = '2ef359817c8f8a7354e201f891cd7c11a13f4e025aa25239c3ad0cabe58bc49b'
        ts = '1388577600'
        request = DummyRequest()
        request.app.filestorage = Mock()
        layout = ElectionCompoundPartLayout(part, request, 'party-strengths')
        assert layout.svg_path == (
            f'svg/elections-{hs}-region-1.{ts}.party-strengths.de.svg'
        )
        assert layout.svg_link == 'ElectionCompoundPart/party-strengths-svg'
        assert layout.svg_name == (
            'electioncompound-region-1-party-strengths.svg'
        )
    # test table links
    for tab, expected in (
        ('districts', 'ElectionCompoundPart/districts-table'),
        ('candidates', 'ElectionCompoundPart/candidates-table'),
        ('party-strengths', 'ElectionCompoundPart/party-strengths-table'),
        ('statistics', 'ElectionCompoundPart/statistics-table')
    ):
        layout = ElectionCompoundPartLayout(part, DummyRequest(), tab=tab)
        assert not expected or f'{expected}?locale=de' == layout.table_link()
def test_election_compound_part_layout_menu(session):
    """Checks the sidebar menu of ElectionCompoundPartLayout as results,
    party results and finally the party-strengths views become available."""
    election = ProporzElection(
        title="Election",
        domain='region',
        domain_segment='Allschwil',
        domain_supersegment='Region 1',
        date=date(2011, 1, 1)
    )
    compound = ElectionCompound(
        title="Elections",
        domain='canton',
        date=date(2011, 1, 1)
    )
    session.add(election)
    session.add(compound)
    session.flush()
    compound.elections = [election]
    part = ElectionCompoundPart(compound, 'superregion', 'Region 1')
    # No results yet
    request = DummyRequest()
    assert ElectionCompoundPartLayout(part, request).menu == []
    assert ElectionCompoundPartLayout(part, request, 'data').menu == []
    # Results available
    election.results.append(
        ElectionResult(
            name='1',
            entity_id=1,
            counted=True,
            eligible_voters=500,
        )
    )
    # Menu entries are (title, link, active, children) tuples.
    assert ElectionCompoundPartLayout(part, request).menu == [
        ('__districts', 'ElectionCompoundPart/districts', False, []),
        ('Elected candidates', 'ElectionCompoundPart/candidates', False, []),
        ('Election statistics', 'ElectionCompoundPart/statistics', False, []),
    ]
    assert ElectionCompoundPartLayout(part, request, 'statistics').menu == [
        ('__districts', 'ElectionCompoundPart/districts', False, []),
        ('Elected candidates', 'ElectionCompoundPart/candidates', False, []),
        ('Election statistics', 'ElectionCompoundPart/statistics', True, []),
    ]
    # Party results available, but no views enabled
    compound.party_results.append(
        PartyResult(
            domain='superregion',
            domain_segment='Region 1',
            year=2017,
            number_of_mandates=0,
            votes=10,
            total_votes=100,
            name_translations={'de_CH': 'A'},
            party_id='1'
        )
    )
    assert ElectionCompoundPartLayout(part, request).menu == [
        ('__districts', 'ElectionCompoundPart/districts', False, []),
        ('Elected candidates', 'ElectionCompoundPart/candidates', False, []),
        ('Election statistics', 'ElectionCompoundPart/statistics', False, []),
    ]
    # All views enabled
    compound.show_party_strengths = True
    compound.horizontal_party_strengths = True
    assert ElectionCompoundPartLayout(part, request).menu == [
        ('Party strengths', 'ElectionCompoundPart/party-strengths', False, []),
        ('__districts', 'ElectionCompoundPart/districts', False, []),
        ('Elected candidates', 'ElectionCompoundPart/candidates', False, []),
        ('Election statistics', 'ElectionCompoundPart/statistics', False, []),
    ]
|
"""
Reescreva a funรงรฃo leiaint() incluindo agora a possibilidade
da digitaรงรฃo de um nรบmero de tipo invรกlido. Aproveite e crie
uma funรงรฃo leiafloat() com a mesma funcionalidade.
"""
def leiaInt(msg):
    """Keep prompting with *msg* until a valid integer is typed.

    Mirrors leiaFloat(): invalid input re-prompts; Ctrl+C counts as
    "no answer" and yields 0. Returns a message describing the number read.
    """
    while True:
        try:
            n = int(input(msg))
        except (ValueError, TypeError):
            # Fixed: the exercise requires surviving invalid typing, but the
            # original only caught KeyboardInterrupt, so non-numeric input
            # crashed with an uncaught ValueError (leiaFloat already handled
            # this case).
            print(f'\033[31;3mTipo de dado errado, digite apenas nรบmeros inteiros. \033[m')
        except KeyboardInterrupt:
            print(f'\033[31;3mO usuรกrio preferiu nรฃo informar o nรบmero. \033[m')
            n = 0
            result = f'O nรบmero inteiro digitado foi {n}'
            break
        else:
            result = f'O nรบmero inteiro digitado foi {n}'
            break
    return result
def leiaFloat(msg):
    """Keep prompting with *msg* until a valid float is typed.

    Invalid input re-prompts; Ctrl+C counts as "no answer" and yields 0.
    Returns a message describing the number read.
    """
    while True:
        try:
            n = float(input(msg))
        except (ValueError, TypeError):
            # Fixed: the message said "inteiros" (integers) although this
            # reader accepts real numbers.
            print(f'\033[31;3mTipo de dado errado, digite apenas nรบmeros reais. \033[m')
        except KeyboardInterrupt:
            print(f'\033[31;3mO usuรกrio preferiu nรฃo informar o nรบmero. \033[m')
            n = 0
            result = f'O nรบmero real digitado foi {n}'
            break
        else:
            result = f'O nรบmero real digitado foi {n}'
            break
    return result
# Demo: read one int and one float from the user and echo the result messages.
i = leiaInt('Digite um nรบmero inteiro: ')
f = leiaFloat('Digite um nรบmero real: ')
print(i)
print(f)
|
import tensorflow as tf
from tensorflow import keras
from net import PeTraNet
from dataset_processing import parse_sample, preprocess_dataset
from show_result import show_result
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
# Cap the visible GPU at 1 GiB so the rest of the card stays usable.
tf.config.experimental.set_virtual_device_configuration(
    gpu_devices[0],
    [tf.config.experimental.VirtualDeviceConfiguration(
        memory_limit=1 * 1024)])
# Configuration.
# Whether to generate correct dataset for this training from
# author's train_global_labels.npy and train_global_points.npy files.
# This may take some time D:
# But speeds up training a lot
# and frees a lot of memory from GPU.
preprocess_dataset_flag = False
# Training parameters.
epochs = 5000
train_samples_per_epoch = 64
batch_size = 1
#lr = 0.1
# Learning rate decays polynomially from 0.05 over 30000 steps.
lr = tf.keras.optimizers.schedules.PolynomialDecay(0.05, 30000)
# Preprocess dataset.
# It must be processed as three whole arrays don't fit
# into my GPU's memory ._.
if preprocess_dataset_flag:
    preprocess_dataset()
# One .npy file per sample; parse_sample runs eagerly via py_function and
# yields (input, label, weight) float32 tensors.
train_dataset = tf.data.Dataset.list_files("./dataset/inputs/*.npy", shuffle=True)
train_dataset = train_dataset.map(
    lambda x: tf.py_function(
        parse_sample, [x], (tf.float32, tf.float32, tf.float32))
    ).batch(batch_size).prefetch(tf.data.AUTOTUNE)
# Optimizer.
# SGD with momentum as described in unet's paper.
optimizer = keras.optimizers.SGD(learning_rate=lr, momentum=0.99)
# Loss function.
@tf.function(jit_compile=True)
def loss_fcn(labels, preds, weights):
    # @brief Modified function from unet's paper.
    #        Replaces softmax with sigmoid and sum with mean for stability.
    # @param labels True labels.
    # @param preds Predictions (raw logits -- the sigmoid is applied inside
    #        sigmoid_cross_entropy_with_logits).
    # @param weights Weights tensors, multiplied element-wise into the loss.
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels, preds)
    loss = tf.multiply(weights, loss)
    loss = tf.reduce_mean(loss)
    return loss
# Network model.
net = PeTraNet()
# Restore training from checkpoint if possible
# (step counter, model weights and optimizer state are all checkpointed).
ckpt = tf.train.Checkpoint(step=tf.Variable(1), net=net, optimizer=optimizer)
manager = tf.train.CheckpointManager(ckpt, "./.tf_ckpts", max_to_keep=100)
path = manager.restore_or_initialize()
if path:
    print("Restored checkpoint from %s" % path)
else:
    print("Initializing training from scratch.")
@tf.function
def train_step(input_imgs, real_outputs, weights):
    # @brief Perform single training step.
    # @param input_imgs Batch of inputs.
    # @param real_outputs Batch of labels.
    # @param weights Batch of weights.
    # @return Predictions and loss function's value.
    with tf.GradientTape() as tape:
        # Second argument is the training flag -- presumably toggles
        # train-time behaviour inside PeTraNet (confirm in net.py).
        preds = net(input_imgs, True)
        loss = loss_fcn(real_outputs, preds, weights)
    grads = tape.gradient(loss, net.trainable_weights)
    optimizer.apply_gradients(zip(grads, net.trainable_weights))
    return preds, loss
train_batches_per_epoch = train_samples_per_epoch / batch_size
# Perform training.
# Fixed: the loop previously called next(iter(train_dataset)) for every batch,
# which built a brand-new dataset iterator each step -- restarting the input
# pipeline and defeating .prefetch(). A single repeating iterator keeps the
# shuffled streaming behaviour without ever exhausting.
train_iterator = iter(train_dataset.repeat())
for _ in range(epochs):
    for __ in range(int(train_batches_per_epoch)):
        input_imgs, output_imgs, weights = next(train_iterator)
        preds, loss = train_step(input_imgs, output_imgs, weights)
    # Save checkpoint and show some results.
    ckpt.step.assign_add(1)
    if int(ckpt.step) % 100 == 0:
        path = manager.save()
        print("Checkpoint saved: %s." % path)
        # Visualize the last batch of this epoch.
        show_result(input_imgs[0], output_imgs[0], preds[0], weights[0])
        print("Loss function on training batch: %f." % float(loss))
|
#!/usr/bin/env python
'''
Copyright (c) 2019, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
'''
import argparse
from regionfinder import Config, TSDBClient, HBaseUIClient, RegionFinderError
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Outputs CSV-formatted region servers and names of all matching timeseries (delimited by the | character )')
    parser.add_argument("expression", type=str,
                        help="The TSDB metric name")
    parser.add_argument("-t", "--time", default='1h-ago',
                        help="TSDB relative time-string or absolute epoch time to use in rowkey.\nSee http://opentsdb.net/docs/build/html/user_guide/query/dates.html")
    parser.add_argument("-c", "--config", default='',
                        help="Path to config yaml")
    args = parser.parse_args()
    config = Config(args.config)
    try:
        # Resolve the expression to HBase rowkeys, then map each rowkey to
        # its hosting region server via the HBase UI.
        tsdb_client = TSDBClient(config.tsdb_url, config.tsdb_metric_width, config.tsdb_salt_width)
        rowkeys = tsdb_client.get_rowkeys_of(args.expression, args.time)
        hbase_ui_client = HBaseUIClient(config.hbase_url, config.hbase_table_name, config.cache_dir, autorefresh=False)
        # Set de-duplicates (server, region) pairs across rowkeys.
        rs_infos = set()
        for rowkey in rowkeys:
            rs_infos.add(hbase_ui_client.get_rs_of_rowkey(rowkey))
        print('RegionServer|RegionName')
        for rs_info in rs_infos:
            print('|'.join(rs_info))
    except RegionFinderError as err:
        # Domain errors are reported, not re-raised, so the CLI exits cleanly.
        print(str(err))
|
import base64
class BasicAuth(object):
    """
    Adds a Basic authentication header to each request.
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def __call__(self, request):
        # RFC 2617 defines the credentials as *TEXT (RFC 2616), which may
        # carry non-ISO-8859-1 characters only when MIME-encoded; we keep
        # life simple and assume plain latin-1 usernames and passwords.
        credentials = '{0}:{1}'.format(self.username, self.password)
        token = base64.b64encode(credentials.encode('latin-1')).decode('latin-1')
        request.headers['Authorization'] = 'Basic {0}'.format(token)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2017-12-04 11:18:57
# Project: spiderForcnbeta
from pyspider.libs.base_handler import *
from pyspider.libs.result_mysql import insert_result
import time
import datetime
# Script-load time snapshot, also as epoch milliseconds.
# NOTE(review): neither t nor mt is referenced elsewhere in this module.
t= time.time()
mt=int(round(t*1000))
from selenium import webdriver
class Handler(BaseHandler):
    # pyspider crawler for cnbeta.com topic pages. Python 2 codebase (note
    # the print statement in on_start); renders the JS-driven listing with
    # PhantomJS, then crawls each linked article.
    crawl_config = {
        'Host': 'www.cnbeta.com',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Referer': 'http://www.cnbeta.com/topics.htm',
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0'
    }
    @every(minutes=24 * 60)
    def on_start(self):
        # Render the topic page and scroll five times so lazily-loaded
        # entries appear before the links are collected.
        driver = webdriver.PhantomJS()
        driver.set_page_load_timeout(10)
        try:
            driver.get('http://www.cnbeta.com/topics/157.htm')
        except:
            print "error!"
        for i in range(5):
            driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            time.sleep(1)
        urls=driver.find_elements_by_css_selector("dt > a")
        for url in urls:
            self.crawl(url.get_attribute("href"), callback=self.detail_page)
    @config(priority=2)
    def detail_page(self, response):
        # Extract the article fields and forward them as a message keyed
        # by the article URL; on_message persists them.
        self.send_message(self.project_name, {
            "news_url": response.url,
            "title": response.doc('.title > h1').text(),
            "abstraction": response.doc('.article-summary > p').text(),
            "publish_time": response.doc('.meta > span').text()[0:17],
            "source":"www.cnbeta.com"
        }, url="%s" % (response.url))
    def on_message(self, project, msg):
        # Persist each message into MySQL; skip empty or untitled ones.
        if not msg or not msg['title']:
            return
        insert_result(self,"SpiderForIS","SpiderForCnbeta",msg)
|
from fastapi import APIRouter
# Router for movie-related endpoints; include it in the app to serve them
# under the /movie prefix.
router = APIRouter(
    prefix='/movie',  # every route registered here lives under /movie
    tags=['Movie'],   # OpenAPI documentation grouping
    responses={404: {'description': 'Not found'}}  # default 404 response schema
)
# @router.get('/')
|
"""
This script contains a function to plot a specific time period of ACE data.
A vertical marker, labels the arrival time, can be plotted on this data
"""
# Import libraries
import HUXt as H
import tables
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import moviepy.editor as mpy
from moviepy.video.io.bindings import mplfig_to_npimage
import numpy as np
import pandas as pd
import os
from astropy.time import Time, TimeDelta
from astropy.visualization import time_support
import scipy.stats as st
from scipy.interpolate import interp1d
import glob
import math
import sunpy.coordinates.sun as sn
import h5py
import ensemble as ens
import matplotlib.ticker as ticker
from datetime import datetime, timedelta
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import matplotlib.lines as mlines
import Analysis2 as ana
import cmath
def plotACEinsitudata(arrivaltime = "2008-12-16 07:00:00", save=False, saveformat='pdf'):
    """
    Plot the in-situ data from ACE for the December 12th CME. We have downloaded data from //cdaweb.gsfc.nasa.gov and used the following instruments:
    - AC_H2_MFI for the magnetic field in GSE coordinates at 1-hour intervals
    - AC_H6_SWI for the proton density and speed at 12-minute intervals

    Five stacked panels are produced (speed, proton density, Bx, By, Bz)
    with a red vertical line on each marking the CME arrival.

    Args:
        arrivaltime: ISO timestamp of the CME arrival (drawn as the red line).
        save: if True, save the figure into the HUXt figures directory.
        saveformat: file extension handed to ``fig.savefig`` (e.g. 'pdf', 'png').
    """
    # We load the .csv data in panda dataframes for easy data access
    # NOTE(review): the CSV files are expected in the current working
    # directory — confirm against how this script is launched.
    # Load in ACE MAGNETIC observation
    df_aceMAG = pd.read_csv(r"AC_H1_MFI_91226.csv")
    # Rename the CDAWeb export columns to short, plot-friendly names.
    df_aceMAG = df_aceMAG.rename(columns={'EPOCH__yyyy-mm-ddThh:mm:ss.sssZ' : 'time',
                                    'BX_GSE_(@_x_component_)_nT':'Bx (nT)',
                                   'BY_GSE_(@_y_component_)_nT':'By (nT)',
                                   'BZ_GSE_(@_z_component_)_nT':'Bz (nT)'})
    df_aceMAG['time'] = pd.to_datetime(df_aceMAG['time'])
    # Load in ACE PROTON DENSITY observation
    df_aceDEN = pd.read_csv(r"AC_H6_SWI_226960.csv")
    df_aceDEN = df_aceDEN.rename(columns={'EPOCH_yyyy-mm-ddThh:mm:ss.sssZ' : 'time',
                                   'PROTON_DENSITY_#/cm^3':'proton density (/cm3)'})
    df_aceDEN['time'] = pd.to_datetime(df_aceDEN['time'])
    # Load in ACE SOLAR WIND SPEED observation
    df_aceSWS = pd.read_csv(r"AC_H6_SWI_91226.csv")
    df_aceSWS = df_aceSWS.rename(columns={'EPOCH_yyyy-mm-ddThh:mm:ss.sssZ' : 'time',
                                   'PROTON_SPEED_km/s':'Speed (km/s)'})
    df_aceSWS['time'] = pd.to_datetime(df_aceSWS['time'])
    # Define the arrival time of the CME - this will be plotted as a red line later in the script
    arrival = Time(arrivaltime, format='iso').datetime
    # Setup figure
    plt.rcParams.update({'font.size': 22, 'axes.labelsize':14, 'legend.fontsize':16,'xtick.labelsize': 12.0,'ytick.labelsize': 12.0,"font.family":"Times New Roman"})
    fig, axs = plt.subplots(5, 1, sharex=True, sharey=False, figsize=(8.27, 11.69)) # Paper size is equal to A4 portrait
    # Panel 0: solar wind speed
    axs[0].set_ylabel("Speed\n (Km s$^{-1}$)")
    axs[0].plot(df_aceSWS["time"],df_aceSWS["Speed (km/s)"], 'k', lw=0.5)
    axs[0].set_ylim(bottom=300, top=450)
    axs[0].yaxis.set_major_locator(MultipleLocator(50))
    axs[0].yaxis.set_minor_locator(MultipleLocator(10))
    # Panel 1: proton density
    axs[1].set_ylabel("Proton density\n (cm$^{-3}$)")
    axs[1].plot(df_aceDEN["time"],df_aceDEN["proton density (/cm3)"], 'k', lw=0.5)
    axs[1].set_ylim(bottom=0, top=25)
    axs[1].yaxis.set_major_locator(MultipleLocator(5))
    axs[1].yaxis.set_minor_locator(MultipleLocator(1))
    # Panels 2-4: the three GSE magnetic field components
    axs[2].set_ylabel("Magnetic Field,\n Bx (nT)")
    axs[2].plot(df_aceMAG["time"],df_aceMAG["Bx (nT)"], 'k', lw=0.5)
    axs[2].set_ylim(bottom=-10.0, top=10)
    axs[2].yaxis.set_major_locator(MultipleLocator(5))
    axs[2].yaxis.set_minor_locator(MultipleLocator(1))
    axs[3].set_ylabel("Magnetic Field,\n By (nT)")
    axs[3].plot(df_aceMAG["time"],df_aceMAG["By (nT)"], 'k', lw=0.5)
    axs[3].set_ylim(bottom=-10, top=10)
    axs[3].yaxis.set_major_locator(MultipleLocator(5))
    axs[3].yaxis.set_minor_locator(MultipleLocator(1))
    axs[4].set_ylabel("Magnetic Field,\n Bz (nT)")
    axs[4].plot(df_aceMAG["time"],df_aceMAG["Bz (nT)"], 'k', lw=0.5)
    axs[4].set_ylim(bottom=-10, top=10)
    axs[4].yaxis.set_major_locator(MultipleLocator(5))
    axs[4].yaxis.set_minor_locator(MultipleLocator(1))
    # Shared time axis configured on the bottom panel only (sharex=True).
    axs[4].set_xlabel("Time")
    axs[4].set_xlim(left= df_aceMAG.time.min() , right= df_aceDEN.time.max())
    axs[4].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M\n%m/%d'))
    axs[4].xaxis.set_minor_locator(mdates.HourLocator(interval=3))
    # Red vertical marker for the CME arrival on every panel.
    axs[0].axvline(x=arrival, ymin=-10, ymax=10,color='r', lw=0.75)
    axs[1].axvline(x=arrival, ymin=-10, ymax=10,color='r', lw=0.75)
    axs[2].axvline(x=arrival, ymin=-10, ymax=10,color='r', lw=0.75)
    axs[3].axvline(x=arrival, ymin=-10, ymax=10,color='r', lw=0.75)
    axs[4].axvline(x=arrival, ymin=-10, ymax=10,color='r', lw=0.75)
    # axs[0].set_title('a)', loc='left', fontsize=14)
    # axs[1].set_title('b)', loc='left', fontsize=14)
    # axs[2].set_title('c)', loc='left', fontsize=14)
    # axs[3].set_title('d)', loc='left', fontsize=14)
    # axs[4].set_title('e)', loc='left', fontsize=14)
    # Panel letters for the paper figure.
    axs[0].annotate("a)", xy=(0.01, 0.85), xycoords="axes fraction", fontsize=14)
    axs[1].annotate("b)", xy=(0.01, 0.85), xycoords="axes fraction", fontsize=14)
    axs[2].annotate("c)", xy=(0.01, 0.85), xycoords="axes fraction", fontsize=14)
    axs[3].annotate("d)", xy=(0.01, 0.85), xycoords="axes fraction", fontsize=14)
    axs[4].annotate("e)", xy=(0.01, 0.85), xycoords="axes fraction", fontsize=14)
    plt.show()
    if save:
        # Save into the HUXt project's figure directory.
        project_dirs = H._setup_dirs_()
        filename = "12Dec08CME_ACEobservations_plot.{}".format(saveformat)
        filepath = os.path.join(project_dirs['HUXt_figures'], filename)
        fig.savefig(filepath, dpi=300, bbox_inches='tight')
|
import os
import sys

# Build an HTML gallery page from a scraped listing file, skipping entries
# that are already sorted into the local collection directory.
with open('/Users/duntex/Wooooops/maria01.1.html') as f:
    lines = f.readlines()
print("""
<html>
<body>
<table>
""")
lst = []
for line in lines:
    # Each input line looks like: ...<img src=PIC><br><a href=LINK">TITLE/</a>...
    pic_url = line.split("><br><a")[0].split("<img src=")[1]
    tmp = line.split("<a href=")[1]
    rapid_link = tmp.split("\">")[0] + "\""
    title = tmp.split("\">")[1].split("/</a>")[0]
    prod_id = title.split(' ')[1].split(' ')[0]
    d = {'id': prod_id, 'title': title[1:], 'img_url': pic_url, 'rapid_url': rapid_link}
    lst.append(d)
# BUG FIX: sorting a list of dicts raises TypeError on Python 3 (dicts are
# unorderable); sort by the product id instead.
lst.sort(key=lambda entry: entry['id'])
count = 0
row_size = 6
for ele in lst:
    path = '/Users/duntex/acd/Sorted/{}/{}/{}'.format(ele['id'][0], ele['id'].split('-')[0], ele['id'])
    # Skip items already present in the sorted collection.  Note: count is
    # deliberately not advanced for skipped items so rows stay full.
    if os.path.exists(path):
        continue
    if count % row_size == 0:
        print("<tr>")
    content = '<a target="_blank" href="https://www.javbus.com/{}"><img src={} title="{}"/></a><br><a href={}>{}</a>'.format(ele['id'], ele['img_url'], ele['title'], ele['rapid_url'], ele['id'])
    print('<th>{}</th>'.format(content))
    if count % row_size == row_size - 1:
        print("</tr>")
    count += 1
print("""
</table>
</body>
</html>
""")
|
# -*- coding: utf-8 -*-
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models
import odoo.addons.decimal_precision as dp
from odoo.tools.safe_eval import safe_eval
class DiscountContractLine(models.Model):
    """Extends discount.contract.line with a *theoretical* (forecast)
    discount, computed from a formula stored on the contract's forecast
    method."""
    _inherit = 'discount.contract.line'
    # Forecast base amount; precision comes from the 'Discount base value'
    # decimal-precision record.
    theoretical_base = fields.Float(readonly=True, digits=dp.get_precision(
        'Discount base value'))
    # Forecast discount amount in the contract currency.
    theoretical_discount = fields.Monetary(readonly=True)
    @api.multi
    def _get_eval_context(self):
        """Names made available to the safe_eval'd forecast formula."""
        return {
            'self': self,
            'fields': fields,
            'relativedelta': relativedelta,
        }
    @api.one
    def _compute_theoretical_base(self):
        """Evaluate the contract's forecast formula (0.0 when unset)."""
        formula = self.contract_id.forecast_method_id.formula
        if formula:
            eval_context = self._get_eval_context()
            # safe_eval keeps user-entered formulas sandboxed.
            self.theoretical_base = safe_eval(formula, eval_context)
        else:
            self.theoretical_base = 0.0
    @api.one
    def _compute_theoretical_discount(self):
        """Apply the line's discount rule to the theoretical base."""
        self.theoretical_discount = self.rule_id.compute_discount_amount(
            self.theoretical_base)
    @api.one
    def _update_contract_line(self):
        """Refresh the theoretical amounts whenever the line is updated."""
        super(DiscountContractLine, self)._update_contract_line()
        self._compute_theoretical_base()
        self._compute_theoretical_discount()
    @api.multi
    def _get_period_dates(self, in_previous_period=False):
        """Period boundaries, optionally overridden via context keys
        ``force_date_start`` / ``force_date_stop`` (used by forecasts)."""
        date_start, date_stop = super(DiscountContractLine,
                                      self)._get_period_dates(
            in_previous_period)
        if self._context.get('force_date_start'):
            date_start = self._context['force_date_start']
        if self._context.get('force_date_stop'):
            date_stop = self._context['force_date_stop']
        return date_start, date_stop
|
import unittest
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../src')
from board import Board
from pieces.bishop import Bishop
from pieces.king import King
from pieces.knight import Knight
from pieces.pawn import Pawn
from pieces.queen import Queen
from pieces.rook import Rook
# Tests are performed on empty boards
# TODO: add castling
class PieceTest(unittest.TestCase):
    """Unit tests for each piece's move generation on an empty board.

    Expected moves are (row, col, 'mov') triples; comparisons use
    assertCountEqual so ordering of generated moves does not matter.
    """
    def test_bishop_get_possible_moves(self):
        """Bishop: full diagonals from corners and a centre square."""
        matrix = PieceTest.initialize_empty_grid()
        bishop = Bishop('white', 'white_bishop_1')
        positions = [(0,0), (0,7), (3,4), (7,0), (7,7)]
        expectations = {
            (0,0): [(i,i,'mov') for i in range(1,8)],
            (0,7): [(i,7-i,'mov') for i in range(1,8)],
            (3,4): [(2, 3, 'mov'), (1, 2, 'mov'), (0, 1, 'mov'), (2, 5, 'mov'), (1, 6, 'mov'), (0, 7, 'mov'), (4, 3, 'mov'), (5, 2, 'mov'), (6, 1, 'mov'), (7, 0, 'mov'), (4, 5, 'mov'), (5, 6, 'mov'), (6, 7, 'mov')],
            (7,0): [(i,7-i,'mov') for i in range(0,7)],
            (7,7): [(i,i,'mov') for i in range(0,7)],
        }
        for pos in positions:
            possible_moves = bishop.get_possible_moves(pos, matrix)
            self.assertCountEqual(possible_moves, expectations[pos])
    def test_king_get_possible_moves(self):
        """King: one step in every direction, clipped at board edges."""
        matrix = PieceTest.initialize_empty_grid()
        king = King('white', 'white_king')
        positions = [(0,0), (0,7), (3,4), (7,0), (7,7)]
        expectations = {
            (0,0): [(1, 1, 'mov'), (1, 0, 'mov'), (0, 1, 'mov')],
            (0,7): [(1, 6, 'mov'), (1, 7, 'mov'), (0, 6, 'mov')],
            (3,4): [(2, 5, 'mov'), (2, 3, 'mov'), (4, 5, 'mov'), (4, 3, 'mov'), (2, 4, 'mov'), (4, 4, 'mov'), (3, 5, 'mov'), (3, 3, 'mov')],
            (7,0): [(6, 1, 'mov'), (6, 0, 'mov'), (7, 1, 'mov')],
            (7,7): [(6, 6, 'mov'), (6, 7, 'mov'), (7, 6, 'mov')]
        }
        for pos in positions:
            # set() dedupes: the king generator may yield duplicates.
            possible_moves = list(set(king.get_possible_moves(pos, matrix)))
            self.assertCountEqual(possible_moves, expectations[pos])
    def test_knight_get_possible_moves(self):
        """Knight: L-shaped jumps, clipped at board edges."""
        matrix = PieceTest.initialize_empty_grid()
        knight = Knight('white', 'white_knight_1')
        positions = [(0,0), (0,7), (3,4), (7,0), (7,7)]
        expectations = {
            (0,0): [(2, 1, 'mov'), (1, 2, 'mov')],
            (0,7): [(2, 6, 'mov'), (1, 5, 'mov')],
            (3,4): [(1, 3, 'mov'), (2, 2, 'mov'), (1, 5, 'mov'), (2, 6, 'mov'), (5, 3, 'mov'), (4, 2, 'mov'), (5, 5, 'mov'), (4, 6, 'mov')],
            (7,0): [(5, 1, 'mov'), (6, 2, 'mov')],
            (7,7): [(5, 6, 'mov'), (6, 5, 'mov')]
        }
        for pos in positions:
            possible_moves = knight.get_possible_moves(pos, matrix)
            self.assertCountEqual(possible_moves, expectations[pos])
    def test_pawn_get_possible_moves(self):
        """Black pawn: double step from the home row, single step after."""
        matrix = PieceTest.initialize_empty_grid()
        pawn = Pawn('black', 'black_pawn_1')
        positions = [(1,0), (1,7), (6,0), (6,7)]
        expectations = {
            (1, 0): [(2, 0, 'mov'), (3, 0, 'mov')],
            (1, 7): [(2, 7, 'mov'), (3, 7, 'mov')],
            (6, 0): [(7, 0, 'mov')],
            (6, 7): [(7, 7, 'mov')]
        }
        for pos in positions:
            possible_moves = pawn.get_possible_moves(pos, matrix)
            self.assertCountEqual(list(set(possible_moves)), expectations[pos])
    def test_queen_get_possible_moves(self):
        """Queen: union of rook and bishop rays."""
        matrix = PieceTest.initialize_empty_grid()
        queen = Queen('black', 'black_queen')
        positions = [(0,0), (0,7), (3,4), (7,0), (7,7)]
        expectations = {
            (0,0): [(0, 1, 'mov'), (0, 2, 'mov'), (0, 3, 'mov'), (0, 4, 'mov'), (0, 5, 'mov'), (0, 6, 'mov'), (0, 7, 'mov'), (1, 0, 'mov'), (2, 0, 'mov'), (3, 0, 'mov'), (4, 0, 'mov'), (5, 0, 'mov'), (6, 0, 'mov'), (7, 0, 'mov'), (1, 1, 'mov'), (2, 2, 'mov'), (3, 3, 'mov'), (4, 4, 'mov'), (5, 5, 'mov'), (6, 6, 'mov'), (7, 7, 'mov')],
            (0,7): [(0, 6, 'mov'), (0, 5, 'mov'), (0, 4, 'mov'), (0, 3, 'mov'), (0, 2, 'mov'), (0, 1, 'mov'), (0, 0, 'mov'), (1, 7, 'mov'), (2, 7, 'mov'), (3, 7, 'mov'), (4, 7, 'mov'), (5, 7, 'mov'), (6, 7, 'mov'), (7, 7, 'mov'), (1, 6, 'mov'), (2, 5, 'mov'), (3, 4, 'mov'), (4, 3, 'mov'), (5, 2, 'mov'), (6, 1, 'mov'), (7, 0, 'mov')],
            (3,4): [(3, 3, 'mov'), (3, 2, 'mov'), (3, 1, 'mov'), (3, 0, 'mov'), (3, 5, 'mov'), (3, 6, 'mov'), (3, 7, 'mov'), (2, 4, 'mov'), (1, 4, 'mov'), (0, 4, 'mov'), (4, 4, 'mov'), (5, 4, 'mov'), (6, 4, 'mov'), (7, 4, 'mov'), (2, 3, 'mov'), (1, 2, 'mov'), (0, 1, 'mov'), (2, 5, 'mov'), (1, 6, 'mov'), (0, 7, 'mov'), (4, 3, 'mov'), (5, 2, 'mov'), (6, 1, 'mov'), (7, 0, 'mov'), (4, 5, 'mov'), (5, 6, 'mov'), (6, 7, 'mov')],
            (7,0): [(7, 1, 'mov'), (7, 2, 'mov'), (7, 3, 'mov'), (7, 4, 'mov'), (7, 5, 'mov'), (7, 6, 'mov'), (7, 7, 'mov'), (6, 0, 'mov'), (5, 0, 'mov'), (4, 0, 'mov'), (3, 0, 'mov'), (2, 0, 'mov'), (1, 0, 'mov'), (0, 0, 'mov'), (6, 1, 'mov'), (5, 2, 'mov'), (4, 3, 'mov'), (3, 4, 'mov'), (2, 5, 'mov'), (1, 6, 'mov'), (0, 7, 'mov')],
            (7,7): [(7, 6, 'mov'), (7, 5, 'mov'), (7, 4, 'mov'), (7, 3, 'mov'), (7, 2, 'mov'), (7, 1, 'mov'), (7, 0, 'mov'), (6, 7, 'mov'), (5, 7, 'mov'), (4, 7, 'mov'), (3, 7, 'mov'), (2, 7, 'mov'), (1, 7, 'mov'), (0, 7, 'mov'), (6, 6, 'mov'), (5, 5, 'mov'), (4, 4, 'mov'), (3, 3, 'mov'), (2, 2, 'mov'), (1, 1, 'mov'), (0, 0, 'mov')]
        }
        for pos in positions:
            possible_moves = queen.get_possible_moves(pos, matrix)
            self.assertCountEqual(possible_moves, expectations[pos])
    def test_rook_get_possible_moves(self):
        """Rook: full ranks and files from corners and a centre square."""
        matrix = PieceTest.initialize_empty_grid()
        rook = Rook('black', 'black_rook_1')
        positions = [(0,0), (0,7), (3,4), (7,0), (7,7)]
        expectations = {
            (0,0): [(0, 1, 'mov'), (0, 2, 'mov'), (0, 3, 'mov'), (0, 4, 'mov'), (0, 5, 'mov'), (0, 6, 'mov'), (0, 7, 'mov'), (1, 0, 'mov'), (2, 0, 'mov'), (3, 0, 'mov'), (4, 0, 'mov'), (5, 0, 'mov'), (6, 0, 'mov'), (7, 0, 'mov')],
            (0,7): [(0, 6, 'mov'), (0, 5, 'mov'), (0, 4, 'mov'), (0, 3, 'mov'), (0, 2, 'mov'), (0, 1, 'mov'), (0, 0, 'mov'), (1, 7, 'mov'), (2, 7, 'mov'), (3, 7, 'mov'), (4, 7, 'mov'), (5, 7, 'mov'), (6, 7, 'mov'), (7, 7, 'mov')],
            (3,4): [(3, 3, 'mov'), (3, 2, 'mov'), (3, 1, 'mov'), (3, 0, 'mov'), (3, 5, 'mov'), (3, 6, 'mov'), (3, 7, 'mov'), (2, 4, 'mov'), (1, 4, 'mov'), (0, 4, 'mov'), (4, 4, 'mov'), (5, 4, 'mov'), (6, 4, 'mov'), (7, 4, 'mov')],
            (7,0): [(7, 1, 'mov'), (7, 2, 'mov'), (7, 3, 'mov'), (7, 4, 'mov'), (7, 5, 'mov'), (7, 6, 'mov'), (7, 7, 'mov'), (6, 0, 'mov'), (5, 0, 'mov'), (4, 0, 'mov'), (3, 0, 'mov'), (2, 0, 'mov'), (1, 0, 'mov'), (0, 0, 'mov')],
            (7,7): [(7, 6, 'mov'), (7, 5, 'mov'), (7, 4, 'mov'), (7, 3, 'mov'), (7, 2, 'mov'), (7, 1, 'mov'), (7, 0, 'mov'), (6, 7, 'mov'), (5, 7, 'mov'), (4, 7, 'mov'), (3, 7, 'mov'), (2, 7, 'mov'), (1, 7, 'mov'), (0, 7, 'mov')]
        }
        for pos in positions:
            possible_moves = rook.get_possible_moves(pos, matrix)
            self.assertCountEqual(possible_moves, expectations[pos])
    @classmethod
    def initialize_empty_grid(self):
        """Return an 8x8 board mapping (row, col) -> empty-square info.

        NOTE(review): this is a classmethod, so the first parameter is
        conventionally named ``cls`` rather than ``self``.
        """
        squares = {}
        for i in range(8):
            for j in range(8):
                square_info = {'piece': None, 'coord':(i, j), 'selected':None, 'gamerule':None}
                squares[(i,j)] = square_info
        return squares
from sqlalchemy import Column, Integer, String, ForeignKey, Table, Boolean, BigInteger
from sqlalchemy.exc import ProgrammingError, IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from .custom_logger import define_logger
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import OperationalError
from sqlalchemy import create_engine
import time
import sys
import os
import re
# Declarative base shared by every ORM model in this module.
Base = declarative_base()
# Module-wide logger, configured by the project's custom_logger helper.
logger = define_logger("DB_Orm")
class User(Base):
    """ORM model holding a snapshot of a Twitter user's profile."""
    __tablename__ = "user"
    user_id = Column(BigInteger, primary_key=True)  # Twitter's numeric user id
    user_name = Column(String)    # display name
    screen_name = Column(String)  # @handle
    user_location = Column(String)
    description = Column(String)
    user_url = Column(String)
    # NOTE(review): the *_count fields are stored as String even though the
    # values look numeric — confirm before changing the schema.
    followers_count = Column(String)
    friends_count = Column(String)
    listed_count = Column(String)
    created_at = Column(BigInteger)
    verified = Column(Boolean)
    statuses_count = Column(String)
    user_lang = Column(String)
    timestamp = Column(BigInteger)  # when this snapshot was captured
    def __repr__(self):
        return f"User: {self.user_id}: {self.screen_name} as {self.user_name}"
class Tweet(Base):
    """ORM model for a stored tweet; most optional Twitter API fields are
    kept as String columns (serialized as-is by the ingest code)."""
    __tablename__ = 'tweet'
    tweet_id = Column(BigInteger, primary_key=True)
    timestamp = Column(BigInteger)  # when the tweet was captured
    contributors = Column(String)
    coordinates = Column(String)
    created_at = Column(BigInteger)
    current_user_retweet = Column(Integer)
    favorite_count = Column(Integer)
    favorited = Column(String)
    full_text = Column(String)  # untruncated tweet text
    geo = Column(String)
    hashtags = Column(String)
    in_reply_to_status_id = Column(String)
    in_reply_to_user_id = Column(String)
    lang = Column(String)
    location = Column(String)
    media = Column(String)
    place = Column(String)
    possibly_sensitive = Column(String)
    quoted_status_id = Column(String)
    retweet_count = Column(Integer)
    retweeted = Column(String)
    retweeted_status_id = Column(String)
    scopes = Column(String)
    source_status_id = Column(String)
    truncated = Column(String)
    urls = Column(String)
    # Author of the tweet; links to the user table.
    user_id = Column(BigInteger, ForeignKey('user.user_id'))
    user_mentions = Column(String)
    withheld_copyright = Column(String)
    withheld_in_countries = Column(String)
    withheld_scope = Column(String)
    tweet_mode = Column(String)
    def __repr__(self):
        return f"{self.tweet_id}: ".ljust(20) + f"{self.full_text}"
def get_engine():
    """Create the PostgreSQL engine used by this module.

    Connection settings keep the historical hard-coded values as defaults
    but can now be overridden through environment variables, so real
    credentials need not live in source control.

    Returns:
        A SQLAlchemy Engine bound to the configured PostgreSQL server.
    """
    # NOTE(security): the defaults below are development-only credentials;
    # set DB_USER / DB_PASSWORD in any real deployment.
    dbname = os.getenv('DB_NAME', 'postgres')
    user = os.getenv('DB_USER', 'admin')
    password = os.getenv('DB_PASSWORD', 'docker')
    host = os.getenv('DB_ACCES_NAME', '127.0.0.1')
    port = int(os.getenv('DB_PORT', '5432'))
    url = f"postgresql+psycopg2://{user}:{password}@{host}:{port}/{dbname}"
    # Large pool for many concurrent sessions; force UTF-8 on the client.
    engine = create_engine(url, pool_size=250,
                           connect_args={'client_encoding': 'utf8'})
    return engine
def initialize():
    """Create all ORM tables, retrying until the database accepts
    connections; give up after 120 s and exit with status 1."""
    logger.debug("Initializing DB")
    t_start = time.time()
    while True:
        try:
            engine = get_engine()
            # Idempotent: only creates tables that do not exist yet.
            Base.metadata.create_all(engine)
            logger.debug("DB Initialization finished!")
            break
        except Exception as e:
            # Broad catch is deliberate: the DB container may still be
            # starting, so keep polling until the timeout.
            logger.warning(e)
            if time.time() - t_start > 120:
                logger.error("Task timeout: 120 sec.")
                sys.exit(1)
            logger.debug(f"Waiting for DB.")
            time.sleep(5)
def add_tweet_with_user(
        Session,
        timestamp,
        # Tweet required
        tweet_id,
        full_text,
        created_at,
        # User required
        user_id,
        user_name,
        screen_name,
        #
        overwrite=False,
        # Tweet optional
        contributors=None,
        coordinates=None,
        current_user_retweet=None,
        favorite_count=None,
        favorited=None,
        geo=None,
        hashtags=None,
        in_reply_to_status_id=None,
        in_reply_to_user_id=None,
        lang=None,
        location=None,
        media=None,
        place=None,
        possibly_sensitive=None,
        quoted_status_id=None,
        retweet_count=None,
        retweeted=None,
        retweeted_status_id=None,
        scopes=None,
        source_status_id=None,
        truncated=None,
        urls=None,
        user_mentions=None,
        withheld_copyright=None,
        withheld_in_countries=None,
        withheld_scope=None,
        tweet_mode=None,
        # User Optional
        user_location=None,
        description=None,
        user_url=None,
        followers_count=None,
        friends_count=None,
        listed_count=None,
        user_created_at=None,
        verified=None,
        statuses_count=None,
        user_lang=None
):
    """Insert a tweet and its author into the database.

    The author row is written first (via ``add_user``); then the tweet is
    inserted.  With ``overwrite=True`` an existing tweet with the same id
    is deleted and replaced; otherwise a duplicate is logged and skipped.

    Args:
        Session: sessionmaker factory used to open short-lived sessions.
        timestamp: capture time stored on both rows.
        tweet_id / full_text / created_at: required tweet fields.
        user_id / user_name / screen_name: required author fields.
        overwrite: replace an existing tweet row instead of skipping it.
        Remaining keyword args map 1:1 onto Tweet / User columns.
    """
    add_user(
        Session=Session,
        user_id=user_id,
        user_name=user_name,
        screen_name=screen_name,
        user_location=user_location,
        description=description,
        user_url=user_url,
        followers_count=followers_count,
        friends_count=friends_count,
        listed_count=listed_count,
        created_at=user_created_at,
        verified=verified,
        statuses_count=statuses_count,
        user_lang=user_lang,
        timestamp=timestamp,
        overwrite=overwrite
    )
    # NOTE(review): several ids are passed through str() although the
    # columns are BigInteger/String mixes — presumably the DB coerces them;
    # confirm before relying on numeric comparisons against these columns.
    tweet = Tweet(tweet_id=str(tweet_id),
                  timestamp=timestamp,
                  contributors=contributors,
                  coordinates=coordinates,
                  created_at=created_at,
                  current_user_retweet=current_user_retweet,
                  favorite_count=favorite_count,
                  favorited=favorited,
                  full_text=full_text,
                  geo=geo,
                  hashtags=hashtags,
                  in_reply_to_status_id=str(in_reply_to_status_id),
                  in_reply_to_user_id=str(in_reply_to_user_id),
                  lang=lang,
                  location=location,
                  media=media,
                  place=place,
                  possibly_sensitive=possibly_sensitive,
                  quoted_status_id=str(quoted_status_id),
                  retweet_count=retweet_count,
                  retweeted=retweeted,
                  retweeted_status_id=str(retweeted_status_id),
                  scopes=scopes,
                  source_status_id=source_status_id,
                  truncated=truncated,
                  urls=urls,
                  user_id=str(user_id),
                  user_mentions=user_mentions,
                  withheld_copyright=withheld_copyright,
                  withheld_in_countries=withheld_in_countries,
                  withheld_scope=withheld_scope,
                  tweet_mode=tweet_mode,
                  )
    session = Session()
    if overwrite:
        # Replace semantics: delete any existing row first.
        tw = session.query(Tweet).filter(Tweet.tweet_id == tweet_id).first()
        if tw:
            logger.debug(f"Deleting tweet: {tweet_id}")
            session.delete(tw)
            session.flush()
    try:
        insert_to_table(session, tweet)
        logger.debug(f'Inserting tweet to table, id: {tweet_id}, timestamp: {timestamp}')
    except IntegrityError:
        # Duplicate primary key: keep the existing row.
        logger.warning(f"Possible tweet duplicate: {tweet_id}")
        session.rollback()
        pass
    session.close()
def add_user(
        Session,
        user_id,
        user_name,
        screen_name,
        user_location,
        description,
        user_url,
        followers_count,
        friends_count,
        listed_count,
        created_at,
        verified,
        statuses_count,
        user_lang,
        timestamp,
        overwrite=False
):
    """Insert a user snapshot; duplicates are logged and skipped.

    Args:
        Session: sessionmaker factory used to open a short-lived session.
        user_id .. timestamp: values mapped 1:1 onto User columns.
        overwrite: NOTE(review) — accepted for call-compatibility with
            add_tweet_with_user but currently IGNORED (the replace logic
            below is deliberately commented out); confirm intent.
    """
    session = Session()
    user = User(
        user_id=user_id,
        user_name=user_name,
        screen_name=screen_name,
        user_location=user_location,
        description=description,
        user_url=user_url,
        followers_count=followers_count,
        friends_count=friends_count,
        listed_count=listed_count,
        created_at=created_at,
        verified=verified,
        statuses_count=statuses_count,
        user_lang=user_lang,
        timestamp=timestamp
    )
    # if overwrite:
    #     tw = session.query(User).filter(User.user_id == user_id).first()
    #     if tw:
    #         logger.debug(f"Deleting user: {screen_name}")
    #         session.delete(tw)
    #         session.flush()
    try:
        insert_to_table(session, user)
        logger.debug(f'Inserted user to table. screen_name: {screen_name:>20}, '
                     f'user_id: {user_id}, timestamp: {timestamp}')
    except IntegrityError:
        # User already stored from an earlier tweet — that is fine.
        logger.warning(f"User in table: {user_id}, {screen_name}")
    session.close()
def get_database_connectors() -> "Engine, Session":
    """Build the engine and a session factory bound to it; return both."""
    engine = get_engine()
    return engine, sessionmaker(bind=engine)
def insert_to_table(session, table_object):
    """Add *table_object* to *session* and commit.

    IntegrityError is deliberately NOT caught so callers can react to
    duplicate keys themselves.  Operational/programming failures are
    logged and the session is rolled back (previously it was left in a
    failed state, breaking any further use of the same session).
    """
    try:
        session.add(table_object)
        session.commit()
    except OperationalError as e:
        logger.error(f"OperationalError when inserting to table: '{e}'")
        session.rollback()
    except ProgrammingError:
        logger.error(f"ProgrammingError when inserting to table.")
        session.rollback()
def filter_by_lang(Session, lang, inverted=False):
    """Return (tweet_id, lang) rows matching *lang*.

    Args:
        Session: sessionmaker factory.
        lang: language code to match (coerced to str).
        inverted: when True, return rows whose language does NOT match.

    Returns:
        List of (tweet_id, lang) tuples, or None if the DB is unreachable.
    """
    try:
        wanted = str(lang)
        session = Session()
        query = session.query(Tweet.tweet_id, Tweet.lang)
        if inverted:
            query = query.filter(Tweet.lang != wanted)
        else:
            query = query.filter(Tweet.lang == wanted)
        rows = query.all()
        session.close()
        return rows
    except OperationalError:
        logger.error(f"Operational error: is database running?")
        return None
# def filter_by_existing_key(Session, key, inverted=False):
# """
# Filter db, to get all tweets with key
# Args:
# key: string
#         inverted: bool, inverted filtering
#
# Returns:
#
# """
# session = Session()
# if not inverted:
# text = f"session.query(Tweet.tweet_id, Tweet.{key}).filter(Tweet.{key} != 'None').all()"
# else:
# text = f"session.query(Tweet.tweet_id, Tweet.{key}).filter(Tweet.{key} == 'None').all()"
# tweets = eval(text)
# return tweets
def filter_db_search_words(Session, input_string):
    """
    Staged keyword search over tweet texts.

    The query string is split on ';' into stages, and each stage into
    individual words (punctuation stripped, case-insensitive).  Stage one
    selects every tweet containing ANY of its words; each subsequent stage
    narrows the result to tweets that also contain at least one of its
    words.

    Args:
        Session: sessionmaker factory.
        input_string: raw query, e.g. "cats,dogs; cute".

    Returns:
        List (or set, when only one stage ran) of (tweet_id, full_text)
        tuples; None on an empty query or if the DB is unreachable.
    """
    try:
        input_string = input_string.lower()
        # Stage separator is ';'.
        stages = re.split(r"[;]", input_string)
        for stage_ind, stage in enumerate(stages):
            words = re.split(r"[,. !@#$%^&*]", stage)
            for i, word in enumerate(words):
                # Strip punctuation and surrounding spaces from each word.
                word = ''.join(letter for letter in word if letter not in "!?,. ;'\\\"()!@#$%^&*()_)+_-[]")
                word = word.lstrip(" ").rstrip(" ")
                words[i] = word
            words = [word for word in words if len(word) > 0]
            stages[stage_ind] = words
        stages = [stage for stage in stages if len(stage) > 0]
        if len(stages) < 1:
            return None
        session = Session()
        tweets = []
        logger.debug(f"Searching tweets, staged: {stages}")
        # Stage one: OR over its words, scanned in Python over all rows.
        words = stages[0]
        for word in words:
            output = [tweet for tweet in session.query(Tweet.tweet_id, Tweet.full_text).all() if
                      word.lower() in tweet[1].lower()]
            tweets += output
        tweets = set(tweets)  # drop duplicates
        # Later stages: keep only tweets matching at least one stage word.
        for run_ind in range(1, len(stages)):
            stage = stages[run_ind]
            old_tweets = tweets.copy()
            tweets = []
            for tweet in old_tweets:
                for word in stage:
                    if word in tweet[1].lower():
                        tweets.append(tweet)
                        break
        session.close()
        return tweets
    except OperationalError as oe:
        logger.error(f"Operational error: is database running?")
        return None
def filter_db_search_phrases(Session, words):
    """
    Phrase search over tweet texts.

    *words* is split on punctuation into whole phrases; a tweet matches if
    ANY phrase occurs (case-insensitively) in its full text.

    Args:
        Session: sessionmaker factory.
        words: raw query string; ',.!;?' separate phrases.

    Returns:
        List of (tweet_id, full_text) tuples (may contain duplicates when
        several phrases match the same tweet); None if the DB is
        unreachable.
    """
    try:
        stages = re.split(r'[,.!;?]', words)  # Separating stages
        for i, word in enumerate(stages):
            # Strip stray punctuation and surrounding spaces per phrase.
            word = ''.join(letter for letter in word if letter not in "'\\\"()@#$%^&*()_)+_-[]")
            word = word.lstrip(" ").rstrip(" ")
            stages[i] = word
        phrases = [phrases for phrases in stages if len(phrases) > 0]
        session = Session()
        tweets = []
        logger.debug(f"Searching tweets, phrases: {phrases}")
        # Substring scan done in Python over all rows (no SQL LIKE).
        for phrase in phrases:
            output = [tweet for tweet in session.query(Tweet.tweet_id, Tweet.full_text).all() if
                      phrase.lower() in tweet[1].lower()]
            tweets += output
        session.close()
        return tweets
    except OperationalError as oe:
        logger.error(f"Operational error: is database running?")
        return None
def get_db_full_tweet_with_user(Session, tweet_id):
    """Fetch one (Tweet, User) pair for *tweet_id*.

    Returns None when no row matches or the database is unreachable.
    """
    try:
        session = Session()
        row = (session.query(Tweet, User)
               .join(User)
               .filter(Tweet.tweet_id == int(tweet_id))
               .first())
        session.close()
        return row
    except OperationalError:
        logger.error(f"Operational error: is database running?")
        return None
def get_db_all_tweet_list(Session):
    """Return every stored tweet id, or None if the DB is unreachable."""
    try:
        session = Session()
        all_ids = session.query(Tweet.tweet_id).all()
        session.close()
        return all_ids
    except OperationalError:
        logger.error(f"Operational error: is database running?")
        return None
def drop_existing_tweets(Session, tweet_id_list):
    """Return the ids from *tweet_id_list* that are NOT already stored.

    Previously this issued one query per candidate id (N+1 pattern); now a
    single IN query fetches the existing ids and the filtering happens
    locally, preserving the caller's ordering.

    Returns None if the database is unreachable.
    """
    try:
        session = Session()
        existing = {row[0] for row in
                    session.query(Tweet.tweet_id)
                           .filter(Tweet.tweet_id.in_(list(tweet_id_list)))
                           .all()}
        session.close()
        return [tw_id for tw_id in tweet_id_list if tw_id not in existing]
    except OperationalError as oe:
        logger.error(f"Operational error: is database running?")
        return None
|
import numpy as np
import matplotlib.pyplot as plt
def generateChild50(parent1, parent2):
    """Fixed-point 50/50 crossover of two gene dicts.

    The first four genes (in key order) come from one parent, the rest
    from the other; two complementary children are returned.
    """
    first, second = {}, {}
    for index, gene in enumerate(parent1.keys()):
        take_from_p1 = index <= 3
        first[gene] = parent1[gene] if take_from_p1 else parent2[gene]
        second[gene] = parent2[gene] if take_from_p1 else parent1[gene]
    return first, second
def angle2XY(theta_x, theta_y, x_incident, y_incident, z_incident, k):
    """Reflect the incident ray off a mirror tilted by (theta_x, theta_y)
    degrees (on top of its fixed 21-degree mount) and return the reflected
    point (x, y, z) scaled by *k*.

    The incident direction is taken from the first element of each of
    x_incident / y_incident / z_incident.
    """
    tilt_x = np.deg2rad(theta_x)
    tilt_y = np.deg2rad(theta_y)
    incident = np.array([x_incident[0], y_incident[0], z_incident[0]])
    # Mirror normal from its mount angle (21 deg) and the commanded tilts.
    alpha = np.deg2rad(21) - tilt_y + np.deg2rad(90)
    beta = tilt_x
    normal = np.array([
        (np.cos(alpha) * np.cos(beta))[0],
        (np.sin(beta))[0],
        (np.sin(alpha) * np.cos(beta))[0],
    ])
    normal = normal / np.linalg.norm(normal)
    # Specular reflection: r = d - 2 (d . n) n
    reflected = incident - 2 * np.dot(incident, normal) * normal
    return k * reflected[0], k * reflected[1], k * reflected[2]
def cal_fitness(pop):
    """Fitness of each gene dict: 1 / (total distance between the model's
    reflected spots and four measured calibration points).

    Each calibration point pairs a commanded (theta_x, theta_y) with a
    measured (x_reel, y_reel, z_reel) position; higher fitness = smaller
    total error.
    """
    #z_reel = 49.08029
    z_reel = 341.24
    fitness = []
    for genes in pop:
        # Calibration point 1
        theta_x = 3.92 # 0
        theta_y = -0.18 #0
        x_reel = -337.61
        y_reel = 34.63
        x,y,z = angle2XY(theta_x+genes['offset_theta_x'],theta_y+genes['offset_theta_y'],
                     genes['x'],genes['y'],genes['z'], genes['k1'])
        #d1 = np.sqrt((y-0)**2+(x--44.1921)**2+(z-z_theorique)**2)
        d1 = np.sqrt((y-y_reel)**2+(x-x_reel)**2+(z-z_reel)**2)
        # Calibration point 2
        theta_x = 0.42 # 1
        theta_y = -0.24 # -1
        x_reel = -337.61
        y_reel = -22.17
        x,y,z = angle2XY(theta_x+genes['offset_theta_x'],theta_y+genes['offset_theta_y'],
                     genes['x'],genes['y'],genes['z'], genes['k2'])
        #d2 = np.sqrt((y-2.2094)**2+(x--47.4164)**2+(z-z_theorique)**2)
        d2 = np.sqrt((y-y_reel)**2+(x-x_reel)**2+(z-z_reel)**2)
        # Calibration point 3
        theta_x = 0.2 #-1
        theta_y = 3.08 #1
        x_reel = -256
        y_reel = -22.17
        x,y,z = angle2XY(theta_x+genes['offset_theta_x'],theta_y+genes['offset_theta_y'],
                     genes['x'],genes['y'],genes['z'], genes['k3'])
        #d3 = np.sqrt((y--2.1026)**2+(x--41.1996)**2+(z-z_theorique)**2)
        d3 = np.sqrt((y-y_reel)**2+(x-x_reel)**2+(z-z_reel)**2)
        # Calibration point 4
        theta_x = 4.06
        theta_y = 3.16
        x_reel = -256
        y_reel = 34.63
        # NOTE(review): this point reuses genes['k3'] although 'k4' exists
        # and is generated/mutated elsewhere — probable typo, confirm.
        x,y,z = angle2XY(theta_x+genes['offset_theta_x'],theta_y+genes['offset_theta_y'],
                     genes['x'],genes['y'],genes['z'], genes['k3'])
        d4 = np.sqrt((y-y_reel)**2+(x-x_reel)**2+(z-z_reel)**2)
        d_tot = d1+d2+d3+d4
        fitness.append(1/d_tot) # we want to maximise 1/d_tot
    return fitness
def select_mating_pool(pop, fitness, num_parents_mating):
    """Produce the next generation by crossover.

    Args:
        pop: population sorted by ASCENDING fitness.
        fitness: normalised fitness values aligned with *pop*.
        num_parents_mating: number of pairings; each yields two children.

    Returns:
        List of 2 * num_parents_mating children.
    """
    children = []
    # Cumulative distribution for roulette-wheel selection; the final
    # entry is dropped so np.argmax maps a uniform draw to an index.
    cum_sum_fitness = np.cumsum(fitness[:-1])
    for i in range(num_parents_mating):
        if i < 100: # Elitism
            # NOTE(review): pop is sorted ascending, so indices 0..100 are
            # the LOWEST-fitness individuals — this "elitism" pairs the
            # worst members; confirm the intent (pop[-1-i] would pair the
            # best).
            child1, child2 = generateChild50(pop[i], pop[i+1])
            children.append(child1)
            children.append(child2)
        else:
            # Roulette-wheel selection of two parents.
            rand1 = np.random.uniform(0,1)
            rand2 = np.random.uniform(0,1)
            index1 = np.argmax(cum_sum_fitness>rand1)
            parent1 = pop[index1]
            index2 = np.argmax(cum_sum_fitness>rand2)
            parent2 = pop[index2]
            child1, child2 = generateChild50(parent1, parent2)
            children.append(child1)
            children.append(child2)
    return children
def generate_first_pop(sol_per_pop):
    """Create the initial random population.

    Each individual is a dict of nine genes, every value a 1-element numpy
    array drawn uniformly from that gene's range.
    """
    uniform = np.random.uniform
    population = []
    for _ in range(sol_per_pop):
        population.append({
            'offset_theta_x': uniform(low=-1, high=1, size=1),
            'offset_theta_y': uniform(low=-1, high=1, size=1),
            'k1': uniform(low=300, high=600, size=1),
            'k2': uniform(low=300, high=600, size=1),
            'k3': uniform(low=300, high=600, size=1),
            'k4': uniform(low=300, high=600, size=1),
            'x': uniform(low=-0.1, high=0.1, size=1),
            'y': uniform(low=-0.1, high=0.1, size=1),
            'z': uniform(low=-1.1, high=-0.9, size=1),
        })
    return population
def add_mutation(childs, prob_mutation):
    """With probability *prob_mutation* per child, perturb ONE randomly
    chosen gene with Gaussian noise (sigma 0.1 for the k genes, 0.01 for
    the others).  Mutates the dicts in place and returns the same list.
    """
    gene_sigmas = [('offset_theta_x', 0.01), ('offset_theta_y', 0.01),
                   ('k1', 0.1), ('k2', 0.1), ('k3', 0.1), ('k4', 0.1),
                   ('x', 0.01), ('y', 0.01), ('z', 0.01)]
    for child in childs:
        if np.random.uniform(0, 1) < prob_mutation:
            name, sigma = gene_sigmas[np.random.randint(0, 9)]
            child[name] = child[name] + np.random.normal(0, sigma)
    return childs
def add_mutation2(childs, prob_mutation):
    """With probability *prob_mutation* per child, perturb EVERY gene with
    Gaussian noise of sigma 0.01.  Mutates in place and returns the list.
    """
    gene_names = ('offset_theta_x', 'offset_theta_y',
                  'k1', 'k2', 'k3', 'k4', 'x', 'y', 'z')
    for child in childs:
        if np.random.uniform(0, 1) < prob_mutation:
            for name in gene_names:
                child[name] = child[name] + np.random.normal(0, 0.01)
    return childs
def main():
    """Run the genetic algorithm and plot the fitness history.

    Fitness is stored as 1/error, so printed and plotted values are inverted
    back (1/fitness) to show the error. Relies on generate_first_pop,
    cal_fitness, select_mating_pool and add_mutation2 defined in this module.
    """
    sol_per_pop = 20000  # population size; 1500 also works well
    num_generations = 50
    num_parents_mating = int(sol_per_pop/2)
    pop = generate_first_pop(sol_per_pop)
    best_fitness = []
    average_fitness = []
    best_child = {}
    best_child_fitness = -1
    for generation in range(num_generations):
        # Measuring the fitness of each chromosome in the population.
        fitness = cal_fitness(pop)
        sorted_fitness = sorted(fitness)
        best_fitness.append(sorted_fitness[-1])
        average_fitness.append(np.mean(fitness))
        sorted_norm_fitness = np.array(sorted_fitness)/sum(sorted_fitness)
        # Sort the population by fitness. FIX: the explicit key avoids a
        # TypeError — without it, a tie in fitness makes sorted() compare the
        # dict individuals themselves, which Python 3 cannot order.
        sorted_pop = [p for _, p in sorted(zip(fitness, pop), key=lambda pair: pair[0])]
        if sorted_fitness[-1] > best_child_fitness:
            best_child = sorted_pop[-1]
            best_child_fitness = sorted_fitness[-1]
        print('gen ' + str(generation) + ' best fitness : ' + str(1/sorted_fitness[-1]))
        # Selecting the best parents in the population for mating.
        children = select_mating_pool(sorted_pop, sorted_norm_fitness, num_parents_mating)
        # mutation (add_mutation2 worked well here); mutates children in place
        add_mutation2(children, 0.5)
        # next generation
        pop = children
    print(best_child)
    print(1/best_child_fitness)
    plt.plot(1/np.array(best_fitness))
    plt.plot(1/np.array(average_fitness))
    plt.show()
# script entry point
if __name__ == '__main__':
    main()
import src.UITools
import src.GameBody
import pygame
class myButton(src.UITools.uiButton):
    """A uiButton using the library's default constructor and state handling."""

    def __init__(self):
        super().__init__()

    def stateChange(self):
        # delegate visual state transitions to the library default
        src.UITools.defaultStateChange(self)
class picButton(src.UITools.uiButton):
    """A uiButton drawn from three caller-supplied surfaces (normal/down/hover)."""

    def __init__(self, normSurface, downSurface, moveonSurface):
        super().__init__("button", normSurface, downSurface, moveonSurface)

    def stateChange(self):
        # delegate visual state transitions to the library default
        src.UITools.defaultStateChange(self)
class testUI(src.GameBody.GameBody):
    """Manual test harness: shows a default button and an image-based button."""
    def __init__(self):
        # 400x400 window titled "test UIs", white background, 30 FPS
        src.GameBody.GameBody.__init__(self, (400, 400), "test UIs", (255,255,255), 30)
        self.button = myButton()
        # surfaces for the three visual states of the picture button
        normSurface = pygame.image.load('button.jpg').convert()
        downSurface = pygame.image.load('buttonDown.png').convert()
        moveonSurface = pygame.image.load('buttonMoveOn.png').convert()
        self.picbutton = picButton(normSurface, downSurface, moveonSurface)
        '''
        button1 = myButton()
        button2 = myButton()
        button3 = myButton()
        button1.moveTo((0,0))
        button2.moveTo((0,100))
        button3.moveTo(((0,200)))
        surface = pygame.Surface((300, 400))
        surface.convert()
        surface.fill((0,255,255))
        self.plane = src.UITools.plane((100,10), (0,0), (button1,button2,button3), surface)
        '''
    def update(self):
        # NOTE(review): assumed to be called once per frame by GameBody.step() — confirm
        self.button.update(self.screen)
        self.picbutton.moveTo((0,200))
        self.picbutton.update(self.screen)
        #self.plane.update(self.screen)
# script entry point: run the UI test loop until the window is closed
if __name__ == '__main__':
    pygame.init()
    testbody = testUI()
    # GameBody.step() presumably pumps events and redraws — TODO confirm
    while not testbody.isQuit:
        testbody.step()
    pygame.quit()
from onegov.election_day.collections import UploadTokenCollection
def test_upload_token_collection(session):
    """Exercise create / query / by_id / delete on UploadTokenCollection."""
    collection = UploadTokenCollection(session)
    assert collection.query().all() == []
    token = collection.create()
    assert collection.query().all() == [token]
    assert collection.by_id(token.id) == token
    another_token = collection.create()
    # compare as sets: query order is unspecified with two tokens
    # (set literal instead of set([...]), flake8-comprehensions C405)
    assert set(collection.query().all()) == {token, another_token}
    collection.delete(token)
    assert collection.query().all() == [another_token]
|
from rest_framework import serializers
from .models import Profile, Account, Transaction
from django.contrib.auth import get_user_model
class RegisterSerializer(serializers.ModelSerializer):
    """Register a new user, storing the password properly hashed."""

    # never echoed back in API responses
    password = serializers.CharField(write_only=True)

    class Meta:
        """Meta class to map serializer's fields with the model fields."""
        model = get_user_model()
        fields = ('username', 'password')

    def create(self, validated_data):
        """Create the user, hash the password via set_password, and persist."""
        user_model = get_user_model()
        new_user = user_model.objects.create(username=validated_data['username'])
        new_user.set_password(validated_data['password'])
        new_user.save()
        return new_user
class ProfileSerializer(serializers.ModelSerializer):
    """Serializer to map the Profile model instance into JSON format."""
    # expose the owning user's username, read-only
    owner = serializers.ReadOnlyField(source='owner.username')
    class Meta:
        """Meta class to map serializer's fields with the model fields."""
        model = Profile
        fields = ('owner',
                  'first_name',
                  'last_name',
                  'email',
                  'date_created',
                  'date_modified'
                  )
        # timestamps are managed by the model, never written by clients
        read_only_fields = ('date_created', 'date_modified')
class AccountSerializer(serializers.ModelSerializer):
    """Serializer to map the Account model instance into JSON format."""
    # expose the owning user's username, read-only
    owner = serializers.ReadOnlyField(source='owner.username')
    class Meta:
        """Meta class to map serializer's fields with the model fields."""
        model = Account
        fields = ('owner',
                  'account_number',
                  'credit_line',
                  'apr',
                  'principal_balance',
                  'interest',
                  'total_amount',
                  'date_created',
                  'date_modified'
                  )
        # only credit_line is client-writable; balances/identifiers are
        # computed or assigned server-side
        read_only_fields = (
            'apr',
            'account_number',
            'principal_balance',
            'interest',
            'total_amount',
            'date_created',
            'date_modified'
        )
class TransactionSerializer(serializers.ModelSerializer):
    """Serializer to map the Transaction model instance into JSON format."""
    # expose the owning user's username, read-only
    owner = serializers.ReadOnlyField(source='owner.username')
    class Meta:
        """Meta class to map serializer's fields with the model fields."""
        model = Transaction
        fields = ('owner',
                  'account',
                  'transaction_id',
                  'transaction_type',
                  'amount',
                  'date_created',
                  'date_modified'
                  )
        # timestamps are managed by the model, never written by clients
        read_only_fields = ('date_created', 'date_modified')
|
import numpy as np
import pandas as pd
from itertools import chain
from football_utilities import get_opp_from_row
# canonical column names for the merged per-player weekly stats table
columns = np.asarray(['Year', 'Week', 'Player', 'Team', 'Pos', 'G', 'QBRat',
                      'Cmp', 'PsAtt', 'PsYds', 'PsYdsAtt', 'PsLng', 'Int',
                      'PsTD', 'RsAtt', 'RsYds', 'RsYdsAtt', 'RsLng', 'RsTD',
                      'Rec', 'Tgt', 'RcYds', 'RcYdsRec', 'RcLng', 'RcTD',
                      'Sack', 'SackYds', 'Fum', 'FumL'])
# these are the placeholder column labels to drop from the raw HTML table
# (read_html assigns integer column labels), not positional indices
drop_idx = {'QB': [3, 12, 18, 21, 24],
            'RB': [3, 9, 16, 19],
            'TE': [3, 10, 16, 19],
            'WR': list(chain([3, 25], range(10,23)))}
# per-position indices into `columns` giving names for the surviving columns
colname_idx = {'QB': np.asarray(list(chain(range(19), range(25, 29)))),
               'RB': np.asarray(list(chain(range(6), range(14, 25), range(27, 29)))),
               'TE': np.asarray(list(chain(range(6), range(19, 25), range(14, 19), range(27, 29)))),
               'WR': np.asarray(list(chain(range(6), range(19, 25), range(27, 29))))}
# parse every saved weekly HTML table (2001-2015, weeks 1-17) per position
data_dict = {}
for pos in ['QB', 'RB', 'TE', 'WR']:
    data = pd.DataFrame()
    for y in range(2001, 2016):
        for w in range(1, 18):
            fname = 'Data/Raw/HTML/%s_%d_Wk%d.html' %(pos, y, w)
            with open(fname, 'r') as f:
                raw = f.read()
            tables = pd.read_html(raw)
            # the stats table is the last one on the page
            table = tables[-1]
            # drop the two header rows read_html keeps as data
            table = table.drop(range(2))
            # sanity check: a weekly table should have well over 20 players
            assert table.shape[0] > 20
            table.insert(0, 'Year', y)
            table.insert(1, 'Week', w)
            table.insert(4, 'Pos', pos)
            data = pd.concat([data, table])
            print "Parsing HTML for %ss, Week %d, %d" %(pos, w, y)
    # drop placeholder columns, then name the survivors
    data = data.drop(drop_idx[pos], axis=1)
    data.columns = columns[colname_idx[pos]]
    data = data.apply(pd.to_numeric, errors='ignore')
    data.reset_index(drop=True, inplace=True)
    data_dict[pos] = data
# merge all positions; outer join keeps position-specific columns as NaN
data = pd.concat(data_dict.values(), join='outer')
# NOTE(review): reindex_axis was removed in pandas 0.21+ (use reindex) — confirm pinned pandas
data = data.reindex_axis(columns, axis=1)
data.reset_index(drop=True, inplace=True)
# players whose team cell was blank in the source HTML
missing_teams = [('Jalen Parmele', 'BAL'), ('Devin Moore', 'IND'),
                 ('Darius Reynaud', 'NYG'), ('Stefan Logan', 'DET'),
                 ('Jason Wright', 'ARI'), ('Bernard Scott', 'CIN'),
                 ('Leon Washington', 'SEA'), ('Deji Karim', 'JAC'),
                 ('Clifton Smith', 'CLE'), ('Quinn Porter', 'STL')]
for p, t in missing_teams:
    data.loc[(pd.isnull(data.Team)) & (data.Player==p), 'Team'] = t
# Rams moved from St. Louis to Los Angeles in 2016, and Yahoo retroactively changed the abbreviation
data.Team = data.Team.str.replace('STL', 'LAR')
def calc_ffpts(x):
    """Standard fantasy points for one player-week row (NaN stats count as 0)."""
    components = [
        0.2 * np.floor(x['PsYds'] / 5),   # passing: 1 pt per 25 yds, in 0.2 steps
        -2.0 * x['Int'],
        4.0 * x['PsTD'],
        0.1 * x['RsYds'],
        6.0 * x['RsTD'],
        0.1 * x['RcYds'],
        6.0 * x['RcTD'],
        -2.0 * x['FumL'],
    ]
    return np.nansum(components)
# compute fantasy points for every row (axis=1 -> row-wise)
data['FFPts'] = data.apply(calc_ffpts, 1)
# function to find opponent from schedule (grouped by year/week)
def get_opp_from_row(r, schgb):
    """Return the opponent abbreviation for row r.

    schgb is the full schedule grouped by year and week; GameIDs look like
    'AAA@BBB', so the opponent is whichever side is not r['Team'].
    NOTE(review): this shadows the get_opp_from_row imported from
    football_utilities at the top of the file — confirm which one is intended.
    """
    week_games = schgb.get_group((r['Year'], r['Week']))
    team_mask = week_games.GameID.str.match('.*%s.*' % r['Team'])
    gameid = week_games.loc[team_mask, 'GameID'].values[0]
    return gameid[4:7] if gameid.startswith(r['Team']) else gameid[0:3]
# read in schedule and find opponents
sch = pd.read_pickle('Data/NFL_Schedule_2001-2015.pickled')
schgb = sch.groupby(['Year', 'Week'])
print "Finding opponents..."
# row-wise lookup of each player's opponent for that week
opp = data.apply(lambda r: get_opp_from_row(r, schgb), 1)
data.insert(5, 'Opp', opp)
# persist both CSV (human-readable) and pickle (fast reload)
data.to_csv('Data/Offense_2001-2015.csv', index=False)
data.to_pickle('Data/Offense_2001-2015.pickled')
|
import numpy as np
import copy
import utils
import itertools
ids = ['342457421', '931177406']
""" Tab change for letter to int
unpopulated U = 1
immune I = 8
healthy H = 2
sick S = 33 - 3 days left
= 32 - 2 days left
= 31 - Last day
= 30 go to healthy --> 2
quarantined Q = 52 - 2 days left
= 51 - Last day
= 50 go to healthy --> 2
? : 0 1 2 3 4 5 6 7
U H S1 S2 S3 I Q1 Q2
2 si vrai 1 si faux
"""
def solve_problem(input):
    """Answer each query with 'T', 'F' or '?'.

    NOTE(review): the parameter name `input` shadows the builtin — kept for
    interface compatibility.
    """
    maps = input['observations']
    queries = input['queries']
    police = input['police']
    medics = input['medics']
    # encode observed boards into ints/possibility masks (see legend above)
    all_the_maps = fulling_my_maps(maps, police)
    all_the_actions = {}
    # enumerate candidate actions for every step except the last observation
    for time in range(len(maps)):
        if time == len(maps) - 1:
            break
        all_the_actions[time] = actions(all_the_maps[time], police, medics)
    # prune possibilities by simulating forward and matching the next board
    all_the_maps = prepare_the_maps(all_the_maps, all_the_actions)
    dict_result = {}
    for i in range(len(queries)):
        coor = queries[i][0]
        time = queries[i][1]
        val = queries[i][2]
        tr_val = trad_val(val)
        # values > 1111 are still ambiguous possibility masks
        if all_the_maps[time][coor[0]][coor[1]] > 1111:
            dict_result[queries[i]] = '?'
        # leading digit encodes the state family (3=sick, 5=quarantined, ...)
        elif dizaine_number(all_the_maps[time][coor[0]][coor[1]])[0] == tr_val:
            dict_result[queries[i]] = 'T'
        elif dizaine_number(all_the_maps[time][coor[0]][coor[1]])[0] != tr_val:
            dict_result[queries[i]] = 'F'
    return dict_result
# put your solution here, remember the format needed
def fulling_my_maps(maps, police):
    """Encode each observed letter board into the int representation.

    Known letters map directly to their codes; '?' cells are encoded as
    8-digit possibility masks derived from the previous step's value (at
    time 0 everything is possible: 22211111). See the legend above.
    """
    list_of_maps = []
    shape = [len(maps[0]), len(maps[0][0])]
    for t in range(len(maps)):
        actual_map = np.zeros(shape, int)
        if t == 0:
            # first observation: no history, direct letter -> code mapping
            for i in range(shape[0]):
                for j in range(shape[1]):
                    letter = maps[t][i][j]
                    if letter == 'U':
                        actual_map[i][j] = 1
                    elif letter == 'H':
                        actual_map[i][j] = 2
                    elif letter == 'S':
                        actual_map[i][j] = 33
                    elif letter == 'I':
                        actual_map[i][j] = 8
                    elif letter == 'Q':
                        actual_map[i][j] = 52
                    elif letter == '?':
                        # unknown with no history: mask allowing U, H and S3
                        actual_map[i][j] = 22211111
        else:
            # later observations: refine using the previous step's encoding
            for i in range(shape[0]):
                for j in range(shape[1]):
                    letter = maps[t][i][j]
                    previous_num = list_of_maps[t - 1][i][j]
                    if letter == 'U':
                        actual_map[i][j] = 1
                    elif letter == 'H':
                        actual_map[i][j] = 2
                    elif letter == 'S':
                        # sick previously -> advance the day counter
                        if dizaine_number(previous_num)[0] == 3:
                            if previous_num == 33:
                                actual_map[i][j] = 32
                            if previous_num == 32:
                                actual_map[i][j] = 31
                        else:
                            actual_map[i][j] = 33
                        # the previous num is not sure (possibility mask)
                        if previous_num > 11111:
                            conter = []
                            if dizaine_number(previous_num)[2] == 2:
                                conter.append(2)
                            if dizaine_number(previous_num)[3] == 2:
                                conter.append(1)
                            if dizaine_number(previous_num)[4] == 2:
                                conter.append(3)
                            if len(conter) == 1:
                                actual_map[i][j] = 30 + conter[0]
                            if len(conter) == 0:
                                actual_map[i][j] = 33
                            elif len(conter) > 1:
                                # several sick stages still possible -> new mask
                                tmp = [1, 1, 1, 1, 1, 1, 1, 1]
                                for ct in range(len(conter)):
                                    if conter[ct] == 2:
                                        tmp[3] = 2
                                    if conter[ct] == 1:
                                        tmp[4] = 2
                                    if conter[ct] == 3:
                                        tmp[2] = 2
                                actual_map[i][j] = tab_to_number(tmp)
                    elif letter == 'I':
                        actual_map[i][j] = 8
                    elif letter == 'Q':
                        if dizaine_number(previous_num)[0] == 5:
                            if previous_num == 52:
                                actual_map[i][j] = 51
                        if dizaine_number(previous_num)[0] == 3:
                            # freshly quarantined sick cell
                            actual_map[i][j] = 52
                        if previous_num > 1111:
                            if check_chiffre(previous_num, 2, 6):
                                # mask: could be Q2 -> now Q1 or Q2
                                actual_map[i][j] = 11111122
                    elif letter == '?':
                        tab_previous = dizaine_number(previous_num)
                        tab_result = copy.deepcopy(tab_previous)
                        if previous_num > 1111:
                            # advance each possible state one step in the mask
                            if tab_previous[2] == 2:
                                tab_result[2] = 1
                                tab_result[3] = 2
                                tab_result[6] = 2
                            if tab_previous[3] == 2:
                                tab_result[3] = 1
                                tab_result[4] = 2
                                tab_result[6] = 2
                            if tab_previous[4] == 2:
                                tab_result[4] = 1
                                tab_result[1] = 2
                            if tab_previous[6] == 2:
                                tab_result[6] = 1
                                tab_result[7] = 2
                            if tab_previous[7] == 2:
                                tab_result[7] = 1
                                tab_result[1] = 2
                            actual_map[i][j] = tab_to_number(tab_result)
                        else:
                            # previous state was concrete: build a fresh mask
                            tmp_tab = [1, 1, 1, 1, 1, 1, 1, 1]
                            if previous_num == 2:
                                tmp_tab[1] = 2
                            if previous_num == 33:
                                tmp_tab[3] = 2
                                if police > 0:
                                    # could have been quarantined
                                    tmp_tab[6] = 2
                            if previous_num == 32:
                                tmp_tab[4] = 2
                                if police > 0:
                                    tmp_tab[6] = 2
                            if previous_num == 31:
                                tmp_tab[1] = 2
                            if previous_num == 52:
                                tmp_tab[7] = 2
                            if previous_num == 51:
                                tmp_tab[1] = 2
                            actual_map[i][j] = tab_to_number(tmp_tab)
                        if previous_num == 1:
                            # unpopulated stays unpopulated
                            actual_map[i][j] = 1
        list_of_maps.append(actual_map)
    return list_of_maps
def trad_val(x):
    """Map a query letter to the leading digit of its internal state code.

    Returns None for unrecognised letters (matching the original fallthrough).
    """
    letter_codes = {'H': 2, 'U': 1, 'S': 3, 'I': 8, 'Q': 5}
    return letter_codes.get(x)
def actions(map_np, police, medics):
    """Enumerate every possible (vaccinate, quarantine) action combination.

    Vaccination targets healthy cells (or masks that allow healthy);
    quarantine targets sick cells (or masks that allow a sick stage).
    Return type varies by branch (list vs tuple) — NOTE(review): callers
    appear to only iterate/index, so this inconsistency is tolerated.
    """
    vaccinate_list = []
    quarantine_list = []
    shape = [len(map_np), len(map_np[0])]
    # filling the vaccinate list
    for i in range(shape[0]):
        for j in range(shape[1]):
            number = map_np[i][j]
            if number == 2:
                vaccinate_list.append(('vaccinate', (i, j)))
            if number > 111:
                # mask allowing healthy (digit 1)
                if dizaine_number(number)[1] == 2:
                    vaccinate_list.append(('vaccinate', (i, j)))
    # filling the quarantine list
    for i in range(shape[0]):
        for j in range(shape[1]):
            number_arr = dizaine_number(map_np[i][j])
            if number_arr[0] == 3:
                quarantine_list.append(('quarantine', (i, j)))
            if len(number_arr) > 2:
                # mask allowing any sick stage (digits 2-4)
                if number_arr[2] == 2 or number_arr[3] == 2 or number_arr[4] == 2:
                    quarantine_list.append(('quarantine', (i, j)))
    all_action = []
    # subsets of exactly min(teams, candidates) targets
    mixed_quarantine_list = special_power_set(quarantine_list, min(police, len(quarantine_list)))
    mixed_vaccinate_list = special_power_set(vaccinate_list, min(medics, len(vaccinate_list)))
    mixed_vaccinate_list = list(mixed_vaccinate_list)
    mixed_quarantine_list = list(mixed_quarantine_list)
    # cartesian product of vaccinate choices x quarantine choices
    for i in range(len(mixed_quarantine_list)):
        for j in range(len(mixed_vaccinate_list)):
            tpi = mixed_quarantine_list[i]
            tpj = mixed_vaccinate_list[j]
            temp = tpj + tpi
            all_action.append(temp)
    all_action_tuple = tuple(all_action)
    if medics == 0 and police != 0:
        return mixed_quarantine_list
    if medics != 0 and police == 0:
        return tuple(mixed_vaccinate_list)
    if medics == 0 and police == 0:
        return all_action
    return all_action_tuple
def prepare_the_maps(all_map, all_the_actions):
    """Resolve possibility masks by simulating every candidate assignment.

    For each timestep, every combination of concrete values for the unknown
    cells is simulated forward with every candidate action; combinations whose
    result matches the next observed board constrain (or fully determine) the
    unknown cells. Finally, immune/unpopulated cells are propagated in time.
    Mutates and returns all_map.
    """
    list_of_unknown = {}
    dict_map_possib = {}
    shape = [len(all_map[0]), len(all_map[0][0])]
    # 1) collect, per timestep, the list of concrete values each unknown allows
    for time in range(len(all_map)):
        coor_list = []
        if time == len(all_map) - 1:
            break
        for i in range(shape[0]):
            for j in range(shape[1]):
                poss_list = []
                if all_map[time][i][j] > 111:
                    arr_value = dizaine_number(all_map[time][i][j])
                    # mask digit position -> concrete state value
                    if arr_value[0] == 2:
                        poss_list.append(1)
                    if arr_value[1] == 2:
                        poss_list.append(2)
                    if arr_value[2] == 2:
                        poss_list.append(33)
                    if arr_value[3] == 2:
                        poss_list.append(32)
                    if arr_value[4] == 2:
                        poss_list.append(31)
                    if arr_value[5] == 2:
                        poss_list.append(1)
                    if arr_value[6] == 2:
                        poss_list.append(52)
                    if arr_value[7] == 2:
                        poss_list.append(51)
                    coor_list.append(poss_list)
        list_of_unknown[time] = coor_list
    # 2) tag each candidate value with the 1-based index of its unknown cell
    #    (prefixed digit) so combinations can be matched back to cells
    for time in range(len(list_of_unknown)):
        if len(list_of_unknown[time]) == 0:
            continue
        for unk in range(len(list_of_unknown[time])):
            for pos in range(len(list_of_unknown[time][unk])):
                val = list_of_unknown[time][unk][pos]
                list_of_unknown[time][unk][pos] = put_number_in_zero(val, unk + 1)
    # 3) build every fully-concrete candidate map per timestep
    for time in range(len(list_of_unknown)):
        list_tmp_map = []
        coor_of_unknown = where(all_map[time], 1111, 'sup')
        tmp_list = []
        if len(list_of_unknown[time]) == 0:
            # nothing unknown at this step: the observed map is the only candidate
            tmp = []
            tmp.append(all_map[time])
            dict_map_possib[time] = tmp
            continue
        for unk in range(len(list_of_unknown[time])):
            tmp_list += list_of_unknown[time][unk]
        number_unk = len(list_of_unknown[time])
        # one tagged value per unknown cell (spcial_subsets enforces the tags)
        sub = spcial_subsets(tmp_list, number_unk)
        for s in range(len(sub)):
            for unk in range(number_unk):
                sub[s][unk] = remove_number_in_zero(sub[s][unk])
        for s in range(len(sub)):
            tmp_map = copy.deepcopy(all_map[time])
            for unk in range(number_unk):
                coord = coor_of_unknown[unk]
                tmp_map[coord[0]][coord[1]] = sub[s][unk]
            list_tmp_map.append(tmp_map)
        dict_map_possib[time] = list_tmp_map
    # 4) simulate each candidate map with each candidate action and keep the
    #    ones whose result matches the next observed board
    for time in range(len(dict_map_possib)):
        list_equal = []
        act_equal = []
        actual_map_eq = []
        coor_of_unknown = where(all_map[time], 1111, 'sup')
        for im in range(len(dict_map_possib[time])):
            # NOTE(review): iterates over the number of timesteps, not the
            # number of actions at this timestep — confirm intent
            for act in range(len(all_the_actions)):
                test_map = dict_map_possib[time][im]
                if len(all_the_actions[time]) == 0:
                    test_action = []
                else:
                    test_action = all_the_actions[time][act]
                map_res = result(test_map, test_action, shape)
                true_future_map = all_map[time + 1]
                if check_tow_map_equal(map_res, true_future_map):
                    if len(list_equal) == 0:
                        list_equal.append(map_res)
                        actual_map_eq.append(dict_map_possib[time][im])
                        # NOTE(review): likely meant all_the_actions[time][act];
                        # act_equal is never read afterwards, so harmless
                        act_equal.append(all_the_actions[act])
                    elif not check_tow_map_equal(list_equal[0], map_res):
                        list_equal.append(map_res)
                        actual_map_eq.append(dict_map_possib[time][im])
                        act_equal.append(all_the_actions[act])
        # list_equal = distinct(list_equal)
        if len(list_equal) == 1:
            # unique consistent assignment: commit it and its simulated future
            map_choice = actual_map_eq[0]
            all_map[time] = map_choice
            map_res = list_equal[0]
            all_map[time + 1] = map_res
        if len(list_equal) > 1:
            # several futures remain: rebuild a mask of the surviving values
            for unk in range(len(coor_of_unknown)):
                crd = coor_of_unknown[unk]
                tmp_val = [1, 1, 1, 1, 1, 1, 1, 1]
                for eq in range(len(list_equal)):
                    map_res = list_equal[eq]
                    val_res = map_res[crd[0]][crd[1]]
                    if val_res == 1:
                        tmp_val[0] = 2
                    if val_res == 2:
                        tmp_val[1] = 2
                    if val_res == 33:
                        tmp_val[2] = 2
                    if val_res == 32:
                        tmp_val[3] = 2
                    if val_res == 31:
                        tmp_val[4] = 2
                    if val_res == 8:
                        tmp_val[5] = 2
                    if val_res == 52:
                        tmp_val[6] = 2
                    if val_res == 51:
                        tmp_val[7] = 2
                all_map[time][crd[0]][crd[1]] = tab_to_number(tmp_val)
    # 5) collapse masks that now allow only a single state
    for time in range(len(all_map)):
        for i in range(shape[0]):
            for j in range(shape[1]):
                val = all_map[time][i][j]
                if val > 1111:
                    if check_safe_case(val):
                        all_map[time][i][j] = check_safe_case(val)
    # 6) immune stays immune forever after; unpopulated is constant in time
    vac_list = []
    unpop_list = []
    for time in range(len(all_map)):
        for i in range(shape[0]):
            for j in range(shape[1]):
                val = all_map[time][i][j]
                if val == 8:
                    vac_list.append((i, j, time))
                if (i, j) in unpop_list:
                    continue
                if val == 1:
                    unpop_list.append((i, j))
    for vac in vac_list:
        x = vac[0]
        y = vac[1]
        time = vac[2]
        for t2 in range(len(all_map)):
            if t2 > time:
                all_map[t2][x][y] = 8
    for un in unpop_list:
        x = un[0]
        y = un[1]
        for t2 in range(len(all_map)):
            all_map[t2][x][y] = 1
    return all_map
def result(map_base, action, shape):
    """Simulate one turn: apply actions, spread infection, advance timers.

    Returns a new map; map_base is not modified (deepcopy).
    """
    map_np = copy.deepcopy(map_base)
    # 1 do all the actions (vaccinate -> immune 8, quarantine -> 53, i.e. Q
    # with an extra day so the later decrement lands on 52)
    for i in range(len(action)):
        if len(action) == 0:
            break
        type_action = action[i][0]
        coor_action = action[i][1]
        if type_action == 'vaccinate' or type_action == 'v':
            map_np[coor_action[0]][coor_action[1]] = 8
        if type_action == 'quarantine' or type_action == 'q':
            map_np[coor_action[0]][coor_action[1]] = 53
    # 2 contamination: every healthy 4-neighbour of a sick cell becomes sick
    list_contamination = []
    for i in range(shape[0]):
        for j in range(shape[1]):
            if dizaine_number(map_np[i][j])[0] == 3:
                if check_in_map(i - 1, j, shape):
                    if map_np[i - 1][j] == 2:
                        tmp = [i - 1, j]
                        list_contamination.append(tmp)
                if check_in_map(i + 1, j, shape):
                    if map_np[i + 1][j] == 2:
                        tmp = [i + 1, j]
                        list_contamination.append(tmp)
                if check_in_map(i, j - 1, shape):
                    if map_np[i][j - 1] == 2:
                        tmp = [i, j - 1]
                        list_contamination.append(tmp)
                if check_in_map(i, j + 1, shape):
                    if map_np[i][j + 1] == 2:
                        tmp = [i, j + 1]
                        list_contamination.append(tmp)
    # newly infected get 34 so the decrement below leaves them at 33 (3 days)
    for i in range(len(list_contamination)):
        map_np[list_contamination[i][0]][list_contamination[i][1]] = 34
    # 3 reduce the quarantine and sick day counters; expiring cells heal
    for i in range(shape[0]):
        for j in range(shape[1]):
            number = dizaine_number(map_np[i][j])
            if number[0] == 3 or number[0] == 5:
                if number[1] == 1:
                    map_np[i][j] = 2
                else:
                    map_np[i][j] -= 1
    return map_np
def where(arr, value, st):
    """Indices where arr compares to value: st='eq' for ==, st='sup' for >.

    1-D input yields a list of ints, 2-D input a list of (i, j) tuples.
    Any other st yields an empty result.
    """
    if st == 'eq':
        matches = lambda v: v == value
    elif st == 'sup':
        matches = lambda v: v > value
    else:
        matches = lambda v: False
    if len(np.shape(arr)) == 1:
        return [i for i, v in enumerate(arr) if matches(v)]
    return [(i, j)
            for i in range(len(arr))
            for j in range(len(arr[0]))
            if matches(arr[i][j])]
def check_safe_case(x):
    """If the 8-digit possibility mask x allows exactly one state, return it.

    input : x, an int of 8 digits (2 = possible, 1 = impossible)
    output: the concrete state value when unique, otherwise False.
    """
    mask = np.asanyarray(dizaine_number(x))
    candidates = where(mask, 2, 'eq')
    if len(candidates) > 1:
        return False
    # mask position -> concrete cell value (U, H, S3, S2, S1, U, Q2, Q1)
    position_values = {0: 1, 1: 2, 2: 33, 3: 32, 4: 31, 5: 1, 6: 52, 7: 51}
    return position_values[candidates[0]]
def check_tow_map_equal(resmap, truemap):
    """True when resmap matches truemap on every determined cell.

    Cells of truemap holding possibility masks (> 1111) are ignored.
    """
    rows, cols = np.shape(resmap)
    for r in range(rows):
        for c in range(cols):
            if truemap[r][c] > 1111:
                continue  # unknown in the observation: cannot contradict
            if truemap[r][c] != resmap[r][c]:
                return False
    return True
def tab_to_number(x):
    """Pack the first 8 digits of list x into a single int.

    input : [1, 2, 3, 4, 5, 6, 7, 8] ---> 12345678
    """
    packed = 0
    for idx in range(8):
        packed = packed * 10 + x[idx]
    return packed
def check_maybe_sick(x):
    """True when mask x allows a sick stage with 3 or 2 days left (digits 2 or 3)."""
    return check_chiffre(x, 2, 2) or check_chiffre(x, 2, 3)
def check_chiffre(x, value, place):
    """True when digit number `place` of x equals `value`.

    x:1234, value: 3, place:2 ----> True
    """
    return dizaine_number(x)[place] == value
def check_in_map(x, y, shape):
    """True when (x, y) lies inside a grid of `shape` = [rows, cols]."""
    return 0 <= x < shape[0] and 0 <= y < shape[1]
def dizaine_number(x):
    """Split a non-negative int into its digit list: 1234 ---> [1, 2, 3, 4].

    Note: 0 yields an empty list.
    """
    digits = []
    while x > 0:
        digits.insert(0, x % 10)
        x //= 10
    return digits
def findsubsets(s, n):
    """All size-n subsets of s as lists; if s has fewer than n items, return [s]."""
    if len(s) < n:
        return [s]
    return [list(combo) for combo in itertools.combinations(s, n)]
def put_number_in_zero(x, val):
    """Prefix digit `val` in front of x: put_number_in_zero(234, 1) -> 1234."""
    return x + val * 10 ** len(dizaine_number(x))
def remove_number_in_zero(x):
    """Strip the leading digit from x: 1234 -> 234 (inverse of put_number_in_zero)."""
    leading = dizaine_number(x)[0]
    return x - leading * 10 ** (len(dizaine_number(x)) - 1)
def spcial_subsets(list, k):
    """Size-k subsets whose i-th element carries the tag digit i+1 in front.

    Elements were tagged with put_number_in_zero, so this keeps exactly the
    combinations that pick one candidate value per unknown cell, in order.
    """
    valid = []
    for subset in findsubsets(list, k):
        # evaluate every position (no short-circuit, matching original flag logic)
        mismatches = [dizaine_number(subset[pos])[0] != pos + 1 for pos in range(k)]
        if not any(mismatches):
            valid.append(subset)
    return valid
def special_power_set(list, k):
    """Subsets of `list` (via utils.powerset) having exactly k elements."""
    return [subset for subset in utils.powerset(list) if len(subset) == k]
|
import praw
import time
# set variable r to the praw Reddit session object
r = praw.Reddit(user_agent = "Comment bot using Reddit API")
# Before reddit started using Oauth
# NOTE(review): r.login() was removed in praw 4+ — confirm pinned praw version
r.login()
# trigger words that make the bot reply
words_to_match = ['insane']
# ids of comments already replied to (in-memory only; lost on restart)
cache = []
def bot_on():
    """Scan the newest comments of the subreddit and reply once to each match.

    A comment is a match when it contains any of words_to_match; already
    answered comment ids are kept in the module-level `cache`.
    """
    print("Going to the subreddit")
    # any subreddit you want
    subreddit = r.get_subreddit("test")
    print("Browsing through comments")
    # comment object holding up to 50 comments
    comments = subreddit.get_comments(limit=50)
    # go through each comment
    for comment in comments:
        # fix: was `comment_text = comment_text.body`, a NameError
        comment_text = comment.body
        isMatch = any(string in comment_text for string in words_to_match)
        # fix: the original replied to EVERY comment — isMatch and cache
        # were computed/maintained but never checked
        if isMatch and comment.id not in cache:
            print("Match found! Comment ID: " + comment.id)
            comment.reply('Try different words such as: crazy, riduculous, amazing, spectactular')
            print("Reply successful")
            cache.append(comment.id)
            print("")
# poll forever, re-scanning the subreddit every 10 seconds
while True:
    bot_on()
    # every 10 sec the bot runs again
    time.sleep(10)
# This will not work: once a parameter is given a default value,
# every parameter after it must also have a default value.
# def add(n1, n2=0, n3=0, n4):
def add(n1, n2=0, n3=0, n4=0):
    """Print the sum of up to four numbers; only n1 is required."""
    total = n1 + n2 + n3 + n4
    print("sum:", total)
# omitted trailing arguments fall back to their defaults (0)
add(1,2,3,4)
add(1,2,3)
add(1,2)
add(1)
# add() # will raise an error, n1 is required
add(1, n3=10)  # skip n2 by passing n3 as a keyword argument
# variadic function:
# a function that can take any number of arguments
def add_v2(*args, **kwargs):
    """Sum all positional args plus keyword values whose name starts with 'n',
    then print the running details and the total."""
    print("inside add_v2()")
    print("args:", args)
    print("kwargs:", kwargs)
    total = sum(args)
    total += sum(v for k, v in kwargs.items() if k.startswith('n'))
    print(total)
# mixes positional and keyword arguments; only the n* keywords are summed
add_v2(100,200,300,400,n1=1, n2=2, n3=3, n4=4, n5=5, n6=6, a=7, b=8, c=9, d=10)
# function closures
def add_sub(n1, n2):
    """Return (n1 + n2, n1 - n2), computed via inner helper functions (closure demo)."""
    def plus(a, b):
        return a + b

    def minus(a, b):
        return a - b

    return (plus(n1, n2), minus(n1, n2))
print(add_sub(n1=10, n2=5))
print(add(2,1)) # calls the original top-level add, not the inner helper
# print(subn(5,2)) # will not work: subn is a closure local to add_sub
#!/usr/bin/env python
from skimage.color import rgb2gray
from skimage.io import imread, imsave
from scipy.misc import toimage
import numpy as np
import wrapper as wr
###########################################################
# IMAGE IO
###########################################################
def imload_rgb(path):
    """Load and return an RGB image scaled to the range [0, 1]."""
    raw = imread(path)
    return raw / 255.0
def save_img(image, imgname, use_JPEG=False):
    """Save image as either .jpeg or .png"""
    if not use_JPEG:
        # NOTE(review): scipy.misc.toimage was removed in SciPy 1.2 — confirm
        # the pinned SciPy version
        toimage(image, cmin=0.0, cmax=1.0).save(imgname+".png")
    else:
        imsave(imgname+".JPEG", image)
###########################################################
# IMAGE MANIPULATION
###########################################################
def adjust_contrast(image, contrast_level):
    """Return the image scaled to a certain contrast level in [0, 1].

    parameters:
    - image: a numpy.ndarray
    - contrast_level: a scalar in [0, 1]; with 1 -> full contrast

    Values are compressed towards the 0.5 midpoint as contrast decreases.
    """
    assert(contrast_level >= 0.0), "contrast_level too low."
    assert(contrast_level <= 1.0), "contrast_level too high."
    midpoint_shift = (1-contrast_level)/2.0
    return midpoint_shift + image.dot(contrast_level)
def grayscale_contrast(image, contrast_level):
    """Convert image to grayscale, then adjust its contrast.

    parameters:
    - image: a numpy.ndarray
    - contrast_level: a scalar in [0, 1]; with 1 -> full contrast
    """
    gray = rgb2gray(image)
    return adjust_contrast(gray, contrast_level)
def uniform_noise(image, width, contrast_level, rng):
    """Convert to grayscale, adjust contrast, then apply uniform noise.

    parameters:
    - image: a numpy.ndarray
    - width: half-width of the additive uniform noise
      -> noise will be in range [-width, width]
    - contrast_level: a scalar in [0, 1]; with 1 -> full contrast
    - rng: a np.random.RandomState(seed=XYZ) to make it reproducible
    """
    prepared = grayscale_contrast(image, contrast_level)
    return apply_uniform_noise(prepared, -width, width, rng)
###########################################################
# HELPER FUNCTIONS
###########################################################
def apply_uniform_noise(image, low, high, rng=None):
    """Apply uniform noise to an image, clipping the result into [0, 1].

    parameters:
    - image: a numpy.ndarray
    - low: lower bound of noise within [low, high)
    - high: upper bound of noise within [low, high)
    - rng: a np.random.RandomState(seed=XYZ) to make it reproducible
    """
    nrow, ncol = image.shape[0], image.shape[1]
    noisy = image + get_uniform_noise(low, high, nrow, ncol, rng)
    # clip values outside the displayable range
    noisy = np.clip(noisy, 0, 1)
    assert is_in_bounds(noisy, 0, 1), "values <0 or >1 occurred"
    return noisy
def get_uniform_noise(low, high, nrow, ncol, rng=None):
    """Return uniform noise within [low, high) of size (nrow, ncol).

    parameters:
    - low: lower bound of noise within [low, high)
    - high: upper bound of noise within [low, high)
    - nrow: number of rows of desired noise
    - ncol: number of columns of desired noise
    - rng: a np.random.RandomState(seed=XYZ) to make it reproducible
    """
    # fall back to the global numpy generator when no rng is supplied
    generator = np.random if rng is None else rng
    return generator.uniform(low=low, high=high, size=(nrow, ncol))
def is_in_bounds(mat, low, high):
    """Return whether all values in 'mat' fall between low and high.

    parameters:
    - mat: a numpy.ndarray
    - low: lower bound (inclusive)
    - high: upper bound (inclusive)
    """
    # fix: the original compared against hard-coded 0 and 1, silently
    # ignoring the low/high parameters
    return np.all(np.logical_and(mat >= low, mat <= high))
def eidolon_partially_coherent_disarray(image, reach, coherence, grain):
    """Return parametrically distorted images (produced by the Eidolon factory).

    For more information on the effect of different distortions, please
    have a look at the paper: Koenderink et al., JoV 2017,
    Eidolons: Novel stimuli for vision research).

    - image: a numpy.ndarray
    - reach: float, controlling the strength of the manipulation
    - coherence: a float within [0, 1] with 1 = full coherence
    - grain: float, controlling how fine-grained the distortion is
    """
    pic = wr.data_to_pic(image)
    return wr.partially_coherent_disarray(pic, reach, coherence, grain)
###########################################################
# MAIN METHOD FOR TESTING & DEMONSTRATION PURPOSES
###########################################################
# demo: generate one manipulated image per experiment type in the CWD
if __name__ == "__main__":

    print("""This main method should generate manipulated
          images in the directory where it was executed.""")

    use_JPEG = False # either JPEG or PNG
    img = imload_rgb("test_image.JPEG")

    ###################################################
    # A) Example for color-experiment:
    #    - convert to grayscale
    ###################################################
    img_grayscale = rgb2gray(img)
    save_img(img_grayscale, "test_image_grayscale", use_JPEG)

    ###################################################
    # B) Example for contrast-experiment:
    #    - convert to grayscale and
    #    - reduce contrast to nominal contrast of 10%
    ###################################################
    contrast_level_1 = 0.1
    img_low_contrast = grayscale_contrast(image=img,
                                          contrast_level=contrast_level_1)
    save_img(img_low_contrast, "test_image_low_contrast", use_JPEG)

    ###################################################
    # C) Example for noise-experiment:
    #    - convert to graycale and
    #    - reduce contrast to 30% and
    #    - apply uniform noise with width 0.1
    ###################################################
    noise_width = 0.1
    contrast_level_2 = 0.3
    # fixed seed for reproducible noise
    rng = np.random.RandomState(seed=42)
    img_noisy = uniform_noise(image=img, width=noise_width,
                              contrast_level=contrast_level_2,
                              rng=rng)
    save_img(img_noisy, "test_image_noisy", use_JPEG)

    ###################################################
    # D) Example for eidolon-experiment:
    #    - use partially_coherent_disarray
    ###################################################
    grain = 10.0
    coherence = 1.0
    reach = 8.0
    img_eidolon = eidolon_partially_coherent_disarray(img, reach,
                                                      coherence, grain)
    save_img(img_eidolon, "test_image_eidolon", use_JPEG)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.