text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python3
import gzip
import sys
import math
import random
# Write a program that creates random fasta files
# Create a function that makes random DNA sequences
# Parameters include length and frequencies for A, C, G, T
# Command line:
# python3 rand_fasta.py <count> <min> <max> <a> <c> <g> <t>
"""
python3 rand_fasta.py 3 10 20 0.1 0.2 0.3 0.4
>seq-0
TCGTTTTGATTACGG
>seq-1
CGGCTGTTCCGTAATGC
>seq-2
TTTCGTGTACTTTCTAGTGA
"""
|
import pygame
import pygame_menu
import main

pygame.init()
surface = pygame.display.set_mode((800, 500))

# Points deducted per wrong question; overwritten by the difficulty selector.
penalty_for_question = 10

def set_difficulty(value, difficulty):
    """pygame-menu onchange callback: store the selected difficulty value."""
    global penalty_for_question
    penalty_for_question = difficulty

def start_the_game():
    """Launch the word game with the currently selected penalty."""
    main.initial_game(penalty_for_question)

# Menu title and widget labels are user-facing Portuguese strings (kept as-is).
menu = pygame_menu.Menu('Bem Vindo ao Jogo de Palavras', width=600, height=400,
                        theme=pygame_menu.themes.THEME_BLUE
                        )
menu.add.text_input('Seu Nome :', default='Vinicius')
menu.add.selector('Dificuldade :', [('Fácil', 10), ('Difícil', 30)], onchange=set_difficulty)
menu.add.button('Jogar', start_the_game)
menu.add.button('Sair', pygame_menu.events.EXIT)
menu.mainloop(surface)
|
import requests
import lxml.etree
import db_conn
import re
import fileinput
import os
import pandas as pd
site_root = os.path.realpath (os.path.dirname (__file__))
json_url = os.path.join (site_root, "static", "temp.json")
def run():
    """Fetch every RSS feed in db_conn.final_link, convert each one to JSON
    rows via panda_n(), then patch the aggregate file into valid JSON.
    """
    documents = []
    response = []
    url_list = db_conn.final_link        # feed URLs (defined in db_conn)
    categories = db_conn.links_list      # category per URL — presumably parallel to url_list; confirm
    counter = 0
    # Start the output file with the opening of the JSON envelope.
    with open(json_url, 'w') as f:
        f.seek(0)
        f.write('{"Response":[')
    for url in url_list:
        response = requests.get(url)
        xml_page = response.text
        # recover=True lets lxml tolerate feeds with minor XML errors.
        parser = lxml.etree.XMLParser(recover=True, encoding='utf-8')
        documents.append(lxml.etree.fromstring(xml_page.encode("utf-8"), parser=parser))
        cat = categories[counter]
        counter = counter + 1
        panda_n(documents, cat)
        documents.clear()
    # Post-process in place: collapse stray comma runs and close the JSON array.
    text_to_search = [',,,,', ',,,', ',,']
    text_to_replace = [',', ']}', ',']
    for i in range(len(text_to_search)):
        with fileinput.FileInput(json_url, inplace=True, backup='.bak') as file:
            for line in file:
                print(line.replace(text_to_search[i], text_to_replace[i]), end='')
def panda_n(documents, cat):
    """Turn parsed RSS documents into JSON records appended to json_url.

    Extracts title/description/link/pubDate text nodes, trims the four
    lists to a common length, labels every row with category *cat*, and
    appends the records (comma-terminated) to the shared JSON file.
    """
    title_list = []
    cati = []
    id = []      # NOTE(review): shadows the builtin id()
    description_list = []
    guid_url_list = []
    dop = []
    for xml_doc in documents:
        #articles = xml_doc.xpath ("//item")
        title_list = xml_doc.xpath("//title/text()")
        description_list = xml_doc.xpath("//description/text()")
        guid_url_list = xml_doc.xpath("//link/text()")
        dop = xml_doc.xpath("//pubDate/text()")
    # The four xpath lists can differ in length (feed quirks); drop the
    # leading surplus so they all match the shortest one.
    a = len(title_list)
    b = len(description_list)
    c = len(guid_url_list)
    d = len(dop)
    del title_list[0:a - min(a, b, c, d)]
    del description_list[0:b - min(a, b, c, d)]
    del guid_url_list[0:c - min(a, b, c, d)]
    del dop[0:d - min(a, b, c, d)]
    for i in range(len(dop)):
        id.append(i + 1)
        cati.append(cat)    # NOTE(review): cati is built but never used
    news_data = pd.DataFrame(id, columns=["Article_Id"])
    news_data["Categories"] = cat
    news_data["Title"] = title_list
    news_data["Description"] = description_list
    #news_data["category"] = categories
    news_data["Links"] = guid_url_list
    news_data["Date_Of_Publishing"] = dop
    # Strip the surrounding '[' ']' before appending; run() repairs commas later.
    with open(json_url, 'a') as f:
        f.write(news_data.to_json(orient='records')[1:-1].replace('}][{', '},{'))
        f.write(',')
    #print(news_data)
    #print(news_data.to_json(orient='records',lines=True))
    #return (news_data)
run()
|
#Advent of Code Day 6 - 2020
# Groups are separated by blank lines in the puzzle input.
with open('day6_input.txt', 'r') as f:
    all_groups = f.read().split('\n\n')
def day6(input_val):
    """Count customs answers for Advent of Code 2020 day 6.

    Each element of *input_val* is one group's block of answers, with
    one line per person.

    Returns:
        (anyone, everyone): per-group totals of questions answered by
        at least one person, and by every person, summed over groups.
    """
    anyone = 0
    everyone = 0
    for group in input_val:
        people = group.split('\n')
        # Part 1: union of all answers in the group.
        anyone += len(set(group.replace('\n', '')))
        # Part 2: intersection of each person's answer set.  Set algebra
        # replaces the original O(n^2) list filtering and also fixes the
        # double-count when the first person's line repeated a letter.
        everyone += len(set.intersection(*map(set, people)))
    return anyone, everyone
print ('done', day6(all_groups)) |
from .core import *
from .facility import *
from .family import *
from .reservation import *
from .signals import * |
from .models import WheelSpin, Stake
from rest_framework import serializers
class MarketInstanceSerializer(serializers.ModelSerializer):
    """
    Serialize every field of the WheelSpin model.
    """
    # profile = UserProfileSerializer(required=True)
    class Meta:
        model = WheelSpin
        fields = "__all__"
        # fields = ('id', 'marketinstance', 'amount_stake_per_market', 'created_at', 'bet_expiry_time', 'closed_at',)#'profile')
class StakeSerializer(serializers.ModelSerializer):
    """
    Serialize the subset of Stake fields exposed to the API.
    """
    class Meta:
        model = Stake
        # fields = ('__all__')
        fields = ("user", "marketselection", "amount", "bet_on_real_account")
|
from func.methods import *
from psql.base_model import *
from settings import bot
def create_tables():
    """Create all bot tables inside a single psql_db transaction."""
    with psql_db:
        psql_db.create_tables([Users, Cites, Tags, AgeUser, Room])

@bot.message_handler(commands=['start'])
def add_user(msg):
    """Handle /start: begin (or resume) registration for this chat."""
    is_registration(bot, msg, Users, start=1)

@bot.message_handler(commands=['find_user'])
def find_user(msg):
    """Handle /find_user: lookup is not implemented yet (stub)."""
    # tmp = msg.text.split()[1]
    # bot.send_message(msg.chat.id, Users.select().where(tmp in Users.name).get())
    pass

@bot.message_handler()
def get_msg(msg):
    """Fallback handler: route any other message through the registration flow."""
    is_registration(bot, msg, Users)

if __name__ == '__main__':
    create_tables()
    log.info('test')
    bot.polling(none_stop=True)
|
from flask import Flask, request, render_template
import os
import jinja2
template_dir = os.path.join(os.path.dirname(__name__),'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route('/', methods = ['GET'])
def index():
    """Serve the signup form."""
    return render_template('Signup.html')
@app.route('/registration', methods = ['POST'])
def checkPW():
    """Validate the signup form.

    Checks username (3-20 alphanumeric chars), email (length plus a
    crude one-'@'-one-'.' shape test) and password (3-20 chars, must
    match the verification field).  Re-renders Signup.html with an
    error message on any failure; renders confirmation.html on success.
    """
    username = request.form['username']
    if len(username) < 3 or len(username) > 20 or not username.isalnum():
        return render_template('Signup.html',
                               username='letters and numbers only please and be more than three characters')
    email = request.form['email']
    if len(email) < 3 or len(email) > 20:
        return render_template('Signup.html',
                               email='Must be between 3 to 20 characters long, including the domain name',
                               k=username, e=email)
    # Well-formedness heuristic: exactly one '@' plus one '.' in total.
    at_n_dot = sum(1 for char in email if char in '@.')
    if at_n_dot != 2:
        return render_template('Signup.html', email='Invalid Email.', k=username, e=email)
    # Removed the duplicate, unused 'password' variable (p1 is the one used).
    p1 = request.form['password_create']
    p2 = request.form['password_verify']
    if len(p1) < 3 or len(p1) > 20:
        # Typo fixed in the user-facing message: 'charactors' -> 'characters'.
        return render_template('Signup.html', password='3 to 20 characters only please', k=username, e=email)
    if p1 == p2:
        template = jinja_env.get_template('confirmation.html')
        return template.render(username=username)
    return '''<h1>Password does not match. <a href = 'http://localhost:5000/' > Return</a> </h1>'''
if __name__ == '__main__':
app.run() |
"""
별찍기 -10
https://www.acmicpc.net/problem/2447
*********
"""
def print(x):
    """Incomplete scaffolding for Baekjoon #2447 (star fractal).

    NOTE(review): this shadows the builtin print(), never draws anything,
    and its last expression is discarded — clearly unfinished; confirm
    intent before use.
    """
    star = "*"
    non_star = " "
    test = x/3
    if test != 1:
        x/test    # no-op: result is computed and thrown away
x = int(input())
# for i in range(1, x+1):
|
import pygame
import sys
from os import listdir
from os.path import isfile, join
from game.tape import Tape
from game.side import Side
from game.deck import Deck
from main import Handler
class App:
    """Pygame front-end for UTMagic: window, asset cache and main loop."""

    def __init__(self, argv, size, background=(0, 0, 0)):
        """Open a resizable *size* window; game objects are registered later via spawn()."""
        pygame.init()
        pygame.font.init()
        self.argv = argv
        self.size = size
        self.background = background
        self.screen = pygame.display.set_mode(self.size, pygame.RESIZABLE)
        pygame.display.set_caption('UTMagic')
        self.game_objects = []            # updatable/drawable objects, ordered by .layer
        self.clock = pygame.time.Clock()
        self.font = pygame.font.SysFont('FreeMono.ttf', 40)
        self.mouse = (0, 0)               # last polled mouse position
        self.mouse_click = False          # True only on frames where a button went down
        self.images = {}                  # filename -> loaded pygame Surface
        self.tape = None                  # Tape widget, created in init_game()
        self.deck = None                  # Deck widget, created in init_game()

    def load_images(self):
        """Load every file from ../images into the image cache, keyed by filename."""
        all_names = [f for f in listdir("../images") if isfile(join("../images", f))]
        for name in all_names:
            self.images[name] = pygame.image.load(join("../images", name))

    def init_game(self):
        """Create the two card Sides, the tape and the deck from main.Handler."""
        Side((565, 50), ["rotlung_reanimator",
             "necromancien_de_xathrid", "wild_evocation", "recycle",
             "privileged_position", "vigor", "archonte_brulant"], (7, 1), self)
        Side((565, 730), ["rotlung_reanimator", "cloak_of_invisibility",
             "roue_du_soleil_et_de_la_lune", "gains_illusoires", "fungus_sliver",
             "steely_resolve", "dread_of_night", "shared_triumph", "vigor",
             "archonte_brulant", "ancient_tomb", "mesmeric_orb",
             "augure_prismatique", "choke"], (7, 2), self)
        handler = Handler(self.argv)
        self.tape = Tape(handler.get_init(), (960, 360), self)
        # state_changed, symbol, direction = handler.next() # bool, char, int
        self.deck = Deck((1600, 650), self.tape, handler, self)

    def spawn(self, obj):
        """Register *obj* for per-frame update/draw."""
        self.game_objects.append(obj)

    def handle_input(self):
        """Poll events: quit, SPACE flips the deck, arrows drive tape/deck, mouse sets click flag."""
        self.mouse = pygame.mouse.get_pos()
        self.mouse_click = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    self.deck.flip_mode()
                if event.key == pygame.K_LEFT:
                    self.tape.index = 0
                if event.key == pygame.K_RIGHT:
                    self.deck.next_phase()
            if event.type == pygame.MOUSEBUTTONDOWN:
                self.mouse_click = True

    def run(self):
        """Main loop: input, update, draw, flip — never returns."""
        while True:
            self.handle_input()
            delta_time = self.clock.tick()
            self.screen.fill(self.background)
            for obj in self.game_objects:
                obj.update(delta_time)
                # NOTE(review): indentation was lost in this source dump.
                # Re-sorting and drawing inside the update loop is the
                # literal reading, but "sort once, then draw all" was
                # probably intended — confirm against the original repo.
                self.game_objects.sort(key=lambda o: o.layer)
                obj.draw(self.screen)
            pygame.display.flip()
if __name__ == '__main__':
    # 1080p window with a dark blue-grey background.
    app = App(sys.argv, (1920, 1080), (20, 30, 50))
    app.load_images()
    app.init_game()
    app.run()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 8 17:05:47 2019
@author: Administrator
"""
#import serial
#
#def hexsend(string_data=''):
# hex_data = string_data.decode("hex")
# return hex_data
#
#if __name__ == '__main__':
# ser = serial.Serial("com16",115200)
# print(ser.name)
# if ser.isOpen():
# print("open success")
# else:
# print("open failed")
# try:
# while True:
# count = ser.inWaiting()
# if count > 0:
# data = ser.read(count)
# print("receive:", data)
## if data != b'':
## print("receive:", data)
## else:
## ser.write(hexsend(data))
# except KeyboardInterrupt:
# if serial != None:
# ser.close()
#import numpy as np
#import matplotlib.pyplot as plt
#
#a = np.array(np.arange(100))
#b = np.random.randint(0,10,a.shape[0])
#plt.bar(a,b)
#print(a)
#def zero():
# print(0)
#
#def one():
# print(1)
#
#def switch_test(arg):
# switcher = {
# 0:zero,
# 1:one,
# 2:lambda:"two",
# }
# func = switcher.get(arg,lambda:"nothing")
# return func()
#
#switch_test(0)
def func(x):
    """Running total kept as an attribute on the function object itself.

    The very first call creates the accumulator at 0 and ignores its
    argument; every later call adds *x* and returns the new total.
    """
    if hasattr(func, 'x'):
        func.x += x
    else:
        func.x = 0
    return func.x
|
'''def fibboncaci(n):
if n in [1, 2]:
return 1
seq = [1, 1]
while len(seq) < n:
print seq
print str(seq[0]) + ' ' + str(seq[1])
seq.append(seq[-1] + seq[-2])
return seq[len(seq) - 1]
print fibboncaci(10)
'''
def is_palindrome(word):
    """Return '<word> is a palindrome!' or '<word> is not a palindrome...'.

    The comparison is exact: case, spaces and punctuation all count.
    """
    # '//' (floor division) keeps the slice indices integral.  The
    # original used '/', which yields a float — and a TypeError when
    # used as a slice index — under Python 3.
    half = len(word) // 2
    if len(word) % 2 == 0:
        first_half = word[:half]
    else:
        # Odd length: the middle character lands in both halves, so it
        # can never break the comparison.
        first_half = word[:half + 1]
    second_half = word[half:][::-1]
    if first_half == second_half:
        return word + ' is a palindrome!'
    else:
        return word + ' is not a palindrome...'
print is_palindrome('kayak')
print is_palindrome('race car')
print is_palindrome('katniss everdeen')
|
# app/context_processors.py
def blogcategories(request):
    """Template context processor: expose all categories, reverse-alphabetical by name."""
    # Imported lazily — presumably to avoid app-registry/import-cycle issues; confirm.
    from olympicvaxinfo.models import Category
    return {'blogcategories': Category.objects.all().order_by('-name')}
from diagrams import Diagram, Cluster
from diagrams.aws.database import Redshift
from diagrams.gcp.iot import IotCore
from diagrams.gcp.compute import AppEngine, Functions
# Global graphviz attributes for the rendered diagram.
graph_attr = {"fontsize": "45",
              "bgcolor": "transparent",
              # 'center': 'true'
              # 'concentrate':'false'
              'labelloc': "t"
              }

# The diagram image is rendered as a side effect of leaving the context manager.
with Diagram('Algorithmic Trading General Process', direction='LR',
             filename='finances/Algorithmic Trading/algo_trading_general_process_diagram',
             graph_attr=graph_attr) as d:
    # Research: raw data feeding every later stage.
    with Cluster('Research'):  # typo fixed: was 'Researcg'
        data = IotCore('Data')
        data_time = IotCore('Real-time/Historical')
        data_type = IotCore('Market/Non-market Data')
        data_time - data
        data_type - data
    with Cluster('Pre-trade Analysis'):
        pretrade_analysis = [Redshift('Alpha Model'),
                             Redshift('Risk Model'),
                             Redshift('Transaction Cost Model')]
    data >> pretrade_analysis
    with Cluster('Trading Signal'):
        trading_signal = AppEngine('Portfolio Construction Model')
    data >> trading_signal
    pretrade_analysis >> trading_signal
    with Cluster('Trade Execution'):
        trade_execution = Functions('Execution Model')
    data >> trade_execution
    trading_signal >> trade_execution
    post_trade_analysis = Redshift('Post-trade Analysis')
    trade_execution >> post_trade_analysis
# Removed the stray no-op 'd' expression statement that followed the block.
|
from django.db import models
class City(models.Model):
    """A city in which camera locations are registered."""
    name = models.CharField(max_length=30, verbose_name='название')

    def __str__(self):
        return self.name

    def update_name(self, name):
        """Rename the city and persist immediately."""
        self.name = name
        self.save()

    class Meta:
        verbose_name = 'город'
        verbose_name_plural = 'города'
class Location(models.Model):
    """Physical placement of a camera: city, street and support pole."""
    city = models.ForeignKey(City, on_delete=models.CASCADE, verbose_name='город')
    street = models.CharField(max_length=100, verbose_name='улица')
    # Support pole number; make_portable() sets it to 0.
    support = models.IntegerField(verbose_name='номер опоры')
    notes = models.TextField(verbose_name='заметки', default='', blank=True)

    def make_portable(self):
        """Mark this location as portable (support pole 0) and save."""
        self.support = 0
        self.save()

    def __str__(self):
        # NOTE(review): get_location() is not defined on this model in the
        # visible source — confirm it exists elsewhere, or __str__ will raise.
        return self.get_location()

    class Meta:
        verbose_name = 'положение'
        verbose_name_plural = 'положения'
class State(models.Model):
    """Operational state of a camera plus its log tail."""
    # Severity levels, ordered from healthy to dead.
    STATES = (
        (0, 'OK'),
        (1, 'WARNING'),
        (2, 'ERROR'),
        (3, 'FAILURE'),
    )
    state = models.IntegerField(choices=STATES, verbose_name='состояние')
    logs = models.TextField(verbose_name='журнал', default='', blank=True)

    def update_logs(self, logs):
        """Replace the stored logs and persist."""
        self.logs = logs
        self.save()

    def get_state(self):
        """Human-readable label for the numeric state."""
        return f'{self.get_state_display()}'

    def __str__(self):
        return f'{self.get_state()} ({self.logs})'

    class Meta:
        verbose_name = 'состояние'
        verbose_name_plural = 'состояния'
class Specifications(models.Model):
    """Hardware datasheet for a camera: enforcement type, producer, service schedule."""
    # Enforcement camera types.
    TYPES = (
        (0, 'SPEED'),
        (1, 'AVERAGE SPEED'),
        (2, 'RED LIGHT'),
        (3, 'DOUBLE WHITE LINE'),
        (4, 'BUS LANE'),
        (5, 'TOLLBOOTH'),
        (6, 'LEVEL CROSSING'),
        (7, 'CONGESTION CHARGE'),
    )
    # NOTE(review): field name shadows the builtin type() inside this class body.
    type = models.IntegerField(choices=TYPES, verbose_name='тип')
    producer = models.CharField(max_length=100, verbose_name='производитель')
    date_of_manufacture = models.DateField(verbose_name='дата производства')
    # Service interval; units not stated in the visible source — confirm (days/months?).
    service_frequency = models.IntegerField(verbose_name='частота сервисного обслуживания')
    notes = models.TextField(verbose_name='заметки', default='', blank=True)

    def get_type(self):
        """Human-readable label for the numeric type."""
        return self.get_type_display()

    def __str__(self):
        try:
            # Accessing the reverse OneToOne raises Camera.DoesNotExist
            # while no camera references this spec yet.
            return f'{self.get_type()} ({self.camera.__str__()})'
        except Camera.DoesNotExist:
            return f'{self.get_type()}'

    class Meta:
        verbose_name = 'спецификация'
        verbose_name_plural = 'спецификации'
class Camera(models.Model):
    """An installed camera tying together its location, spec sheet and state."""
    location = models.ForeignKey(Location, on_delete=models.CASCADE, verbose_name='местоположение')
    specifications = models.OneToOneField(Specifications, on_delete=models.CASCADE, verbose_name='спецификации')
    state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name='состояние')

    def __str__(self):
        return f'Камера №{self.id}'

    class Meta:
        verbose_name = 'камера'
        verbose_name_plural = 'камеры'
class Service(models.Model):
    """One maintenance event performed on a camera."""
    camera = models.ForeignKey(Camera, on_delete=models.CASCADE, verbose_name='ID камеры')
    service_organization = models.CharField(max_length=100, verbose_name='сервисная организация')
    registration_date = models.DateField(auto_now_add=True)  # set once, on record creation
    service_data = models.DateField(verbose_name='дата сервиса')
    info = models.TextField(verbose_name='информация', default='', blank=True)

    def __str__(self):
        return f'сервис №{self.id}'

    class Meta:
        verbose_name = 'сервис'
        verbose_name_plural = 'сервисы'
|
import pytest
from ethereum.tools.tester import TransactionFailed
from plasma_core.constants import NULL_ADDRESS
from plasma_core.utils.transactions import decode_utxo_id
from plasma_core.transaction import Transaction
from plasma_core.utils.merkle.fixed_merkle import FixedMerkle
def test_deposit_valid_values_should_succeed(ethtester, testlang):
    """A plain ETH deposit stores [merkle root, timestamp] as block 1."""
    owner, amount = testlang.accounts[0], 100
    deposit_id = testlang.deposit(owner, amount)
    deposit_tx = testlang.child_chain.get_transaction(deposit_id)
    # A deposit block holds exactly one transaction in a depth-16 tree.
    merkle = FixedMerkle(16, [deposit_tx.encoded])
    assert testlang.root_chain.blocks(1) == [merkle.root, ethtester.chain.head_state.timestamp]
    assert testlang.root_chain.nextDepositBlock() == 2
def test_deposit_invalid_value_should_fail(testlang):
    """Depositing with msg.value that differs from the tx amount reverts."""
    owner, amount = testlang.accounts[0], 100
    deposit_tx = Transaction(outputs=[(owner.address, NULL_ADDRESS, amount)])
    with pytest.raises(TransactionFailed):
        testlang.root_chain.deposit(deposit_tx.encoded, value=0)
def test_deposit_zero_amount_should_succeed(testlang):
    """A zero-value deposit is accepted and creates a normal deposit block."""
    owner, amount = testlang.accounts[0], 0
    deposit_id = testlang.deposit(owner, amount)
    deposit_blknum, _, _ = decode_utxo_id(deposit_id)
    plasma_block = testlang.get_plasma_block(deposit_blknum)
    assert plasma_block.root == testlang.child_chain.get_block(deposit_blknum).root
    assert plasma_block.timestamp == testlang.timestamp
    assert testlang.root_chain.nextDepositBlock() == 2
def test_deposit_invalid_format_should_fail(testlang):
    """A deposit transaction with more than one output reverts."""
    owner, amount = testlang.accounts[0], 100
    deposit_tx = Transaction(outputs=[(owner.address, NULL_ADDRESS, amount), (owner.address, NULL_ADDRESS, amount)])
    with pytest.raises(TransactionFailed):
        testlang.root_chain.deposit(deposit_tx.encoded, value=amount)
def test_at_most_999_deposits_per_child_block(testlang):
    """Deposits are capped per child block; spending a UTXO frees a new slot."""
    owner = testlang.accounts[0]
    child_block_interval = testlang.root_chain.CHILD_BLOCK_INTERVAL()
    for i in range(0, child_block_interval - 1):
        deposit_id = testlang.deposit(owner, 1)
        if i % 25 == 0:
            # Mine periodically — presumably to keep the test chain within limits; confirm.
            testlang.ethtester.chain.mine()
    with pytest.raises(TransactionFailed):
        testlang.deposit(owner, 1)
    # Spending triggers a child-chain block, resetting the deposit counter.
    testlang.spend_utxo([deposit_id], [owner.key], [(owner.address, NULL_ADDRESS, 1)])
    testlang.deposit(owner, 1)
def test_token_deposit_should_succeed(testlang, root_chain, token):
    """An ERC20 token deposit creates a deposit block like an ETH one."""
    owner, amount = testlang.accounts[0], 100
    deposit_id = testlang.deposit_token(owner, token, amount)
    deposit_blknum, _, _ = decode_utxo_id(deposit_id)
    plasma_block = testlang.get_plasma_block(deposit_blknum)
    assert plasma_block.root == testlang.child_chain.get_block(deposit_blknum).root
    assert plasma_block.timestamp == testlang.timestamp
    assert root_chain.nextDepositBlock() == 2
def test_token_deposit_non_existing_token_should_fail(testlang, token):
    """depositFrom reverts when the tx output names NULL_ADDRESS, not the token."""
    owner, amount = testlang.accounts[0], 100
    deposit_tx = Transaction(outputs=[(owner.address, NULL_ADDRESS, amount)])
    token.mint(owner.address, amount)
    token.approve(testlang.root_chain.address, amount, sender=owner.key)
    with pytest.raises(TransactionFailed):
        testlang.root_chain.depositFrom(deposit_tx.encoded, sender=owner.key)
def test_token_deposit_no_approve_should_fail(testlang, token):
    """depositFrom reverts when the contract has no token allowance."""
    owner, amount = testlang.accounts[0], 100
    deposit_tx = Transaction(outputs=[(owner.address, token.address, amount)])
    token.mint(owner.address, amount)
    with pytest.raises(TransactionFailed):
        testlang.root_chain.depositFrom(deposit_tx.encoded, sender=owner.key)
def test_token_deposit_insufficient_approve_should_fail(testlang, token):
    """depositFrom reverts when the allowance is smaller than the deposit amount."""
    owner, amount = testlang.accounts[0], 100
    deposit_tx = Transaction(outputs=[(owner.address, token.address, amount * 5)])
    token.mint(owner.address, amount)
    token.approve(testlang.root_chain.address, amount, sender=owner.key)
    with pytest.raises(TransactionFailed):
        testlang.root_chain.depositFrom(deposit_tx.encoded, sender=owner.key)
|
# !/usr/bin/python
# -*- coding:utf-8 -*-
import multiprocessing as mp
import tensorflow as tf
from sub_tree import sub_tree
from sub_tree import node
import sys
import logging
import time
import Queue
import numpy as np
from treelib import Tree
import copy
from utils import compute_bleu_rouge
from utils import normalize
from layers.basic_rnn import rnn
from layers.match_layer import MatchLSTMLayer
from layers.match_layer import AttentionFlowMatchLayer
from layers.pointer_net import PointerNetDecoder
def main():
    """Toy demo: print 0..9 (Python 2 print statement — this file is py2)."""
    for idx in range(10):
        print idx
def job(x):
    """Square *x* (worker function for the multiprocessing demo)."""
    return x ** 2
'''
def tree_search(trees_batch):
pool = mp.Pool()
print("Number of cpu : ", mp.cpu_count())
res = pool.map(job, range(10))
print res
def tree_search(trees_batch):
mp.log_to_stderr()
logger = mp.get_logger()
logger.setLevel(logging.INFO)
print("Number of cpu : ", mp.cpu_count())
procs = []
queue = mp.Queue()
#print 'procs '+ str(len(procs))
for t in trees_batch:
proc = mp.Process(target = sub_tree, args = (t,))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
'''
def test_tf():
    """Scratch experiment with TF1 placeholders, expand_dims and gather_nd.

    NOTE(review): Python 2 syntax and tf.Session — dead experimental code,
    kept verbatim for reference.
    """
    x = tf.placeholder(tf.float64, shape=None)
    y = tf.placeholder(tf.float64, shape=None)
    z = tf.placeholder(tf.float64, shape=None)
    a = np.ones((1, 5, 4))
    b = np.array([[[1, 2], [1, 3]], [[0, 1], [0, 2]]])
    c = np.array([[(1., 2., 3.), (2., 3., 4.), (3., 4., 5.), (4., 5., 6.)],
                  [(1., 2., 3.), (2., 2., 2.), (3., 4., 5.), (4., 5., 6.)]])
    # print a
    print b
    print c
    print type(b)
    # y = tf.multiply(x,)
    tmp = tf.expand_dims(z, 0)
    sa = tf.shape(x)
    sb = tf.shape(y)
    sc = tf.shape(z)
    s = tf.shape(tmp)
    # q = tf.matmul(x, tmp)
    # sd = tf.shape(q)
    r = tf.gather_nd(c, b)
    sr = tf.shape(r)
    # print np.shape(a)
    # print np.shape(b)
    with tf.Session() as sess:
        sb, sc, s, tmp, r, sr = sess.run([sb, sc, s, tmp, r, sr], feed_dict={x: a, y: b, z: c})
    print sb
    print sc
    # print q
    print r
    print sr
    # return result
class Data_tree(object):
    """Mutable per-question search state wrapped around one MCTS sub_tree."""
    def __init__(self, tree, start_node):
        self.tree = tree                         # the sub_tree instance being searched
        self.start_node = start_node             # node id the next selection starts from
        self.q_id = tree.raw_tree_data['tree_id']
        self.q_type = tree.raw_tree_data['question_type']
        self.words_id_list = tree.raw_tree_data['passage_token_id']
        self.l_passage = tree.raw_tree_data['p_length']
        self.ref_answer = tree.raw_tree_data['ref_answer']
        self.p_data = []                         # policy probability recorded per taken action
        self.listSelectedSet = []                # word ids chosen so far (the answer span)
        self.value = 0
        self.select_list = []                    # node ids on the current selection path
        self.p_word_id, self.p_pred = [], []
        self.tmp_node = None                     # leaf reached by the last selection pass
        self.expand_node = None                  # node queued for expansion
        self.num_of_search = 0                   # completed selection passes
class PSCHTree(object):
"""
python -u run.py --train --algo MCST --epochs 1 --gpu 2 --max_p_len 2000 --hidden_size 150 --train_files ../data/demo/trainset/search.train.json --dev_files ../data/demo/devset/search.dev.json --test_files ../data/demo/test/search.test.json
nohup python -u run.py --train --algo BIDAF --epochs 10 --train_files ../data/demo/trainset/test_5 --dev_files ../data/demo/devset/test_5 --test_files ../data/demo/test/search.test.json >test5.txt 2>&1 &
nohup python -u run.py --train --algo MCST --epochs 100 --gpu 3 --max_p_len 1000 --hidden_size 150 --train_files ../data/demo/trainset/search.train.json --dev_files ../data/demo/devset/search.dev.json --test_files ../data/demo/test/search.test.json >test_313.txt 2>&1 &
"""
def __init__(self, args, vocab):
    """Store hyper-parameters from *args*, keep the vocab, and build the TF graph."""
    self.vocab = vocab
    # logging
    self.logger = logging.getLogger("brc")
    # basic config
    self.algo = args.algo
    self.hidden_size = args.hidden_size
    self.optim_type = args.optim
    self.learning_rate = args.learning_rate
    self.weight_decay = args.weight_decay
    self.use_dropout = args.dropout_keep_prob < 1
    self.dropout_keep_prob = 1.0
    # length limit
    self.max_p_num = args.max_p_num
    self.max_p_len = args.max_p_len
    self.max_q_len = args.max_q_len
    # self.max_a_len = args.max_a_len
    self.max_a_len = 2        # NOTE(review): hard-coded, overrides args.max_a_len
    # test paras
    self.search_time = 3      # MCTS simulations per action step
    self.beta = 100.0         # exploration weight in the selection score
    self._build_graph()
def _init_sub_tree(self, tree):
    """Build a sub_tree/Data_tree pair for one question dict and count the visit."""
    print '------- init sub tree :' + str(tree['tree_id']) + '---------'
    start_node = 'question_' + str(tree['tree_id'])
    mcts_tree = sub_tree(tree)
    data_tree = Data_tree(mcts_tree, start_node)
    data_tree.num_of_search += 1
    return data_tree
def _do_init_tree_job(self, lock, trees_to_accomplish, trees_that_are_done, log):
    """Worker: drain the task queue, build a Data_tree per task, push results.

    Runs in a multiprocessing.Process; terminates when the queue is empty.
    """
    while True:
        try:
            '''
            try to get task from the queue. get_nowait() function will
            raise queue.Empty exception if the queue is empty.
            queue(False) function would do the same task also.
            '''
            with lock:
                tree = trees_to_accomplish.get_nowait()
        except Queue.Empty:
            break
        else:
            '''
            if no exception has been raised, add the task completion
            message to task_that_are_done queue
            '''
            # result = self._init_sub_tree(tree)
            # Inlined copy of _init_sub_tree so the worker runs it in-process.
            print '------- init sub tree :' + str(tree['tree_id']) + '---------'
            start_node = 'question_' + str(tree['tree_id'])
            mcts_tree = sub_tree(tree)
            data_tree = Data_tree(mcts_tree, start_node)
            data_tree.num_of_search += 1
            lock.acquire()
            try:
                log.put(str(tree['tree_id']) + ' is done by ' + str(mp.current_process().name))
                trees_that_are_done.put(data_tree)
            finally:
                lock.release()
            # time.sleep(.5)
    return True
def _search_sub_tree(self, data_tree):
    """One MCTS selection pass: walk from the start node to a leaf by max score."""
    sub_tree = data_tree.tree
    # print '------- search sub tree :' + str(sub_tree.q_id) + '---------'
    start_node_id = data_tree.start_node
    data_tree.num_of_search += 1
    data_tree.select_list = [start_node_id]
    tmp_node = sub_tree.tree.get_node(start_node_id)
    while not tmp_node.is_leaf():
        max_score = float("-inf")
        max_id = -1
        for child_id in tmp_node.fpointer:
            child_node = sub_tree.tree.get_node(child_id)
            # score = child_node.data.p
            # Prior probability weighted by visit counts (PUCT-like) — confirm formula intent.
            score = self.beta * child_node.data.p * ((1 + sub_tree.count) / (1 + child_node.data.num))
            if score > max_score:
                max_id = child_id
                max_score = score
        data_tree.select_list.append(max_id)
        tmp_node = sub_tree.tree.get_node(max_id)
    data_tree.tmp_node = tmp_node
    return data_tree
def _do_search_tree_job(self, lock, trees_to_accomplish, trees_that_are_done, log):
    """Worker: run one MCTS selection (root -> leaf) per queued Data_tree."""
    while True:
        try:
            '''
            try to get task from the queue. get_nowait() function will
            raise queue.Empty exception if the queue is empty.
            queue(False) function would do the same task also.
            '''
            with lock:
                data_tree = trees_to_accomplish.get_nowait()
        except Queue.Empty:
            break
        else:
            '''
            if no exception has been raised, add the task completion
            message to task_that_are_done queue
            '''
            # result = self._search_sub_tree(tree)
            # Inlined copy of _search_sub_tree so the worker runs it in-process.
            sub_tree = data_tree.tree
            # print '------- search sub tree :' + str(sub_tree.q_id) + '---------'
            start_node_id = data_tree.start_node
            data_tree.num_of_search += 1
            data_tree.select_list = [start_node_id]
            tmp_node = sub_tree.tree.get_node(start_node_id)
            while not tmp_node.is_leaf():
                max_score = float("-inf")
                max_id = -1
                for child_id in tmp_node.fpointer:
                    child_node = sub_tree.tree.get_node(child_id)
                    # score = child_node.data.p
                    score = self.beta * child_node.data.p * ((1 + sub_tree.count) / (1 + child_node.data.num))
                    if score > max_score:
                        max_id = child_id
                        max_score = score
                data_tree.select_list.append(max_id)
                tmp_node = sub_tree.tree.get_node(max_id)
            data_tree.tmp_node = tmp_node
            lock.acquire()
            try:
                log.put(str(data_tree.tmp_node) + ' is selected by ' + str(mp.current_process().name))
                # print str(data_tree.tmp_node) + ' is selected by ' + str(mp.current_process().name)
                trees_that_are_done.put(data_tree)
            finally:
                lock.release()
    return True
def _aciton_tree(self, data_tree):
    """Choose the next answer word for one tree via the learned policy.

    NOTE(review): name typo ('aciton') kept — renaming would break callers.
    """
    start_node = data_tree.start_node
    tmp_policy = self._get_policy(data_tree)
    # print (tmp_policy.values())
    # print (sum(tmp_policy.values()))
    prob, select_word_id, start_node = self._take_action(data_tree)
    data_tree.p_data.append(prob)
    data_tree.listSelectedSet.append(select_word_id)
    return data_tree
def _do_tree_action_job(self, lock, trees_to_accomplish, action_result_queue, log):
    """Worker: pop a Data_tree, take one action via _take_action, push the result."""
    while True:
        try:
            '''
            try to get task from the queue. get_nowait() function will
            raise queue.Empty exception if the queue is empty.
            queue(False) function would do the same task also.
            '''
            lock.acquire()
            try:
                data_tree = trees_to_accomplish.get_nowait()
            finally:
                lock.release()
        except Queue.Empty:
            break
        else:
            '''
            if no exception has been raised, add the task completion
            message to task_that_are_done queue
            '''
            # result = self._aciton_tree(tree)
            # result = tree
            prob, select_word_id, start_node = self._take_action(data_tree)
            data_tree.start_node = start_node
            data_tree.p_data.append(prob)
            # print ('data_tree.listSelectedSet',data_tree.listSelectedSet)
            data_tree.listSelectedSet.append(select_word_id)
            lock.acquire()
            try:
                log.put(str(data_tree.listSelectedSet) + ' is list of action choosen by ' + str(
                    mp.current_process().name))
                action_result_queue.put(data_tree)
            finally:
                lock.release()
    return True
def feed_in_batch(self, tree_batch, parallel_size, feed_dict):
    """Register the batch of questions to search and the worker-pool size.

    *feed_dict* is currently unused (the assignment is commented out).
    """
    self.tree_batch = tree_batch
    self.para_size = parallel_size
    self.batch_size = len(self.tree_batch['tree_ids'])
    # self.feed_dict = feed_dict
def tree_search(self):
trees = []
test_tf()
# 1)initialize trees
for bitx in range(self.batch_size):
# print '-------------- yeild ' + str(bitx) + '-------------'
if self.tree_batch['p_length'][bitx] > self.max_p_len:
# print '>>>>>>>>>>>>>>>> '
self.tree_batch['p_length'][bitx] = self.max_p_len
self.tree_batch['candidates'][bitx] = self.tree_batch['candidates'][bitx][:(self.max_p_len)] # ???
tree = {'tree_id': self.tree_batch['tree_ids'][bitx],
'question_token_ids': self.tree_batch['root_tokens'][bitx],
'passage_token_id': self.tree_batch['candidates'][bitx],
'q_length': self.tree_batch['q_length'][bitx],
'p_length': self.tree_batch['p_length'][bitx],
'question_type': self.tree_batch['question_type'][bitx],
'ref_answer': self.tree_batch['ref_answers'][bitx]
# 'mcst_model':self.tree_batch['mcst_model']
}
trees.append(tree)
print ('Max parallel processes size: ', self.para_size)
number_of_task = self.batch_size
number_of_procs = self.para_size
manager = mp.Manager()
trees_to_accomplish = manager.Queue()
trees_that_are_done = manager.Queue()
log = mp.Queue()
processes = []
lock = manager.Lock()
for i in trees:
trees_to_accomplish.put(i)
# creating processes
for w in range(number_of_procs):
p = mp.Process(target=self._do_init_tree_job, args=(lock, trees_to_accomplish, trees_that_are_done, log))
processes.append(p)
p.start()
# completing process
for p in processes:
p.join()
while not log.empty():
print(log.get())
# for i,p in enumerate(processes):
# if not p.is_alive():
# print ("[MAIN]: WORKER is a goner", i)
# init the root node and expand the root node
self.tree_list = []
init_list = []
while not trees_that_are_done.empty():
now_tree = trees_that_are_done.get()
now_tree.expand_node = now_tree.tree.tree.get_node(now_tree.tree.tree.root)
init_list.append(now_tree)
# self._init_root(now_tree)
# self.tree_list.append(now_tree)
# init_roots(init_list)
self.tree_list = self.expands(init_list)
# search tree
self.end_tree = []
for t in xrange(self.max_a_len):
print ('Answer_len', t)
if len(self.tree_list) == 0:
break
for s_time in range(self.search_time):
print ('search time', s_time)
# creating processes
processes_search = []
tree_search_queue = manager.Queue()
tree_result_queue = manager.Queue()
for tree in self.tree_list:
tree_search_queue.put(tree)
search_tree_list = []
for w in range(number_of_procs):
p = mp.Process(target=self._do_search_tree_job,
args=(lock, tree_search_queue, tree_result_queue, log))
processes_search.append(p)
p.start()
time.sleep(0.1)
while 1:
if not tree_result_queue.empty():
data_tree = tree_result_queue.get()
search_tree_list.append(data_tree)
if len(search_tree_list) == number_of_procs:
break
# time.sleep(0.1)
# completing process
for p in processes_search:
# p.join()
p.terminate()
while not log.empty():
print(log.get())
self.tree_list = []
# gather train data
self.tree_list = self._search_vv(search_tree_list)
tree_need_expand_list = []
tree_no_need_expand_list = []
for data_tree in self.tree_list:
data_tree_update = self._updates(data_tree)
tmp_node = data_tree_update.tmp_node
l_passage = data_tree_update.l_passage
word_id = int(tmp_node.data.word[-1])
if tmp_node.is_leaf() and (word_id < (l_passage - 1)):
data_tree_update.expand_node = tmp_node
tree_need_expand_list.append(data_tree_update)
else:
tree_no_need_expand_list.append(data_tree_update)
self.tree_list = self.expands(tree_need_expand_list)
self.tree_list.append(tree_no_need_expand_list)
# for data_tree in search_tree_list:
# value = self._search_v(data_tree)
# data_tree_update = self._update(data_tree, value)
# data_tree_update.tree.count += 1
# tmp_node = data_tree_update.tmp_node
# l_passage = data_tree_update.l_passage
# word_id = int(tmp_node.data.word[-1])
# if tmp_node.is_leaf() and (word_id < (l_passage-1)):
# tree_data = data_tree_update.tree.get_raw_tree_data()
# feed_dict = {self.p: [tree_data['passage_token_id']],
# self.q: [tree_data['question_token_ids']],
# self.p_length: [tree_data['p_length']],
# self.q_length: [tree_data['q_length']],
# self.dropout_keep_prob: 1.0}
# data_tree_update = self.expand(data_tree_update,tmp_node,feed_dict)
# self.tree_list.append(data_tree_update)
# take action
num_action_procs = 0
action_queue = manager.Queue()
action_result_queue = manager.Queue()
for tree in self.tree_list:
# print ('######### tree.listSelectedSet: ', tree.listSelectedSet)
# print ('num ', tree.num_of_search)
if not len(tree.listSelectedSet) == 0:
last_word = tree.listSelectedSet[-1]
if not last_word == (tree.l_passage - 1):
action_queue.put(tree)
num_action_procs += 1
else:
self.end_tree.append(tree)
else:
action_queue.put(tree)
num_action_procs += 1
action_tree_list = []
processes_action = []
# print ('###start take action ')
# print ('len(self.tree_list)', len(self.tree_list))
'''
for w in range(num_action_procs):
#print (w, w)
p = mp.Process(target=self._do_tree_action_job, args=(lock, action_queue, action_result_queue, log))
processes_action.append(p)
p.start()
#time.sleep(0.1)
# completing process
while 1:
#time.sleep(0.1)
if not action_result_queue.empty():
data_tree = action_result_queue.get()
action_tree_list.append(data_tree)
if len(action_tree_list) == num_action_procs:
break
for p in processes_action:
p.terminate()
while not log.empty():
print(log.get())
self.tree_list = action_tree_list
for selection in action_tree_list:
print ('selection', selection.listSelectedSet)
for t in self.tree_list:
self.end_tree.append(t)
print ('----end tree:', len(self.end_tree))
#create nodes --->search until finish ----
pred_answers,ref_answers = [],[]
for tree in self.tree_list:
p_words_list = tree.words_id_list
listSelectedSet_words = []
listSelectedSet = map(eval, tree.listSelectedSet)
for idx in listSelectedSet:
listSelectedSet_words.append(p_words_list[idx])
# print 'listSelectedSet:'
#print listSelectedSet
# print 'listSelectedSet_words: '
# print listSelectedSet_words'
strr123 = self.vocab.recover_from_ids(listSelectedSet_words, 0)
# print strr123
pred_answers.append({'question_id': tree.q_id,
'question_type': tree.q_type,
'answers': [''.join(strr123)],
'entity_answers': [[]],
'yesno_answers': []})
ref_answers.append({'question_id': tree.q_id,
'question_type': tree.q_type,
'answers': tree.ref_answer,
'entity_answers': [[]],
'yesno_answers': []})
#print 'pred_answer: '
#print pred_answers
#print 'ref_answers: '
#print ref_answers
if len(ref_answers) > 0:
pred_dict, ref_dict = {}, {}
for pred, ref in zip(pred_answers, ref_answers):
question_id = ref['question_id']
if len(ref['answers']) > 0:
pred_dict[question_id] = normalize(pred['answers'])
ref_dict[question_id] = normalize(ref['answers'])
#print '========compare======='
#print pred_dict[question_id]
#print '----------------------'
#print ref_dict[question_id]
#print '========compare 2======='
#print pred_dict
#print '----------------------'
#print ref_dict
bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)
else:
bleu_rouge = None
value_with_mcts = bleu_rouge
print 'bleu_rouge(value_with_mcts): '
print value_with_mcts
for tree in self.end_tree:
tree_data = tree.tree.get_raw_tree_data()
input_v = value_with_mcts['Bleu-4']
total_num, total_loss = 0, 0
log_every_n_batch, n_batch_loss = 3, 0
for prob_id, prob_data in enumerate(tree.p_data):
# print 'p_data: '
# print prob_id
# print prob_data
c = []
policy = []
for prob_key, prob_value in prob_data.items():
c.append(prob_key)
policy.append(prob_value)
print 'policy: '
print [policy]
print 'value: '
print [value_with_mcts]
print 'candidate: '
print c
print 'listSelectedSet[:prob_id]'
print listSelectedSet[:prob_id]
input_v = value_with_mcts['Rouge-L']
feed_dict = {self.p: [tree_data['passage_token_id']],
self.q: [tree_data['question_token_ids']],
self.p_length: [tree_data['p_length']],
self.q_length: [tree_data['q_length']],
self.dropout_keep_prob: 1.0}
if prob_id == 0:
feeddict = dict(feed_dict.items() + {self.policy: [policy], self.v: [[input_v]]}.items())
loss_first = self.sess.run([self.loss_first], feed_dict=feeddict)
print('loss,first', loss_first)
else:
feeddict = dict(feed_dict.items() + {self.selected_id_list: [listSelectedSet[:prob_id]], self.candidate_id: [c],
self.policy: [policy],
self.v: [[input_v]]}.items())
loss = self.sess.run([self.loss], feed_dict=feeddict)
print('loss',loss)
total_loss += loss * len(self.end_tree)
total_num += len(self.end_tree)
n_batch_loss += loss
if log_every_n_batch > 0 and bitx % log_every_n_batch == 0:
self.logger.info('Average loss from batch {} to {} is {}'.format(
bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch))
n_batch_loss = 0
return 1.0 * total_loss / total_num
'''
return 0
def _init_root(self, now_tree):
tree = now_tree.tree
tree_data = tree.get_raw_tree_data()
# print ('start_node ', start_node)
feed_dict = {self.p: [tree_data['passage_token_id']],
self.q: [tree_data['question_token_ids']],
self.p_length: [tree_data['p_length']],
self.q_length: [tree_data['q_length']],
self.dropout_keep_prob: 1.0}
leaf_node = tree.tree.get_node(tree.tree.root)
self.expand(now_tree, leaf_node, feed_dict)
def expands(self, tree_list):
    """Batch-expand the designated leaf (``expand_node``) of every tree.

    Trees whose leaf has an empty word list get the dedicated first-step
    policy; the rest are evaluated in a single batched session run.  A child
    node is then created under each leaf, one per candidate word, carrying
    the policy prior.

    Bug fix: ``p_word_id`` was assigned the probability vector (``policys``)
    instead of the candidate word ids (``policy_ids``), so the child-creation
    loop iterated over probabilities rather than word positions.
    """
    print ('========== start expands ==============')
    p_feed = []
    q_feed = []
    p_lenth_feed = []
    q_length_feed = []
    words_list_list = []
    l_passage_list = []
    policy_need_list = []  # indices of trees that go through the batched policy
    for t_idx, data_tree in enumerate(tree_list, 0):
        tree_data = data_tree.tree.get_raw_tree_data()
        word_list = data_tree.expand_node.data.word
        l_passage = data_tree.l_passage
        print ('1word_list', word_list)
        if (len(word_list) == 0):
            # Empty selection: root expansion uses the first-step policy head.
            data_tree = self._get_init_policy(data_tree, l_passage)
        else:
            p_feed.append(tree_data['passage_token_id'])
            q_feed.append(tree_data['question_token_ids'])
            p_lenth_feed.append(tree_data['p_length'])
            q_length_feed.append(tree_data['q_length'])
            words_list_list.append(word_list)
            l_passage_list.append(l_passage)
            policy_need_list.append(t_idx)
    if not (len(p_feed) == 0):
        feed_dict = {self.p: p_feed,
                     self.q: q_feed,
                     self.p_length: p_lenth_feed,
                     self.q_length: q_length_feed,
                     self.dropout_keep_prob: 1.0}
        policy_ids, policys = self._cal_policys(words_list_list, l_passage_list, feed_dict)
        for p_idx, t_idx in enumerate(policy_need_list, 0):
            tree_list[t_idx].p_pred = policys[p_idx]
            # BUG FIX: candidate word ids come from policy_ids, not the
            # probability vector.
            tree_list[t_idx].p_word_id = policy_ids[p_idx]
    for d_tree in tree_list:
        print ('d_tree.p_pred ', np.shape(d_tree.p_pred))
        print ('d_tree.p_word_id', np.shape(d_tree.p_word_id))
        leaf_node = d_tree.expand_node
        words_list = leaf_node.data.word
        print ('words_list', words_list)
        # One child per candidate word, prior taken from the policy output.
        for word in d_tree.p_word_id:
            d_tree.tree.node_map[' '.join(words_list + [str(word)])] = len(d_tree.tree.node_map)
            new_node = node()
            new_node.word = words_list + [str(word)]
            idx = d_tree.p_word_id.index(word)
            new_node.p = d_tree.p_pred[idx]
            d_tree.tree.tree.create_node(identifier=d_tree.tree.node_map[' '.join(new_node.word)], data=new_node,
                                         parent=leaf_node.identifier)
    print ('========== end expands ==============')
    return tree_list
def _search_v(self, data_tree):
    """Leaf evaluation for a single tree (non-batched legacy path).

    If the current node ends on the last passage word the position is
    terminal and the value is the Bleu-4 score of the decoded answer against
    the reference; otherwise the value network is queried via _cal_value.
    """
    tree = data_tree.tree
    tree_data = tree.get_raw_tree_data()
    # Single-example feed for the value network.
    feed_dict = {self.p: [tree_data['passage_token_id']],
                 self.q: [tree_data['question_token_ids']],
                 self.p_length: [tree_data['p_length']],
                 self.q_length: [tree_data['q_length']],
                 self.dropout_keep_prob: 1.0}
    l_passage = tree_data['p_length']
    tmp_node = data_tree.tmp_node
    word_id = int(tmp_node.data.word[-1])
    words_list = tmp_node.data.word
    if (word_id == (l_passage - 1)):
        # Terminal leaf: decode the selected positions back to tokens.
        v = 0
        pred_answer = tmp_node.data.word
        listSelectedSet_words = []
        listSelectedSet = map(eval, pred_answer)  # Python 2: map yields the selected int positions
        for idx in listSelectedSet:
            listSelectedSet_words.append(data_tree.words_id_list[idx])
        str123 = self.vocab.recover_from_ids(listSelectedSet_words, 0)
        pred_answers = []
        ref_answers = []
        pred_answers.append({'question_id': data_tree.q_id,
                             'question_type': data_tree.q_type,
                             'answers': [''.join(str123)],
                             'entity_answers': [[]],
                             'yesno_answers': []})
        ref_answers.append({'question_id': data_tree.q_id,
                            'question_type': data_tree.q_type,
                            'answers': data_tree.ref_answer,
                            'entity_answers': [[]],
                            'yesno_answers': []})
        print '****tree_search'
        if len(data_tree.ref_answer) > 0:
            pred_dict, ref_dict = {}, {}
            for pred, ref in zip(pred_answers, ref_answers):
                question_id = ref['question_id']
                if len(ref['answers']) > 0:
                    pred_dict[question_id] = normalize(pred['answers'])
                    ref_dict[question_id] = normalize(ref['answers'])
            bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)
        else:
            bleu_rouge = None
        # NOTE(review): if bleu_rouge is None (empty ref_answer) the next line
        # raises TypeError — confirm terminal trees always carry references.
        v = bleu_rouge['Bleu-4']
        print ('v: ', v)
    else:
        # Non-terminal: ask the value network.
        v = self._cal_value(words_list, feed_dict)
    return v
def _get_init_policy(self, data_tree, l_passage):
print('&&&&&&&&& start init_policy &&&&&&&&')
tree = data_tree.tree
tree_data = tree.get_raw_tree_data()
feed_dict = {self.p: [tree_data['passage_token_id']],
self.q: [tree_data['question_token_ids']],
self.p_length: [tree_data['p_length']],
self.q_length: [tree_data['q_length']],
self.dropout_keep_prob: 1.0}
print ('length of passage', tree_data['p_length'])
print ('length of padding passage', len(tree_data['passage_token_id']))
print ('padding', tree_data['passage_token_id'][-1])
data_tree.p_pred = self.sess.run(self.prob_first, feed_dict=feed_dict)
data_tree.p_word_id = [i for i in range(l_passage)]
print('&&&&&&&&& end init_policy &&&&&&&&')
return data_tree
def _get_init_value(self, data_tree):
print('$$$$$$$ start init_value $$$$$$$$$')
tree = data_tree.tree
tree_data = tree.get_raw_tree_data()
feed_dict = {self.p: [tree_data['passage_token_id']],
self.q: [tree_data['question_token_ids']],
self.p_length: [tree_data['p_length']],
self.q_length: [tree_data['q_length']],
self.dropout_keep_prob: 1.0}
value_p = self.sess.run(self.value_first, feed_dict=feed_dict)
print ('_get_init_value', value_p)
print('$$$$$$$ end init_value $$$$$$$$$')
return value_p
def _search_vv(self, search_tree_list):
    """Evaluate the leaf value of every tree in *search_tree_list* (batched).

    Three cases per tree:
      * empty word list                  -> first-step value head;
      * leaf is the final passage word   -> terminal value = Bleu-4 of the
        decoded answer against the reference;
      * otherwise                        -> queued and evaluated in one
        batched session run via _cal_values.

    Bug fix: the terminal branch used to store the score on ``data_tree.v``
    while every consumer (e.g. ``_updates``) reads ``data_tree.value``.
    Also replaces ``eval`` on word strings with ``int`` (same data, no code
    execution risk) and normalises Python-2-only print statements.
    """
    print('-------------------- start search_vv -----------------------')
    value_id_list = []
    p_feed = []
    q_feed = []
    p_lenth_feed = []
    q_length_feed = []
    words_list_list = []
    for t_id, data_tree in enumerate(search_tree_list, 0):
        tree_data = data_tree.tree.get_raw_tree_data()
        tmp_node = data_tree.tmp_node
        word_id = int(tmp_node.data.word[-1])
        l_passage = tree_data['p_length']
        words_list = tmp_node.data.word
        if len(words_list) == 0:
            # Nothing selected yet: value of the empty prefix.
            data_tree.value = self._get_init_value(data_tree)
        else:
            print ('word_id', word_id)
            if (word_id == (l_passage - 1)):
                # Terminal leaf: decode the selection and score it.
                selected_ids = [int(w) for w in tmp_node.data.word]
                listSelectedSet_words = [data_tree.words_id_list[idx] for idx in selected_ids]
                str123 = self.vocab.recover_from_ids(listSelectedSet_words, 0)
                pred_answers = [{'question_id': data_tree.q_id,
                                 'question_type': data_tree.q_type,
                                 'answers': [''.join(str123)],
                                 'entity_answers': [[]],
                                 'yesno_answers': []}]
                ref_answers = [{'question_id': data_tree.q_id,
                                'question_type': data_tree.q_type,
                                'answers': data_tree.ref_answer,
                                'entity_answers': [[]],
                                'yesno_answers': []}]
                print('****tree_search')
                if len(data_tree.ref_answer) > 0:
                    pred_dict, ref_dict = {}, {}
                    for pred, ref in zip(pred_answers, ref_answers):
                        question_id = ref['question_id']
                        if len(ref['answers']) > 0:
                            pred_dict[question_id] = normalize(pred['answers'])
                            ref_dict[question_id] = normalize(ref['answers'])
                    bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)
                else:
                    bleu_rouge = None
                # NOTE(review): bleu_rouge may still be None here (no refs);
                # the lookup below would raise — confirm refs always exist.
                v = bleu_rouge['Bleu-4']
                print ('v: ', v)
                # BUG FIX: store on .value (read by _updates), not .v
                data_tree.value = v
            else:
                # Queue for the batched value computation.
                p_feed.append(np.array(tree_data['passage_token_id']))
                q_feed.append(np.array(tree_data['question_token_ids']))
                p_lenth_feed.append(np.array(tree_data['p_length']))
                q_length_feed.append(np.array(tree_data['q_length']))
                words_list_list.append(words_list)
                value_id_list.append(t_id)
    if not (len(p_feed)) == 0:
        self.feed_dict = {self.p: p_feed,
                          self.q: q_feed,
                          self.p_length: p_lenth_feed,
                          self.q_length: q_length_feed,
                          self.dropout_keep_prob: 1.0}
        values = self._cal_values(words_list_list, self.feed_dict)
        for t_idx, v_idx in enumerate(value_id_list, 0):
            search_tree_list[v_idx].value = values[t_idx]
    print('---------------------- end search_vv -----------------------')
    return search_tree_list
def _cal_values(self, words_list_list, feeddict):
    """Run the value head on a batch of partial selections.

    words_list_list holds one list of word-position strings per tree;
    *feeddict* is the shared passage/question feed.
    """
    fd_words_list = []
    seq_length = []
    for idx, words_list in enumerate(words_list_list, 0):
        words_list = map(eval, words_list)  # Python 2: map returns a list of int positions
        tp = []
        for word in words_list:
            tp = np.array([idx, word])
            # NOTE(review): 'tp' is rebound (not appended to) each iteration,
            # so only the LAST word of each tree reaches fd_words_list while
            # seq_length still records the full selection length.  Compare
            # _cal_policys, which accumulates every [idx, word] pair — this
            # looks like a bug; confirm against the intended gather_nd layout.
        fd_words_list.append(tp)
        seq_length.append(np.array(len(words_list)))
    fd_words_list = np.array(fd_words_list)
    seq_length = np.array(seq_length)
    print ('fd_words_list', fd_words_list)
    print ('shape : ', np.shape(fd_words_list))
    print ('seq_length', seq_length)
    print ('shape : ', np.shape(seq_length))
    print feeddict
    # Merge gather indices and sequence metadata into the base feed (Python 2 dict merge).
    feed_dict = dict({self.selected_id_list: fd_words_list, self.seq_length: seq_length,
                      self.selected_batch_size: len(seq_length)}.items() + feeddict.items())
    values = self.sess.run(self.value, feed_dict=feed_dict)
    print ('values', values)
    return values
def _update(self, data_tree, value):
node_list = data_tree.select_list
for node_id in node_list:
tmp_node = data_tree.tree.tree.get_node(node_id)
tmp_node.data.Q = (tmp_node.data.Q * tmp_node.data.num + value) / (tmp_node.data.num + 1)
tmp_node.data.num += 1
return data_tree
def _updates(self, data_tree):
node_list = data_tree.select_list
value = data_tree.value
for node_id in node_list:
tmp_node = data_tree.tree.tree.get_node(node_id)
tmp_node.data.Q = (tmp_node.data.Q * tmp_node.data.num + value) / (tmp_node.data.num + 1)
tmp_node.data.num += 1
data_tree.tree.count += 1
return data_tree
def _get_policy(self, data_tree):
sub_tree = data_tree.tree
start_node_id = data_tree.start_node
tmp_node = sub_tree.tree.get_node(start_node_id)
max_time = -1
prob = {}
for child_id in tmp_node.fpointer:
child_node = sub_tree.tree.get_node(child_id)
if sub_tree.count == 0:
prob[child_node.data.word[-1]] = 0.0
else:
prob[child_node.data.word[-1]] = child_node.data.num / sub_tree.count
return prob
def expand(self, data_tree, leaf_node, feed_dict):
    """Create one child of *leaf_node* per candidate word, with priors from the policy net."""
    print('======= expand: ')
    selected_words = leaf_node.data.word
    print('word_list:')
    print(selected_words)
    search_tree = data_tree.tree
    candidate_ids, priors = self._cal_policy(selected_words, data_tree.l_passage, feed_dict)
    for position, word in enumerate(candidate_ids):
        child = node()
        child.word = selected_words + [str(word)]
        child.p = priors[position]
        key = ' '.join(child.word)
        # Register the new node in the name->id map, then attach it.
        search_tree.node_map[key] = len(search_tree.node_map)
        search_tree.tree.create_node(identifier=search_tree.node_map[key],
                                     data=child,
                                     parent=leaf_node.identifier)
    data_tree.tree = search_tree
    return data_tree
def _take_action(self, data_tree):
sub_tree = data_tree.tree
start_node_id = data_tree.start_node
tmp_node = sub_tree.tree.get_node(start_node_id)
max_time = -1
prob = {}
for child_id in tmp_node.fpointer:
child_node = sub_tree.tree.get_node(child_id)
prob[child_node.data.word[-1]] = child_node.data.num / sub_tree.count
if child_node.data.num > max_time:
max_time = child_node.data.num
select_word = child_node.data.word[-1]
select_word_node_id = child_node.identifier
return prob, select_word, select_word_node_id
def _cal_policy(self, words_list, l_passage, feeddict, indx=0):
    """Policy over candidate next words for ONE tree (non-batched variant).

    Candidates are all passage positions strictly greater than the largest
    already-selected position.  Returns (candidate_ids, probabilities).
    """
    max_id = float('-inf')
    policy_c_id = []
    words_list = map(eval, words_list)  # Python 2: map returns a list (iterated twice below)
    fd_words_list, fd_policy_c_id = [], []
    for word in words_list:
        fd_words_list.append([indx, word])  # [batch_index, position] pairs for tf.gather_nd
    for can in words_list:
        max_id = max(can, max_id)
    # Every position after the furthest selected one is a candidate.
    for idx in range(l_passage):
        if idx > max_id:
            policy_c_id.append(idx)
    for word in policy_c_id:
        fd_policy_c_id.append([indx, word])
    if len(words_list) == 0:
        # No words selected yet: use the dedicated first-step policy head.
        p_pred = self.sess.run(self.prob_first, feed_dict=feeddict)
    else:
        # Python 2 dict merge of gather indices with the base feed.
        feed_dict = dict(
            {self.selected_id_list: fd_words_list, self.candidate_id: fd_policy_c_id}.items() + feeddict.items())
        print feed_dict
        p_pred = self.sess.run(self.prob, feed_dict=feed_dict)
    return policy_c_id, p_pred
def _policy_padding(self, padding_list, seq_length_list):
padding_length = 0
for length in seq_length_list:
padding_length = max(padding_length, length)
# print('padding_length',padding_length)
for idx, sub_list in enumerate(padding_list, 0):
# you yade gaixi
# print ('len(sub_list)', len(sub_list))
padding = [sub_list[-1][0], (sub_list[-1][1] + 1)]
# print ('padding',padding)
rangee = padding_length - seq_length_list[idx]
print rangee
for i in range(rangee):
sub_list.append(padding)
for sub_list in padding_list:
assert len(sub_list) == padding_length
return padding_list
def _cal_policys(self, words_list_list, l_passage_list, feeddict):
    """Batched policy computation for several trees at once.

    For each tree the candidate set is every passage position strictly
    greater than the largest already-selected position; the ragged candidate
    lists are padded to a common length so they form a rectangular tensor.
    Returns (candidate_id_lists, scores).
    """
    policy_c_id_list = []
    fd_words_list = []
    seq_length_list = []
    candidate_length_list = []
    fd_policy_c_id_list = []
    for idx, words_list in enumerate(words_list_list, 0):
        max_id = float('-inf')
        policy_c_id = []
        words_list = map(eval, words_list)  # Python 2: map returns a list (iterated twice below)
        tmp = []
        for word in words_list:
            tmp.append([idx, word])  # [batch_index, position] pairs for tf.gather_nd
        fd_words_list.append(tmp)
        seq_length_list.append(len(words_list))
        for can in words_list:
            max_id = max(can, max_id)
        for i in range(l_passage_list[idx]):
            if i > max_id:
                policy_c_id.append(i)
        candidate_length_list.append(len(policy_c_id))
        policy_c_id_list.append(policy_c_id)
        tmp2 = []
        for word in policy_c_id:
            tmp2.append([idx, word])
        fd_policy_c_id_list.append(tmp2)
    print ('start_padding', candidate_length_list)
    # Pad ragged candidate lists to the longest one (repeats a synthetic trailing pair).
    fd_policy_c_id_list = self._policy_padding(fd_policy_c_id_list, candidate_length_list)
    print ('fd_words_list', fd_words_list)
    print ('shape', np.shape(fd_words_list))
    print ('fd_policy_c_id_list', fd_policy_c_id_list)
    print ('shape', np.shape(fd_policy_c_id_list))
    selected_batch_size = len(fd_words_list)
    candidate_batch_size = [len(fd_policy_c_id_list), 1, 1]  # tile multiples for tf.tile on VV
    # Python 2 dict merge of gather indices/sizes with the base feed.
    feed_dict = dict(
        {self.selected_id_list: fd_words_list, self.candidate_id: fd_policy_c_id_list,
         self.selected_batch_size: selected_batch_size, self.candidate_batch_size: candidate_batch_size}.items()
        + feeddict.items())
    # print feed_dict
    print
    # shape_a, shape_b = self.sess.run([self.shape_a,self.shape_b],feed_dict = feed_dict)
    can = self.sess.run(self.can, feed_dict=feed_dict)
    print ('can', can)
    print ('shape of can ', np.shape(can))
    # NOTE(review): returns the raw candidate*V scores (self.can) rather than
    # a softmax distribution — the self.prob path is commented out in
    # _action; confirm callers expect unnormalised scores.
    c_pred = can
    # c_pred = self.sess.run(self.prob, feed_dict=feed_dict)
    return policy_c_id_list, c_pred
def _cal_value(self, words_list, feeddict, indx=0):
words_list = map(eval, words_list)
fd_words_list = []
for word in words_list:
fd_words_list.append([indx, word])
if len(words_list) == 0:
value_p = self.sess.run(self.value_first, feed_dict=feeddict)
else:
feed_dict = dict({self.selected_id_list: fd_words_list}.items() + feeddict.items())
value_p = self.sess.run(self.value, feed_dict=feed_dict)
return value_p
def _build_graph(self):
    """
    Builds the computation graph with Tensorflow
    """
    # session info
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True  # grab GPU memory on demand, not all up front
    self.sess = tf.Session(config=sess_config)
    start_t = time.time()
    # Construction order matters: each stage consumes tensors created by the
    # previous one (placeholders -> embeddings -> encodings -> heads).
    self._setup_placeholders()
    self._embed()
    self._encode()
    self._initstate()
    self._action_frist()
    self._action()
    # self._compute_loss()
    # param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])
    # self.logger.info('There are {} parameters in the model'.format(param_num))
    self.saver = tf.train.Saver()
    self.sess.run(tf.global_variables_initializer())
    self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))
def _setup_placeholders(self):
    """
    Placeholders
    """
    self.p = tf.placeholder(tf.int32, [None, None])  # passage token ids
    self.q = tf.placeholder(tf.int32, [None, None])  # question token ids
    self.p_length = tf.placeholder(tf.int32, [None])  # true passage lengths
    self.q_length = tf.placeholder(tf.int32, [None])  # true question lengths
    self.dropout_keep_prob = tf.placeholder(tf.float32)
    # test
    # self.p_words_id = tf.placeholder(tf.int32, [None,None])
    self.candidate_id = tf.placeholder(tf.int32, None)  # [batch_idx, position] pairs for gather_nd
    self.seq_length = tf.placeholder(tf.int32, [None])  # lengths of the selected-word sequences
    self.selected_batch_size = tf.placeholder(tf.int32, None)
    self.candidate_batch_size = tf.placeholder(tf.int32, None)  # tile multiples for tf.tile
    # self.words = tf.placeholder(tf.float32, [None, None])
    self.selected_id_list = tf.placeholder(tf.int32, None)  # [batch_idx, position] pairs of selected words
    self.policy = tf.placeholder(tf.float32, [1, None])  # policy
    self.v = tf.placeholder(tf.float32, [1, 1])  # value
def _embed(self):
    """
    The embedding layer, question and passage share embeddings
    """
    # with tf.device('/cpu:0'), tf.variable_scope('word_embedding'):
    with tf.variable_scope('word_embedding'):
        # Initialised from pre-trained vectors but kept trainable.
        self.word_embeddings = tf.get_variable(
            'word_embeddings',
            shape=(self.vocab.size(), self.vocab.embed_dim),
            initializer=tf.constant_initializer(self.vocab.embeddings),
            trainable=True
        )
        self.p_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)
        self.q_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)
def _encode(self):
    """
    Employs two Bi-LSTMs to encode passage and question separately
    """
    with tf.variable_scope('passage_encoding'):
        # Per-token passage states (sequence outputs).
        self.p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)
    with tf.variable_scope('question_encoding'):
        # Final question state only.
        _, self.sep_q_encodes = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)
    # self.sep_q_encodes,_ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)
    if self.use_dropout:
        self.p_encodes = tf.nn.dropout(self.p_encodes, self.dropout_keep_prob)
        self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)
def _initstate(self):
    """Projection matrices and the initial decoder state derived from the question encoding."""
    # Bilinear scoring matrix used to match word encodings against the state.
    self.V = tf.Variable(
        tf.random_uniform([self.hidden_size * 2, self.hidden_size * 2], -1. / self.hidden_size,
                          1. / self.hidden_size))
    # Value-head weights and bias.
    self.W = tf.Variable(
        tf.random_uniform([self.hidden_size * 2, 1], -1. / self.hidden_size, 1. / self.hidden_size))
    self.W_b = tf.Variable(tf.random_uniform([1, 1], -1. / self.hidden_size, 1. / self.hidden_size))
    self.V_c = tf.Variable(
        tf.random_uniform([self.hidden_size * 2, self.hidden_size], -1. / self.hidden_size, 1. / self.hidden_size))
    self.V_h = tf.Variable(
        tf.random_uniform([self.hidden_size * 2, self.hidden_size], -1. / self.hidden_size, 1. / self.hidden_size))
    # Initial LSTM (c, h) state projected from the question encoding and
    # concatenated, matching BasicLSTMCell(state_is_tuple=False) in _action.
    self.q_state_c = tf.sigmoid(tf.matmul(self.sep_q_encodes, self.V_c))
    self.q_state_h = tf.sigmoid(tf.matmul(self.sep_q_encodes, self.V_h))
    self.q_state = tf.concat([self.q_state_c, self.q_state_h], 1)
    self.words = tf.reshape(self.p_encodes, [-1, self.hidden_size * 2])  # flattened per-word encodings
def _action_frist(self):
    """
    select first word
    (NOTE: the method name keeps the historical 'frist' typo; callers use it as-is.)
    """
    # self.candidate = tf.reshape(self.p_emb,[-1,self.hidden_size*2])
    self.VV = tf.expand_dims(self.V, 0)
    self.w = tf.matmul(self.p_encodes, self.VV)
    self.t_q_state = tf.expand_dims(tf.transpose(self.q_state), 0)
    # TODO: unclear how this interacts with multiprocessing (translated from
    # the original author's comment).
    self.tmp = tf.matmul(self.w, self.t_q_state)
    self.logits_first = tf.reshape(self.tmp, [-1])
    self.prob_first = tf.nn.softmax(self.logits_first)  # distribution over all passage positions
    self.prob_id_first = tf.argmax(self.prob_first)
    self.value_first = tf.sigmoid(tf.reshape(tf.matmul(self.q_state, self.W), [1, 1]) + self.W_b)  # [1,1]
def _action(self):
    """
    Employs Bi-LSTM again to fuse the context information after match layer
    """
    # self.selected_id_list = tf.expand_dims(self.selected_id_list, 0)
    # Gather encodings of candidate and already-selected passage positions.
    self.candidate = tf.gather_nd(self.p_encodes, self.candidate_id)
    self.shape_a = tf.shape(self.seq_length)
    self.selected_list = tf.gather_nd(self.p_encodes, self.selected_id_list)
    self.rnn_input = tf.reshape(self.selected_list, [self.selected_batch_size, -1, self.hidden_size * 2])
    rnn_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.hidden_size, state_is_tuple=False)
    # Encode the selection sequence starting from the question-derived state.
    _, self.states = tf.nn.dynamic_rnn(rnn_cell, self.rnn_input, sequence_length=self.seq_length,
                                       initial_state=self.q_state, dtype=tf.float32)  # [1, dim]
    self.value = tf.sigmoid(tf.matmul(self.states, self.W) + self.W_b)  # [1,1]
    # self.value = tf.sigmoid(tf.reshape(tf.matmul(self.states, self.W), [1, 1]) + self.W_b)  # [1,1]
    self.VVV = tf.tile(self.VV, self.candidate_batch_size)
    self.can = tf.matmul(self.candidate, self.VVV)  # raw candidate scores
    # self.shape_a = tf.shape(self.can)
    # self.shape_b = tf.shape(self.states)
    # NOTE(review): the softmax policy path below is disabled, so self.prob
    # is never defined — _cal_policy and _compute_loss still reference it
    # and would fail; confirm before re-enabling those paths.
    # self.logits = tf.reshape(tf.matmul(tf.matmul(self.candidate, self.V), tf.transpose(self.states)), [-1])
    #
    # self.prob = tf.nn.softmax(self.logits)
    # self.prob_id = tf.argmax(self.prob)
def _compute_loss(self):
    """
    The loss function
    """
    # First-step loss: value MSE plus a policy term, i.e. the negative
    # log-probabilities weighted by the search policy.
    self.loss_first = tf.contrib.losses.mean_squared_error(self.v, self.value_first) - \
                      tf.matmul(self.policy,
                                tf.reshape(tf.log(tf.clip_by_value(self.prob_first, 1e-30, 1.0)), [-1, 1]))
    self.optimizer_first = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss_first)
    # Same loss shape for later steps.  NOTE(review): self.prob is currently
    # commented out in _action, so building this op would fail — confirm
    # before wiring _compute_loss back into _build_graph.
    self.loss = tf.contrib.losses.mean_squared_error(self.v, self.value) - tf.matmul(self.policy, tf.reshape(
        tf.log(tf.clip_by_value(self.prob, 1e-30, 1.0)), [-1, 1]))
    self.optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss)
    self.all_params = tf.trainable_variables()
def _create_train_op(self):
"""
Selects the training algorithm and creates a train operation with it
"""
if self.optim_type == 'adagrad':
self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)
elif self.optim_type == 'adam':
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
elif self.optim_type == 'rprop':
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
elif self.optim_type == 'sgd':
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
else:
raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))
self.train_op = self.optimizer.minimize(self.loss)
if __name__ == '__main__':
    # Run the TensorFlow smoke test when executed as a script.
    # (Removed the no-op `1 == 1` constant expression.)
    # tree_search()
    test_tf()
__author__ = 'Ben'
def greeting(msg):
    """Display *msg* on standard output."""
    print(msg)
import os
import torch
import torch.distributed as dist
def _redefine_print(is_main):
"""disables printing when not in main process"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_main or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def setup_ddp(args):
    """Fill in args.rank / args.world_size / args.gpu and initialise DDP.

    Supports torchrun (RANK/WORLD_SIZE/LOCAL_RANK env vars), SLURM
    (SLURM_PROCID), or pre-populated args (run_with_submitit.py).  Falls
    back to single-process mode when none of these apply.
    See https://discuss.pytorch.org/t/what-is-the-difference-between-rank-and-local-rank/61940/2
    """
    env = os.environ
    if "RANK" in env and "WORLD_SIZE" in env:
        # torchrun path.
        args.rank = int(env["RANK"])
        args.world_size = int(env["WORLD_SIZE"])
        args.gpu = int(env["LOCAL_RANK"])
    elif "SLURM_PROCID" in env:
        # SLURM path: derive the local GPU from the global rank.
        args.rank = int(env["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
    elif hasattr(args, "rank"):
        # Already configured by the caller (e.g. run_with_submitit.py).
        pass
    else:
        print("Not using distributed mode")
        args.distributed = False
        args.world_size = 1
        return

    args.distributed = True
    torch.cuda.set_device(args.gpu)
    dist.init_process_group(
        backend="nccl",
        rank=args.rank,
        world_size=args.world_size,
        init_method=args.dist_url,
    )
    torch.distributed.barrier()
    _redefine_print(is_main=(args.rank == 0))
def reduce_across_processes(val):
    """All-reduce (sum) *val* across every process and return the result tensor.

    Requires an initialised process group and a CUDA device.
    """
    reduced = torch.tensor(val, device="cuda")
    dist.barrier()
    dist.all_reduce(reduced)
    return reduced
|
# Vote-count constants; presumably the number of up-/down-voted samples to
# draw — TODO confirm against the code that consumes them.
UP_VOTES = 25
DOWN_VOTES = 25
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-21 03:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the project app.

    Creates Contract, Contracttype, Customer, Customersegment,
    Lintasartaperson, Order, Partner, Project and Projectstatus, then wires
    up the foreign keys that reference models created later in the list.
    NOTE: auto-generated by Django 1.11 — do not edit field definitions.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Contract',
            fields=[
                ('contract_id', models.AutoField(primary_key=True, serialize=False)),
                ('no_contract', models.CharField(blank=True, max_length=100)),
                ('start_contract', models.DateField()),
                ('end_contract', models.DateField()),
            ],
        ),
        migrations.CreateModel(
            name='Contracttype',
            fields=[
                ('contract_type_id', models.AutoField(primary_key=True, serialize=False)),
                ('contract_type', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('customer_id', models.AutoField(primary_key=True, serialize=False)),
                ('customer_name', models.CharField(max_length=100)),
                ('customer_ho_address', models.CharField(blank=True, max_length=100)),
                ('customer_contact', models.CharField(blank=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Customersegment',
            fields=[
                ('customer_segment_id', models.AutoField(primary_key=True, serialize=False)),
                ('customer_segment', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Lintasartaperson',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('initial', models.CharField(max_length=3)),
                ('name', models.CharField(max_length=100)),
                ('email', models.CharField(blank=True, max_length=100)),
                ('telp', models.CharField(blank=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('order_id', models.AutoField(primary_key=True, serialize=False)),
                ('no_jaringan', models.CharField(max_length=10)),
                ('keterangan', models.TextField(max_length=500)),
                ('jasa', models.CharField(max_length=100)),
                ('jenis_kecepatan', models.CharField(blank=True, max_length=100)),
                ('kecepatan', models.CharField(blank=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Partner',
            fields=[
                ('partner_id', models.AutoField(primary_key=True, serialize=False)),
                ('partner_name', models.CharField(max_length=100)),
                ('partner_address', models.CharField(blank=True, max_length=100)),
                ('partner_contact_sales', models.CharField(blank=True, max_length=100)),
                ('partner_contact_support', models.CharField(blank=True, max_length=100)),
                ('partner_contact_manager', models.CharField(blank=True, max_length=100)),
            ],
        ),
        # Central model: holds project metadata, SLA fields, document paths
        # and the relations to people, customers and partners.
        migrations.CreateModel(
            name='Project',
            fields=[
                ('project_id', models.AutoField(primary_key=True, serialize=False)),
                ('project_name', models.CharField(max_length=100)),
                ('project_level', models.CharField(choices=[('c', 'Complex'), ('m', 'Medium'), ('s', 'Simple')], max_length=1)),
                ('scope_detail', models.TextField(max_length=1000)),
                ('scope_summary', models.TextField(max_length=500)),
                ('sla_availability', models.CharField(max_length=5)),
                ('sla_support_hour', models.CharField(choices=[('24x7', '24x7'), ('8x5', '8x5')], max_length=5)),
                ('sla_response_time', models.CharField(max_length=50)),
                ('sla_resolution_time', models.CharField(max_length=50)),
                ('descriptions', models.CharField(blank=True, max_length=100)),
                ('project_date_handover', models.DateField()),
                ('topology', models.ImageField(default='project/static/img/topo/no-img.jpg', upload_to='project/static/img/topo/')),
                ('contract_customer', models.CharField(max_length=500)),
                ('contract_partner', models.CharField(max_length=500)),
                ('purchase_order', models.CharField(max_length=500)),
                ('project_approval', models.CharField(max_length=500)),
                ('proposal_to_customer', models.CharField(max_length=500)),
                ('proposal_to_customer_sow', models.CharField(max_length=500)),
                ('proposal_to_customer_hld', models.CharField(max_length=500)),
                ('proposal_to_customer_boq', models.CharField(max_length=500)),
                ('proposal_from_partner', models.CharField(max_length=500)),
                ('proposal_from_partner_sow', models.CharField(max_length=500)),
                ('proposal_from_partner_boq', models.CharField(max_length=500)),
                ('implementation_document', models.CharField(max_length=500)),
                ('technical_data', models.CharField(max_length=500)),
                ('low_level_design', models.CharField(max_length=500)),
                ('business_case', models.CharField(max_length=500)),
                ('user_acceptance_test', models.CharField(max_length=500)),
                ('bast_to_customer', models.CharField(max_length=500)),
                ('partner_document', models.TextField(max_length=1000)),
                ('product_spesification', models.CharField(max_length=500)),
                ('bast_from_partner', models.CharField(max_length=500)),
                ('wi_operation', models.CharField(max_length=500)),
                ('wi_customer_care', models.CharField(max_length=500)),
                ('table_updated', models.DateField(auto_now=True)),
                ('table_creation_timestamp', models.DateField(auto_now_add=True)),
                ('car_lintasarta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='car_lintasarta', to='project.Lintasartaperson')),
                ('customer', models.ManyToManyField(to='project.Customer')),
                ('delivery_lintasarta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='delivery_lintasarta', to='project.Lintasartaperson')),
                ('partner', models.ManyToManyField(to='project.Partner')),
            ],
        ),
        migrations.CreateModel(
            name='Projectstatus',
            fields=[
                ('project_status_id', models.AutoField(primary_key=True, serialize=False)),
                ('status', models.CharField(max_length=100)),
            ],
        ),
        # Foreign keys added after the target models exist.
        migrations.AddField(
            model_name='project',
            name='project_status',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Projectstatus'),
        ),
        migrations.AddField(
            model_name='project',
            name='project_support_lintasarta',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_support_lintasarta', to='project.Lintasartaperson'),
        ),
        migrations.AddField(
            model_name='project',
            name='sa_lintasarta',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sa_lintasarta', to='project.Lintasartaperson'),
        ),
        migrations.AddField(
            model_name='project',
            name='sales_lintasarta',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sales_lintasarta', to='project.Lintasartaperson'),
        ),
        migrations.AddField(
            model_name='order',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Project'),
        ),
        migrations.AddField(
            model_name='customer',
            name='customersegment',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Customersegment'),
        ),
        migrations.AddField(
            model_name='contract',
            name='contract_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Contracttype'),
        ),
        migrations.AddField(
            model_name='contract',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Project'),
        ),
    ]
|
import os
import sys
# import imgaug # https://github.com/aleju/imgaug (pip3 install imgaug)
import time
# Import Mask RCNN
ROOT_DIR = os.path.abspath("../../")
sys.path.append(ROOT_DIR) # To find local version of the library
# Root directory of the project
from samples.coco.coco import CocoConfig, CocoDataset, evaluate_coco
from mrcnn import model as modellib
import argparse
# Path to trained weights file
MODEL_PATH = os.path.join(ROOT_DIR, 'weights')
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2017"
if __name__ == '__main__':
    # Evaluate a trained Mask R-CNN model on the MS-COCO validation split.
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='Train Mask R-CNN on MS COCO.')
    parser.add_argument('--dataset', required=True,
                        default='/specific/netapp5_2/gamir/DER-Roei/datasets/MSCoco',
                        metavar="/path/to/coco/",
                        help='Directory of the MS-COCO dataset')
    parser.add_argument('--year', required=False,
                        default=DEFAULT_DATASET_YEAR,
                        metavar="<year>",
                        help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
    parser.add_argument('--model', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--limit', required=False,
                        default=500,
                        metavar="<image count>",
                        help='Images to use for evaluation (default=500)')
    # NOTE(review): argparse's type=bool converts ANY non-empty string (even
    # "False") to True -- only an empty string yields False. Confirm intended.
    parser.add_argument('--download', required=False,
                        default=False,
                        metavar="<True|False>",
                        help='Automatically download and unzip MS-COCO files (default=False)',
                        type=bool)
    parser.add_argument('--gpu', required=False,
                        default=0,
                        metavar="0, 1, ...",
                        help='GPU number ro run',
                        type=int)
    parser.add_argument('--workers', required=False,
                        default=0,
                        metavar="0, 1, ...",
                        help='Number of workers',
                        type=int)
    args = parser.parse_args()
    print("Model: ", args.model)
    print("Dataset: ", args.dataset)
    print("Year: ", args.year)
    print("Logs: ", args.logs)
    print("Auto Download: ", args.download)
    print("GPU: ", args.gpu)
    print("Number of Workers: ", args.workers)
    # Define GPU training
    # Restrict TensorFlow to the requested GPU before the model is built.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    # Configurations
    class InferenceConfig(CocoConfig):
        # Set batch size to 1 since we'll be running inference on one image at a time.
        # Batch size = GPU_COUNT * IMAGES_PER_GPU
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        # Accept all detections; COCO evaluation applies its own thresholds.
        DETECTION_MIN_CONFIDENCE = 0
        POST_NMS_ROIS_INFERENCE = 100
    config = InferenceConfig()
    config.display()
    # Create model
    model = modellib.MaskRCNN(mode="inference", config=config,
                              model_dir=args.logs)
    # Select weights file to load
    if args.model.lower() == "coco":
        # NOTE(review): this branch loads *ImageNet* weights even though the
        # flag is "coco" -- looks like it should load COCO weights; confirm.
        model_path = model.get_imagenet_weights()
    elif args.model.lower() == "last":
        # Find last trained weights
        model_path = model.find_last()
    elif args.model.lower() == "imagenet":
        # Start from ImageNet trained weights
        model_path = model.get_imagenet_weights()
    else:
        model_path = args.model
    # Load weights
    print("Loading weights ", model_path)
    model.load_weights(model_path, by_name=True)
    # # Save in a new locations
    # stmp = time.strftime("%c").replace(" ", "_")
    # model_path = os.path.join(MODEL_PATH, stmp)
    # create_folder(model_path)
    # model_path = os.path.join(model_path, stmp, "mask_rcnn.h5")
    # Testing dataset
    dataset_val = CocoDataset()
    coco = dataset_val.load_coco(args.dataset, "val", year=args.year, return_coco=True,
                                 auto_download=args.download)
    dataset_val.prepare()
    print("Running COCO evaluation on {} images.".format(args.limit))
    evaluate_coco(model, dataset_val, coco, "bbox", limit=int(args.limit))
|
#!/usr/bin/python
from Combinator import Combinator
def run(filename):
    """Read a candy-splitting input file and print one result line per case.

    The file format is: first line = number of cases; each case is a line
    with the candy count followed by a line of space-separated values.
    Fixes: Python 2 ``xrange`` -> ``range``, file handle now closed via
    ``with``, unused ``case_list`` removed.
    """
    with open(filename, 'r') as f:
        case_count = int(f.readline())
        for i in range(case_count):
            # The per-case count line must be consumed even though the value
            # itself is not needed (the values line is split instead).
            int(f.readline())
            line = f.readline()
            # Store each candy value as a binary string without the '0b' prefix.
            all_candies = [bin(int(value))[2:] for value in line.split()]
            result = process_case(all_candies)
            print('Case #%d: %s' % (i + 1, result if result > 0 else 'NO'))
def process_case_v2(all_candies):
    """Placeholder for a faster algorithm; currently returns None (unimplemented)."""
    # algorithm:
    # group candy by length of their value in bit
    # for each group, find all combinations
    # find all combinations of k items
    # start from 1, until k > n (n is total number of items in the group)
    # find combinations of all groups combined
    # find the largest pile
    return
def process_case(all_candies):
    """Return the best (max) ordinary sum of a pile when the candies can be
    split into two piles with equal "patrick sums" (bitwise XOR), or -1 if no
    such split exists.

    Fixes: Python 2 ``xrange`` -> ``range``; ``max_k`` now uses integer
    division (``/`` would yield a float under Python 3 and break ``range``);
    the side-effecting list comprehension is an explicit loop.
    """
    result = -1
    # Only need to try left piles up to half the candies; the larger side is
    # covered by symmetry.
    max_k = len(all_candies) // 2
    for i in range(max_k):
        k = i + 1
        # k references to the same candidate list, as Combinator expects.
        lists = [all_candies] * k
        c = Combinator(lists)
        for left_pile in c.get_all_combinations():
            right_pile = all_candies[:]
            for candy in left_pile:
                right_pile.remove(candy)
            if patrick_sum(left_pile) == patrick_sum(right_pile):
                result = max(result, normal_sum(left_pile), normal_sum(right_pile))
    return result
def normal_sum(items):
    """Return the ordinary integer sum of an iterable of binary strings.

    (Parameter renamed from ``list``, which shadowed the builtin.)
    """
    return sum(int(item, base=2) for item in items)
def patrick_sum(values):
    """Return the "patrick sum" of binary strings as an int.

    Patrick adds without carrying, so the sum is the bitwise XOR of the
    values. Folding with integer ``^`` is equivalent to the original
    string-based patrick_add chain. (Parameter renamed from ``list``,
    which shadowed the builtin.)
    """
    total = 0
    for value in values:
        total ^= int(value, base=2)
    return total
def patrick_add(left, right):
    """Add two binary strings without carrying (bitwise XOR).

    Returns a binary string zero-padded to the length of the longer input,
    matching the original character-by-character implementation.
    Fixes: Python 2 ``xrange`` removed; no longer shadows the ``list`` builtin.
    """
    width = max(len(left), len(right))
    total = int(left, base=2) ^ int(right, base=2)
    # Pad with leading zeros so the result length equals the longer operand.
    return format(total, '0{}b'.format(width))
|
import pytest
import itertools
from src.card import Card, CardSuit, CardRank, PICTURES
def test_card_constructor():
    """Cards expose suit/rank/trump, both positionally and via keywords."""
    card = Card(CardSuit.HEARTS, CardRank.TEN)
    for attribute in ('suit', 'rank', 'trump'):
        assert hasattr(card, attribute)
    assert card.suit == CardSuit.HEARTS
    assert card.rank == CardRank.TEN
    assert card.trump is True
    assert str(card) == '♥️10'
    kwargs_card = Card(
        suit=CardSuit.CLUBS,
        rank=CardRank.QUEEN)
    assert kwargs_card.suit == CardSuit.CLUBS
    assert kwargs_card.rank == CardRank.QUEEN
    assert kwargs_card.trump is True
class TestCard:
    """Card ordering/trump tests.

    Fix: test_card_order previously asserted ``i <= j`` over ``range(i, ...)``,
    which is vacuously true -- it now compares the sorted cards themselves.
    """
    # The whole test may only work on IntEnums
    allcards = [
        Card(suit, rank) for suit in CardSuit for rank in CardRank
    ]
    trumps = (
        [Card(CardSuit.HEARTS, CardRank.TEN)] +
        [Card(suit, rank)
         for rank in [CardRank.QUEEN, CardRank.JACK]
         for suit in CardSuit if suit != CardSuit.DIAMONDS] +
        [Card(CardSuit.DIAMONDS, rank)
         for rank in CardRank]
    )
    nontrumps = [card for card in allcards if not card.trump]

    def test_card_amount(self):
        assert len(self.allcards) == 20
        assert len(self.trumps) == 12
        assert len(self.nontrumps) == 8

    def test_trumps_correct(self):
        for card in self.trumps:
            assert card.trump is True
        for card in self.nontrumps:
            assert card not in self.trumps

    def test_card_order(self):
        heart_ten = Card(CardSuit.HEARTS, CardRank.TEN)
        assert heart_ten == heart_ten
        for card in self.allcards:
            assert heart_ten >= card
            assert card <= heart_ten
        for trump in self.trumps:
            for nontrump in self.nontrumps:
                assert trump > nontrump
        sorted_trumps = sorted(self.trumps)
        # Every pair of positions must be in non-decreasing card order.
        # (Was ``assert i <= j``, which is always true for j in range(i, ...).)
        for i in range(len(sorted_trumps)):
            for j in range(i, len(sorted_trumps)):
                assert sorted_trumps[i] <= sorted_trumps[j]
        sorted_cards = sorted(self.allcards)
        for card in sorted_cards[:8]:
            assert card.trump is False
        for card in sorted_cards[8:]:
            assert card.trump is True
|
#!/usr/bin/env python
import getpass
import time
import json
import requests
from pprint import pprint
from prettytable import PrettyTable
import ngn_get
def virl_device_data():
    """Fetch the VIRL roster and return one info list per router.

    Each inner list holds the values of the keys PortConsole, NodeSubtype,
    NodeName and managementIP for one node.
    NOTE(review): the order of values in each inner list follows the JSON
    key order returned by the roster API; callers (prepare_lldp) index it
    positionally ([0] = node name, [3] = management IP) -- confirm that
    ordering is stable.
    """
    # Hard-coded DevNet sandbox roster endpoint with guest credentials.
    response = requests.get('http://10.10.20.160:19399/roster/rest/', auth=('guest', 'guest'))
    uglyjson = response.json()
    # Flatten the {node_id: node_dict} mapping into a list of node dicts.
    dic_list = [v for k,v in uglyjson.items()]
    # Only entries that expose a console port are routers we can manage.
    onlyrouters_dic = [pair for pair in dic_list if 'PortConsole' in pair ]
    finaldata = []
    for pair in onlyrouters_dic:
        temp_list = [value for key, value in pair.items() if key == 'PortConsole' or key == 'NodeSubtype' or key == 'NodeName' or key == 'managementIP']
        finaldata.append(temp_list)
    return(finaldata)
def prepare_lldp(device_info, user, pswd):
    """Ensure LLDP is enabled on each device, then show its neighbor table.

    Args:
        device_info: per-device lists from virl_device_data()
                     (index 0 = node name, index 3 = management IP).
        user, pswd: SSH credentials passed to the ngn_get helpers.

    Fixes: the 'true'/'false' string flag is now a real boolean; the unused
    ``nodetype`` local was removed; the re-queried LLDP table is now actually
    printed after its header (previously the header printed with no output).
    """
    for device_list in device_info:
        mgmt_ip = device_list[3]
        nodename = device_list[0]
        print("\n===== Connecting to {node} with ip {ip} =====\n".format(node=nodename, ip=mgmt_ip))
        lldp = ngn_get.netmiko_xe_get_lldp_neigh(mgmt_ip, user, pswd)
        lldp_configured = False
        # ngn_get returns the string 'false' when LLDP is not running.
        if lldp == 'false':
            print("lldp is not configured, let's do it")
            commands = ['lldp run']
            lldp_config = ngn_get.netmiko_xe_config_cmds(commands, mgmt_ip, user, pswd)
            print('lldp was configured succesfully and the output as follows:\n')
            print(lldp_config)
            lldp_configured = True
        else:
            print("See the lldp output as follows:\n")
            print(lldp)
        if lldp_configured:
            # Re-query now that LLDP has just been enabled.
            lldp = ngn_get.netmiko_xe_get_lldp_neigh(mgmt_ip, user, pswd)
            print("See the lldp output as follows:\n")
            print(lldp)
def main():
    """Prompt for SSH credentials and ensure LLDP on all VIRL routers."""
    username = input("Enter your username: ")
    password = getpass.getpass(prompt='Enter your password: ', stream=None)
    prepare_lldp(virl_device_data(), username, password)


if __name__ == '__main__':
    main()
|
from random import randint
class random_number:
    """An inclusive integer range [start, end] that can draw random samples.

    NOTE(review): the non-PEP8 class name and the Java-style accessors are
    kept intentionally -- callers may depend on this exact interface.
    Fix: ``set_end`` was missing its type annotation (inconsistent with
    ``set_start``); return annotations and docstrings added.
    """

    def __init__(self, start: int, end: int):
        self.start = start
        self.end = end

    def get_start(self) -> int:
        """Return the lower bound of the range."""
        return self.start

    def get_end(self) -> int:
        """Return the upper bound of the range."""
        return self.end

    def set_start(self, start: int) -> None:
        """Replace the lower bound."""
        self.start = start

    def set_end(self, end: int) -> None:
        """Replace the upper bound."""
        self.end = end

    def get_random_number(self) -> int:
        """Return a uniformly random integer N with start <= N <= end."""
        return randint(self.start, self.end)
|
# PYTHONSTARTUP helper: enable tab completion in the interactive interpreter.
try:
    import readline
except ImportError:
    # print() call form works on both Python 2 and 3; the original bare
    # ``print "..."`` statement is a SyntaxError on Python 3.
    print("Module readline not available.")
else:
    import rlcompleter
    # NOTE(review): "bind ^I rl_complete" is libedit (macOS) syntax; GNU
    # readline expects "tab: complete" -- confirm the target platform.
    readline.parse_and_bind("bind ^I rl_complete")
|
import sys
sys.path.append('..')
import numpy as np
#import chap5.transfer_function_coef as TF
import yaml
# Load the transfer-function coefficients produced by chap5.
# Fixes: the file handle is now closed via ``with``; ``yaml.safe_load``
# replaces the deprecated bare ``yaml.load`` (no Loader), which can construct
# arbitrary Python objects.
# NOTE(review): if tf_params.yaml contains Python-specific tags (e.g. numpy
# objects), switch to ``yaml.load(param_file, Loader=yaml.Loader)`` instead.
with open('../params/tf_params.yaml', 'r') as param_file:
    params = yaml.safe_load(param_file)
a_phi_1 = params.get('a_phi_1')
a_phi_2 = params.get('a_phi_2')
a_beta_1 = params.get('a_beta_1')
a_beta_2 = params.get('a_beta_2')
a_theta_1 = params.get('a_theta_1')
a_theta_2 = params.get('a_theta_2')
a_theta_3 = params.get('a_theta_3')
a_V_1 = params.get('a_V_1')
a_V_2 = params.get('a_V_2')
a_V_3 = params.get('a_V_3')
trim_state = params.get('trim_state')
trim_input = params.get('trim_input')
gravity = 9.8
#sigma =
Va0 = 25
#----------roll loop-------------
# Hand-tuned gains; the commented formulas show the design equations.
tr = 0.3
wn = 2.2 / tr
h = 0.807
roll_kp = 0.41 #(wn**2) / a_phi_2
roll_kd = 0.08 #(2*h*wn - a_phi_1) / a_phi_2
#----------course loop-------------
W_X = 9
wn = wn / W_X
h = 2.0
course_kp = 1.5 #2*h*wn * Va0/gravity
course_ki = 0.3 #(wn**2) * Va0/gravity
##----------sideslip loop-------------
#dr_max = 1.0
#e_max = 0.5
#h = 5.0
#
#sideslip_kp = dr_max / e_max
#wn = (a_beta_1 + a_beta_2*sideslip_kp) / (2*h)
#sideslip_ki = (wn**2) / a_beta_2
#----------yaw damper-------------
yaw_damper_tau_r = 0.05
yaw_damper_kp = 1
#----------pitch loop-------------
tr = 3.5
wn = 2.2 / tr
h = 5.007
#a_theta_3 *= -1
pitch_kp = -4.5 #(wn**2 - a_theta_2) / a_theta_3
pitch_kd = -0.70 #(2*h*wn - a_theta_1) / a_theta_3
K_theta_DC = (pitch_kp*a_theta_3) / (a_theta_2 + pitch_kp*a_theta_3)
#----------altitude loop-------------
W_h = 10
wn = wn / W_h
h = 0.707
altitude_kp = 0.05 #(2*h*wn) / (K_theta_DC*Va0)
altitude_ki = 0.011 #(wn**2) / (K_theta_DC*Va0)
altitude_zone = 2
#---------airspeed hold using throttle---------------
tr = 1
wn = 2.2 / tr
h = 5.0
airspeed_throttle_kp = 1.25 #(2*h*wn - a_V_1) / a_V_2
airspeed_throttle_ki = 0.35 #wn**2 / a_V_2
|
import tensorflow as tf
import os
from createBoardAndProgram import *
BOARD_SIZE_X = 12
BOARD_SIZE_Y = 12
LENGTH_OF_PROGRAM = 4
# Global TF session, created lazily by init().
sess = None
# initialize variables/model parameters
# 432 = 3 boards * 12 * 12 flattened inputs; 16 outputs reshaped to 4x4 in combine().
W = tf.Variable(tf.zeros([16, 432]), name="weights")
b = tf.Variable(tf.zeros([16]), name="bias")
def init():
    """Create the global TF session and restore weights from ./data if a
    checkpoint exists; otherwise keep the zero-initialized variables."""
    # your code to load the trained neural network into memory
    global sess,W,b
    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # verify if we don't have a checkpoint saved already
    ckpt = tf.train.get_checkpoint_state("./data")
    if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)
# define the training loop operations
def combine(X):
    """Apply the linear layer W*X^T + b and reshape the 16 scores to 4x4."""
    scores = tf.transpose(tf.matmul(W, X, transpose_b=True)) + b
    return tf.reshape(scores, [4, 4])
def inference(X):
    """Return the softmax of the 4x4 score matrix produced by combine()."""
    logits = combine(X)
    return tf.nn.softmax(logits)
# Obstacle board, start position board, goal position board
# Obstacle board, start position board, goal position board
def solve(boxBoard, startBoard, goalBoard):
    """Flatten the three boards into one input row and return the argmax
    move index per program step as a list of ints.

    NOTE(review): tf.arg_max and tf.to_int32 are deprecated TF1 APIs
    (tf.argmax / tf.cast) -- left as-is to match the installed TF version.
    """
    X = tf.reshape(tf.cast([boxBoard, startBoard, goalBoard], tf.float32), [1, 3 * BOARD_SIZE_X * BOARD_SIZE_Y])
    return list(sess.run(tf.to_int32(tf.arg_max(inference(X), 1))))
def test():
    """Generate a random board/program pair and run solve() on it.

    NOTE(review): the generated ``program`` is immediately overwritten by the
    solver's output, and the result is discarded -- nothing is asserted or
    printed; confirm this smoke test is doing what was intended.
    """
    board, startX, startY, goalX, goalY, program = createBoardAndProgram(BOARD_SIZE_X, BOARD_SIZE_Y, 10, 4)
    # One-hot boards marking the start and goal cells.
    startBoard = [[0] * BOARD_SIZE_X for i in range(BOARD_SIZE_Y)]
    startBoard[startY][startX] = 1
    goalBoard = [[0] * BOARD_SIZE_X for i in range(BOARD_SIZE_Y)]
    goalBoard[goalY][goalX] = 1
    program = solve(board, startBoard, goalBoard)
# Build the session and load any saved checkpoint at import time.
init()
#test()
|
# Write a Python program to select the odd items of a list
def oddFromList(listprovided):
    """Return a new list containing the odd items of ``listprovided``,
    preserving order.

    (The manual index/while loop is replaced with the idiomatic list
    comprehension; behavior is unchanged, including for negatives, since
    Python's ``%`` yields a non-negative remainder for a positive modulus.)
    """
    return [item for item in listprovided if item % 2 != 0]
# Demo: print the odd items of a sample list.
listprovided = [3,4,5,6,4,2,1,3,4,6,7,8]
output = oddFromList(listprovided)
print(output)
|
from django.apps import AppConfig
class CraweruiConfig(AppConfig):
    """Django application configuration for the 'crawerui' app."""
    name = 'crawerui'
|
#!/usr/bin/python
import argparse
import configparser
import importlib
import time
import os
import sys
from pyspark.sql import SparkSession
# On a cluster, dependencies ship as zip archives submitted with the job;
# fall back to the local directories during development.
if os.path.exists('libs.zip'):
    sys.path.insert(0, 'libs.zip')
else:
    sys.path.insert(0, './libs')
if os.path.exists('jobs.zip'):
    sys.path.insert(0, 'jobs.zip')
else:
    sys.path.insert(0, './jobs')
# Export AWS credentials from dl.cfg so hadoop-aws (s3a) can pick them up.
# NOTE(review): the path is relative to the current working directory, and
# config.get raises if the file/section is missing -- confirm deployment layout.
config = configparser.ConfigParser()
config.read('../config/dl.cfg')
os.environ['AWS_ACCESS_KEY_ID'] = config.get('AWS', 'AWS_ACCESS_KEY_ID')
os.environ['AWS_SECRET_ACCESS_KEY'] = config.get('AWS', 'AWS_SECRET_ACCESS_KEY')
__author__ = 'dre'
def create_spark_session():
    """
    Creates the spark session

    :return: the configured SparkSession instance
    """
    # We configure spark to download the necessary hadoop-aws dependencies
    # and set the fileoutputcommitter to 2 for better handling of writing data to s3
    spark = SparkSession \
        .builder \
        .config("spark.jars.packages",
                "org.apache.hadoop:hadoop-aws:2.7.0",
                ) \
        .config("spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version", "2") \
        .getOrCreate()
    return spark
def main():
    """Parse CLI arguments, import the requested job module, and time its run."""
    arg_parser = argparse.ArgumentParser(description='Run a PySpark job')
    arg_parser.add_argument('--job', type=str, required=True, dest='job_name',
                            help="The name of the job module you want "
                                 "to run. (ex: poc will run job on "
                                 "jobs.poc package)")
    arg_parser.add_argument('--input-data', type=str, required=True, dest='input_name',
                            help="The path to the directory that contains the input data")
    arg_parser.add_argument('--output-data', type=str, required=True, dest='output_name',
                            help="The path to the directory that contains the output data")
    cli_args = arg_parser.parse_args()
    print("Called with arguments: %s" % cli_args)
    print('\nRunning job %s...\ninput is %s\noutput is %s\n' % (cli_args.job_name, cli_args.input_name, cli_args.output_name))
    # Every job module is expected to expose analyze(spark, input, output).
    job_module = importlib.import_module('jobs.%s' % cli_args.job_name)
    started = time.time()
    spark = create_spark_session()
    job_module.analyze(spark, cli_args.input_name, cli_args.output_name)
    finished = time.time()
    print("\nExecution of job %s took %s seconds" % (cli_args.job_name, finished - started))


if __name__ == "__main__":
    main()
|
def get_ancestors(node):
    """
    Given a template node, returns an ordered list of all the nodes which are
    above it in the template tree (root first, immediate parent last).
    """
    chain = []
    current = node.parent
    while current:
        chain.append(current)
        current = current.parent
    # Collected child-to-root; return root-to-child order.
    return chain[::-1]
|
"""
CSC148, Winter 2019
Assignment 1
Task 1 Tests
"""
import datetime
import pytest
from typing import List, Dict, Union
from application import create_customers, process_event_history
from visualizer import Visualizer
from task1_tests import create_customers_log, create_pure_log
from filter import CustomerFilter, DurationFilter, LocationFilter
# 15 phone lines; the last four digits of each number are the id of the
# customer that owns the line (see the `loc` mapping below).
phone_numbers = ['100-1200', '200-1200', '010-1020', '020-1020',
                 '001-1002', '002-1002', '100-2110', '010-2110',
                 '010-2011', '001-2011', '100-2101', '001-2101',
                 '100-3111', '010-3111', '001-3111']
# A 3x3 grid of longitudes (x) and latitudes (y) inside the Toronto map area.
x1 = -79.572504
x2 = -79.44713
x3 = -79.321756
y1 = 43.743916
y2 = 43.688264
y3 = 43.632611
# Fixed location for each customer id; used as both src_loc and dst_loc.
loc = {1200: (x1, y1), 1020: (x2, y1), 1002: (x3, y1),
       2110: (x1, y2), 2011: (x2, y2), 2101: (x3, y2),
       3111: (x2, y3)}
def create_task4_log() -> Dict[str, List[Dict]]:
    """Build a synthetic call log spanning three billing months.

    Most lines call every other line once for 60s; a few special lines get
    hand-picked durations/call counts so the duration and location filters
    have predictable data to select.
    Fix: local ``min`` renamed to ``minute`` (shadowed the builtin).
    """
    log = {}
    log['events'] = []
    event = {}
    dates = ["2018-11-01", "2018-12-01", "2019-01-01"]
    for i in range(3):
        call_number = 1
        # Lines limited to max_calls outgoing calls per month.
        three_calls_only = ['100-1200', '001-2101', '001-3111']
        #Term, PrePaid, PrePaid
        for src_phone in phone_numbers:
            num_calls = 0
            max_calls = 14
            dur_lst = [[], [], []]
            if src_phone == '100-1200': #Term
                max_calls = 3
                dur_lst = [[20, 30, 40], [50, 10, 40], [50, 60, 40]]
            elif src_phone == '010-1020': #MTM
                for j in range(14):
                    dur_lst[0].append(10 * j)
                    dur_lst[1].append(20 * j)
                    dur_lst[2].append(30 * j)
            elif src_phone == '001-2101': #PrePaid
                max_calls = 3
                dur_lst = [[169, 800, 31], [1000, 931, 69], [500, 469, 531]]
            elif src_phone == '001-3111': #PrePaid
                max_calls = 3
                dur_lst = [[69, 11, 20], [20, 10, 20], [69, 69, 12]]
            for dst_phone in phone_numbers:
                if src_phone != dst_phone:
                    dur = 60
                    if (src_phone in three_calls_only) and num_calls >= max_calls:
                        break
                    elif src_phone in three_calls_only:
                        dur = dur_lst[i][num_calls] * 60
                        num_calls += 1
                    elif src_phone == '200-1200' or src_phone == '100-2101':
                        dur = 65 #Term
                    elif src_phone == '100-3111':
                        dur = 10 * 60 #Term
                    elif src_phone == '010-1020':
                        dur = dur_lst[i][num_calls] * 60 #MTM
                        num_calls += 1
                    # Zero-padded mm:ss so each event gets a unique timestamp.
                    sec = str(call_number % 60).zfill(2)
                    minute = str(call_number // 60).zfill(2)
                    event['type'] = 'call'
                    event['src_number'] = src_phone
                    event['dst_number'] = dst_phone
                    event['time'] = f'{dates[i]} 01:{minute}:{sec}'
                    event['duration'] = dur
                    # Locations are keyed by the customer id in the number.
                    event['src_loc'] = loc[int(src_phone[4:])]
                    event['dst_loc'] = loc[int(dst_phone[4:])]
                    log['events'].append(event.copy())
                    call_number += 1
    log['customers'] = create_customers_log()
    return log
def test_customer_filter() -> None:
    """CustomerFilter: invalid filter strings are no-ops; a valid customer id
    keeps only calls touching that customer's lines.

    Fix: loop variable renamed from ``input`` (shadowed the builtin).
    """
    log = create_pure_log()
    customers = create_customers(log)
    process_event_history(log, customers)
    all_calls = []
    for c in customers:
        hist = c.get_history()
        all_calls.extend(hist[0])
    fil = CustomerFilter()
    invalid_inputs = ['', 'dskljgdf', '69.69', 'd1200', 'L200',
                      '-79.6, 43.3, -79.5, 43.4', '3690', ' ']
    for query in invalid_inputs:
        filtered = fil.apply(customers, all_calls, query)
        assert filtered == all_calls
    line_of_customer = ['100-2101', '001-2101']
    filtered = fil.apply(customers, all_calls, '2101')
    for call in filtered:
        assert call.src_number in line_of_customer \
            or call.dst_number in line_of_customer
        assert loc[2101] == call.src_loc or loc[2101] == call.dst_loc
def test_duration_filter() -> None:
    """DurationFilter: invalid strings are no-ops; 'Lnnn'/'Gnnn' keep calls
    strictly shorter/longer than nnn seconds.

    Fix: loop variable renamed from ``input`` (shadowed the builtin).
    """
    log = create_task4_log()
    customers = create_customers(log)
    process_event_history(log, customers)
    all_calls = []
    for c in customers:
        hist = c.get_history()
        all_calls.extend(hist[0])
    fil = DurationFilter()
    invalid_inputs = ['', 'LG40', 'l50', 'g65', '50', 'sdklfjeind', ' ']
    for query in invalid_inputs:
        filtered = fil.apply(customers, all_calls, query)
        assert filtered == all_calls
    filtered = fil.apply(customers, all_calls, 'L60')
    for call in filtered:
        assert call.duration < 60
    # Nothing can be both shorter and longer than 60s.
    filtered = fil.apply(customers, filtered, 'G60')
    assert filtered == []
    filtered = fil.apply(customers, all_calls, 'G5400')
    for call in filtered:
        assert call.duration > 5400
def test_location_filter() -> None:
    """LocationFilter: invalid/out-of-bounds strings are no-ops; valid boxes
    keep exactly the calls with an endpoint inside the box.

    Fixes: loop variables renamed from ``input``/``key`` iteration tidied
    (``for key in loc`` instead of ``loc.keys()``); builtins no longer shadowed.
    """
    log = create_pure_log()
    customers = create_customers(log)
    process_event_history(log, customers)
    all_calls = []
    for c in customers:
        hist = c.get_history()
        all_calls.extend(hist[0])
    # Small half-widths so each probe box contains exactly one grid point.
    rx = (x2 - x1) / 4
    ry = (y1 - y2) / 4
    fil = LocationFilter()
    MIN_LONGITUDE = -79.697878
    MAX_LONGITUDE = -79.196382
    MIN_LATITUDE = 43.576959
    MAX_LATITUDE = 43.799568
    invalid_inputs = ['', f'{MIN_LONGITUDE}, {MIN_LATITUDE},{MAX_LONGITUDE},{MAX_LATITUDE}',
                      f'-79.698, {MIN_LATITUDE}, {MAX_LONGITUDE}, {MAX_LATITUDE}',
                      f'{MIN_LONGITUDE}, 43.576, {MAX_LONGITUDE}, {MAX_LATITUDE}',
                      f'{MIN_LONGITUDE}, {MIN_LATITUDE}, -79.195, {MAX_LATITUDE}',
                      f'{MIN_LONGITUDE}, {MIN_LATITUDE}, {MAX_LONGITUDE}, 43.8',
                      f'{MIN_LONGITUDE},{MIN_LATITUDE}, -79.54, {MAX_LATITUDE}',
                      f'{MIN_LONGITUDE}, {MIN_LATITUDE}, {MAX_LATITUDE}',
                      f'-79.6, 43.60, -79.2, 43.75, -79.5',
                      'klsjdfohg[we', ' ']
    for query in invalid_inputs:
        filtered = fil.apply(customers, all_calls, query)
        assert filtered == all_calls
    for key in loc:
        x = loc[key][0]
        y = loc[key][1]
        fil_string = f'{x - rx}, {y - ry}, {x + rx}, {y + ry}'
        filtered = fil.apply(customers, all_calls, fil_string)
        lines_in_area = []
        if key == 3111:
            assert len(filtered) == (24 * 3 + 6) * 3
        else:
            assert len(filtered) == 27 * 2 * 3
        for cust in customers:
            if cust.get_id() == key:
                lines_in_area = cust.get_phone_numbers()
                break
        for call in filtered:
            assert call.src_number in lines_in_area \
                or call.dst_number in lines_in_area
            assert loc[int(call.src_number[4:])] == call.src_loc \
                or loc[int(call.dst_number[4:])] == call.dst_loc
    fil_string = f'{x1 - rx}, {y2 - ry}, {x1 + rx}, {y1 + ry}'
    filtered = fil.apply(customers, all_calls, fil_string)
    lines_in_area = ['100-1200', '200-1200', '100-2110', '010-2110']
    assert len(filtered) == (11 * 4 * 2 + 12) * 3
    for call in filtered:
        assert call.src_number in lines_in_area \
            or call.dst_number in lines_in_area
        assert loc[int(call.src_number[4:])] == call.src_loc \
            or loc[int(call.dst_number[4:])] == call.dst_loc
    fil_string = f'{x1}, {y2}, {x2}, {y1}'
    filtered = fil.apply(customers, all_calls, fil_string)
    lines_in_area = ['100-1200', '200-1200', '100-2110', '010-2110',
                     '010-1020', '020-1020', '010-2011', '001-2011']
    assert len(filtered) == (7 * 8 * 3) * 3
    for call in filtered:
        assert call.src_number in lines_in_area \
            or call.dst_number in lines_in_area
        assert loc[int(call.src_number[4:])] == call.src_loc \
            or loc[int(call.dst_number[4:])] == call.dst_loc
def test_combined_filters() -> None:
    """Apply location/duration/customer filters in sequence and check the
    surviving call sets (filters must compose on already-filtered lists)."""
    log = create_task4_log()
    customers = create_customers(log)
    process_event_history(log, customers)
    all_calls = []
    for c in customers:
        hist = c.get_history()
        all_calls.extend(hist[0])
    # Point-box at (x2, y3) isolates customer 3111, then keep only the
    # calls lasting exactly 69 minutes.
    fil = LocationFilter()
    filtered = fil.apply(customers, all_calls, f'{x2}, {y3}, {x2}, {y3}')
    fil = DurationFilter()
    filtered = fil.apply(customers, filtered, f'G{69 * 60 - 1}')
    filtered = fil.apply(customers, filtered, f'L{69 * 60 + 1}')
    assert len(filtered) == 3
    count = 0
    for call in filtered:
        assert call.src_number == '001-3111'
        if call.time.month == 1:
            count += 1
    assert count == 2
    fil = CustomerFilter()
    filtered = fil.apply(customers, all_calls, "1020")
    fil = DurationFilter()
    filtered = fil.apply(customers, filtered, f'G{10 * 60 - 1}')
    filtered = fil.apply(customers, filtered, f'L{130 * 60 + 1}')
    for call in filtered:
        assert 10 * 60 - 1 < call.duration < 130 * 60 + 1
        print(f'src: {call.src_number}, dst: {call.dst_number}, dur: {call.duration}')
    assert len(filtered) == 23 + 11 + 6
    fil = LocationFilter()
    filtered = fil.apply(customers, all_calls, f'{x3}, {y2}, {x3}, {y2}') #2101
    fil = CustomerFilter()
    filtered = fil.apply(customers, filtered, "1002")
    assert len(filtered) == 3 * 2 * 3
    for call in filtered:
        assert call.src_number[4:] == '1002' or call.src_number[4:] == '2101'
    # Customer 3111 has no calls in the 2101-point box; disjoint filters
    # must yield the empty list.
    assert fil.apply(customers, filtered, "3111") == []
    fil = DurationFilter()
    assert fil.apply(customers, filtered, 'L60') == []
if __name__ == '__main__':
    # Run the unit tests first, then launch the interactive visualizer.
    pytest.main(['task4_tests.py'])
    v = Visualizer()
    print("Toronto map coordinates:")
    print(" Lower-left corner: -79.697878, 43.576959")
    print(" Upper-right corner: -79.196382, 43.799568")
    log = create_task4_log()
    customers = create_customers(log)
    process_event_history(log, customers)
    # ----------------------------------------------------------------------
    # NOTE: You do not need to understand any of the implementation below,
    # to be able to solve this assignment. However, feel free to
    # read it anyway, just to get a sense of how the application runs.
    # ----------------------------------------------------------------------
    # Gather all calls to be drawn on screen for filtering, but we only want
    # to plot each call only once, so only plot the outgoing calls to screen.
    # (Each call is registered as both an incoming and outgoing)
    all_calls = []
    for c in customers:
        hist = c.get_history()
        all_calls.extend(hist[0])
    print("\n-----------------------------------------")
    print("Total Calls in the dataset:", len(all_calls))
    # Main loop for the application.
    # 1) Wait for user interaction with the system and processes everything
    # appropriately
    # 2) Take the calls from the results of the filtering and create the
    # drawables and connection lines for those calls
    # 3) Display the calls in the visualization window
    events = all_calls
    while not v.has_quit():
        events = v.handle_window_events(customers, events)
        connections = []
        drawables = []
        for event in events:
            connections.append(event.get_connection())
            drawables.extend(event.get_drawables())
        # Put the connections on top of the other sprites
        drawables.extend(connections)
        v.render_drawables(drawables)
|
"""Quantum Job class"""
import random
import string
from qiskit import _openquantumcompiler as openquantumcompiler
import qiskit.backends as backends
class QuantumJob():
    """Creates a quantum circuit job

    Attributes:
        qobj (dict): describes circuits and configuration to run them
    """
    # TODO We need to create more tests for checking all possible inputs.
    def __init__(self, circuits, backend='local_qasm_simulator',
                 circuit_config=None, seed=None,
                 resources=None,
                 shots=1024, names=None,
                 do_compile=False, preformatted=False):
        """
        Args:
            circuits (QuantumCircuit | list(QuantumCircuit) | qobj):
                QuantumCircuit or list of QuantumCircuit. If preformatted=True,
                this is a raw qobj.
            backend (str): The backend to run the circuit on.
            timeout (float): Timeout for job in seconds.
            seed (int): The initial seed the simulators use.
            resources (dict): Resource requirements of job. Defaults to
                {'max_credits': 3, 'wait': 5, 'timeout': 120}.
            coupling_map (dict): A directed graph of coupling::

                {
                    control(int):
                        [
                            target1(int),
                            target2(int),
                            , ...
                        ],
                        ...
                }

                eg. {0: [2], 1: [2], 3: [2]}
            initial_layout (dict): A mapping of qubit to qubit::

                {
                    ("q", start(int)): ("q", final(int)),
                    ...
                }

                eg.
                {
                    ("q", 0): ("q", 0),
                    ("q", 1): ("q", 1),
                    ("q", 2): ("q", 2),
                    ("q", 3): ("q", 3)
                }
            shots (int): the number of shots
            circuit_type (str): "compiled_dag" or "uncompiled_dag" or
                "quantum_circuit"
            names (str | list(str)): names/ids for circuits
            preformatted (bool): the objects in circuits are already compiled
                and formatted (qasm for online, json for local). If true the
                parameters "names" and "circuit_config" must also be defined
                of the same length as "circuits".
        """
        # Avoid the mutable-default-argument pitfall: the original signature
        # used resources={...}, a single dict shared across every call.
        if resources is None:
            resources = {'max_credits': 3, 'wait': 5, 'timeout': 120}
        # Normalize to a list so the rest of the class can iterate uniformly.
        if isinstance(circuits, list):
            self.circuits = circuits
        else:
            self.circuits = [circuits]
        if names is None:
            self.names = []
            for _ in range(len(self.circuits)):
                self.names.append(self._generate_job_id(length=10))
        elif isinstance(names, list):
            self.names = names
        else:
            self.names = [names]
        self.timeout = resources['timeout']
        self.wait = resources['wait']
        # check whether circuits have already been compiled
        # and formatted for backend.
        if preformatted:
            # circuits is actually a qobj...validate (not ideal but convenient)
            self.qobj = circuits
        else:
            self.qobj = self._create_qobj(circuits, circuit_config, backend,
                                          seed, resources, shots, do_compile)
        self.backend = self.qobj['config']['backend']
        self.resources = resources
        self.seed = seed
        self.result = None

    def _create_qobj(self, circuits, circuit_config, backend, seed,
                     resources, shots, do_compile):
        """Assemble the qobj dict for the given circuits and configuration."""
        # local and remote backends currently need different
        # compiled circuit formats
        formatted_circuits = []
        if do_compile:
            # Compilation happens later; placeholders keep the zip() below
            # aligned with self.circuits. (Was iterating the raw `circuits`
            # argument, which may not be a list -- now uses the normalized
            # self.circuits like the other branches.)
            for circuit in self.circuits:
                formatted_circuits.append(None)
        else:
            if backend in backends.local_backends():
                for circuit in self.circuits:
                    formatted_circuits.append(openquantumcompiler.dag2json(circuit))
            else:
                for circuit in self.circuits:
                    formatted_circuits.append(circuit.qasm(qeflag=True))
        # create circuit component of qobj
        circuit_records = []
        if circuit_config is None:
            config = {'coupling_map': None,
                      'basis_gates': 'u1,u2,u3,cx,id',
                      'layout': None,
                      'seed': seed}
            circuit_config = [config] * len(self.circuits)
        for circuit, fcircuit, name, config in zip(self.circuits,
                                                   formatted_circuits,
                                                   self.names,
                                                   circuit_config):
            record = {
                'name': name,
                'compiled_circuit': None if do_compile else fcircuit,
                'compiled_circuit_qasm': None if do_compile else fcircuit,
                'circuit': circuit,
                'config': config
            }
            circuit_records.append(record)
        return {'id': self._generate_job_id(length=10),
                'config': {
                    'max_credits': resources['max_credits'],
                    'shots': shots,
                    'backend': backend
                },
                'circuits': circuit_records}

    def _generate_job_id(self, length=10):
        """Return a random alphanumeric identifier of the given length."""
        return ''.join([random.choice(
            string.ascii_letters + string.digits) for _ in range(length)])
|
import pika
# Publish a single persistent "Hello World" message to the durable
# 'hello' queue on a local RabbitMQ broker.
connection = pika.BlockingConnection(
    pika.ConnectionParameters('localhost')
)
channel = connection.channel() # open a channel on the connection
# declare the queue; durable=True makes the queue itself survive broker restarts
channel.queue_declare(queue='hello',durable=True)
# with an empty exchange the message is delivered to a single consumer, not broadcast
# fanout: every queue bound to the exchange receives the message
#
#
#
# direct: the routing key plus the exchange select the single queue that receives the message
# topic: only queues whose binding pattern matches the routing key receive the message
channel.basic_publish(exchange='',
                      routing_key='hello',# queue name
                      body='Hello World',
                      properties=pika.BasicProperties(
                          delivery_mode=2,)#make the message persistent
                      )
print("[x] ----->>>>>>")
connection.close()
#当前模式下, |
# Generated by Django 3.1.1 on 2020-09-15 05:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional upload field 'emp_id' to the Face model."""
    dependencies = [
        ('attend', '0004_auto_20200914_1502'),
    ]
    operations = [
        migrations.AddField(
            model_name='face',
            name='emp_id',
            # NOTE(review): a FileField named 'emp_id' is unusual -- confirm it
            # is meant to hold an uploaded file rather than an identifier.
            field=models.FileField(blank=True, null=True, upload_to=''),
        ),
    ]
|
# Generated by Django 2.0.3 on 2018-04-07 16:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Move the course_level foreign key from SingleVideo onto Course."""
    dependencies = [
        ('web', '0009_basic_info_promo_video'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='singlevideo',
            name='course_level',
        ),
        migrations.AddField(
            model_name='course',
            name='course_level',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='web.CourseLevel'),
        ),
    ]
|
"""Modifier Schema"""
from __future__ import annotations
from pydantic import Field, BaseModel, constr, validator, root_validator
from typing import List, Union, Optional
from ._base import IDdRadianceBaseModel
class Void(BaseModel):
    """Void modifier"""
    # Literal type tag; the regex pins serialized schemas to exactly 'void'.
    type: constr(regex='^void$') = 'void'
class ModifierBase(IDdRadianceBaseModel):
    """Base class for Radiance Modifiers"""
    # Subclasses override this tag with their own material name.
    type: constr(regex='^ModifierBase$') = 'ModifierBase'
class Mirror(ModifierBase):
    """Radiance mirror material."""
    type: constr(regex='^mirror$') = 'mirror'

    # NOTE(review): _REFERENCE_UNION_MODIFIERS is a forward reference resolved
    # lazily via `from __future__ import annotations`; it is presumably defined
    # later in this module -- verify update_forward_refs() is called.
    modifier: Optional[_REFERENCE_UNION_MODIFIERS] = Field(
        default=Void(),
        description='Material modifier (default: Void).'
    )
    dependencies: List[_REFERENCE_UNION_MODIFIERS] = Field(
        default=None,
        description='List of modifiers that this modifier depends on. '
        'This argument is only useful for defining advanced modifiers '
        'where the modifier is defined based on other modifiers '
        '(default: None).'
    )
    # Per-channel reflectances; mirrors default to fully reflective.
    r_reflectance: float = Field(
        default=1,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the red channel reflectance '
        '(default: 1).'
    )
    g_reflectance: float = Field(
        default=1,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the green channel reflectance '
        '(default: 1).'
    )
    b_reflectance: float = Field(
        default=1,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the blue channel reflectance '
        '(default: 1).'
    )
    # NOTE(review): the description below says "alternat_material" -- typo for
    # "alternate_material"; left unchanged since descriptions feed the
    # published schema.
    alternate_material: _REFERENCE_UNION_MODIFIERS = Field(
        default=None,
        description='An optional material that may be used like the illum type to '
        'specify a different material to be used for shading non-source rays. '
        'If None, this will keep the alternat_material as mirror. If this alternate '
        'material is given as Void, then the mirror surface will be invisible. '
        'Using Void is only appropriate if the surface hides other (more '
        'detailed) geometry with the same overall reflectance (default: None).'
    )
class Plastic(ModifierBase):
    """Radiance plastic material."""
    # Discriminator: the literal string 'plastic'.
    type: constr(regex='^plastic$') = 'plastic'
    modifier: Optional[_REFERENCE_UNION_MODIFIERS] = Field(
        default=Void(),
        description='Material modifier (default: Void).'
    )
    dependencies: List[_REFERENCE_UNION_MODIFIERS] = Field(
        default=None,
        description='List of modifiers that this modifier depends on. '
        'This argument is only useful for defining advanced modifiers '
        'where the modifier is defined based on other modifiers '
        '(default: None).'
    )
    # Diffuse reflectance per RGB channel, each constrained to [0, 1].
    r_reflectance: float = Field(
        default=0.0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the red channel reflectance '
        '(default: 0).'
    )
    g_reflectance: float = Field(
        default=0.0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the green channel reflectance '
        '(default: 0).'
    )
    b_reflectance: float = Field(
        default=0.0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the blue channel reflectance '
        '(default: 0).'
    )
    # Fraction of light reflected specularly rather than diffusely.
    specularity: float = Field(
        default=0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the fraction of specularity. '
        'Specularity fractions greater than 0.1 are not realistic '
        'for non-metallic materials. (default: 0).'
    )
    # Surface micro-facet roughness (rms facet slope).
    roughness: float = Field(
        default=0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the roughness, specified as the '
        'rms slope of surface facets. Roughness greater than 0.2 are '
        'not realistic (default: 0).'
    )
class Metal(Plastic):
    """Radiance metal material."""
    type: constr(regex='^metal$') = 'metal'
    # Same schema as Plastic; only the specularity default changes (0 -> 0.9).
    specularity: float = Field(
        default=0.9,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the fraction of specularity. '
        'Specularity fractions lower than 0.9 are not realistic for '
        'metallic materials. (default: 0.9).'
    )
class Trans(Plastic):
    """Radiance Translucent material.

    Extends Plastic with diffuse/specular transmission fractions and a
    cross-field check that total reflection + transmission stays <= 1.
    """
    type: constr(regex='^trans$') = 'trans'
    transmitted_diff: float = Field(
        default=0,
        ge=0,
        le=1,
        description='The fraction of transmitted light that is transmitted diffusely in '
        'a scattering fashion (default: 0).'
    )
    transmitted_spec: float = Field(
        default=0,
        ge=0,
        le=1,
        description='The fraction of transmitted light that is not diffusely scattered '
        '(default: 0).'
    )

    @root_validator
    def check_sum_fractions(cls, values):
        """Ensure sum of transmitted and reflected fractions is at most 1."""
        trans_diff = values.get('transmitted_diff')
        trans_spec = values.get('transmitted_spec')
        r_refl = values.get('r_reflectance')
        g_refl = values.get('g_reflectance')
        b_refl = values.get('b_reflectance')
        identifier = values.get('identifier')
        # fix: fields that already failed validation are absent from `values`;
        # bail out instead of raising TypeError on arithmetic with None.
        if None in (trans_diff, trans_spec, r_refl, g_refl, b_refl):
            return values
        summed = trans_diff + trans_spec + (r_refl + g_refl + b_refl) / 3.0
        assert summed <= 1, 'The sum of the transmitted and reflected ' \
            'fractions cannot be greater than 1, but is {} for modifier {}.'.format(
                summed, identifier)
        return values
class Glass(ModifierBase):
    """Radiance glass material.

    Per-channel transmissivity plus an index of refraction.
    """
    type: constr(regex='^glass$') = 'glass'
    modifier: Optional[_REFERENCE_UNION_MODIFIERS] = Field(
        default=Void(),
        description='Material modifier (default: Void).'
    )
    dependencies: List[_REFERENCE_UNION_MODIFIERS] = Field(
        default=None,
        description='List of modifiers that this modifier depends on. '
        'This argument is only useful for defining advanced modifiers '
        'where the modifier is defined based on other modifiers '
        '(default: None).'
    )
    # Transmissivity per RGB channel, each constrained to [0, 1].
    r_transmissivity: float = Field(
        default=0.0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the red channel transmissivity '
        '(default: 0).'
    )
    g_transmissivity: float = Field(
        default=0.0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the green channel transmissivity '
        '(default: 0).'
    )
    b_transmissivity: float = Field(
        default=0.0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the blue channel transmissivity '
        '(default: 0).'
    )
    refraction_index: Optional[float] = Field(
        default=1.52,
        ge=0,
        # fix: old text said "between 0 and 1", contradicting ge=0 (no upper
        # bound) and the 1.52 default.
        description='A value greater than or equal to 0 for the index of refraction '
        '(default: 1.52).'
    )
class BSDF(ModifierBase):
    """Radiance BSDF (Bidirectional Scattering Distribution Function) material."""
    type: constr(regex='^BSDF$') = 'BSDF'
    modifier: Optional[_REFERENCE_UNION_MODIFIERS] = Field(
        default=Void(),
        description='Material modifier (default: Void).'
    )
    dependencies: List[_REFERENCE_UNION_MODIFIERS] = Field(
        default=None,
        description='List of modifiers that this modifier depends on. '
        'This argument is only useful for defining advanced modifiers '
        'where the modifier is defined based on other modifiers '
        '(default: None).'
    )
    # 3-vector giving the hemisphere the BSDF faces.
    up_orientation: List[float] = Field(
        default=(0.01, 0.01, 1.00),
        min_items=3,
        max_items=3,
        description='Vector as sequence that sets the hemisphere that the BSDF material '
        'faces. (default: (0.01, 0.01, 1.00).'
    )
    thickness: float = Field(
        default=0,
        description='Optional number to set the thickness of the BSDF material '
        'Sign of thickness indicates whether proxied geometry is '
        'behind the BSDF surface (when thickness is positive) or in '
        'front (when thickness is negative)(default: 0).'
    )
    function_file: str = Field(
        default='.',
        min_length=1,
        max_length=100,
        description='Optional input for function file (default: ".").'
    )
    transform: str = Field(
        default=None,
        min_length=1,
        max_length=100,
        description='Optional transform input to scale the thickness and reorient '
        'the up vector (default: None).'
    )
    bsdf_data: str = Field(
        ...,
        description='A string with the contents of the BSDF XML file.'
    )
    front_diffuse_reflectance: List[float] = Field(
        default=None,
        min_items=3,
        max_items=3,
        description='Optional additional front diffuse reflectance as sequence of '
        'numbers (default: None).'
    )
    back_diffuse_reflectance: List[float] = Field(
        default=None,
        min_items=3,
        max_items=3,
        description='Optional additional back diffuse reflectance as sequence of '
        'numbers (default: None).'
    )
    diffuse_transmittance: List[float] = Field(
        default=None,
        min_items=3,
        max_items=3,
        description='Optional additional diffuse transmittance as sequence of '
        'numbers (default: None).'
    )

    @validator('front_diffuse_reflectance')
    def check_front_diff_value(cls, values):
        """Ensure every list value is between 0 and 1."""
        # fix: the field defaults to None; don't iterate over None.
        if values is None:
            return values
        assert all(0 <= v <= 1 for v in values), \
            'Every value in front diffuse reflectance must be between 0 and 1.'
        return values

    @validator('back_diffuse_reflectance')
    def check_back_diff_value(cls, values):
        """Ensure every list value is between 0 and 1."""
        if values is None:  # fix: allow the None default
            return values
        assert all(0 <= v <= 1 for v in values), \
            'Every value in back diffuse reflectance must be between 0 and 1.'
        return values

    @validator('diffuse_transmittance')
    def check_diff_trans_value(cls, values):
        """Ensure every list value is between 0 and 1."""
        if values is None:  # fix: allow the None default
            return values
        assert all(0 <= v <= 1 for v in values), \
            'Every value in diffuse transmittance must be between 0 and 1.'
        return values
class Light(ModifierBase):
    """Radiance Light material."""
    type: constr(regex='^light$') = 'light'
    modifier: Optional[_REFERENCE_UNION_MODIFIERS] = Field(
        default=Void(),
        description='Material modifier (default: Void).'
    )
    dependencies: List[_REFERENCE_UNION_MODIFIERS] = Field(
        default=None,
        description='List of modifiers that this modifier depends on. '
        'This argument is only useful for defining advanced modifiers '
        'where the modifier is defined based on other modifiers '
        '(default: None).'
    )
    # Emittance per RGB channel, each constrained to [0, 1].
    r_emittance: float = Field(
        default=0.0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the red channel of the modifier '
        '(default: 0).'
    )
    g_emittance: float = Field(
        default=0.0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the green channel of the modifier '
        '(default: 0).'
    )
    b_emittance: float = Field(
        default=0.0,
        ge=0,
        le=1,
        description='A value between 0 and 1 for the blue channel of the modifier '
        '(default: 0).'
    )
class Glow(Light):
    """Radiance Glow material."""
    type: constr(regex='^glow$') = 'glow'
    # NOTE(review): the description text ("tested for zero") reads garbled;
    # confirm intended wording upstream before changing the runtime string.
    max_radius: float = Field(
        default=0,
        description='Maximum radius for shadow testing (default: 0). Surfaces with zero '
        'will never be tested for zero, although it may participate in '
        'interreflection calculation. Negative values will never contribute '
        'to scene illumination.'
    )
# Union of all concrete Modifier Schema objects, used by the forward
# references in the class bodies above (lazy thanks to
# `from __future__ import annotations`).
_REFERENCE_UNION_MODIFIERS = \
Union[Plastic, Glass, BSDF, Glow, Light, Trans, Metal, Void, Mirror]
# Required for self-referencing models
# see https://pydantic-docs.helpmanual.io/#self-referencing-models
Mirror.update_forward_refs()
Plastic.update_forward_refs()
Glass.update_forward_refs()
BSDF.update_forward_refs()
Glow.update_forward_refs()
Light.update_forward_refs()
Trans.update_forward_refs()
Metal.update_forward_refs()
Void.update_forward_refs()
|
import PyDynamixel_v2.PyDynamixel_v2 as pd
import SerialReader.Serial as sr
from time import sleep
import serial
BAUDRATE = 10000000

# Pick the serial port where the RS485 converter is attached, then scan the
# Dynamixel bus for responding servo ids.
comport = None
while True:
    print('Detectando portas Seriais....\n')
    comports = sr.serialPorts()
    for i, com in enumerate(comports):
        print(i, com, end='\t')
    if not comports:
        print("Nenhuma porta serial detectada ! \nPressione enter para atualizar")
        input()
        # fix: previously fell through and parsed a stale/undefined `ind`
        continue
    ind = input("\nEscolha a porta Serial onde esta conectado o Conversor RS845: ")
    try:
        ind = int(ind)
        # Probe the port: open/close once to confirm it is usable.
        probe = serial.Serial(comports[ind], baudrate=BAUDRATE)
        probe.close()
        comport = comports[ind]
        break
    # fix: narrowed the bare except — ValueError (non-numeric input),
    # IndexError (out-of-range choice), OSError (pyserial open failure).
    except (ValueError, IndexError, OSError):
        print("Comport inválida, tente outra!")

# fix: renamed from `serial`, which shadowed the pyserial module above.
dxl_bus = pd.DxlComm(port=comport, baudrate=BAUDRATE)
for servo_id in range(100):
    sleep(0.05)
    jointest = pd.Joint(servo_id)
    try:
        # Attach the joint; only ids present on the bus respond.
        dxl_bus.attach_joint(jointest)
        dxl_bus.enable_torques()
        print(jointest.servo_id )
    except Exception:
        pass  # id not present on the bus; keep scanning
from common.run_method import RunMethod
import allure
@allure.step("极权限/根据员工ID查询为角色组管理员的所有角色分组")
def role_queryMasterRoleTreeByEmployeeId_get(params=None, header=None, return_json=True, **kwargs):
    """GET /api-admin/role/queryMasterRoleTreeByEmployeeId.

    :param params: query-string parameters
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/根据员工ID查询为角色组管理员的所有角色分组"
    return RunMethod.run_request(
        "GET", "/api-admin/role/queryMasterRoleTreeByEmployeeId",
        params=params, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/根据员工ID查询角色树")
def role_queryRoleTreeByEmployeeId_get(params=None, header=None, return_json=True, **kwargs):
    """GET /api-admin/role/queryRoleTreeByEmployeeId.

    :param params: query-string parameters
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/根据员工ID查询角色树"
    return RunMethod.run_request(
        "GET", "/api-admin/role/queryRoleTreeByEmployeeId",
        params=params, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/添加角色")
def role_addRole_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /api-admin/role/addRole.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/添加角色"
    return RunMethod.run_request(
        "POST", "/api-admin/role/addRole",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/添加角色组")
def role_addRoleGroup_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /api-admin/role/addRoleGroup.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/添加角色组"
    return RunMethod.run_request(
        "POST", "/api-admin/role/addRoleGroup",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/删除角色组")
def role_deleteRoleGroup_delete(params=None, body=None, header=None, return_json=True, **kwargs):
    """DELETE /api-admin/role/deleteRoleGroup.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/删除角色组"
    return RunMethod.run_request(
        "DELETE", "/api-admin/role/deleteRoleGroup",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/删除角色")
def role_deleteRole_delete(params=None, body=None, header=None, return_json=True, **kwargs):
    """DELETE /api-admin/role/deleteRole.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/删除角色"
    return RunMethod.run_request(
        "DELETE", "/api-admin/role/deleteRole",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/查询员工对应的所有角色")
def role_queryRolesByEmployeeId_get(params=None, header=None, return_json=True, **kwargs):
    """GET /api-admin/role/queryRolesByEmployeeId.

    :param params: query-string parameters
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/查询员工对应的所有角色"
    return RunMethod.run_request(
        "GET", "/api-admin/role/queryRolesByEmployeeId",
        params=params, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/修改员工与角色绑定关系")
def role_updateEmployeeRoles_put(params=None, body=None, header=None, return_json=True, **kwargs):
    """PUT /api-admin/role/updateEmployeeRoles.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/修改员工与角色绑定关系"
    return RunMethod.run_request(
        "PUT", "/api-admin/role/updateEmployeeRoles",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/修改角色组名称")
def role_updateRoleGroupName_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /api-admin/role/updateRoleGroupName.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/修改角色组名称"
    return RunMethod.run_request(
        "POST", "/api-admin/role/updateRoleGroupName",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/修改角色名称")
def role_updateRoleName_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /api-admin/role/updateRoleName.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/修改角色名称"
    return RunMethod.run_request(
        "POST", "/api-admin/role/updateRoleName",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/根据角色ID查询员工列表")
def role_queryEmployeesByRoleId_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /api-admin/role/queryEmployeesByRoleId.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/根据角色ID查询员工列表"
    return RunMethod.run_request(
        "POST", "/api-admin/role/queryEmployeesByRoleId",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/删除员工角色关系")
def role_deleteEmployeeRole_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /api-admin/role/deleteEmployeeRole.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/删除员工角色关系"
    return RunMethod.run_request(
        "POST", "/api-admin/role/deleteEmployeeRole",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
@allure.step("极权限/角色添加员工信息")
def role_addEmployeeByRoleId_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /api-admin/role/addEmployeeByRoleId.

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> parsed JSON; False -> raw response
    :param kwargs: extra options (e.g. host) forwarded to the request runner
    :return: parsed JSON by default, raw response when return_json=False
    """
    step_name = "极权限/角色添加员工信息"
    return RunMethod.run_request(
        "POST", "/api-admin/role/addEmployeeByRoleId",
        params=params, body=body, header=header, return_json=return_json,
        name=step_name, **kwargs)
|
'''
50. Pow(x, n)
Implement pow(x, n).
Example 1:
Input: 2.00000, 10
Output: 1024.00000
Example 2:
Input: 2.10000, 3
Output: 9.26100
'''
class Solution(object):
    def myPow(self, x, n):
        """Compute x**n with binary (fast) exponentiation in O(log n) steps.

        :type x: float
        :type n: int
        :rtype: float

        Fixes vs. original: 0**n now returns 0.0 for n > 0 (it used to
        return 1), and halving uses integer shifts so the loop terminates
        on Python 3 too (the old `N = N/2` produced floats).
        """
        if n == 0:
            return 1  # x**0 == 1 by convention, including 0**0 here
        N = abs(n)
        result = 1.0
        base = x
        while N:
            if N & 1:
                # Current bit set: fold this power of the base into the result.
                result *= base
            base *= base
            N >>= 1
        if n < 0:
            # Negative exponent: reciprocal (ZeroDivisionError for x == 0).
            return 1.0 / result
        return result
if __name__ == '__main__':
    # Smoke test: 2**10 should print 1024.0.
    x = 2
    n = 10
    cs = Solution()
    print(cs.myPow(x, n))  # fix: print() call so this also runs on Python 3
|
def SelectionSort(numbers):
    """Sort `numbers` in place (ascending selection sort) and print the result."""
    size = len(numbers)
    for pos in range(size - 1):
        # Locate the smallest remaining element in numbers[pos:].
        smallest = pos
        for candidate in range(pos + 1, size):
            if numbers[candidate] < numbers[smallest]:
                smallest = candidate
        # Move it into its final slot via tuple swap.
        numbers[pos], numbers[smallest] = numbers[smallest], numbers[pos]
    print(numbers)
SelectionSort([2,4,15,1])
from queue import PriorityQueue
from collections import defaultdict
from math import sqrt
# This program, along with graph.py, will be used by r2d2_commands.py for navigation
inf = float('inf')
def null_heuristic(u, v):
    """Zero heuristic: makes A* behave exactly like Dijkstra's algorithm."""
    return 0
def manhattan_distance_heuristic(u, v):
    """L1 (taxicab) distance: sum of absolute per-coordinate differences."""
    total = 0
    for a, b in zip(u, v):
        total += abs(a - b)
    return total
def euclidean_distance_heuristic(u, v):
    """L2 (straight-line) distance between coordinate sequences u and v."""
    squared = sum((a - b) * (a - b) for a, b in zip(u, v))
    return sqrt(squared)
# adapted from https://en.wikipedia.org/wiki/A*_search_algorithm#Pseudocode
def A_star(G, start, goal, heuristic=null_heuristic):
    """A* search from `start` to `goal` on graph G.

    G must provide `neighbors(u)` and `dist_between(u, v)`. Returns the
    path as a list of nodes, or None when the goal is unreachable.

    Fixes vs. original: the goal is only accepted when POPPED from the
    frontier (returning as soon as a neighbor equals the goal can yield a
    suboptimal path); the loop is driven by the frontier so an unreachable
    goal returns None instead of blocking forever on an empty
    PriorityQueue; a monotonically increasing counter breaks fScore ties
    so nodes themselves are never compared.
    """
    closedSet = set()
    parent = {}
    gScore = defaultdict(lambda: inf)   # gScore[v] = best known cost(start, v)
    gScore[start] = 0
    fScore = defaultdict(lambda: inf)   # fScore[v] = gScore[v] + heuristic(v, goal)
    fScore[start] = heuristic(start, goal)
    frontier = PriorityQueue()
    counter = 0  # tie-breaker: entries with equal fScore pop in insertion order
    frontier.put((fScore[start], counter, start))
    while not frontier.empty():
        _, _, u = frontier.get()
        if u in closedSet:
            continue  # stale entry superseded by a cheaper path
        if u == goal:
            return reconstruct_path(start, goal, parent)
        closedSet.add(u)
        for v in G.neighbors(u):
            if v in closedSet:
                continue
            tentative_gScore = gScore[u] + G.dist_between(u, v)
            if tentative_gScore >= gScore[v]:
                continue  # not an improvement
            parent[v] = u
            gScore[v] = tentative_gScore
            fScore[v] = tentative_gScore + heuristic(v, goal)
            counter += 1
            frontier.put((fScore[v], counter, v))
    return None  # goal unreachable from start
def reconstruct_path(start, goal, parent):
    """Walk `parent` pointers back from goal to start; return the forward path."""
    chain = []
    node = goal
    while node != start:
        chain.append(node)
        node = parent[node]
    chain.append(start)
    chain.reverse()
    return chain
from typing import Optional, Union
import numpy as np
import pandas as pd # type: ignore
import matplotlib.pyplot as plt # type: ignore
import matplotlib.animation as animation # type: ignore
class Hist:
    """Animated histogram of age-at-death counts, revealed one bin per frame."""

    def __init__(self, data: pd.DataFrame, numbins: int) -> None:
        fig, ax = plt.subplots()
        # see https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hist.html
        # n: per-bin counts; patches: the bar artists mutated by __animate.
        self.n, _bins, self.patches = plt.hist(data, numbins, facecolor='black')
        ax.set_title("Discrete case-based mortality model (%d people)" % len(data))
        ax.set_xlabel("Age at Death")
        ax.set_ylabel("Persons")
        # One frame per bin, 100 ms apart; bars appear left to right.
        self.anim = animation.FuncAnimation(fig, self.__animate, interval=100, frames=numbins, repeat=False)

    def save(self, filename: str) -> None:
        """Write the animation to `filename` as a GIF via ImageMagick."""
        # there seems to be no way of preventing passing the loop once setting to the saved gif and it loops forever, which is very annoying
        self.anim.save(filename, dpi=80, writer=animation.ImageMagickWriter(extra_args=["-loop", "1"]))

    def show(self) -> None:
        """Display the animation in an interactive window (blocks until closed)."""
        plt.show()

    def __animate(self, frameno: int) -> Union[list, list[list]]:
        """Frame callback: show bars for bins 0..frameno, zero the rest."""
        i = 0
        for rect, h in zip(self.patches, self.n):
            rect.set_height(h if i <= frameno else 0)
            i = i + 1
        return self.patches
def plot(pop_disc: pd.DataFrame, pop_cont: pd.DataFrame, filename: Optional[str]=None, anim_filename: Optional[str]=None) -> None:
    """Overlay discrete vs. continuous age-at-death histograms as line plots.

    Optionally saves the static figure to `filename` and an animated
    histogram of the discrete population to `anim_filename`; always ends
    by showing the animation window.
    """
    # Shared binning covering both populations, one bin per year of age.
    bins = int(max(pop_disc.age_at_death.max(), pop_cont.age_at_death.max())) + 1
    rng = (0.0, float(bins))
    y1, x1 = np.histogram(pop_disc.age_at_death, bins, range=rng)
    plt.plot(x1[1:], y1)  # counts plotted against right bin edges
    y2, x2 = np.histogram(pop_cont.age_at_death, bins, range=rng)
    plt.plot(x2[1:], y2)
    plt.title("Mortality model sampling algorithm comparison")
    plt.legend(["Discrete", "Continuous"])
    plt.xlabel("Age at Death")
    plt.ylabel("Persons")
    if filename is not None:
        plt.savefig(filename, dpi=80)
    # NOTE(review): bin count below comes from `pop_disc.age`, not
    # `age_at_death` — confirm both columns exist and this is intended.
    h = Hist(pop_disc.age_at_death, int(max(pop_disc.age)))
    if anim_filename is not None:
        h.save(anim_filename)
    h.show()
|
from flask import Flask, render_template, request, url_for, redirect, flash
import database as db
from forms import ChargedUpForm, FindTeamForm, PitScoutingForm
import os
from google.cloud import storage
from werkzeug.utils import secure_filename
app = Flask(__name__)
# Fresh random secret each start: sessions/CSRF tokens reset on every restart.
app.config['SECRET_KEY'] = os.urandom(32)
# Local staging directory for uploaded pit-scouting images (env var required).
app.config['UPLOAD_FOLDER'] = os.environ['UPLOAD_FOLDER']
@app.route('/')
def main():
    """Render the landing page with a blank match-scouting form."""
    return render_template('index.html', form=ChargedUpForm())  # the main page
@app.route('/submitdata', methods=['POST'])  # ONLY the post responses will be filtered here and dealt with here
def submitData():
    """Store one match-scouting submission and show a confirmation page.

    Reads the posted form, defaults the optional checkboxes, coerces the
    scoring counters to int, persists the record, then re-renders every
    field on confirm.html.
    """
    form = dict(request.form)
    # Unchecked checkboxes are absent from the POST body; default them to "n".
    form.setdefault("disabled", "n")
    form.setdefault("disconnected", "n")
    counter_fields = (
        "cone_auto_top", "cone_auto_middle", "cone_auto_hybrid",
        "cone_teleop_top", "cone_teleop_middle", "cone_teleop_hybrid",
        "cube_auto_top", "cube_auto_middle", "cube_auto_hybrid",
        "cube_teleop_top", "cube_teleop_middle", "cube_teleop_hybrid",
    )
    # to clear things up, this data is the data of a single match
    data = {
        "team_number": int(form["team_number"]),
        "match": int(form["match"]),
        "starting_pos": form["starting_pos"],
        "mobility": form["mobility"],
        "disabled": form["disabled"],
        "disconnected": form["disconnected"],
        "disconnected_total_seconds": form["disconnected_total_seconds"],
        "auto_charge": form["auto_charge"],
        "teleop_charge": form["teleop_charge"],
        "name": form["name"],
        "notes": form["notes"],
    }
    for key in counter_fields:
        data[key] = int(form[key])
    db.setData(data)
    # confirm.html's expected kwargs are exactly the keys stored above, so
    # the old 24-line kwarg list collapses to a single splat.
    return render_template('confirm.html', **data)
@app.route('/getdata')
def getTeamData():
    """Show every scouted match plus per-stat averages for one team.

    Expects a `team` query parameter; renders team_data.html, or a short
    plain-text message when the team is missing or not yet scouted.
    """
    team_number = request.args.get('team')
    # fix: query args are strings, so the old `team_number == 0` comparison
    # could never fire; `not team_number` also rejects the empty string.
    if not team_number:
        return """ No team number was specified, therefore no team data was fetched from the database. Please try again! """
    matches = db.getData(int(team_number))
    if not matches:  # no match data recorded for this team
        return """ This team has not been scouted yet! Get on that! """
    stat_keys = (
        "cone_auto_top", "cone_auto_middle", "cone_auto_hybrid",
        "cube_auto_top", "cube_auto_middle", "cube_auto_hybrid",
        "cone_teleop_top", "cone_teleop_middle", "cone_teleop_hybrid",
        "cube_teleop_top", "cube_teleop_middle", "cube_teleop_hybrid",
    )
    # Replace the 24-line copy-paste averaging with one loop. The template
    # kwargs use phase-first order (cone_auto_top -> avg_auto_cone_top).
    averages = {}
    for key in stat_keys:
        piece, phase, row = key.split("_")
        averages[f"avg_{phase}_{piece}_{row}"] = round(
            sum(m[key] for m in matches) / len(matches), 3)
    try:
        return render_template('team_data.html',
                               number=team_number,
                               matches=matches,
                               matches_len=len(matches),
                               **averages)
    except KeyError:
        return """ This team has not been scouted yet! Get on that! """
@app.route('/rankings')
def toRankings():
    """Rankings landing page: defaults to the average-teleop-cone table."""
    ranked_teams = db.getConeTeleopRankings()
    return render_template('rankings.html',
                           name="Average Teleop Cone",
                           teams=ranked_teams,
                           teams_len=len(ranked_teams))
@app.route('/getrankings', methods=['POST'])
def getRankingData():
    """Render rankings.html for the ranking metric chosen in the posted form.

    The `config` form field selects which db ranking query to run. Unknown
    values fall back to the cone-teleop ranking; this also fixes a crash in
    the old else-branch, where `name` was never assigned before use.
    """
    # metric key -> (db query, page heading). Headings are preserved verbatim,
    # including the original's inconsistent trailing colons and the
    # "Cube"/"Cone" mix-up on cone_teleop_middle (flagged, not changed).
    dispatch = {
        "default": (db.getConeTeleopRankings, "Cone Teleop Rankings"),
        "ConeAuto_avg": (db.getConeAutoRankings, "Cone Auto Rankings"),
        "ConeTeleop_avg": (db.getConeTeleopRankings, "Cone Teleop Rankings"),
        "CubeAuto_avg": (db.getCubeAutoRankings, "Cube Auto Rankings"),
        "CubeTeleop_avg": (db.getCubeTeleopRankings, "Cube Teleop Rankings"),
        "cone_auto_top": (db.getConeAutoTopRankings, "Cone Auto Top Rankings"),
        "cone_auto_middle": (db.getConeAutoMiddleRankings, "Cone Auto Middle Rankings"),
        "cone_auto_hybrid": (db.getConeAutoHybridRankings, "Cone Auto Hybrid:"),
        "cube_auto_top": (db.getCubeAutoTopRankings, "Cube Auto Top Rankings:"),
        "cube_auto_middle": (db.getCubeAutoMiddleRankings, "Cube Auto Middle Rankings:"),
        "cube_auto_hybrid": (db.getCubeAutoHybridRankings, "Cube Auto Hybrid Rankings:"),
        "cone_teleop_top": (db.getConeTeleopTopRankings, "Cone Teleop Top Rankings:"),
        "cone_teleop_middle": (db.getConeTeleopMiddleRankings, "Cube Teleop Middle Rankings:"),
        "cone_teleop_hybrid": (db.getConeTeleopHybridRankings, "Cone Teleop Hybrid Rankings:"),
        "cube_teleop_top": (db.getCubeTeleopTopRankings, "Cube Teleop Top Rankings:"),
        "cube_teleop_middle": (db.getCubeTeleopMiddleRankings, "Cube Teleop Middle Rankings:"),
        "cube_teleop_hybrid": (db.getCubeTeleopHybridRankings, "Cube Teleop Hybrid Rankings:"),
        "auto_charge": (db.getChargingPortAuto, "Auto Charging Station Rankings:"),
        "teleop_charge": (db.getChargingPortTeleop, "Teleop Charging Station Rankings:"),
    }
    config = dict(request.form)['config']
    # Unknown configs fall back to the algorithmic default ranking.
    query, name = dispatch.get(config, (db.getConeTeleopRankings, "Cone Teleop Rankings"))
    data = query()
    return render_template("rankings.html",
                           name=name,
                           teams=data,
                           teams_len=len(data)
                           )
@app.route('/findteam', methods=["GET", "POST"])
def findTeam():
    """Render the team-lookup form; on submit, redirect to that team's data page."""
    form = FindTeamForm()
    if not form.is_submitted():
        return render_template("find_team.html", form=form)
    submitted = dict(request.form)
    return redirect(url_for("getTeamData", team=int(submitted["team_number"])))
@app.route('/pit-scouting', methods=["GET", "POST"])
def pitScouting():
    """Pit-scouting form; on POST, validate and upload the robot photo to GCS.

    Rejects non-image extensions and duplicate filenames, staging the file
    locally before uploading to the pit-scouting bucket.
    """
    form = PitScoutingForm()
    if request.method == "POST":
        upload = request.files['image']
        # fix: lower-case the extension so '.JPG'/'.PNG' uploads are accepted;
        # rsplit ignores any dots earlier in the name.
        extension = upload.filename.rsplit('.', 1)[-1].lower()
        if extension not in ('jpg', 'jpeg', 'png'):
            flash('Not a valid extension')
            return render_template("pit_scouting.html", form=form)
        filename = secure_filename(upload.filename)
        local_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        upload.save(local_path)
        storage_client = storage.Client()
        bucket = storage_client.bucket('1261-pit-scouting-images')
        blob = bucket.blob(filename)
        if blob.exists():
            flash('File already exists')
            return render_template("pit_scouting.html", form=form)
        # if_generation_match=0 makes the upload fail rather than overwrite
        # an object created between the exists() check and the upload.
        generation_match_precondition = 0
        blob.upload_from_filename(local_path, if_generation_match=generation_match_precondition)
    return render_template("pit_scouting.html", form=form)
if __name__ == '__main__':
    # Cloud-style entry point: bind all interfaces, honor $PORT (default 8080).
    app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 8080)))
from django.conf import settings
from django.conf.urls.defaults import *
# Legacy (pre-Django 1.6) URLconf: `patterns` with dotted-string view paths.
# NOTE(review): `django.conf.urls.defaults` and `patterns` were removed in
# modern Django; keep as-is only while the project pins an old release.
urlpatterns = patterns('',
    url(r'^$', 'reports.views.index', name='reports_index'),
    url(r'^(?P<quiz_id>\d+)/$', 'reports.views.report_quiz', name='report_quiz'),
    url(r'^(?P<quiz_id>\d+)/(?P<attempt_id>\d+)/$', 'reports.views.report_attempt', name='report_attempt'),
)
|
#!/usr/bin/env python3
"""
index_builder_final.py - Index Builder for Web Search Engine
author: Pablo Caruana
email: pablo dot caruana at gmail dot com
date: 12/1/2016
"""
import json
import nltk
import bs4
import os
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
class Index_Builder:
    """Builds an inverted index for one content chunk produced by the crawler."""

    def __init__(self, chunk_id, content_chunk):
        """
        chunk_id: string id of this chunk; prefixed onto per-document ids
        content_chunk: file name/directory of content chunk get from crawler
        """
        # num_threads (indexing parallelism) is currently unused:
        # self.num_threads = num_threads
        self.chunk_id = chunk_id
        self.content_chunk = content_chunk
def index_builder(self, chunk_data):
"""
Preparation includes:
Strip all html tags from raw content of pages
Concatenate page content and title, tokenize
Create an array of url for each word
Remove all stop words + get frequency of each word in the chunk
"""
word_lists = []
indexed_words = {}
entries = []
stop_words = set(stopwords.words('english'))
stop_words.update('.', '?', '-', '\'', '\:', ';', ',', '!', '<', '>', '%', '$', '\"', '/', '(', ')', '[', ']', '|',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 's', 't', 're', 'm', 'c', 'b', '”', '“', '’')
for entry in chunk_data:
# Strip all html tags
html_doc = entry["html"]
soup = BeautifulSoup(html_doc, 'html.parser')
content = soup.get_text()
# All the text for one entry in the chunk
all_text = entry["title"] + ' ' + content
all_words_in_one_entry = nltk.word_tokenize(all_text.lower())
word_count_in_entry = nltk.FreqDist(all_words_in_one_entry)
unique_words_in_one_entry = set(all_words_in_one_entry)
all_unique_words_in_one_entry = list(unique_words_in_one_entry)
# Array containing tuples of word + word_count + entry_id
entry_array = []
for word in all_unique_words_in_one_entry:
word_positions = []
for pos, w in enumerate(all_words_in_one_entry):
if w == word:
word_positions.append(pos)
word_tuple = (word, word_count_in_entry[word], self.chunk_id + '-' + str(entry["doc_id"]), word_positions)
entry_array.append(word_tuple)
entries += entry_array
word_lists += all_words_in_one_entry
# Remove all the stopwords
unique_words = set(word_lists)
unique_words = unique_words.difference(stop_words)
word_counts = nltk.FreqDist(word_lists)
"""
Indexing includes:
Only choose words with frequency > = 3
Append all the urls into an array for the same word
NOTE: list of urls for each word is already ranked by word frequency in each doc
(should I normalize word frequency?)
indexed_words is a list of all words indexed, along with their corresponding
word_count and list of urls
"""
# Only choose words with frequency count >= 1
for word in unique_words:
indexed_word_info = {}
entry_ids = []
if word_counts[word] >= 1:
indexed_word_info['word_count'] = word_counts[word]
for tup in entries:
if word == tup[0]:
entry_ids.append(tup[1:])
# within each tuple (word_count + entry_id + word_positions),
# rank each document id based on the frequency of word in that document
doc_ids = {}
for en_id in sorted(entry_ids, reverse=True):
doc_ids[en_id[1]] = en_id[2]
# the value of key 'doc_id' is now ranked
indexed_word_info['doc_id'] = doc_ids
if indexed_word_info:
indexed_words[word] = indexed_word_info
return indexed_words
def run(self):
# Start indexing
indexed_words = self.index_builder(self.content_chunk)
# Open new file to write indexed chunk into
file_name = 'sample_files/indexed_files/indexed_' + self.chunk_id + '.json'
with open(file_name, 'w') as indexed_file:
json.dump(indexed_words, indexed_file)
indexed_file.close()
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import sys
import warnings
from dataclasses import dataclass
from typing import List, Mapping
from packaging.version import Version
from pants.base.deprecated import warn_or_error
from pants.base.exception_sink import ExceptionSink
from pants.base.exiter import ExitCode
from pants.engine.env_vars import CompleteEnvironmentVars
from pants.init.logging import initialize_stdio, stdio_destination
from pants.init.util import init_workdir
from pants.option.option_value_container import OptionValueContainer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.util.docutil import doc_url
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
# Pants 2.18 is using a new distribution model, that's supported (sans bugs) in 0.10.0.
MINIMUM_SCIE_PANTS_VERSION = Version("0.10.0")
@dataclass(frozen=True)
class PantsRunner:
    """A higher-level runner that delegates runs to either a LocalPantsRunner or
    RemotePantsRunner."""

    # CLI arguments and process environment for this run.
    args: List[str]
    env: Mapping[str, str]

    # This could be a bootstrap option, but it's preferable to keep these very limited to make it
    # easier to make the daemon the default use case. Once the daemon lifecycle is stable enough we
    # should be able to avoid needing to kill it at all.
    def will_terminate_pantsd(self) -> bool:
        """Return True when any requested goal shuts the pantsd daemon down."""
        _DAEMON_KILLING_GOALS = frozenset(["kill-pantsd", "clean-all"])
        return not frozenset(self.args).isdisjoint(_DAEMON_KILLING_GOALS)

    def _should_run_with_pantsd(self, global_bootstrap_options: OptionValueContainer) -> bool:
        """Decide whether this run should be delegated to the pantsd daemon."""
        terminate_pantsd = self.will_terminate_pantsd()
        if terminate_pantsd:
            logger.debug(f"Pantsd terminating goal detected: {self.args}")
        # If we want concurrent pants runs, we can't have pantsd enabled.
        return (
            global_bootstrap_options.pantsd
            and not terminate_pantsd
            and not global_bootstrap_options.concurrent
        )

    @staticmethod
    def scrub_pythonpath() -> None:
        """Remove PYTHONPATH from the environment before spawning subprocesses."""
        # Do not propagate any PYTHONPATH that happens to have been set in our environment
        # to our subprocesses.
        # Note that don't warn (but still scrub) if RUNNING_PANTS_FROM_SOURCES is set. This allows
        # scripts that run pants directly from sources, and therefore must set PYTHONPATH, to mute
        # this warning.
        pythonpath = os.environ.pop("PYTHONPATH", None)
        if pythonpath and not os.environ.pop("RUNNING_PANTS_FROM_SOURCES", None):
            logger.debug(f"Scrubbed PYTHONPATH={pythonpath} from the environment.")

    def run(self, start_time: float) -> ExitCode:
        """Bootstrap options/stdio, enforce the scie launcher policy, then
        delegate to RemotePantsRunner (pantsd) or LocalPantsRunner."""
        self.scrub_pythonpath()
        options_bootstrapper = OptionsBootstrapper.create(
            env=self.env, args=self.args, allow_pantsrc=True
        )
        with warnings.catch_warnings(record=True):
            bootstrap_options = options_bootstrapper.bootstrap_options
            global_bootstrap_options = bootstrap_options.for_global_scope()
        # We enable logging here, and everything before it will be routed through regular
        # Python logging.
        stdin_fileno = sys.stdin.fileno()
        stdout_fileno = sys.stdout.fileno()
        stderr_fileno = sys.stderr.fileno()
        with initialize_stdio(global_bootstrap_options), stdio_destination(
            stdin_fileno=stdin_fileno,
            stdout_fileno=stdout_fileno,
            stderr_fileno=stderr_fileno,
        ):
            # Environment flags set by the `pants` scie launcher binary.
            run_via_scie = "SCIE" in os.environ
            enable_scie_warnings = "NO_SCIE_WARNING" not in os.environ
            scie_pants_version = os.environ.get("SCIE_PANTS_VERSION")
            if enable_scie_warnings:
                if not run_via_scie:
                    raise RuntimeError(
                        softwrap(
                            f"""
                            The `pants` launcher binary is now the only supported way of running Pants.
                            See {doc_url("installation")} for details.
                            """
                        ),
                    )
                if run_via_scie and (
                    # either scie-pants is too old to communicate its version:
                    scie_pants_version is None
                    # or the version itself is too old:
                    or Version(scie_pants_version) < MINIMUM_SCIE_PANTS_VERSION
                ):
                    current_version_text = (
                        f"The current version of the `pants` launcher binary is {scie_pants_version}"
                        if scie_pants_version
                        else "Run `PANTS_BOOTSTRAP_VERSION=report pants` to see the current version of the `pants` launcher binary"
                    )
                    warn_or_error(
                        "2.18.0.dev6",
                        f"using a `pants` launcher binary older than {MINIMUM_SCIE_PANTS_VERSION}",
                        softwrap(
                            f"""
                            {current_version_text}, and see {doc_url("installation")} for how to upgrade.
                            """
                        ),
                    )
            # N.B. We inline imports to speed up the python thin client run, and avoids importing
            # engine types until after the runner has had a chance to set __PANTS_BIN_NAME.
            if self._should_run_with_pantsd(global_bootstrap_options):
                from pants.bin.remote_pants_runner import RemotePantsRunner
                try:
                    remote_runner = RemotePantsRunner(self.args, self.env, options_bootstrapper)
                    return remote_runner.run(start_time)
                except RemotePantsRunner.Fallback as e:
                    # Daemon unavailable/unusable: fall through to local mode.
                    logger.warning(f"Client exception: {e!r}, falling back to non-daemon mode")
            from pants.bin.local_pants_runner import LocalPantsRunner
            # We only install signal handling via ExceptionSink if the run will execute in this process.
            ExceptionSink.install(
                log_location=init_workdir(global_bootstrap_options), pantsd_instance=False
            )
            runner = LocalPantsRunner.create(
                env=CompleteEnvironmentVars(self.env),
                working_dir=os.getcwd(),
                options_bootstrapper=options_bootstrapper,
            )
            return runner.run(start_time)
|
import requests
from lxml import html
import csv
import time
import argparse
class WeatherStation:
    """Polls a LAN weather station's live-data page, relays readings to the
    Weather Underground PWS endpoint, and appends them to a local CSV log.

    Fixed for Python 3: `print` statements became print() calls and the CSV
    file is opened in text-append mode with newline='' (was binary 'ab',
    which csv.writer rejects on Python 3).
    """
    def __init__(self, filename):
        # Last-seen readings (strings scraped from the station's HTML form).
        self.wind_speed = 0
        self.wind_dir = 0
        self.wind_gust = 0
        self.temp = 0
        self.humidity = 0
        self.node = 0
        self.data = []
        self.payload = {}
        self.data_time = 0
        # LAN address of the station's live-data page.
        self.net_address = "http://192.168.137.99/livedata.htm"
        self.datafile = 0
        self.datawriter = 0
        self.file_time = time.localtime()
        # Base name for the CSV log; save_data() appends ".csv".
        self.filename = filename
        self.uploader = 0
    def get_data(self):
        """Scrape current readings out of the livedata form's input fields."""
        self.response = requests.get(self.net_address)
        self.tree = html.fromstring(self.response.text)
        # (The duplicated CurrTime scrape in the original was redundant.)
        self.node = self.tree.xpath("//input[@name='CurrTime']")[0]
        self.data_time = self.node.get("value")
        self.node = self.tree.xpath("//input[@name='avgwind']")[0]
        self.wind_speed = self.node.get("value")
        self.node = self.tree.xpath("//input[@name='windir']")[0]
        self.wind_dir = self.node.get("value")
        self.node = self.tree.xpath("//input[@name='gustspeed']")[0]
        self.wind_gust = self.node.get("value")
        self.node = self.tree.xpath("//input[@name='outTemp']")[0]
        self.temp = self.node.get("value")
        self.node = self.tree.xpath("//input[@name='outHumi']")[0]
        self.humidity = self.node.get("value")
    def upload_data(self):
        """POST current readings to the Wunderground PWS update endpoint.

        SECURITY NOTE(review): station ID and password are hard-coded here;
        they should be moved to configuration.
        """
        self.payload = {
            "action" : "updateraw",
            "ID" : "KCOBOULD381",
            "PASSWORD" : "bv4te8rq",
            "dateutc" : "now",
            "winddir" : self.wind_dir,
            "windspeedmph" : self.wind_speed,
            "windgustmph" : self.wind_gust,
            "tempf" : self.temp,
            "humidity" : self.humidity
        }
        self.uploader = requests.get("https://weatherstation.wunderground.com/weatherstation/updateweatherstation.php", params=self.payload)
        # print self.uploader.text
    def save_data(self):
        """Append one CSV row: time, wind speed, wind direction, temperature."""
        self.data = [self.data_time, self.wind_speed, self.wind_dir, self.temp]
        # Text append mode + newline='' is the csv-module requirement on py3.
        with open(self.filename + ".csv", 'a', newline='') as self.datafile:
            self.datawriter = csv.writer(self.datafile)
            self.datawriter.writerow(self.data)
    def display(self):
        """Print a one-line summary of the latest reading."""
        print(self.data_time, " || ", self.temp, " || ", self.wind_speed)
# Command-line driver: poll the station once a minute for --duration minutes.
parser = argparse.ArgumentParser()
# default=1 so a missing -d no longer makes `tick < args.duration` compare
# against None and raise TypeError.
parser.add_argument("-d", "--duration", help="Duration of data collection in min", type=int, default=1)
parser.add_argument("-f", "--filename", help="File name for the resulting csv data file")
args = parser.parse_args()
if not args.filename:
    file_time = time.localtime()
    # No '.csv' suffix here: WeatherStation.save_data appends the extension
    # itself (the old default produced 'winddata_(...).csv.csv').
    args.filename = 'winddata_(%d-%d-%d)' % (file_time.tm_year, file_time.tm_mon, file_time.tm_mday)
station = WeatherStation(args.filename)
tick = 0
while tick < args.duration:
    starttime = time.time()
    station.get_data()
    tick = tick + 1
    station.upload_data()
    station.display()
    station.save_data()
    # Clamp at zero so an iteration that took >60s never passes a negative
    # value to time.sleep (which raises ValueError).
    time.sleep(max(0, 60 - (time.time() - starttime)))
# -*- coding: utf-8 -*-
"""
Define Haar feature properties.
@author: peter
"""
#import cv2
import numpy as np
from haars import *
class Haar(object):
    """One Haar-like feature: a sign pattern tiled into square pixel blocks.

    pattern   : 2-D array of +1/-1 block signs
    upperleft : (row, col) of the feature's top-left pixel
    blockSize : side length, in pixels, of each pattern block
    """
    def __init__(self, pattern, upperleft, blockSize):
        self.pattern = pattern
        self.upperleft = upperleft
        self.blockSize = blockSize
        self.pattern_shape = np.shape(pattern)
        # Overall pixel extent: pattern grid scaled by the block size.
        self.shape = (self.pattern_shape[0] * blockSize,
                      self.pattern_shape[1] * blockSize)
    def getBlockProp(self):
        """Return [(((top, left), (bottom, right)), sign), ...] per block,
        with inclusive pixel corner coordinates."""
        n_rows, n_cols = self.pattern_shape
        blocks = []
        for r in range(n_rows):
            for c in range(n_cols):
                top = self.upperleft[0] + r * self.blockSize
                left = self.upperleft[1] + c * self.blockSize
                corners = ((top, left),
                           (top + self.blockSize - 1, left + self.blockSize - 1))
                blocks.append((corners, self.pattern[r][c]))
        return blocks
    def get_feature_val(self, integral_image):
        """Evaluate the feature on an integral image (summed-area table),
        weighting each polarity's sum by its block count."""
        pos_sum, neg_sum = 0, 0
        pos_cnt, neg_cnt = 0, 0
        for (ul, lr), sign in self.getBlockProp():
            r0, c0 = ul
            # Standard 4-corner summed-area lookup; corners outside the image
            # (row/col -1) contribute zero.
            block_total = integral_image[lr]
            if r0 > 0 and c0 > 0:
                block_total += integral_image[r0 - 1][c0 - 1]
            if c0 > 0:
                block_total -= integral_image[lr[0]][c0 - 1]
            if r0 > 0:
                block_total -= integral_image[r0 - 1][lr[1]]
            if sign == 1:
                pos_sum += block_total
                pos_cnt += 1
            else:
                neg_sum += block_total
                neg_cnt += 1
        return (pos_sum * pos_cnt - neg_sum * neg_cnt) / (pos_cnt + neg_cnt)
def get_haars(img_shape, startpoint=(0, 0), windowSize=1,
              blockSize0=1, blockSize_intvl=1, blockSize_loop=256):
    """Enumerate every Haar feature for an image: each base pattern
    (module-level haar0..haarN from `haars`) is scaled and translated."""
    collected = []
    for idx in range(haar_cnt):
        base_pattern = np.array(globals()['haar' + str(idx)])
        seed_haar = Haar(base_pattern, startpoint, blockSize0)
        collected.extend(get_augumented_haars(seed_haar, img_shape, windowSize,
                                              blockSize_intvl, blockSize_loop))
    return collected
def get_augumented_haars(haar, img_shape, windowSize, blockSize_intvl, blockSize_loop):
    """Scale one base Haar feature through every block size that still fits
    below/right of its upper-left corner, then translate each scale."""
    row_res = img_shape[0] - haar.upperleft[0]
    col_res = img_shape[1] - haar.upperleft[1]
    # How many size increments keep the feature inside the image, capped by
    # blockSize_loop.
    max_row_ups = (row_res - haar.shape[0]) // (blockSize_intvl * haar.pattern_shape[0]) + 1
    max_col_ups = (col_res - haar.shape[1]) // (blockSize_intvl * haar.pattern_shape[1]) + 1
    size_ups = min(blockSize_loop, max_row_ups, max_col_ups)
    augmented = []
    for step in range(size_ups):
        scaled = Haar(haar.pattern, haar.upperleft,
                      haar.blockSize + step * blockSize_intvl)
        augmented.extend(get_mapped_haars(scaled, row_res, col_res, windowSize))
    return augmented
def get_mapped_haars(this_haar, row_res, col_res, windowSize):
    """Translate a fixed-size Haar feature over the residual image area in
    windowSize strides, column-major, returning one Haar per position."""
    row_iters = (row_res - this_haar.shape[0]) // windowSize + 1
    col_iters = (col_res - this_haar.shape[1]) // windowSize + 1
    return [
        Haar(this_haar.pattern,
             (this_haar.upperleft[0] + row * windowSize,
              this_haar.upperleft[1] + col * windowSize),
             this_haar.blockSize)
        for col in range(col_iters)
        for row in range(row_iters)
    ]
from django.urls import path
from django.conf.urls import url
from . import views
from .views import *
# URL namespace, e.g. reverse('posts:post_detail').
app_name = 'posts'
urlpatterns = [
    # Post CRUD (post_detail and CategoryView are function-based views).
    path('subir_post/', PostCreateView.as_view(), name = 'post_create'),
    path('<int:pk>/', views.post_detail, name='post_detail'),
    path('category/<str:cats>', views.CategoryView, name = 'category'),
    #path('<int:pk>/edit/', PostCreateView.as_view(), name='post_create'),
    path('<int:pk>/edit/', PostUpdateView.as_view(), name='post_edit'),
    path('<int:pk>/delete/', PostDeleteView.as_view(), name='post_delete'),
    # Comment management on a single article.
    path('article/<int:pk>/comment/', AddCommentView.as_view(), name='add_comment'),
    path('article/<int:pk>/del_comment/', DelCommentView.as_view(), name='del_comment'),
    path('article/<int:pk>/edit_comment/', UpDateCommentView.as_view(), name='edit_comment'),
    #path('MyPosts/<int:pk>', views.MyPostsView, name = 'myposts'),
    # Monthly archive, e.g. /2021/march/.
    path('<int:year>/<str:month>/',
         PostMonthArchiveView.as_view(),
         name="archive_month"),
]
|
import pandas
import webbrowser
import os
import numpy as np
# dtype={'userId': np.int32, 'movieId': np.int32, 'rating': np.uint8}
# Read the dataset into a data table using Pandas
# Read the dataset into a data table using Pandas; explicit dtypes keep the id
# columns at 32 bits and ratings at one byte to reduce memory use.
data_table = pandas.read_csv("review_matrix.csv", dtype={'userId': np.int32, 'movieId': np.int32, 'rating': np.uint8})
# clean the data
#data_table.drop('timestamp', axis=1, inplace=True)
#data_table.to_csv("ratings.csv", na_rep="")
# Create a web page view of the data for easy viewing
html = data_table.to_html()
# Save the html to a temporary file (utf-8 so to_html's entities round-trip)
with open("review_matrix.html", "w", encoding='utf-8') as f:
    f.write(html)
# Open the web page in our web browser
full_filename = os.path.abspath("review_matrix.html")
webbrowser.open("file://{}".format(full_filename))
import numpy as np
import pandas as pd
def process_data(data, timePortion) :
data = data.iloc[::-1]
size = int(len(data) / len(data.columns))
trainX = []
trainY = []
features = []
days = []
for i in range(0, size):
v = float(data['Close'][i].replace('$','').replace(' ',''))
features.append(v)
days.append(data['Date'][i])
# Scale the values
scaledData = minMaxScaler(features, np.min(features), np.max(features))
scaledFeatures = scaledData["data"]
try :
for i in range(timePortion, timePortion + size) :
for j in range(i - timePortion, i):
trainX.append(scaledFeatures[j])
trainY.append(scaledFeatures[i])
except Exception as ex:
print(ex)
return {
"size": (size - timePortion),
"timePortion": timePortion,
"trainX": trainX,
"trainY": trainY,
"min": scaledData["min"],
"max": scaledData["max"],
"originalData": features,
'date' : days
}
def minMaxScaler(data, min, max):
    """Linearly scale *data* into [0, 1] given its min/max bounds.

    NOTE: the parameter names shadow the builtins; they are kept unchanged
    for compatibility with existing keyword callers.
    Returns a dict carrying the scaled data plus the bounds, so the inverse
    transform can be applied later.
    """
    span = max - min
    return {
        "data": (data - min) / span,
        "min": min,
        "max": max,
    }
def minMaxInverseScaler(data, min, max):
    """Undo minMaxScaler: map values in [0, 1] back to the original range.

    Parameter names shadow the builtins but are kept for API compatibility.
    """
    restored = data * (max - min) + min
    return {
        "data": restored,
        "min": min,
        "max": max,
    }
def generate(filename, timeportion):
    """Load *filename* as CSV, run process_data with the given window length,
    print the resulting dict and return it."""
    v = pd.read_csv(filename)
    ret = process_data(v, timeportion)
    print (ret)
    return ret
if __name__ == "__main__":
    # BUGFIX: generate() requires a window length; it was previously called
    # with a single argument, raising TypeError at startup.  7 gives a
    # one-week sliding window.
    v = generate('./appl.csv', 7)
    print (v)
|
from django.shortcuts import render
from django.http import HttpResponse
from website import forms
def index(request):
    """Render the store-management dashboard: both data tables plus blank
    order and product forms."""
    from website.models import Product, Order
    context = {
        "orderTable": Order.objects.values(),
        "productTable": Product.objects.values(),
        "newEditTransactionsForm": forms.newEditTransactionsForm(),
        "newEditStockForm": forms.newEditStockForm(),
    }
    return render(request, 'storeManagement/management.html', context)
def saveOrder(request):
    """Create a new Order from the transactions form.

    Mirrors saveProduct: 'saved!' on success, the form errors when validation
    fails, 'error!' for non-POST requests.  (Previously validation failures
    and non-POST requests could fall through and return None, which Django
    reports as a server error.)
    """
    from website.models import Order
    if request.method == 'POST':
        formset = forms.newEditTransactionsForm(request.POST, request.FILES)
        if formset.is_valid():
            formset.save()
            return HttpResponse('saved!')
        else:
            return HttpResponse(str(formset.errors))
    return HttpResponse('error!')
def saveProduct(request):
    """Persist a new Product from the stock form; echo validation errors,
    or 'error!' for non-POST requests."""
    from website.models import Product
    if request.method != 'POST':
        return HttpResponse('error!')
    form = forms.newEditStockForm(request.POST, request.FILES)
    if not form.is_valid():
        return HttpResponse(str(form.errors))
    form.save()
    return HttpResponse('saved!')
def editOrder(request):
    """Update an existing Order (pk in 'hiddenInput') from the form data.

    BUGFIX: request.POST is an immutable QueryDict — assigning into it raises
    AttributeError — so the date is parsed into a mutable copy instead.  The
    parsing also now happens only for POST requests (it previously ran before
    the method check and crashed plain GETs with a KeyError).
    """
    from website.models import Order
    from dateutil.parser import parse
    if request.method == 'POST':
        data = request.POST.copy()
        # Normalize the submitted date string into a datetime for the form.
        data['order_date'] = parse(data['order_date'])
        idToEdit = int(data['hiddenInput'])
        instance = Order.objects.get(pk=idToEdit)
        f = forms.newEditTransactionsForm(data, request.FILES, instance=instance)
        if f.is_valid():
            f.save()
            return HttpResponse('saved!')
        else:
            return HttpResponse(str(f.errors))
    return HttpResponse('error!')
def editProduct(request):
    """Apply the stock form to the existing Product named by 'hiddenInput';
    echo 'saved!', the form errors, or 'error!' for non-POST requests."""
    from website.models import Product
    if request.method != 'POST':
        return HttpResponse('error!')
    product = Product.objects.get(pk=int(request.POST['hiddenInput']))
    form = forms.newEditStockForm(request.POST, request.FILES, instance=product)
    if not form.is_valid():
        return HttpResponse(str(form.errors))
    form.save()
    return HttpResponse('saved!')
def removeOrder(request):
    """Delete the Order whose pk arrives in the 'hiddenInput' POST field."""
    from website.models import Order
    if request.method != 'POST':
        return HttpResponse('error!')
    Order.objects.get(pk=int(request.POST['hiddenInput'])).delete()
    return HttpResponse("saved!")
def removeProduct(request):
    """Delete the Product whose pk arrives in the 'hiddenInput' POST field."""
    from website.models import Product
    if request.method != 'POST':
        return HttpResponse('error!')
    Product.objects.get(pk=int(request.POST['hiddenInput'])).delete()
    return HttpResponse("saved!")
import bfs
import fire
# proceed from top-left to bottom right
def run(maze, flammability):
    """Strategy: re-plan a BFS path after every step while the fire spreads.

    Returns True if the agent reaches the bottom-right cell, False if it is
    burned or cut off, None if no initial path exists.
    NOTE(review): assumes bfs.bfs fills `path` in place so that path[1] is the
    agent's next cell — confirm against bfs's contract; a returned path with
    fewer than two cells would raise IndexError here.
    """
    start = (0, 0)
    end = (maze.height-1, maze.width-1)
    # setup the fire
    fire.add_fire(maze, flammability)
    # calculate the first path
    path = []
    bfs.bfs(path, maze, start, end)
    if len(path) == 0: # No path found
        return None
    # step on the path, spread fire, then create a new path
    while True:
        # if we stepped into fire, fail (0 marks an open cell)
        if maze.maze[ path[1][0] ][ path[1][1] ] != 0:
            return False
        # if we reached the end, pass
        if path[1] == end:
            return True
        # spread the fire
        fire.advance_fire_one_step(maze)
        # if fire moved onto us, fail
        if maze.maze[ path[1][0] ][ path[1][1] ] != 0:
            return False
        # recalculate path from the cell we now occupy
        bfs.bfs(path, maze, path[1], end)
        if len(path) == 0: # No path found
            return False
    # unreachable: the loop above only exits via return
    return
# example usage of strategy_2.py
"""
import maze
m = maze.Maze(10, 10, .15)
print( run(m, .5) )
""" |
from django.shortcuts import render
from django.views.generic import TemplateView, ListView, DetailView, DeleteView
from django.views.generic.edit import CreateView, UpdateView, FormView
from django.contrib.auth.forms import UserCreationForm, User
from django.urls import reverse_lazy, reverse
from rest_framework.response import Response
from sherlock.forms import ContactForm, BirthdateSearchForm
from rest_framework.views import APIView
from sherlock.models import Profile, About, Relative, Image
from haystack.generic_views import FacetedSearchView
from haystack.query import SearchQuerySet
from haystack.utils import Highlighter
from haystack.forms import SearchForm
from django.core.paginator import InvalidPage, Paginator
from django.conf import settings
from django.core.mail import send_mail
class UserCreateView(CreateView):
    """Sign-up view: creates a Django User then redirects to the login page."""
    model = User
    form_class = UserCreationForm
    success_url = reverse_lazy("login")
class IndexView(TemplateView):
    """Static landing page."""
    template_name = "index.html"
class ProfileDetailView(DetailView):
    """Read-only display of a single Profile (pk taken from the URL)."""
    model = Profile
class MyProfileDetailView(ProfileDetailView):
    """ProfileDetailView variant that always shows the logged-in user's own
    profile instead of resolving one from the URL."""
    def get_object(self):
        # Profile is reached through the reverse one-to-one on request.user.
        return self.request.user.profile
class ProfileUpdateView(UpdateView):
    """Let a user edit their own Profile's basic fields."""
    model = Profile
    # success_url = reverse_lazy('profile_detail_view')
    fields = ('picture', 'first_name', 'middle_name', 'last_name', 'gender', )
    success_url = reverse_lazy('my_profile_detail_view')
    def get_queryset(self):
        # Restrict editing to the requesting user's own profile.
        return Profile.objects.filter(user=self.request.user)
class AboutUpdateView(UpdateView):
    """Edit the requesting user's About record, then return to the profile.

    NOTE(review): the class-level success_url is shadowed by get_success_url
    below, so it is effectively dead — confirm before removing.
    """
    model = About
    success_url = reverse_lazy('profile_detail_view')
    fields = ('birthdate', 'city_of_birth', 'state_of_birth',
        'country_of_birth', 'sex_at_birth', 'eye_color', 'mother_first_name', 'mother_maiden_name',
        'mother_last_name', 'father_first_name', 'father_last_name', 'birth_hospital', 'searching_for',
        'biography',)
    def get_queryset(self):
        # Users may only edit their own About record.
        return About.objects.filter(user=self.request.user)
    def get_success_url(self, **kwargs):
        # Redirect back to the profile page that was being edited.
        return reverse_lazy('profile_detail_view', args=[int(self.kwargs['pk'])])
class ContactUsView(TemplateView):
    """Render the contact page with a blank ContactForm."""
    template_name = "contact.html"
    def get_context_data(self):
        context = super().get_context_data()
        context["form"] = ContactForm()
        return context
class SendEmailView(FormView):
    """Handle ContactForm submissions: send the email, then redirect home."""
    form_class = ContactForm
    success_url = reverse_lazy("index_view")
    def form_valid(self, form):
        form.send_email()
        return super().form_valid(form)
class BirthdateSearchView(SearchForm):
    # NOTE(review): subclasses haystack's SearchForm yet is named and
    # configured like a view (form_class attribute) — likely intended to
    # extend a haystack SearchView; confirm how urls.py wires this in.
    form_class = BirthdateSearchForm
class AboutUsView(TemplateView):
    """Static about page."""
    template_name = "about.html"
class ImageAllView(TemplateView):
    """Gallery page listing every Image in the system."""
    template_name = "images.html"
    def get_context_data(self):
        context = super().get_context_data()
        context["image"] = Image.objects.all()
        return context
class ImageAddView(CreateView):
    """Upload a new Image owned by the requesting user."""
    model = Image
    fields = ('picture', 'description', )
    def form_valid(self, form):
        # Attach the uploader before the parent class saves the form.
        instance = form.save(commit=False)
        instance.user = self.request.user
        return super().form_valid(form)
    def get_success_url(self, *args, **kwargs):
        return reverse('profile_detail_view', args=[self.request.user.profile.id])
class ImageUpdateView(UpdateView):
    """Edit an Image; restricted to images the requesting user owns."""
    model = Image
    fields = ('picture', 'description', )
    def get_queryset(self):
        return Image.objects.filter(user=self.request.user)
    def get_success_url(self, *args, **kwargs):
        return reverse('profile_detail_view', args=[self.request.user.profile.id])
class ImageDeleteView(DeleteView):
    """Delete an Image; restricted to images the requesting user owns."""
    model = Image
    fields = ('picture', 'description', )
    def get_queryset(self):
        return Image.objects.filter(user=self.request.user)
    def get_success_url(self, *args, **kwargs):
        return reverse('profile_detail_view', args=[self.request.user.profile.id])
|
# -*- coding: utf-8 -*-
import serial, codecs, time
port = None
def writeAndRead(port,data):
    """One-shot exchange: open *port* (9600 8N1), write *data*, read a
    24-byte reply, close the handle, and return the reply.

    Uses its own Serial handle, independent of the module-level `port`.
    Both directions are hex-dumped for debugging.
    NOTE(review): the fixed 0.5 s pause assumes the device answers within
    that window — confirm against the device's timing specification.
    """
    ser = serial.Serial(port=port,baudrate=9600,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,bytesize=serial.EIGHTBITS)
    ser.flush()
    print(codecs.encode(bytes(data),'hex'))
    ser.write(bytes(data))
    time.sleep(0.5)
    response = ser.read(24)
    print(codecs.encode(response,'hex'))
    ser.close()
    return response
def open(p):
    """Open the module-level serial port on device *p* (9600 8N1).

    NOTE: intentionally shadows the builtin open() within this module —
    kept for API compatibility with existing callers.
    """
    global port
    port = serial.Serial(port=p,baudrate=9600,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,bytesize=serial.EIGHTBITS)
    port.flush()
def close():
    """Close the module-level serial port if open; safe to call repeatedly."""
    global port
    if port:
        port.close()
        port = None
def write(data):
    """Send *data* (coerced to bytes) over the open module-level port.

    The `global` statement is unnecessary for reading a module global, so it
    is omitted here.
    """
    port.write(bytes(data))
def readS(size):
    """Read up to *size* bytes from the module-level port and return them."""
    chunk = port.read(size)
    return chunk
def read():
    """Drain and return whatever is currently buffered on the port."""
    pending = port.inWaiting()
    return port.read(pending)
|
import sys
import random
import numpy as np
from shapely.geometry import Polygon, MultiPolygon, LineString, MultiLineString, Point, LinearRing
from shapely.ops import polygonize, cascaded_union
from scipy.spatial.qhull import Delaunay
from crowddynamics.simulation.agents import Agents, AgentGroup, Circular
from crowddynamics.core.geometry import geom_to_linear_obstacles
from crowddynamics.core.sampling import triangle_area_cumsum, random_sample_triangle
from crowddynamics.core.vector2D import length
from crowddynamics.core.distance import distance_circle_line, distance_circles
# Use the finlandia_talo_ga file, which contains the class for FinlandiaTalo2ndFloor, used with the genetic algorithm
from simple_scenario_ga_stochastic import SimpleScenarioFloor
# A helper function to create spawn points for leaders out of their cell coordinates.
# The input is the array of leader spawn cells and the number of leaders in the simulation.
#def generate_leader_pos(cell, n_lead, seed_number):
def generate_leader_pos(self, cell, n_lead):
    """Convert leader grid-cell indices into concrete non-overlapping spawn points.

    cell: sequence of grid-cell indices (one per leader) on the 3 m grid laid
        over the 30 m x 30 m simple-scenario floor.
    n_lead: number of leaders to place.
    Returns a list of [x, y] spawn coordinates, rejection-sampled so guides do
    not overlap each other, obstacles, or the pre-saved follower agents.
    NOTE(review): `self` is unused — the commented-out signature in the source
    shows an earlier module-level form (cell, n_lead, seed_number); confirm how
    callers invoke this.
    """
    # Load data of followers
    # NOTE(review): assumes the .npy file is a structured array with
    # 'position' and 'radius' fields — confirm against agents.py.
    followers = np.load('agents_initialization_simple.npy')
    follower_positions = followers['position']
    follower_radii = followers['radius']
    # Minimal radius of a guide (the same value given in agents.py to the guides).
    max_r = 0.27
    # Number of times spawned leaders are allowed to overlap each other before the program is
    # terminated.
    overlaps = 10000
    # Import Simple Scenario floor field
    field = SimpleScenarioFloor().field
    # Bound box representing the room. Used later in making Voronoi tessalation.
    width = 30
    height = 30
    # Create a grid structure over the room geometry.
    # Cell size in the grid, determines the resolution of the micro-macro converted data
    cell_size = 3
    m = np.round(width / cell_size)
    n = np.round(height / cell_size)
    m = m.astype(int)
    n = n.astype(int)
    X = np.linspace(0, width, m + 1)
    Y = np.linspace(0, height, n + 1)
    hlines = [((x1, yi), (x2, yi)) for x1, x2 in zip(X[:-1], X[1:]) for yi in Y]
    vlines = [((xi, y1), (xi, y2)) for y1, y2 in zip(Y[:-1], Y[1:]) for xi in X]
    grids = list(polygonize(MultiLineString(hlines + vlines)))
    # Leaders' spawn areas
    leader_spawns = []
    # Leader's spawn points
    spawn_points = []
    # Loop through the cells and calculate intersections with spawn areas.
    for i in range(n_lead):
        poly = field.domain.intersection(grids[cell[i]])
        if not poly.is_empty:
            leader_spawns.append(poly)
    # Import obstacles
    obstacles = field.obstacles
    # Spawn a random position from the starting area.
    # Loop through all the leaders.
    # (1) Take into account that there might be obstacles in the spawn areas, and take also
    # into account that agents have a buffer radius.
    # (2) If the spawn area is a MultiPolygon, loop through the polygons in a MultiPolygon. Create a
    # mesh grid of the spawn area with Delaunay triangulation.
    # (2.1) Spawn a random point from the mesh grid.
    # (2.2) Check that the position doesn't interfere with other agents' positions
    # (2.3) Set the Boolean value for if the leader is initially inside the Finlandiahall
    # (this is needed for the movement simulation).
    # (3) If the spawn area is not a MultiPolygon, just directly create a mesh grid of the spawn area
    # with Delaunay triangulation.
    # (3.1) Spawn a random point from the mesh grid.
    # (3.2) Check that the position doesn't interfere with other agents' positions
    # (3.3) Set the Boolean value for if the leader is initially inside the Finlandiahall (this is
    # is needed for the movement simulation).
    for i in range(n_lead):
        seed = 0
        # (1)
        n_spawnpoints = len(spawn_points)
        geom = leader_spawns[i] - obstacles.buffer(max_r)
        j = 0  # set overlaps counter to zero
        # (2)
        if isinstance(geom, MultiPolygon):
            n_polygons = len(geom)
            # NOTE(review): this loop reuses `j`, so the attempt counter below
            # starts at n_polygons - 1, not 0 — confirm whether intentional.
            for j in range(n_polygons):
                vertices = np.asarray(geom[j].convex_hull.exterior)
                delaunay = Delaunay(vertices)
                mesh = vertices[delaunay.simplices]
                if j == 0:
                    meshes = mesh
                else:
                    meshes = np.concatenate((mesh, meshes), axis=0)
            # Computes cumulative sum of the areas of the triangle mesh.
            weights = triangle_area_cumsum(meshes)
            weights /= weights[-1]
            while j < overlaps:
                seed += 1
                distances = []  # temporarily store distances from the spawned point to the previously spawned
                n_overlaps = 0  # for each attempt to position the guide, set number of overlaps to zero
                # (2.1) Spawn a random point for the guide.
                np.random.seed(seed)
                x = np.random.random()
                k = np.searchsorted(weights, x)
                a, b, c = meshes[k]
                # spawn_point = random_sample_triangle(a, b, c)
                spawn_point = random_sample_triangle(a, b, c)
                # (2.2)
                if n_spawnpoints != 0:  # if there are no other spawned guides skip this step
                    for k in range(0, n_spawnpoints):
                        d = length(spawn_point - spawn_points[k])
                        h = d - 2 * max_r
                        distances.append(h)
                    distances_array = distances
                    distances_array = np.asarray(distances_array)
                    n_overlaps += len(np.where(distances_array < 0)[0])
                # Check clearance against every obstacle segment.
                for obstacle in obstacles:
                    obstacle = list(obstacle.coords)
                    n_obstacle_points = len(obstacle)
                    for k in range(0, n_obstacle_points):
                        if k == n_obstacle_points - 1:
                            h, _ = distance_circle_line(spawn_point, max_r, np.asarray(obstacle[k]),
                                                        np.asarray(obstacle[0]))
                        else:
                            h, _ = distance_circle_line(spawn_point, max_r, np.asarray(obstacle[k]),
                                                        np.asarray(obstacle[k + 1]))
                        if h < 0.0:
                            n_overlaps += 1
                # Check clearance against every previously saved follower.
                for agent in range(len(follower_radii)):
                    h, _ = distance_circles(follower_positions[agent], follower_radii[agent], spawn_point, max_r)
                    if h < 0.0:
                        n_overlaps += 1
                if n_overlaps == 0:
                    # (2.3)
                    # Append the point to spawn points
                    spawn_points.append([spawn_point[0], spawn_point[1]])
                    # print("Guide spawned")
                    # sys.stdout.flush()
                    break
                j += 1
            if j == overlaps:
                raise Exception('Leaders do not fit in the cell')
        # (3)
        else:
            vertices = np.asarray(geom.convex_hull.exterior)
            delaunay = Delaunay(vertices)
            mesh = vertices[delaunay.simplices]
            weights = triangle_area_cumsum(mesh)
            weights /= weights[-1]
            while j < overlaps:
                seed += 1
                distances = []  # temporarily store distances from the spawned point to the previously spawned
                n_overlaps = 0  # for each attempt to position the guide, set number of overlaps to zero
                # (3.1) Spawn a random point for the guide
                np.random.seed(seed)
                x = np.random.random()
                k = np.searchsorted(weights, x)
                a, b, c = mesh[k]
                # spawn_point = random_sample_triangle(a, b, c)
                spawn_point = random_sample_triangle(a, b, c)
                # (3.2)
                if n_spawnpoints != 0:
                    for k in range(0, n_spawnpoints):
                        d = length(spawn_point - spawn_points[k])
                        h = d - 2 * max_r
                        distances.append(h)
                    distances_array = distances
                    distances_array = np.asarray(distances_array)
                    n_overlaps += len(np.where(distances_array < 0)[0])
                for obstacle in obstacles:
                    obstacle = list(obstacle.coords)
                    n_obstacle_points = len(obstacle)
                    for k in range(0, n_obstacle_points):
                        if k == n_obstacle_points - 1:
                            h, _ = distance_circle_line(spawn_point, max_r, np.asarray(obstacle[k]),
                                                        np.asarray(obstacle[0]))
                        else:
                            h, _ = distance_circle_line(spawn_point, max_r, np.asarray(obstacle[k]),
                                                        np.asarray(obstacle[k + 1]))
                        if h < 0.0:
                            n_overlaps += 1
                for agent in range(len(follower_radii)):
                    h, _ = distance_circles(follower_positions[agent], follower_radii[agent], spawn_point, max_r)
                    if h < 0.0:
                        n_overlaps += 1
                if n_overlaps == 0:
                    # (3.3)
                    # Append the point to spawn points
                    spawn_points.append([spawn_point[0], spawn_point[1]])
                    # print("Guide spawned")
                    # sys.stdout.flush()
                    break
                j += 1
            if j == overlaps:
                raise Exception('Leaders do not fit in the cell')
    return spawn_points
def attributes(self, familiar, has_target: bool = True, is_follower: bool = True):
    """Return a zero-argument factory producing attribute dicts for follower
    agents; each call draws a fresh random orientation.

    NOTE(review): NO_TARGET is referenced but not defined in this module's
    visible imports — only reached when has_target is False; confirm.
    """
    def wrapper():
        chosen_target = familiar if has_target else NO_TARGET
        heading = np.random.uniform(-np.pi, np.pi)
        return dict(
            target=chosen_target,
            is_leader=not is_follower,
            is_follower=is_follower,
            body_type=self.body_type,
            orientation=heading,
            velocity=np.zeros(2),
            angular_velocity=0.0,
            target_direction=np.zeros(2),
            target_orientation=heading,
            familiar_exit=familiar,
        )
    return wrapper
def attributes_leader(self, target_iter, has_target: bool = True, is_follower: bool = False):
def wrapper():
target = next(target_iter)
orientation = np.random.uniform(-np.pi, np.pi)
d = dict(
target=target,
is_leader=not is_follower,
is_follower=is_follower,
body_type=self.body_type,
orientation=orientation,
velocity=np.zeros(2),
angular_velocity=0.0,
target_direction=np.zeros(2),
target_orientation=orientation,
familiar_exit=4,
)
return d
return wrapper
# The "objective function", i.e., the evacuation simulator, which returns the
# total evacuation time.
def run(individual, n_leaders, seed):
    """Run one evacuation simulation and return the total evacuation time.

    Args:
        individual: genome; individual[i] = [target_exit, cell] for leader i.
        n_leaders: number of leader (guide) agents to add.
        seed: RNG seed applied immediately before the simulation is run.

    Returns:
        simulation.data['time_tot'] after the simulation finishes.
    """
    simulation = SimpleScenarioFloor()
    field = simulation.field

    # Split the genome into the leaders' target exits and spawn cells.
    target_exits = []
    cells = []
    if n_leaders > 0:
        for i in range(n_leaders):
            target_exits.append(individual[i][0])
            cells.append(individual[i][1])

    # Add 25 followers in each of the six spawn areas. The six originally
    # copy-pasted setup blocks are collapsed into one loop; group names and
    # call arguments are identical to the originals.
    followers_per_spawn = [25, 25, 25, 25, 25, 25]
    for spawn_idx, spawn_size in enumerate(followers_per_spawn):
        follower_group = AgentGroup(
            agent_type=Circular,
            size=spawn_size,
            attributes=attributes(simulation, familiar=0, has_target=True, is_follower=True))
        simulation.agents.add_non_overlapping_group(
            "group_follower_spawn%d" % (spawn_idx + 1),
            follower_group,
            position_gen=False,
            position_iter=iter([]),
            spawn=spawn_idx,
            obstacles=geom_to_linear_obstacles(field.obstacles))

    if n_leaders > 0:
        # generate_leader_pos() should check that guides are not spawned in
        # unfeasible positions.
        init_pos = generate_leader_pos(simulation, cells, n_leaders)
        print(init_pos)
        target_exits = iter(target_exits)
        init_pos = iter(init_pos)
        # Add leaders.
        # NB! If there are multiple leaders, the function creating them should
        # check that the leaders do not overlap each other; if that is not
        # taken care of beforehand, add_non_overlapping_group terminates here.
        group_leader = AgentGroup(
            agent_type=Circular,
            size=n_leaders,
            attributes=attributes_leader(simulation, target_iter=target_exits, has_target=True, is_follower=False))
        simulation.agents.add_non_overlapping_group(
            "group_leader",
            group_leader,
            position_gen=True,
            position_iter=init_pos,
            spawn=0,
            obstacles=geom_to_linear_obstacles(field.obstacles))

    # Seed the RNG right before stepping so runs are reproducible per seed.
    np.random.seed(seed)
    simulation.update()
    simulation.run()
    print(simulation.data['time_tot'])
    return simulation.data['time_tot']
if __name__ == '__main__':
    # Command line: <cell_1> <exit_1> ... <cell_k> <exit_k> <seed>
    # (the original duplicated the gene_data slice and enumerated the 0..6
    # guide cases by hand; this builds the genome generically for any count).
    arguments = sys.argv[1:]
    # Seed number is the last argument; everything before it is genome data.
    seed = int(arguments[-1])
    gene_data = arguments[:-1]
    # Cells of guides (even positions) and exits of guides (odd positions).
    cells = [int(c) for c in gene_data[0::2]]
    exits = [int(e) for e in gene_data[1::2]]
    # Number of guides
    n_guides = len(cells)
    # Run the evacuation simulation
    if n_guides == 0:
        run([[], []], 0, seed)
    else:
        run([[exits[i], cells[i]] for i in range(n_guides)], n_guides, seed)
|
import unittest
from katas.kyu_7.bug_fixing_algorithmic_predicament import highest_age
class HighestAgeTestCase(unittest.TestCase):
    """Tests for highest_age: returns the name of the oldest person across two groups."""
    def setUp(self):
        # Shared fixtures: g1's oldest is kay (76); g2's oldest is alice (76).
        self.g1 = [{'name': 'kay', 'age': 1}, {'name': 'john', 'age': 13},
                   {'name': 'kay', 'age': 76}]
        self.g2 = [{'name': 'john', 'age': 1}, {'name': 'alice', 'age': 76}]
    def test_equals(self):
        # alice (77) is strictly older than anyone in g1.
        self.assertEqual(highest_age(self.g1, [
            {'name': 'john', 'age': 1}, {'name': 'alice', 'age': 77}
        ]), 'alice')
    def test_equals_2(self):
        # Tie at 76 between kay (g1) and alice (g2) is expected to resolve to kay.
        self.assertEqual(highest_age(self.g1, self.g2), 'kay')
    def test_equals_3(self):
        # john (130) dominates both groups.
        self.assertEqual(highest_age([
            {'name': 'kay', 'age': 1}, {'name': 'john', 'age': 130},
            {'name': 'kay', 'age': 76}
        ], self.g2), 'john')
    def test_equals_4(self):
        # Tie at 130; expected winner is john.
        self.assertEqual(highest_age([
            {'name': 'kay', 'age': 1}, {'name': 'john', 'age': 130},
            {'name': 'kay', 'age': 130}
        ], self.g2), 'john')
    def test_equals_5(self):
        # Same tie at 130 but kay's other entry is 2 instead of 1; expected kay.
        self.assertEqual(highest_age([
            {'name': 'kay', 'age': 2}, {'name': 'john', 'age': 130},
            {'name': 'kay', 'age': 130}
        ], self.g2), 'kay')
|
from objectBase import ObjectBase
from time_class import Time
class Car(ObjectBase):
    """
    Car class.
    Parent object is the stream to which the car belongs,
    as it defines origin and destination.
    """
    def __init__(self, id_=None, parent_object=None):
        super().__init__(id_=id_, parent_object=parent_object)
        # Arrival, departing (service start) and departed events, assigned
        # later by the simulation via the set_*_event methods.
        self.arrival = None
        self.departing = None
        self.departed = None
        # Statistics; wait_time is now initialized up front so
        # get_wait_time() cannot raise AttributeError before the first
        # calculate_wait_time() call (it mirrors time_in_system).
        self.time_in_system = Time()
        self.wait_time = Time()

    def get_stream(self):
        """Return the car's stream (its parent object)."""
        return self.parent_object

    def get_time_in_system(self):
        """Return the car's time in the system."""
        return self.time_in_system

    def get_wait_time(self):
        """Return the car's wait time in the queue."""
        return self.wait_time

    def has_arrived(self):
        """True once the arrival event has been executed."""
        return self.arrival.is_executed()

    def has_departed(self):
        """True once the departure event exists and has been executed."""
        if self.departed is None:
            return False
        return self.departed.is_executed()

    def is_awaiting(self):
        """True while the car has arrived but is neither departing nor departed."""
        return self.has_arrived() and not self.has_departed() and not self.is_departing()

    def is_departing(self):
        """True once the departing event exists and has been executed.

        Bug fix: the original returned self.is_departing(), recursing
        infinitely; it now checks the departing event, mirroring
        has_departed().
        """
        if self.departing is None:
            return False
        return self.departing.is_executed()

    def is_first_in_queue(self):
        """Check whether this car is at the head of its stream's queue."""
        return self is self.parent_object.get_queue_first_car()

    def move_to_intersection(self):
        """
        Move the car onto the intersection
        so it can drive through it.
        """
        self.parent_object.move_car_to_intersection(self)

    def remove_from_intersection(self):
        """
        Remove the car from the intersection,
        simulating driving through it.
        """
        self.parent_object.remove_car_from_intersection(self)

    def move_to_queue(self):
        """Append the car to its stream's queue."""
        self.parent_object.add_to_queue(self)

    def set_arrival_event(self, event):
        """Set the arrival event."""
        self.arrival = event

    def set_departing_event(self, event):
        """Set the departing (service start) event."""
        self.departing = event

    def set_departure_event(self, event):
        """Set the departure event."""
        self.departed = event

    def calculate_time_in_system(self):
        """
        Calculate time spent in the system, from arrival to full departure
        (or up to the current time if the car has not departed yet).
        """
        arrival_time = self.arrival.get_event_time()
        if self.departed is not None:
            end_time = self.departed.get_event_time()
        else:
            end_time = self.get_current_time()
        self.time_in_system = end_time - arrival_time
        return self.get_time_in_system()

    def calculate_wait_time(self):
        """Calculate time spent in the queue, from arrival to service start
        (or up to the current time if service has not started yet)."""
        arrival_time = self.arrival.get_event_time()
        if self.departing is not None:
            end_time = self.departing.get_event_time()
        else:
            end_time = self.get_current_time()
        self.wait_time = end_time - arrival_time
        return self.get_wait_time()
|
import dash_bootstrap_components as dbc
import pandas as pd
# Build a DataFrame with a two-level column index ("Score" -> Max/Average)
# keyed by person name, then render it as a Bootstrap-styled HTML table.
df = pd.DataFrame(
    {
        ("Score", "Max"): {
            "Arthur Dent": 6.0,
            "Ford Prefect": 4.0,
            "Zaphod Beeblebrox": 1.0,
            "Trillian Astra": 3.0,
        },
        ("Score", "Average"): {
            "Arthur Dent": 2.0,
            "Ford Prefect": 2.0,
            "Zaphod Beeblebrox": 0.7,
            "Trillian Astra": 1.9,
        },
    }
)
# Name the index so the rendered table gets a header for the name column.
df.index.set_names("Name", inplace=True)
# index=True keeps the Name column when converting to a dbc.Table.
table = dbc.Table.from_dataframe(
    df, striped=True, bordered=True, hover=True, index=True
)
|
from typing import Set, Optional, List
from pydantic import BaseModel, Field, HttpUrl
class Image(BaseModel):
    # Image attachment: a validated URL plus a display name.
    url: HttpUrl  # pydantic validates this is a well-formed URL
    name: str
class Item(BaseModel):
    # Pydantic model describing a purchasable item.
    name: str
    description: Optional[str] = Field(
        None, title="The description of the item", max_length=300
    )
    price: float = Field(..., gt=0,
                         description="The price must be greater than zero")
    tax: Optional[float] = 10.5  # default tax applied when none is supplied
    tags: Set[str] = set()  # the set type deduplicates tags automatically
    image: Optional[List[Image]] = None  # NOTE(review): named singular but holds a list
    class Config:
        # Example payload surfaced in the generated OpenAPI docs.
        schema_extra = {
            "example": {
                "name": "Foo",
                "description": "A very nice Item",
                "price": 35.4,
                "tax": 3.2,
                "tags": { "test1", "test2" }
            }
        }
class Offer(BaseModel):
    # A named bundle of items with an overall price.
    name: str
    description: Optional[str] = None
    price: float
    items: List[Item]  # nested Item models are validated recursively
|
from flask import Flask, render_template
import db
app = Flask(__name__)
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/fipe_second', methods = ['POST', 'GET'])
def fipe_second():
    """Handle the FIPE vehicle lookup form.

    NOTE(review): `request` is used but not imported from flask, and
    `result_fipe2`, `car_name_brand`, `ItensCar` and `Progpy` are not defined
    anywhere in this module -- as written this view raises NameError; confirm
    the missing imports/definitions.
    """
    if request.method == 'POST':
        dados = request.form
        nome = result_fipe2['veiculo']
        ItensCar.car_name_brand = car_name_brand
        var = Progpy.name_car(ItensCar.var, ItensCar.car_name_brand)
        ItensCar.id_code = var[-1]
        return render_template('fipe_second.html', var=var, car_name_brand =car_name_brand )
if __name__ == '__main__':
    # Run the development server; the host-exposed variant is kept for reference.
    #app.run(host='0.0.0.0', port=8080, debug=True)
    app.run(port=8080, debug=True)
|
"""
Created by Alex Wang on 20170620
"""
from alexutil.logutil import LogUtil
import traceback
logger = LogUtil()
def except_test():
    """Demonstrate exception logging: record the error and its traceback."""
    try:
        fh = open("abc")  # expected to fail -- the file does not exist
    except Exception as e:
        logger.error(e)
        traceback.print_exc()  # traceback to stderr
        logger.error(traceback.format_exc())  # same traceback via the logger
def test():
    """Exercise plain info logging followed by the exception-logging demo."""
    logger.info("test log")
    except_test()
# Straightforward counting problem -- nothing technically demanding.
from typing import List  # fix: List was referenced in the annotation but never imported


class Solution:
    """LeetCode 1725: number of rectangles that can form the largest square."""

    def countGoodRectangles(self, rectangles: List[List[int]]) -> int:
        """Return how many rectangles achieve the maximal inscribed square side.

        The largest square cut from an l x w rectangle has side min(l, w);
        count the rectangles whose side equals the overall maximum.
        """
        best, count = -1, 0
        for length, width in rectangles:
            side = min(length, width)
            if side > best:
                # New maximum: restart the count.
                best, count = side, 1
            elif side == best:
                count += 1
        return count
from django.contrib import admin
from .models import User
from .models import student_info
from .models import familyBackground_info
from .models import educationalBackground_Info
# Expose each model in the Django admin with the default ModelAdmin.
admin.site.register(User)
admin.site.register(student_info)
admin.site.register(familyBackground_info)
admin.site.register(educationalBackground_Info)
# Register your models here.
|
from django.contrib import admin
from .models import Application, Client
@admin.register(Application)
class ApplicationAdmin(admin.ModelAdmin):
    """Admin list view for applications, including the decision outcome."""
    list_display = (
        'pk', 'date_created', 'product', 'client', 'decision', 'decision_comment')
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
    """Admin list view for clients with their contact phone."""
    list_display = ('pk', 'first_name', 'last_name', 'phone')
|
"""
Livestock Heat transfer - Transmission. Calculates the transmission losses and gains of various geometries under stationary conditions
-----
Please notice only type of geometry type is allowed as input
-----
LIVESTOCK made by Kristoffer Negendahl, DTU.BYG
Args:
Geo: List of Surfaces, Breps or Meshes
U: List of U-values [W/(m2K)] for each Geometry (if only one U, all Geometries will be evaluated on the single U-value)
Ti: Temperature (C) on the inner side of the geometry (20C is set as default)
To: Temperature (C) on the outside of the geometry (-12C is set as default)
Returns:
Q: Output the transmission heat balance [kW] - if negative the transmission is considered as a loss from inside to the outside, and if positive the transmission is a gain from outside to the inside
Qi: Output the individual geometry transmission heat balance [W]
"""
# Grasshopper (IronPython 2) component script; Geo, U, Ti, To are component
# inputs and ghenv is provided by the Grasshopper environment.
# Add the message thingie underneath the component
ghenv.Component.Name = 'Livestock_Heat_Transmission'
ghenv.Component.NickName = 'Livestock Heat Transmission'
ghenv.Component.Message = 'Transmission v.0.1'
# Importing classes and modules
import Rhino as rc
# Setting defaults
U_def = [0]
Ti_def = 20
To_def = -12
### Ordering inputs into dedicated lists
## Supports three types of geometry
srfs = []
breps = []
meshes = []
for g in Geo:
    #print type(g)
    if isinstance(g,rc.Geometry.Surface): #is surface
        #print("a surface")
        srfs.append(g)
    elif isinstance(g,rc.Geometry.Brep): #is Brep
        #print("a brep")
        breps.append(g)
    elif isinstance(g,rc.Geometry.Mesh): #is Mesh
        #print("a mesh")
        meshes.append(g)
    else:
        print("not a geometry I can work with")
# Output relevant information
noSrfs = str(len(srfs))
noBreps = str(len(breps))
noMeshes = str(len(meshes))
print "Transmission from {} surfaces,".format(noSrfs), "{} breps,".format(noBreps), "{} meshes".format(noMeshes)
# Function that calculates the area of each geometry in a list
def area(list_of_geometry):
    """Return a list of areas computed via Rhino's AreaMassProperties.

    Geometries that AreaMassProperties cannot handle abort the loop and the
    partial list is returned; the AreaError values are collected but unused.
    """
    area = []
    areaerror = []
    #try: # -- can't use this - only works on breps and srfs
    #for i in range(len(list_of_geometry)):
    #area.append(list_of_geometry[i].GetArea())
    #except:
    #print "Geometry had no GetArea properties"
    try:
        for i in range(len(list_of_geometry)):
            area.append(rc.Geometry.AreaMassProperties.Compute(list_of_geometry[i]).Area)
            areaerror.append(rc.Geometry.AreaMassProperties.Compute(list_of_geometry[i]).AreaError)
    except:
        print "Geometry had no AreaMassProperties properties"
    #print(areaerror)
    return area
# Function that calculates the transmission balance
def transm(areas, Uvals, ti, to):
    """Compute the transmission heat balance.

    Args:
        areas: list of geometry areas [m2].
        Uvals: list of U-values [W/(m2K)]; when its length does not match
            areas, the first U-value is applied to every geometry.
        ti: inside temperature [C].
        to: outside temperature [C].

    Returns:
        (Q, Qi) where Q is the total balance rounded to 2 decimals [kW] and
        Qi the per-geometry balances [W]. Negative means a loss to outside.
    """
    if len(areas) > 1 and len(areas) == len(Uvals):
        # One U-value per geometry.
        Qi = [a * u * (to - ti) for a, u in zip(areas, Uvals)]
    else:
        # Single U-value applied to all geometries.
        Qi = [a * Uvals[0] * (to - ti) for a in areas]
    Q = round(sum(Qi) / 1000, 2)
    return Q, Qi
# Setting defaults if nothing else is assigned
if not U:
    U = U_def
if not Ti:
    Ti = Ti_def
if not To:
    To = To_def
# Check geometry lists and assign area function.
# NOTE: elif means only ONE geometry type is processed per run, matching the
# component header's note that only one geometry type is allowed as input.
if srfs:
    try: A = area(srfs)
    except: print("not a geometry I can work with")
elif breps:
    try: A = area(breps)
    except: print("not a geometry I can work with")
elif meshes:
    try: A = area(meshes)
    except: print("not a geometry I can work with")
# Calculate heat transmission.
# NOTE(review): if no geometry matched above, A is unbound and the next line
# raises NameError -- confirm at least one geometry is always supplied.
Transmission = transm(A, U, Ti, To)
Q = Transmission[0]  # total balance [kW]
Qi = Transmission[1]  # per-geometry balances [W]
|
from CSVinfo import *
import re
class MemoryData:
    '''
    Static helpers that read memory (RAM) attributes from a CSV row and score
    them. Column indices (MEMORY_*) come from CSVinfo. Methods are now marked
    @staticmethod explicitly (they were implicitly static before).
    '''
    @staticmethod
    def get_memory_price(row):
        """Return the first decimal number found in the price column."""
        return float(re.findall(r"\d+\.\d+", row[MEMORY_PRICES])[0])

    @staticmethod
    def get_memory_performance_score(row):
        """Aggregate score from CAS latency, ECC, heat spreader, size and speed."""
        return MemoryData.get_memory_cas_latency(row) + \
            (10 if MemoryData.is_memory_ecc_supported(row) else 0) + \
            (10 if MemoryData.is_memory_heat_spreader_supported(row) else 0) + \
            abs(MemoryData.get_memory_size(row) * 5) + \
            (abs(MemoryData.get_memory_ddr4_speed(row) / 100) if MemoryData.is_memory_ddr4(row)
             else abs(MemoryData.get_memory_ddr3_speed(row) / 100))

    @staticmethod
    def extract_num_data(col, start, marker):
        '''
        Return the value in 'col' as a float, converted from index 'start' up
        to (not including) the first occurrence of 'marker'. Returns 0 when
        'marker' is absent. (Parameter renamed from 'str', which shadowed the
        builtin; it was only ever passed positionally.)
        '''
        if marker not in col:
            return 0
        return float(col[start:col.find(marker)])

    @staticmethod
    def get_memory_cas_latency(row):
        return float(row[MEMORY_CAS_LATENCY])

    @staticmethod
    def is_memory_ecc_supported(row):
        # 'Yes'/'No' flag column.
        return row[MEMORY_ECC] == 'Yes'

    @staticmethod
    def is_memory_heat_spreader_supported(row):
        return row[MEMORY_HEAT_SPREADER] == 'Yes'

    @staticmethod
    def get_memory_size(row):
        # Size in GB, parsed from strings like '16GB'.
        return MemoryData.extract_num_data(row[MEMORY_SIZE], 0, 'G')

    @staticmethod
    def is_memory_ddr4(row):
        # NOTE(review): returns True when the column reads 'FALSE', which looks
        # inverted -- preserved as-is; verify against the CSV column semantics.
        return row[MEMORY_IS_DDR4] == 'FALSE'

    @staticmethod
    def get_memory_ddr3_speed(row):
        return float(row[MEMORY_DDR3_SPEED])

    @staticmethod
    def get_memory_ddr4_speed(row):
        return float(row[MEMORY_DDR4_SPEED])

    @staticmethod
    def get_memory_speed(row):
        return float(row[MEMORY_DDR_SPEED])

    @staticmethod
    def get_memory_name(row):
        return row[MEMORY_NAME]
#!/usr/bin/python3
'''Convert point cloud from velodyne to h5.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import h5py
import argparse
import numpy as np
from datetime import datetime
def main():
    """Pack per-class point-cloud (pcd) files into batched HDF5 files.

    Expects <folder>/{train,test}/<label_id>/*.pcd; writes <split>_<i>.h5
    files plus a <split>_files.txt listing next to them.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder', '-f', help='Path to data folder')
    args = parser.parse_args()
    print(args)

    # batch_size = 2048
    batch_size = 64    # how many point-cloud frames are packed into one h5 file
    sample_num = 2048  # points per frame

    # Data folder (defaults to ../../data when --folder is omitted).
    folder_mynet = args.folder if args.folder else '../../data'
    train_test_folders = ['train', 'test']

    data = np.zeros((batch_size, sample_num, 3))  # 3 = channel count; adjust for other cloud types
    label = np.zeros((batch_size), dtype=np.int32)
    for folder in train_test_folders:  # iterate the train and test subfolders
        folder_pts = os.path.join(folder_mynet, folder)
        # Text file listing the generated h5 file names for this split.
        filename_filelist_h5 = os.path.join(folder_mynet, '%s_files.txt' % folder)
        idx_h5 = 0
        for label_id in os.listdir(folder_pts):
            folder_label = os.path.join(folder_pts, label_id)  # per-label folder
            filelist = os.listdir(folder_label)  # all point-cloud file names
            for idx_pts, filename in enumerate(filelist):
                filename_pts = os.path.join(folder_label, filename)
                with open(filename_pts) as f:  # read the pcd file
                    # Fix: read the file exactly once. The original called
                    # f.readlines() twice inside one expression; the second call
                    # returned [] on the exhausted file and the slice only
                    # happened to evaluate to [11:-1] by accident.
                    lines = f.readlines()
                    # Skip the 11-line pcd header and drop the final line.
                    xyzi_array = np.array([[float(value) for value in line.split(' ')]
                                           for line in lines[11:-1]])
                idx_in_batch = idx_pts % batch_size
                data[idx_in_batch, ...] = xyzi_array
                label[idx_in_batch] = int(label_id)
                # Flush a full batch (or the final partial batch) to hdf5.
                if ((idx_pts + 1) % batch_size == 0) or idx_pts == len(filelist) - 1:
                    item_num = idx_in_batch + 1
                    filename_h5 = os.path.join(folder_mynet, '%s_%d.h5' % (folder, idx_h5))
                    print('{}-Saving {}...'.format(datetime.now(), filename_h5))
                    with open(filename_filelist_h5, 'a') as filelist_h5:  # append to the split's file list
                        filelist_h5.write('./%s_%d.h5\n' % (folder, idx_h5))
                    # Context manager guarantees the h5 file is closed.
                    with h5py.File(filename_h5, 'w') as file_h5:
                        file_h5.create_dataset('data', data=data[0:item_num, ...])
                        file_h5.create_dataset('label', data=label[0:item_num, ...])
                    idx_h5 = idx_h5 + 1


if __name__ == '__main__':
    main()
|
import logging
from gensim import corpora, models, similarities
class TopicModelling:
    """Gensim-based topic-modelling scaffold (appears unfinished)."""
    def topicModelling(self, documents):
        """Build a dictionary and bag-of-words corpus from sample documents.

        NOTE(review): the `documents` argument is immediately shadowed by the
        hard-coded sample corpus below, so callers' input is ignored.
        NOTE(review): corpora.Dictionary expects tokenized documents (lists of
        tokens); passing raw strings makes it iterate characters -- confirm a
        lowercase/split tokenization step is missing.
        """
        documents = ["Human machine interface for lab abc computer applications",
                     "A survey of user opinion of computer system response time",
                     "The EPS user interface management system",
                     "System and human system engineering testing of EPS",
                     "Relation of user perceived response time to error measurement",
                     "The generation of random binary unordered trees",
                     "The intersection graph of paths in trees",
                     "Graph minors IV Widths of trees and well quasi ordering",
                     "Graph minors A survey"]
        dictionary = corpora.Dictionary(documents)
        corpus = [dictionary.doc2bow(document) for document in documents]
        # NOTE(review): bare attribute access below is a no-op -- presumably an
        # unfinished serialize(...) call.
        corpora.WikiCorpus.serialize
corpora.WikiCorpus.serialize |
"""Given an array of integers, return a new array such
that each elemnt at index i of the new array is the product of all
the numbers in the original array expect the one at i
This Solution runs in O(n) time and O(n) space
"""
def products(nums):
    """Return a list where position i holds the product of all nums except nums[i].

    Uses prefix and suffix product arrays (no division): O(n) time, O(n) space.
    Robustness fix: the original indexed reverseProduct[1] unconditionally for
    i == 0 and crashed on single-element input; empty and one-element inputs
    now return [] and [1] (the empty product) respectively.
    """
    n = len(nums)
    if n == 0:
        return []
    prefix = [1] * n  # prefix[i] = product of nums[:i]
    suffix = [1] * n  # suffix[i] = product of nums[i+1:]
    for i in range(1, n):
        prefix[i] = prefix[i - 1] * nums[i - 1]
    for i in range(n - 2, -1, -1):
        suffix[i] = suffix[i + 1] * nums[i + 1]
    return [prefix[i] * suffix[i] for i in range(n)]


assert products([1,2,3,4]) == [24,12,8,6]
|
#!/home/sxl1036/python/installing/python355/bin/python3
from bokeh.plotting import figure, output_file, show,save
output_file("structure.html")
# Translation tables to render digits as sub/superscripts in chemical formulas.
SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉") # subscript
SUP = str.maketrans("0123456789", "⁰¹²³⁴⁵⁶⁷⁸⁹")
p = figure(title='BeGeN2'.translate(SUB),plot_width=500,plot_height=300, y_axis_label='Energy (eV)')
import numpy as np
# Band-structure data: column 0 is the k-path coordinate, remaining columns are bands.
data =np.genfromtxt('BNDS.DAT', delimiter='\t')
nband=len(data[0,:])
#print (nband)
# Draw one red line per band.
for i in range(nband-1):
    p.line(data[:,0],data[:,i+1],line_width=1.5,color='red')
# Vertical lines mark high-symmetry k-points; the last three frame the plot.
p.line([0,0],[-19,16])
p.line([1,1],[-19,16])
p.line([2.2202,2.2202],[-19,16])
p.line([3.2202,3.2202],[-19,16])
p.line([4.3673,4.3673],[-19,16])
p.line([5.5875,5.5875],[-19,16])
p.line([6.5875,6.5875],[-19,16])
p.line([7.7345,7.7345],[-19,16])
p.line([0,7.7345],[0,0])
p.line([0,7.7345],[-19,-19])
p.line([0,7.7345],[16,16])
GG=str(u'\u0393') #unicode of \Gamma
#print (GG)
# Label the k-axis ticks with the high-symmetry point names.
p.xaxis.ticker = [ 0,1,2.2202,3.2202,4.3673,5.5875,6.5875,7.7345]
p.xaxis.major_label_overrides = {0:'X',1:GG,2.2202:'Z',3.2202:'U',4.3673:'R',5.5875:'S',6.5875:'Y',7.7345:GG}
save(p)
|
# Python Standard Libraries
# N/A
# Third-Party Libraries
from rest_framework import serializers
# Custom Libraries
from . import digger_model
from . import user_serializer
class DiggerSerializer(serializers.ModelSerializer):
    """DRF serializer for the Digger model."""
    # Clients may only submit how far to dig; progress, currency and the
    # linked user are computed server-side and therefore read-only.
    amount_to_dig = serializers.FloatField(write_only=True)
    depth_dug = serializers.FloatField(read_only=True)
    dubloons_in_possession = serializers.IntegerField(read_only=True)
    linked_user = user_serializer.UserSerializer(read_only=True)
    class Meta:
        model = digger_model.Digger
        fields = "__all__"
|
# Given a non-empty, singly linked list with head node head, return a middle node of linked list.
# If there are two middle nodes, return the second middle node.
class Node:
    """Singly linked list node."""

    def __init__(self, data):
        self.data = data  # payload
        self.next = None  # next node, or None at the tail


class LinkedList:
    """Minimal singly linked list with front insertion and middle lookup."""

    def __init__(self):
        self.head = None

    def push(self, new_data):
        """Insert a new node at the front of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def printMiddle(self):
        """Print and return the middle element; None for an empty list.

        Slow/fast pointer technique. Bug fix: for even-length lists the
        original loop condition (fast.next and fast.next.next) stopped on the
        FIRST middle, contradicting the stated requirement to return the
        second middle node; the loop now advances while fast and fast.next
        are both non-None. The value is also returned so callers can use it.
        """
        if self.head is None:
            return None
        slow = self.head
        fast = self.head
        while fast is not None and fast.next is not None:
            fast = fast.next.next
            slow = slow.next
        print("The middle element is: ", slow.data)
        return slow.data


list1 = LinkedList()
list1.push(10)
list1.push(5)
list1.push(4)
list1.push(2)
list1.push(3)
list1.push(1)
list1.printMiddle()
|
__version__ = "0.2"
def get_interface(interface_name):
    """Resolve a broker interface name to its (market-data API, trade API) pair.

    Imports stay lazy: the backing package is only loaded for the branch
    actually selected.

    Raises:
        ValueError: if the interface name is not supported.
    """
    def _load_ctp():
        from ctpbee_api.ctp import MdApi, TdApi
        return MdApi, TdApi

    def _load_ctp_mini():
        from ctpbee_api.ctp_mini import MiniMdApi, MiniTdApi
        return MiniMdApi, MiniTdApi

    loaders = {"ctp": _load_ctp, "ctp_mini": _load_ctp_mini}
    loader = loaders.get(interface_name)
    if loader is None:
        raise ValueError("INTERFACE NOT SUPPORTED")
    return loader()
|
# Below is description given by proctor:
# Please write a function reverse_array() that returns the provided array in reverse order.
# NOTE: Do not use built-in array reversal functions
# Example:
# >>> a = ['a',2,3,9,5]
# >>> print(reverse_array(a))
# [5,9,3,2,'a']
def reverse_array(input_array):
    """Return a new list with the elements of input_array in reverse order.

    A negative-step slice copies the sequence back-to-front in one pass; the
    original's redundant empty-list initialization (immediately overwritten)
    has been dropped. The input list is not modified.
    """
    return input_array[::-1]


reverse_array([1, 2, 3, 4, 5])
# Below is description given by proctor
# Please write a find_duplicates() function that takes
# in an array and makes a new array with any values that appear
# more than once. The returned array should not have any duplicates.
# Example:
# >>> a = [8, 1, 2, 3, 2, 4, 1, 1, 2]
# >>> print(find_duplicates(a))
# [1, 2]
def find_duplicates(input_array):
    """Return the values that appear more than once, without repeats.

    Order follows each value's first *repeated* occurrence, e.g.
    [8,1,2,3,2,4,1,1,2] -> [2, 1]. (The prompt's example shows [1, 2];
    this matches the original implementation's actual output order.)
    Both membership checks now use sets, making the pass O(n) overall
    instead of scanning the result list for every duplicate.
    """
    seen = set()       # every value encountered so far
    reported = set()   # duplicates already added to the result
    duplicates = []
    for element in input_array:
        if element in seen:
            if element not in reported:
                reported.add(element)
                duplicates.append(element)
        else:
            seen.add(element)
    return duplicates


find_duplicates([8, 1, 2, 3, 2, 4, 1, 1, 2])
|
from django.contrib import admin
#Import our models
from .models import Tipo_Comunicacion, Medio, Acciones, Comunicacion, Organismo
#Add admin utilities
class MedioAdmin(admin.ModelAdmin):
    """Hidden from the admin index, but still editable where referenced."""
    def has_module_permission(self, request):# Does it have permission to be shown?
        return False # We could put complex validation logic here, or simply: return request.user.is_superuser
class Tipo_ComunicacionAdmin(admin.ModelAdmin):
    """Hidden from the admin index, but still editable where referenced."""
    def has_module_permission(self, request):
        return False
class ComunicacionInline(admin.TabularInline):
    """Edit Comunicacion rows inline on the Acciones change page."""
    model = Comunicacion
    extra= 1
class AccionAdmin(admin.ModelAdmin):
    """Read-only admin for synced actions, with search, filters and inline communications."""
    search_fields = ['nombre', 'descripcion']
    list_filter = ['importancia', 'departamento_id', 'organismo_ws']
    readonly_fields = ['id_ws', 'nombre', 'descripcion', 'organismo_ws', 'estado_id', 'monto', 'financiacion_id', 'latitud', 'longitud', 'departamento_id', 'municipio_id', 'localidad_id', 'borrado', 'publicado', 'fecha_creacion']
    inlines = [
        ComunicacionInline,
    ]
# Register your models here.
admin.site.register(Tipo_Comunicacion, Tipo_ComunicacionAdmin)# registered so inline editing works / but not visible via its admin
admin.site.register(Medio, MedioAdmin)# registered so inline editing works / but not visible via its admin
admin.site.register(Acciones, AccionAdmin)
admin.site.register(Comunicacion)
admin.site.register(Organismo)
from django import forms
from base.models import *
fancy = lambda: forms.TextInput(attrs={'class': 'derp'})
class AddressForm(forms.ModelForm):
    """ModelForm for Address that applies Bootstrap's form-control class.

    Text inputs and selects that do not already declare a CSS class get
    'form-control' so they pick up Bootstrap styling. Leftover debug prints
    and the commented-out class-append experiment were removed; widgets with
    an explicit class are deliberately left untouched, as before.
    """

    def __init__(self, *args, **kwargs):
        super(AddressForm, self).__init__(*args, **kwargs)
        for name, field in self.fields.items():
            widget = field.widget
            # Exact-class check (not isinstance) to match the original
            # behavior: widget subclasses such as EmailInput are skipped.
            if type(widget) in (forms.widgets.TextInput, forms.widgets.Select):
                if 'class' not in widget.attrs:
                    widget.attrs.update({'class': 'form-control'})

    class Meta:
        model = Address
        fields = ('name', 'street_line1', 'street_line2', 'city', 'state', 'zipcode', 'country',)
|
import numpy
import json
import cv2
import numpy as np
import os
import scipy.misc as misc
# Create semantic map from instance map
#############################################################################################
def show(Im):
    """Display an image in an OpenCV window; blocks until a key is pressed."""
    cv2.imshow("show",Im.astype(np.uint8))
    cv2.waitKey()
    cv2.destroyAllWindows()
###############################################################################################
def GenerateSemanticMap(InDir,SubDir):
    """Overlay each annotation mask onto its scene image and save in place.

    For every sample folder under InDir, reads Image.png and each mask file
    under the SubDir annotation folders, writes a side-by-side visualization
    back over the mask file, then renames the annotation folder with a "V"
    suffix to mark it processed.
    NOTE(review): destructive -- mask files are overwritten by the composites.
    """
    ppp=0  # processed-folder counter (progress only)
    for DirName in os.listdir(InDir):
        print(DirName)
        ppp+=1
        print(ppp)
        pig = False  # unused flag, kept as-is
        DirName=InDir+"//"+DirName
        SemDir=DirName+"//Semantic//"  # unused, kept as-is
        Im = cv2.imread(DirName + "/Image.png")
        for p in range(4):
            SgDir = DirName + "/" + SubDir[p] + "//"
            if not os.path.exists(SgDir): continue
            for name in os.listdir(SgDir):
                path1 = SgDir + "/" + name
                if not os.path.exists(path1): continue
                sg = cv2.imread(path1)
                # Keep only the blue channel and clamp labels above 2 to 0.
                sg[:,:,1]*=0
                sg[:, :, 2] *= 0
                sg[sg>2] = 0
                I1 = Im.copy()
                if np.ndim(sg)==2:
                    # Single-channel mask: darken masked pixels, show pairs.
                    I1[:, :, 0] *= 1 - sg
                    I1[:, :, 1] *= 1 - sg
                    I1 = np.concatenate([Im, I1], axis=1)
                else:
                    # 3-channel mask: blend, then show image/blend/mask side by side.
                    I1=(I1/3+sg*50).astype(np.uint8)
                    I1=np.concatenate([Im,I1,(sg*70).astype(np.uint8)],axis=1)
                print(path1)
                #show(I1)
                cv2.imwrite(path1,I1)
        # Mark the last visited annotation folder as processed.
        os.rename(SgDir,SgDir.replace(SubDir[p],SubDir[p]+"V"))
####################################################################################################
# Hard-coded dataset location and the annotation subfolders to process.
InDir=r"C:\Users\Sagi\Desktop\NewChemistryDataSet\NewFormat\Instance\\"
SubDir=[r"Semantic","Material",r"Parts",r"Vessel"]
GenerateSemanticMap(InDir,SubDir)
import sys
# NOTE(review): machine-specific site-packages path hard-coded -- remove or
# replace with a proper environment before sharing.
sys.path.append('C:\\Users\\nikit\\AppData\\Local\\Programs\\Python\\python38\\lib\\site-packages')
import NBodyPlotter as nbp
from NBodyPlotter import NBodySolver
from NBodyPlotter import Body
import matplotlib.pyplot as plt
import numpy as np
#Define scale values to keep close to unity
mass_scale = 1e30 #Kg
dist_scale = 1e11 #m
vel_scal = 1000 #m/s (such that inputted units are in Km/s)
orbit_period = 356*24*60*60 #s
solver = NBodySolver()
solver.SetSolverRelativeValues(mass_scale, dist_scale, vel_scal, orbit_period)
# A 3x4 grid of 3D axes for plotting multiple configurations.
fig = plt.figure(figsize=(15, 15))
ax = []
for i in range(12):
    ax.append(fig.add_subplot(3,4,i+1, projection='3d'))
#
# star_vel = np.sqrt(nbp.G * 1*mass_scale/(dist_scale))/(vel_scal*2)
#
# t = 30
# time_span=np.linspace(0,t,t*1000)
# #Initiate solver
#
# solver.SetSolverRelativeValues(mass_scale, dist_scale, vel_scal, orbit_period)
# solver.AddBody(Body("star 1", 1, [-1, 0, 0], [0,star_vel,0]))
# solver.AddBody(Body("star 2", 1, [1, 0, 0], [0,-star_vel,0]))
# solver.SolveNBodyProblem(time_span)
# solver.AnimateNBodySolution()
#
# Circular-orbit speed for the binary pair, halved per the velocity scale.
star_vel = np.sqrt(nbp.G *mass_scale/(dist_scale))/(vel_scal*2)
t = 10
time_span=np.linspace(0,t,t*100000)
#Initiate solver
solver.SetSolverRelativeValues(mass_scale, dist_scale, vel_scal, orbit_period)
# Equal-mass binary plus a light third body above the plane, starting at rest.
solver.AddBody(Body("star 1", 1, [0, -1, 0], [star_vel,0,0]))
solver.AddBody(Body("star 2", 1, [0, 1, 0], [-star_vel,0,0]))
solver.AddBody(Body("mid boi", 0.1, [0, 0, 1], [0,0,0]))
#
solver.SolveNBodyProblem(time_span)
solver.PlotNBodySolution(ax=ax[0])
ax[0].set_title("Halo system 1 after " + str(t) + " solar years")
#solver.AnimateNBodySolution()
|
import cv2
# Detect faces in a photo, then detect eyes inside each face region and draw
# bounding boxes: green for faces, blue for eyes.
img = cv2.imread('C:\\') #Your photo path
face_cascade = cv2.CascadeClassifier('C:\\') #Your frontal face cascade
# Haar cascades operate on grayscale images.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 4)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    eye_cascade = cv2.CascadeClassifier('C:\\') #Your eye cascade
    # Crop the face region; the eye search runs only inside it.
    img2 = img[y:y+h, x:x+w]
    gray2 = gray[y:y+h, x:x+w]
    eyes = eye_cascade.detectMultiScale(gray2, 1.3, 7)
    # NOTE: the inner loop reuses x/y/w/h, shadowing the face coordinates, but
    # it draws on the img2 crop, so face-local eye coordinates are correct.
    for (x, y, w, h) in eyes:
        cv2.rectangle(img2, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.imshow("image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Copyright 2020, Alex Badics, All Rights Reserved
import os
from flask import Flask
from .index import INDEX_BLUEPRINT
from .act_writer import ACT_BLUEPRINT
class Config:
    """Base Flask configuration shared by all environments."""
    # Regenerated on every process start, which invalidates existing sessions
    # after a restart; acceptable for this small app.
    SECRET_KEY = os.urandom(24)


class DevelopmentConfig(Config):
    """Configuration for local development (debug mode on)."""
    ENV = 'development'
    DEBUG = True  # was 1; True is the idiomatic boolean Flask expects


class TestConfig(Config):
    """Configuration used by the test suite."""
    TESTING = True  # was 1
def create_app(config: Config) -> Flask:
    """Application factory: build a Flask app configured from *config*.

    Registers the index and act-writer blueprints before returning.
    """
    app = Flask(__name__)
    app.config.from_object(config)
    for blueprint in (INDEX_BLUEPRINT, ACT_BLUEPRINT):
        app.register_blueprint(blueprint)
    return app
|
from kivy.properties import ListProperty
from kivy.uix.widget import Widget
class HomePage(Widget):
    """Home screen widget.

    Composed of one "Apps" label and 4 sub labels which are the latest apps
    used; could hold at least 4 recent apps.
    NOTE(review): self.icon is not created in this class — presumably bound
    in the accompanying kv file; confirm before reuse.
    """

    def on_pos(self, instance, value):
        # Keep the icon glued to the widget's new position.
        self.icon.pos = value
        # reposition each sub components

    def on_size(self, instance, value):
        # Keep the icon matched to the widget's new size.
        self.icon.size = value
        # resize each sub components
class RecentUsedAppsList(Widget):
    """Widget holding the list of recently used apps."""

    # Kivy ListProperty; assignments to it fire on_recently_used_apps.
    recently_used_apps = ListProperty()

    def on_recently_used_apps(self, instance=None, value=None):
        """Rebuild the list display when the property changes.

        Fix: Kivy invokes property observers as on_<name>(self, instance,
        value); the original signature took no extra arguments and would
        raise TypeError on every property change.  The defaults keep any
        existing direct calls backward-compatible.
        NOTE(review): self.build() is not defined in this class — presumably
        inherited or added elsewhere; confirm.
        """
        self.build()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
from scipy.stats import ttest_rel
import pandas as pd
"""
两样本成对数据的t检验。所谓成对数据, 是指两个样本的样本容量相等,
且两个样本之间除均值之外没有另的差异。例如比较某一班同一单元内容的第二次考试是否比第一次的高?
"""
x = [20.5, 18.8, 19.8, 20.9, 21.5, 19.5, 21.0, 21.2]
y = [17.7, 20.3, 20.0, 18.8, 19.0, 20.1, 20.0, 19.1]
# 配对样本t检验
print(ttest_rel(x, y))
# Ttest_relResult(statistic=1.8001958337730648, pvalue=0.1148515300576627)
# 结论: 因为p值=0.1149>0.05, 故接受原假设, 认为在70℃时的平均断裂强力与80℃时的平均断裂强力间无显著差别
|
from selenium.webdriver.support.ui import Select
import pytest
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from src.base_test import BaseTest
class TestBooking(BaseTest):
    """Smoke test: the flight-search button is visible and clickable."""

    def test_search_flight(self):
        # NOTE(review): BaseTest.lunch_site looks like a typo of launch_site;
        # it is defined outside this file, so the call is left unchanged.
        explicit_wait = WebDriverWait(self.driver, 3)
        self.lunch_site()
        find_flights = (By.CSS_SELECTOR, "input[value='Find Flights']")
        button = explicit_wait.until(EC.visibility_of_element_located(find_flights))
        button.click()
|
#-------------------------------------------------------------------------------
# debugging variables and functions
#-------------------------------------------------------------------------------
# python libs
import sys
import math
# rasmus libs
from rasmus import util
from rasmus import stats
from rasmus import treelib
# globals
DEBUG = sys.stdout  # stream all debug output goes to (see setDebugStream)

# constants: debug verbosity levels, in increasing order of chattiness
DEBUG_NONE = 0
DEBUG_LOW = 1
DEBUG_MED = 2
DEBUG_HIGH = 3

DEBUG_LEVEL = DEBUG_NONE  # current verbosity; change via setDebug()
def setDebug(level=DEBUG_NONE):
    """Set the module-wide debug verbosity to *level* (default: off)."""
    global DEBUG_LEVEL
    DEBUG_LEVEL = level
def isDebug(level):
    """Return True when the current verbosity is at least *level*."""
    return level <= DEBUG_LEVEL
def debug(*text, **args):
    """Write the space-joined *text* items to the DEBUG stream.

    Keyword args: level (default DEBUG_NONE) — message is dropped unless the
    current DEBUG_LEVEL is at least this; nonl — if present, suppress the
    trailing newline.
    """
    threshold = args.setdefault("level", DEBUG_NONE)
    # check debug level
    if DEBUG_LEVEL < threshold:
        return
    message = " ".join(map(str, text))
    if "nonl" in args:
        DEBUG.write(message)
    else:
        DEBUG.write(message + "\n")
def setDebugStream(stream):
    """Redirect all subsequent debug() output to *stream*."""
    global DEBUG
    DEBUG = stream
def drawTreeLogl(tree, out=None, events={}, baserate=1.0):
    """Draw *tree* to *out* (default: the DEBUG stream) with per-node labels.

    Each label shows [name], branch length, the node's logl (or * when
    absent) and flag letters (E=extra, U=unfold), plus weighted rate
    mean/sdev when params are present, plus any event tag from *events*.
    NOTE(review): the mutable default events={} is shared across calls; it
    is only read here, so harmless, but worth confirming.
    """
    labels = {}

    if out == None:
        out = DEBUG

    # a tree-level baserate overrides the argument
    if "baserate" in tree.data:
        baserate = tree.data["baserate"]

    for node in tree.nodes.values():
        notes = ""
        if "extra" in node.data:
            notes += "E"
        if "unfold" in node.data:
            notes += "U"

        if "logl" in node.data:
            if isinstance(node.data["logl"], float):
                labels[node.name] = "[%s]\n%.3f (%.3f) %s" % \
                    (node.name, node.dist, node.data["logl"], notes)
                #logl += node.data["logl"]
            else:
                labels[node.name] = "[%s]\n%.3f (%s) %s" % \
                    (node.name, node.dist, str(node.data["logl"]), notes)
        else:
            labels[node.name] = "[%s]\n%.3f (*) %s" % \
                (node.name, node.dist, notes)

        if "params" in node.data:
            # rate mean/sdev weighted by the per-category fracs, rescaled by
            # the baserate
            try:
                fracs = map(stats.mean, zip(* node.data["fracs"]))
                mean = sum(util.vmul(util.cget(node.data["params"], 0), fracs))
                sdev = sum(util.vmul(util.cget(node.data["params"], 1), fracs))
                mean *= baserate
                sdev *= baserate
                labels[node.name] += "\n%.3f %.3f" % (mean, sdev)
            except:
                # NOTE(review): bare except + Python 2 print statement —
                # debugging fallback preserved as-is.
                print fracs, node.data['params']

        #if "error" in node.data:
        #    labels[node.name] += "\nerr %.4f" % node.data["error"]

        if node in events:
            labels[node.name] += " %s" % events[node]

    if "logl" in tree.data:
        debug("logl: %f" % tree.data["logl"])
        debug("eventlogl: %f" % tree.data["eventlogl"])
        debug("errorlogl: %f" % tree.data["errorlogl"])
    debug("baserate: %f" % baserate)
    debug("treelen: %f" % sum(x.dist for x in tree.nodes.values()))
    if "error" in tree.data:
        debug("error: %f" % tree.data["error"])

    treelib.drawTree(tree, minlen=20, maxlen=100, labels=labels, spacing=4,
                     labelOffset=-3, out=out)
class SindirError(Exception):
    """Package-level error carrying a human-readable message."""

    def __init__(self, msg):
        super(SindirError, self).__init__()
        self.msg = msg

    def __str__(self):
        return str(self.msg)
def printVisitedTrees(visited):
    """Debug-print the best trees seen so far.

    *visited* maps tree key -> (logl, tree, count).  Prints two tables of
    the top 80 entries: one sorted by log-likelihood (descending), one by
    error (ascending).  No-op when *visited* is empty.
    NOTE(review): Python 2 only — uses .values()[0] and .iteritems().
    """
    if len(visited) == 0:
        return
    nleaves = len(visited.values()[0][1].leaves())

    debug("\n\nmost likily trees out of %d visited (%.2e total): " % \
          (len(visited), numPossibleTrees(nleaves)))
    mat = [[key, logl,
            tree.data["error"],
            tree.data["baserate"],
            count]
           for key, (logl, tree, count) in visited.iteritems()]
    mat.sort(key=lambda x: x[1], reverse=True)

    util.printcols([["TREE", "LOGL", "ERROR", "BASERATE", "COUNT"]] +
                   mat[:80], spacing=4, out=DEBUG)
    debug()

    mat.sort(key=lambda x: x[2])
    util.printcols([["TREE", "LOGL", "ERROR", "BASERATE", "COUNT"]] +
                   mat[:80], spacing=4, out=DEBUG)
    debug()
def numPossibleTrees(nleaves):
    """Count binary tree topologies on *nleaves* leaves.

    Computed as (2n-3) * (3 * 5 * ... * (2n-5)), i.e. (2n-3) times the
    double factorial (2n-5)!!.  Returns a float because the product is
    accumulated in floating point.
    """
    odd_product = 1.0
    factor = 3
    while factor <= 2 * nleaves - 5:
        odd_product *= factor
        factor += 2
    return (2 * nleaves - 3) * odd_product
def log(x):
    """Safe logarithm function: ln(x), or -infinity for x <= 0."""
    return math.log(x) if x > 0 else -util.INF
|
import math
import numpy as np
import torch
from torchvision import datasets, transforms
from spikingjelly.datasets.cifar10_dvs import CIFAR10DVS
from spikingjelly.datasets.dvs128_gesture import DVS128Gesture
from spikingjelly.datasets.n_mnist import NMNIST
def data_generator(dataset, batch_size, dataroot, shuffle=True):
    """Build train/test DataLoaders for the named dataset.

    Args:
        dataset: one of 'CIFAR-10', 'MNIST-10', 'FMNIST', 'CIFAR-DVS',
            'DVS-Gesture'.
        batch_size: batch size for both loaders.
        dataroot: download/storage root for the torchvision datasets
            (ignored by the DVS datasets, which use hard-coded paths).
        shuffle: whether to shuffle the frame-based training loaders.

    Returns:
        (train_loader, test_loader, seq_length, input_channels, n_classes).

    Exits the process with status 1 on an unknown dataset name.
    """
    n_classes = 10
    seq_length = -1
    input_channels = -1
    if dataset == 'CIFAR-10':
        # ImageNet-style normalisation plus standard flip/crop augmentation
        # on the training split only.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ])
        transform_test=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
        # transform = transforms.Compose([
        #    transforms.ToTensor(),
        #    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        train_set = datasets.CIFAR10(root=dataroot, train=True,
                                     download=True, transform=transform)
        test_set = datasets.CIFAR10(root=dataroot, train=False,
                                    download=True, transform=transform_test)
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                                   shuffle=shuffle, num_workers=0)
        # NOTE(review): test loader is shuffled here (shuffle=True), unlike
        # every other branch — confirm this is intentional.
        test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size,
                                                  shuffle=True, num_workers=0)
        classes = ('plane', 'car', 'bird', 'cat',
                   'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
        n_classes = 10
        seq_length = 32*32
        input_channels = 3
    elif dataset == 'MNIST-10':
        train_set = datasets.MNIST(root=dataroot, train=True, download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor()
                                   ]))
        test_set = datasets.MNIST(root=dataroot, train=False, download=True,
                                  transform=transforms.Compose([
                                      transforms.ToTensor()
                                  ]))
        train_loader = torch.utils.data.DataLoader(train_set, shuffle=shuffle, batch_size=batch_size)
        test_loader = torch.utils.data.DataLoader(test_set, shuffle=False, batch_size=batch_size)
        n_classes = 10
        seq_length = 28*28
        input_channels = 1
    elif dataset == 'FMNIST':
        train_set = datasets.FashionMNIST(root=dataroot, train=True, download=True,
                                          transform=transforms.Compose([
                                              transforms.ToTensor()
                                          ]))
        test_set = datasets.FashionMNIST(root=dataroot, train=False, download=True,
                                         transform=transforms.Compose([
                                             transforms.ToTensor()
                                         ]))
        train_loader = torch.utils.data.DataLoader(train_set, shuffle=shuffle, batch_size=batch_size)
        test_loader = torch.utils.data.DataLoader(test_set, shuffle=False, batch_size=batch_size)
        n_classes = 10
        seq_length = 28*28
        input_channels = 1
    elif dataset == 'CIFAR-DVS':
        # Event-camera CIFAR10; frames are binned into T time steps.
        # NOTE(review): dataroot is ignored here — path is hard-coded.
        dataset_dir ='./data/cifar10_dvs/'
        split_by = 'number'
        T = 100
        normalization = None
        train_loader = torch.utils.data.DataLoader(
            dataset=CIFAR10DVS(dataset_dir, train=True, use_frame=True, frames_num=T,
                               split_by=split_by, normalization=normalization),
            batch_size=batch_size,
            shuffle=True,
            drop_last=True)
        test_loader = torch.utils.data.DataLoader(
            dataset=CIFAR10DVS(dataset_dir, train=False, use_frame=True, frames_num=T,
                               split_by=split_by, normalization=normalization),
            batch_size=int(batch_size),
            shuffle=False,
            drop_last=False)
        n_classes = 10
        seq_length = T#128*128
        input_channels = 2
    elif dataset == 'DVS-Gesture':
        # NOTE(review): dataroot is ignored here too — path is hard-coded.
        dataset_dir ='../IBM-dvs128/data/'
        split_by = 'number'
        T =20
        normalization = None
        train_loader = torch.utils.data.DataLoader(
            dataset=DVS128Gesture(dataset_dir, train=True, use_frame=True, frames_num=T,
                                  split_by=split_by, normalization=normalization),
            batch_size=batch_size,
            shuffle=True,
            drop_last=True)
        test_loader = torch.utils.data.DataLoader(
            dataset=DVS128Gesture(dataset_dir, train=False, use_frame=True, frames_num=T,
                                  split_by=split_by, normalization=normalization),
            batch_size=int(batch_size),
            shuffle=False,
            drop_last=False)
        n_classes = 11
        seq_length = T
        input_channels = 2
    else:
        print('Please provide a valid dataset name.')
        exit(1)
    return train_loader, test_loader, seq_length, input_channels, n_classes
def adding_problem_generator(N, seq_len=6, high=1, number_of_ones=2):
    """Generate one batch of the classic "adding problem" task.

    Returns a pair (X, Y) of float tensors: X has shape (N, seq_len, 2) with
    uniform values in channel 0 and a 0/1 marker mask in channel 1; Y has
    shape (N, 1) and holds the sum of the marked values.  Half of the
    markers (rounded down) fall in the first half of the sequence, the rest
    in the second half.
    """
    values = np.random.uniform(low=0, high=high, size=(N, seq_len, 1))
    marker_mask = np.zeros((N, seq_len, 1))
    targets = np.ones((N, 1))
    for sample in range(N):
        # Default uniform distribution on position sampling
        front = np.random.choice(np.arange(math.floor(seq_len / 2)),
                                 size=math.floor(number_of_ones / 2),
                                 replace=False)
        back = np.random.choice(np.arange(math.ceil(seq_len / 2), seq_len),
                                size=math.ceil(number_of_ones / 2),
                                replace=False)
        marked = np.concatenate([front, back])
        marker_mask[sample, marked] = 1
        targets[sample, 0] = np.sum(values[sample, marked])
    batch = np.append(values, marker_mask, axis=2)
    return torch.FloatTensor(batch), torch.FloatTensor(targets)
|
import itchat
import math
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import os
import shutil
# Get the current working directory and look for a "saveImg" folder: if it
# already exists, wipe and recreate it; otherwise create it fresh.
File_Path = os.getcwd()
result = os.listdir(File_Path)
if "saveImg" in result:
    print('Directory already exists')
    shutil.rmtree('./saveImg')
    print('delete success')
    os.makedirs('./saveImg')
else:
    print('Directory not exists')
    os.makedirs('./saveImg')

# Log in to WeChat (pops a QR code) and download every friend's avatar.
itchat.auto_login()
friends = itchat.get_friends(update=True)
user = friends[0]["UserName"]  # NOTE(review): unused — presumably leftover
num = 0
for i in friends:
    img = itchat.get_head_img(userName=i["UserName"])
    fileImage = open('./saveImg' + "/" + str(num) + ".jpg", 'wb')
    fileImage.write(img)
    fileImage.close()
    num += 1

# Tile all avatars into a single 640x640 mosaic, row by row.
ls = os.listdir('./saveImg')
each_size = int(math.sqrt(float(640 * 640) / len(ls)))
lines = int(640 / each_size)
image = Image.new('RGB', (640, 640))
x = 0
y = 0
# NOTE(review): range(0, len(ls) + 1) walks one index past the last saved
# avatar; the resulting IOError is caught and reported below — confirm the
# +1 is intentional.
for i in range(0, len(ls) + 1):
    try:
        img = Image.open('./saveImg' + "/" + str(i) + ".jpg")
        if img.mode != "RGB":
            img = img.convert("RGB")
    except IOError:
        print(i)
        print("Error")
    else:
        # NOTE(review): Image.ANTIALIAS is removed in Pillow 10; newer code
        # would use Image.LANCZOS.
        img = img.resize((each_size, each_size), Image.ANTIALIAS)
        image.paste(img, (x * each_size, y * each_size))
        x += 1
        if x == lines:
            x = 0
            y += 1
image.save('./saveImg/' + 'all.jpg')
# Send the finished mosaic to the WeChat "filehelper" account.
itchat.send_image('./saveImg/' + 'all.jpg', 'filehelper')
|
import datetime
import json
from decimal import Decimal, ROUND_DOWN
from functools import reduce
from typing import Optional
from marshmallow import post_dump
from sqlalchemy import func, or_, select, ForeignKey
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import column_property
from grant.comment.models import Comment
from grant.email.send import send_email
from grant.extensions import ma, db
from grant.milestone.models import Milestone
from grant.settings import PROPOSAL_STAKING_AMOUNT, PROPOSAL_TARGET_MAX
from grant.task.jobs import ContributionExpired
from grant.utils.enums import (
ProposalStatus,
ProposalStage,
ContributionStatus,
ProposalArbiterStatus,
MilestoneStage,
ProposalChange
)
from grant.utils.exceptions import ValidationException
from grant.utils.misc import dt_to_unix, make_url, make_admin_url, gen_random_id
from grant.utils.requests import blockchain_get
from grant.utils.stubs import anonymous_user
from grant.utils.validate import is_z_address_valid
# Association table: many-to-many between users and the proposals whose team
# they belong to.
proposal_team = db.Table(
    'proposal_team', db.Model.metadata,
    db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
    db.Column('proposal_id', db.Integer, db.ForeignKey('proposal.id'))
)
# Association table: users following a proposal.
proposal_follower = db.Table(
    "proposal_follower",
    db.Model.metadata,
    db.Column("user_id", db.Integer, db.ForeignKey("user.id")),
    db.Column("proposal_id", db.Integer, db.ForeignKey("proposal.id")),
)
# Association table: users who liked a proposal.
proposal_liker = db.Table(
    "proposal_liker",
    db.Model.metadata,
    db.Column("user_id", db.Integer, db.ForeignKey("user.id")),
    db.Column("proposal_id", db.Integer, db.ForeignKey("proposal.id")),
)
class ProposalTeamInvite(db.Model):
    """An invitation (by email address) to join a proposal's team."""
    __tablename__ = "proposal_team_invite"

    id = db.Column(db.Integer(), primary_key=True)
    date_created = db.Column(db.DateTime)
    proposal_id = db.Column(db.Integer, db.ForeignKey("proposal.id"), nullable=False)
    address = db.Column(db.String(255), nullable=False)  # invitee email address
    accepted = db.Column(db.Boolean)  # None = pending, True/False once answered

    def __init__(self, proposal_id: int, address: str, accepted: bool = None):
        self.proposal_id = proposal_id
        self.address = address[:255]  # truncated to fit the column
        self.accepted = accepted
        self.date_created = datetime.datetime.now()

    @staticmethod
    def get_pending_for_user(user):
        """Return all unanswered invites addressed to *user*'s email,
        matched case-insensitively.

        The `accepted == None` comparison is intentional SQLAlchemy syntax
        for IS NULL — do not change it to `is None`.
        """
        return ProposalTeamInvite.query.filter(
            ProposalTeamInvite.accepted == None,
            (func.lower(user.email_address) == func.lower(ProposalTeamInvite.address))
        ).all()
class ProposalUpdate(db.Model):
    """A progress update posted by a proposal's team."""
    __tablename__ = "proposal_update"

    id = db.Column(db.Integer(), primary_key=True)
    date_created = db.Column(db.DateTime)
    proposal_id = db.Column(db.Integer, db.ForeignKey("proposal.id"), nullable=False)
    title = db.Column(db.String(255), nullable=False)
    content = db.Column(db.Text, nullable=False)

    def __init__(self, proposal_id: int, title: str, content: str):
        # Random id rather than autoincrement, consistent with other models.
        self.id = gen_random_id(ProposalUpdate)
        self.proposal_id = proposal_id
        self.title = title[:255]  # truncated to fit the column
        self.content = content
        self.date_created = datetime.datetime.now()
class ProposalContribution(db.Model):
    """A (possibly anonymous) contribution of funds toward a proposal."""
    __tablename__ = "proposal_contribution"

    id = db.Column(db.Integer(), primary_key=True)
    date_created = db.Column(db.DateTime, nullable=False)
    proposal_id = db.Column(db.Integer, db.ForeignKey("proposal.id"), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=True)  # None = anonymous
    status = db.Column(db.String(255), nullable=False)  # a ContributionStatus value
    amount = db.Column(db.String(255), nullable=False)  # decimal amount stored as string
    tx_id = db.Column(db.String(255), nullable=True)
    refund_tx_id = db.Column(db.String(255), nullable=True)
    staking = db.Column(db.Boolean, nullable=False)  # True for proposal-stake payments
    # NOTE(review): Python-side default is False but server_default is 'true'
    # — rows inserted outside the ORM default to private; confirm intended.
    private = db.Column(db.Boolean, nullable=False, default=False, server_default='true')

    user = db.relationship("User")

    def __init__(
            self,
            proposal_id: int,
            amount: str,
            user_id: int = None,
            staking: bool = False,
            private: bool = True,
    ):
        self.proposal_id = proposal_id
        self.amount = amount
        self.user_id = user_id
        self.staking = staking
        self.private = private
        self.date_created = datetime.datetime.now()
        self.status = ContributionStatus.PENDING

    @staticmethod
    def get_existing_contribution(user_id: int, proposal_id: int, amount: str, private: bool = False):
        """Return a matching PENDING contribution for re-use, or None."""
        return ProposalContribution.query.filter_by(
            user_id=user_id,
            proposal_id=proposal_id,
            amount=amount,
            private=private,
            status=ContributionStatus.PENDING,
        ).first()

    @staticmethod
    def get_by_userid(user_id):
        """Return the user's non-deleted, non-staking contributions,
        newest first."""
        return ProposalContribution.query \
            .filter(ProposalContribution.user_id == user_id) \
            .filter(ProposalContribution.status != ContributionStatus.DELETED) \
            .filter(ProposalContribution.staking == False) \
            .order_by(ProposalContribution.date_created.desc()) \
            .all()

    @staticmethod
    def validate(contribution):
        """Validate a contribution's fields, raising ValidationException.

        NOTE(review): *contribution* is read with .get() like a dict, yet
        attributes are assigned on it below — a plain dict would raise
        AttributeError, so this presumably receives a dict-like object that
        also supports attribute assignment; confirm at the call sites.
        """
        proposal_id = contribution.get('proposal_id')
        user_id = contribution.get('user_id')
        status = contribution.get('status')
        amount = contribution.get('amount')
        tx_id = contribution.get('tx_id')

        # Proposal ID (must belong to an existing proposal)
        if proposal_id:
            proposal = Proposal.query.filter(Proposal.id == proposal_id).first()
            if not proposal:
                raise ValidationException('No proposal matching that ID')
            contribution.proposal_id = proposal_id
        else:
            raise ValidationException('Proposal ID is required')
        # User ID (must belong to an existing user)
        if user_id:
            from grant.user.models import User
            user = User.query.filter(User.id == user_id).first()
            if not user:
                raise ValidationException('No user matching that ID')
            contribution.user_id = user_id
        else:
            raise ValidationException('User ID is required')
        # Status (must be in list of statuses)
        if status:
            if not ContributionStatus.includes(status):
                raise ValidationException('Invalid status')
            contribution.status = status
        else:
            raise ValidationException('Status is required')
        # Amount (must be a Decimal parseable)
        if amount:
            try:
                contribution.amount = str(Decimal(amount))
            except:
                # NOTE(review): bare except — would also swallow unrelated
                # errors; decimal.InvalidOperation is the expected failure.
                raise ValidationException('Amount must be a number')
        else:
            raise ValidationException('Amount is required')

    def confirm(self, tx_id: str, amount: str):
        """Mark the contribution confirmed on-chain with its tx id/amount."""
        self.status = ContributionStatus.CONFIRMED
        self.tx_id = tx_id
        self.amount = amount

    @hybrid_property
    def refund_address(self):
        # Refunds go to the contributing user's configured address, if any.
        return self.user.settings.refund_address if self.user else None
class ProposalArbiter(db.Model):
    """The arbiter assignment (and nomination state) for a proposal."""
    __tablename__ = "proposal_arbiter"

    id = db.Column(db.Integer(), primary_key=True)
    proposal_id = db.Column(db.Integer, db.ForeignKey("proposal.id"), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=True)
    status = db.Column(db.String(255), nullable=False)  # a ProposalArbiterStatus value

    proposal = db.relationship("Proposal", lazy=True, back_populates="arbiter")
    user = db.relationship("User", uselist=False, lazy=True, back_populates="arbiter_proposals")

    def __init__(self, proposal_id: int, user_id: int = None, status: str = ProposalArbiterStatus.MISSING):
        self.id = gen_random_id(ProposalArbiter)
        self.proposal_id = proposal_id
        self.user_id = user_id
        self.status = status

    def accept_nomination(self, user_id: int):
        """Mark the nomination accepted and commit.

        Raises ValidationException when *user_id* is not the nominated user.
        """
        if self.user_id == user_id:
            self.status = ProposalArbiterStatus.ACCEPTED
            db.session.add(self)
            db.session.commit()
            return
        raise ValidationException('User not nominated for arbiter')

    def reject_nomination(self, user_id: int):
        """Clear the nomination (back to MISSING) and commit.

        Raises ValidationException when *user_id* is not the nominated user.
        """
        if self.user_id == user_id:
            self.status = ProposalArbiterStatus.MISSING
            self.user = None
            db.session.add(self)
            db.session.commit()
            return
        raise ValidationException('User is not arbiter')
class ProposalRevision(db.Model):
    """A recorded edit of a proposal, linking the live proposal to an
    archived snapshot plus a JSON summary of what changed."""
    __tablename__ = "proposal_revision"

    id = db.Column(db.Integer(), primary_key=True)
    date_created = db.Column(db.DateTime)

    # user who submitted the changes
    author_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False)
    author = db.relationship("User", uselist=False, lazy=True)

    # the proposal these changes are associated with
    proposal_id = db.Column(db.Integer, db.ForeignKey("proposal.id"), nullable=False)
    proposal = db.relationship("Proposal", foreign_keys=[proposal_id], back_populates="revisions")

    # the archived proposal id associated with these changes
    proposal_archive_id = db.Column(db.Integer, db.ForeignKey("proposal.id"), nullable=False)

    # the detected changes as a JSON string
    changes = db.Column(db.Text, nullable=False)

    # the placement of this revision in the total revisions
    revision_index = db.Column(db.Integer)

    def __init__(self, author, proposal_id: int, proposal_archive_id: int, changes: str, revision_index: int):
        self.id = gen_random_id(ProposalRevision)
        self.date_created = datetime.datetime.now()
        self.author = author
        self.proposal_id = proposal_id
        self.proposal_archive_id = proposal_archive_id
        self.changes = changes
        self.revision_index = revision_index

    @staticmethod
    def calculate_milestone_changes(old_milestones, new_milestones):
        """Diff two milestone lists into a list of change dicts.

        Each entry is {"type": ProposalChange.*, "milestone_index": i}.
        Milestones are compared positionally; indices beyond the shorter
        list are reported as additions or removals depending on which list
        is longer.
        """
        changes = []
        old_length = len(old_milestones)
        new_length = len(new_milestones)

        # determine the longer milestone collection so we can enumerate it
        long_ms = None
        short_ms = None
        if old_length >= new_length:
            long_ms = old_milestones
            short_ms = new_milestones
        else:
            long_ms = new_milestones
            short_ms = old_milestones

        # detect whether we're adding or removing milestones
        is_adding = False
        is_removing = False
        if old_length > new_length:
            is_removing = True
        if new_length > old_length:
            is_adding = True

        for i, ms in enumerate(long_ms):
            compare_ms = short_ms[i] if len(short_ms) - 1 >= i else None

            # when compare milestone doesn't exist, the current milestone is either being added or removed
            if not compare_ms:
                if is_adding:
                    changes.append({"type": ProposalChange.MILESTONE_ADD, "milestone_index": i})
                if is_removing:
                    changes.append({"type": ProposalChange.MILESTONE_REMOVE, "milestone_index": i})
                continue

            if ms.days_estimated != compare_ms.days_estimated:
                changes.append({"type": ProposalChange.MILESTONE_EDIT_DAYS, "milestone_index": i})
            if ms.immediate_payout != compare_ms.immediate_payout:
                changes.append({"type": ProposalChange.MILESTONE_EDIT_IMMEDIATE_PAYOUT, "milestone_index": i})
            if ms.payout_percent != compare_ms.payout_percent:
                changes.append({"type": ProposalChange.MILESTONE_EDIT_PERCENT, "milestone_index": i})
            if ms.content != compare_ms.content:
                changes.append({"type": ProposalChange.MILESTONE_EDIT_CONTENT, "milestone_index": i})
            if ms.title != compare_ms.title:
                changes.append({"type": ProposalChange.MILESTONE_EDIT_TITLE, "milestone_index": i})

        return changes

    @staticmethod
    def calculate_proposal_changes(old_proposal, new_proposal):
        """Diff two proposals (top-level fields plus milestones) into a
        combined list of change dicts."""
        proposal_changes = []

        if old_proposal.brief != new_proposal.brief:
            proposal_changes.append({"type": ProposalChange.PROPOSAL_EDIT_BRIEF})

        if old_proposal.content != new_proposal.content:
            proposal_changes.append({"type": ProposalChange.PROPOSAL_EDIT_CONTENT})

        if old_proposal.target != new_proposal.target:
            proposal_changes.append({"type": ProposalChange.PROPOSAL_EDIT_TARGET})

        if old_proposal.title != new_proposal.title:
            proposal_changes.append({"type": ProposalChange.PROPOSAL_EDIT_TITLE})

        milestone_changes = ProposalRevision.calculate_milestone_changes(old_proposal.milestones,
                                                                         new_proposal.milestones)

        return proposal_changes + milestone_changes
def default_proposal_content():
    """Return the markdown template used to pre-fill new proposals."""
    return """### If you have any doubts about the questions below, please reach out to anyone on the ZOMG on the [Zcash forums](https://forum.zcashcommunity.com/).

# Description of Problem or Opportunity

In addition to describing the problem/opportunity, please give a sense of how serious or urgent of a need you believe this to be. What evidence do you have? What validation have you already done, or how do you think you could validate this?

# Proposed Solution

Describe the solution at a high level. Please be specific about who the users and stakeholders are and how they would interact with your solution. E.g. retail ZEC holders, Zcash core devs, wallet devs, DeFi users, potential Zcash community participants.

# Solution Format

What is the exact form of the deliverable you’re creating? E.g. code shipped within the zcashd and zebra code bases, a website, a feature within a wallet, a text/markdown file, user manuals, etc.

# Technical approach

Dive into the _how_ of your project. Describe your approaches, components, workflows, methodology, etc. Bullet points and diagrams are appreciated!

# How big of a problem would it be to not solve this problem?

# Execution risks

What obstacles do you expect? What is most likely to go wrong? Which unknown factors or dependencies could jeopardize success? Who would have to incorporate your work in order for it to be usable?

# Unintended Consequences Downsides

What are the negative ramifications if your project is successful? Consider usability, stability, privacy, integrity, availability, decentralization, interoperability, maintainability, technical debt, requisite education, etc.

# Evaluation plan

What metrics for success can you share with the community once you’re done? In addition to quantitative metrics, what qualitative metrics do you think you could report?

# Schedule and Milestones

What is your timeline for the project? Include concrete milestones and the major tasks required to complete each milestone.

# Budget and Payout Timeline

How much funding do you need, and how will it be allocated (e.g., compensation for your effort, specific equipment, specific external services)? Please tie your payout timelines to the milestones presented in the previous step. Convention has been for applicants to base their budget on hours of work and an hourly rate, but we are open to proposals based on the value of outcomes instead.

# Applicant background

Summarize you and/or your team’s background and experience. Demonstrate that you have the skills and expertise necessary for the project that you’re proposing. Institutional bona fides are not required, but we want to hear about your track record.
"""
class Proposal(db.Model):
    """A funding proposal: content, payment info, and all related entities."""
    __tablename__ = "proposal"

    id = db.Column(db.Integer(), primary_key=True)
    date_created = db.Column(db.DateTime)
    rfp_id = db.Column(db.Integer(), db.ForeignKey('rfp.id'), nullable=True)
    version = db.Column(db.String(255), nullable=True)

    # Content info
    status = db.Column(db.String(255), nullable=False)  # a ProposalStatus value
    title = db.Column(db.String(255), nullable=False)
    brief = db.Column(db.String(255), nullable=False)
    stage = db.Column(db.String(255), nullable=False)  # a ProposalStage value
    # default= is evaluated once at class-definition time; fine here because
    # default_proposal_content() returns a constant template string.
    content = db.Column(db.Text, nullable=False, default=default_proposal_content())
    category = db.Column(db.String(255), nullable=True)
    date_approved = db.Column(db.DateTime)
    date_published = db.Column(db.DateTime)
    reject_reason = db.Column(db.String())
    kyc_approved = db.Column(db.Boolean(), nullable=True, default=False)
    funded_by_zomg = db.Column(db.Boolean(), nullable=True, default=False)
    accepted_with_funding = db.Column(db.Boolean(), nullable=True)
    changes_requested_discussion = db.Column(db.Boolean(), nullable=True)
    changes_requested_discussion_reason = db.Column(db.String(255), nullable=True)

    # Payment info
    target = db.Column(db.String(255), nullable=False)  # decimal string
    payout_address = db.Column(db.String(255), nullable=False)
    deadline_duration = db.Column(db.Integer(), nullable=True)  # seconds
    contribution_matching = db.Column(db.Float(), nullable=False, default=0, server_default=db.text("0"))
    contribution_bounty = db.Column(db.String(255), nullable=False, default='0', server_default=db.text("'0'"))
    rfp_opt_in = db.Column(db.Boolean(), nullable=True)
    # NOTE(review): column_property() is called here with no arguments, which
    # is not a complete mapping by itself — presumably configured/reassigned
    # elsewhere in the codebase; confirm.
    contributed = db.column_property()
    tip_jar_address = db.Column(db.String(255), nullable=True)
    tip_jar_view_key = db.Column(db.String(255), nullable=True)

    # Relations
    team = db.relationship("User", secondary=proposal_team)
    comments = db.relationship(Comment, backref="proposal", lazy=True, cascade="all, delete-orphan")
    updates = db.relationship(ProposalUpdate, backref="proposal", lazy=True, cascade="all, delete-orphan")
    contributions = db.relationship(ProposalContribution, backref="proposal", lazy=True, cascade="all, delete-orphan")
    milestones = db.relationship("Milestone", backref="proposal",
                                 order_by="asc(Milestone.index)", lazy=True, cascade="all, delete-orphan")
    invites = db.relationship(ProposalTeamInvite, backref="proposal", lazy=True, cascade="all, delete-orphan")
    arbiter = db.relationship(ProposalArbiter, uselist=False, back_populates="proposal", cascade="all, delete-orphan")
    followers = db.relationship(
        "User", secondary=proposal_follower, back_populates="followed_proposals"
    )
    # Follower count via a correlated scalar subquery.
    followers_count = column_property(
        select([func.count(proposal_follower.c.proposal_id)])
        .where(proposal_follower.c.proposal_id == id)
        .correlate_except(proposal_follower)
    )
    likes = db.relationship(
        "User", secondary=proposal_liker, back_populates="liked_proposals"
    )
    likes_count = column_property(
        select([func.count(proposal_liker.c.proposal_id)])
        .where(proposal_liker.c.proposal_id == id)
        .correlate_except(proposal_liker)
    )

    # Self-referential one-to-one: a live proposal may carry a draft copy.
    live_draft_parent_id = db.Column(db.Integer, ForeignKey('proposal.id'))
    live_draft = db.relationship("Proposal", uselist=False,
                                 backref=db.backref('live_draft_parent', remote_side=[id], uselist=False))
    revisions = db.relationship(ProposalRevision, foreign_keys=[ProposalRevision.proposal_id], lazy=True,
                                cascade="all, delete-orphan")
    def __init__(
            self,
            status: str = ProposalStatus.DRAFT,
            title: str = '',
            brief: str = '',
            content: str = default_proposal_content(),
            stage: str = ProposalStage.PREVIEW,
            target: str = '0',
            payout_address: str = '',
            deadline_duration: int = 5184000,  # 60 days
            category: str = ''
    ):
        """Create a new proposal (DRAFT/PREVIEW by default) with a random id.

        NOTE(review): the content default is evaluated once at function
        definition time; harmless because default_proposal_content() returns
        a constant immutable string.
        """
        self.id = gen_random_id(Proposal)
        self.date_created = datetime.datetime.now()
        self.status = status
        self.title = title
        self.brief = brief
        self.content = content
        self.category = category
        self.target = target
        self.payout_address = payout_address
        self.deadline_duration = deadline_duration
        self.stage = stage
        self.version = '2'
        self.funded_by_zomg = True
    @staticmethod
    def simple_validate(proposal):
        """Validate fields to be database save-able.

        Stricter validation is done in validate_publishable.  *proposal* is a
        dict-like of field values; raises ValidationException on a bad stage.
        """
        stage = proposal.get('stage')
        if stage and not ProposalStage.includes(stage):
            raise ValidationException("Proposal stage {} is not a valid stage".format(stage))
def validate_publishable_milestones(self):
payout_total = 0.0
for i, milestone in enumerate(self.milestones):
if milestone.immediate_payout and i != 0:
raise ValidationException("Only the first milestone can have an immediate payout")
if len(milestone.title) > 60:
raise ValidationException("Milestone title cannot be longer than 60 chars")
if len(milestone.content) > 200:
raise ValidationException("Milestone content cannot be longer than 200 chars")
try:
p = float(milestone.payout_percent)
if not p.is_integer():
raise ValidationException("Milestone payout percents must be whole numbers, no decimals")
if p <= 0 or p > 100:
raise ValidationException("Milestone payout percent must be greater than zero")
except ValueError:
raise ValidationException("Milestone payout percent must be a number")
payout_total += p
if payout_total != 100.0:
raise ValidationException("Payout percentages of milestones must add up to exactly 100%")
    def validate_publishable(self):
        """Full validation gate before a proposal may be published.

        Raises ValidationException describing the first problem found.
        """
        self.validate_publishable_milestones()

        # Require certain fields
        required_fields = ['title', 'content', 'brief', 'target', 'payout_address']
        for field in required_fields:
            if not hasattr(self, field):
                raise ValidationException("Proposal must have a {}".format(field))

        # Stricter limits on certain fields
        # NOTE(review): the limit here is 250,000 chars but update()
        # truncates content at 300,000 — confirm which limit is canonical.
        if len(self.title) > 60:
            raise ValidationException("Proposal title cannot be longer than 60 characters")
        if len(self.brief) > 140:
            raise ValidationException("Brief cannot be longer than 140 characters")
        if len(self.content) > 250000:
            raise ValidationException("Content cannot be longer than 250,000 characters")
        if Decimal(self.target) > PROPOSAL_TARGET_MAX:
            raise ValidationException("Target cannot be more than {} USD".format(PROPOSAL_TARGET_MAX))
        if Decimal(self.target) < 0:
            raise ValidationException("Target cannot be less than 0")
        if not self.target.isdigit():
            raise ValidationException("Target must be a whole number")
        if self.deadline_duration > 7776000:
            raise ValidationException("Deadline duration cannot be more than 90 days")

        # Validate payout address
        if not is_z_address_valid(self.payout_address):
            raise ValidationException("Payout address is not a valid z address")

        # Validate tip jar address
        if self.tip_jar_address and not is_z_address_valid(self.tip_jar_address):
            raise ValidationException("Tip address is not a valid z address")

        # Then run through regular validation
        Proposal.simple_validate(vars(self))
def validate_milestone_days(self):
for milestone in self.milestones:
if milestone.immediate_payout:
continue
try:
p = float(milestone.days_estimated)
if not p.is_integer():
raise ValidationException("Milestone days estimated must be whole numbers, no decimals")
if p <= 0:
raise ValidationException("Milestone days estimated must be greater than zero")
if p > 365:
raise ValidationException("Milestone days estimated must be less than 365")
except ValueError:
raise ValidationException("Milestone days estimated must be a number")
return
    @staticmethod
    def create(**kwargs):
        """Create a proposal plus its (empty) arbiter row in the session.

        Flushes but does not commit.  Raises ValidationException on invalid
        kwargs (via simple_validate).
        """
        Proposal.simple_validate(kwargs)
        proposal = Proposal(
            **kwargs
        )

        # arbiter needs proposal.id
        db.session.add(proposal)
        db.session.flush()

        arbiter = ProposalArbiter(proposal_id=proposal.id)
        db.session.add(arbiter)

        return proposal
@staticmethod
def get_by_user(user, statuses=[ProposalStatus.LIVE, ProposalStatus.DISCUSSION]):
status_filter = or_(Proposal.status == v for v in statuses)
return Proposal.query \
.join(proposal_team) \
.filter(proposal_team.c.user_id == user.id) \
.filter(status_filter) \
.all()
@staticmethod
def get_by_user_contribution(user):
    """Return proposals *user* contributed to, most recent contribution first."""
    return Proposal.query \
        .join(ProposalContribution) \
        .filter(ProposalContribution.user_id == user.id) \
        .order_by(ProposalContribution.date_created.desc()) \
        .all()
def update(
    self,
    title: str = '',
    brief: str = '',
    category: str = '',
    content: str = '',
    target: str = '0',
    payout_address: str = '',
    tip_jar_address: Optional[str] = None,
    deadline_duration: int = 5184000  # 60 days
):
    """Overwrite the proposal's editable fields, then re-run validation.

    Values are truncated to their column limits before assignment;
    ``simple_validate`` may raise ValidationException afterwards.
    """
    self.title = title[:255]
    self.brief = brief[:255]
    self.category = category
    self.content = content[:300000]
    # Empty target normalizes to '0' so later Decimal() parsing cannot fail.
    self.target = target[:255] if target != '' else '0'
    self.payout_address = payout_address[:255]
    self.tip_jar_address = tip_jar_address[:255] if tip_jar_address is not None else None
    self.deadline_duration = deadline_duration
    Proposal.simple_validate(vars(self))
def update_rfp_opt_in(self, opt_in: bool):
    """Set whether this proposal opts in to its linked RFP's funding terms."""
    self.rfp_opt_in = opt_in
def create_contribution(
    self,
    amount,
    user_id: int = None,
    staking: bool = False,
    private: bool = True,
):
    """Create, persist, and commit a contribution to this proposal.

    For user-attached contributions a ContributionExpired task is
    scheduled so unconfirmed contributions eventually expire.
    """
    contribution = ProposalContribution(
        proposal_id=self.id,
        amount=amount,
        user_id=user_id,
        staking=staking,
        private=private
    )
    db.session.add(contribution)
    # Flush so contribution.id exists for the expiry task.
    db.session.flush()
    if user_id:
        task = ContributionExpired(contribution)
        task.make_task()
    db.session.commit()
    return contribution
def get_staking_contribution(self, user_id: int):
    """Return (or lazily create) the pending staking contribution.

    Returns None when the proposal is already fully staked.
    """
    contribution = None
    remaining = PROPOSAL_STAKING_AMOUNT - Decimal(self.amount_staked)
    # check funding
    if remaining > 0:
        # find pending contribution for any user of remaining amount
        contribution = ProposalContribution.query.filter_by(
            proposal_id=self.id,
            status=ProposalStatus.PENDING,
            staking=True,
        ).first()
        if not contribution:
            contribution = self.create_contribution(
                user_id=user_id,
                amount=str(remaining.normalize()),
                staking=True,
            )
    return contribution
def send_admin_email(self, type: str):
    """Email every admin using email template *type*.

    NOTE(review): the parameter name shadows the ``type`` builtin; kept
    because callers pass it by keyword.
    """
    # Local import avoids a circular dependency with grant.user.models.
    from grant.user.models import User
    admins = User.get_admins()
    for a in admins:
        send_email(a.email_address, type, {
            'user': a,
            'proposal': self,
            'proposal_url': make_admin_url(f'/proposals/{self.id}'),
        })
# state: status (DRAFT || REJECTED) -> (PENDING)
def submit_for_approval(self):
    """Validate the proposal and move it from draft/rejected to PENDING."""
    self.validate_publishable()
    self.validate_milestone_days()
    allowed_statuses = [ProposalStatus.DRAFT, ProposalStatus.REJECTED]
    # specific validation
    if self.status not in allowed_statuses:
        raise ValidationException(f"Proposal status must be draft or rejected to submit for approval")
    self.set_pending()
def set_pending_when_ready(self):
    """Promote a STAKING proposal to PENDING once its stake is covered."""
    if self.status == ProposalStatus.STAKING and self.is_staked:
        self.set_pending()
# state: status STAKING -> PENDING
def set_pending(self):
    """Mark the proposal PENDING and notify admins it awaits approval."""
    self.send_admin_email('admin_approval')
    self.status = ProposalStatus.PENDING
    db.session.add(self)
    db.session.flush()
# approve a proposal moving from PENDING to DISCUSSION status
# state: status PENDING -> (DISCUSSION || REJECTED)
def approve_discussion(self, is_open_for_discussion, reject_reason=None):
    """Approve a PENDING proposal for public discussion, or reject it.

    Raises:
        ValidationException: if not PENDING, or rejected without a reason.
    """
    if not self.status == ProposalStatus.PENDING:
        raise ValidationException("Proposal must be pending to open for public discussion")
    if is_open_for_discussion:
        self.status = ProposalStatus.DISCUSSION
        # Let the whole team know the proposal is open for discussion.
        for t in self.team:
            send_email(t.email_address, 'proposal_approved_discussion', {
                'user': t,
                'proposal': self,
                'proposal_url': make_url(f'/proposals/{self.id}')
            })
    else:
        if not reject_reason:
            raise ValidationException("Please provide a reason for rejecting the proposal")
        self.status = ProposalStatus.REJECTED
        self.reject_reason = reject_reason
        for t in self.team:
            send_email(t.email_address, 'proposal_rejected', {
                'user': t,
                'proposal': self,
                'proposal_url': make_url(f'/proposals/{self.id}'),
                'admin_note': reject_reason
            })
# request changes for a proposal with a DISCUSSION status
def request_changes_discussion(self, reason):
    """Flag a DISCUSSION proposal as needing changes and email the team."""
    if self.status != ProposalStatus.DISCUSSION:
        raise ValidationException("Proposal does not have a DISCUSSION status")
    if not reason:
        raise ValidationException("Please provide a reason for requesting changes")
    self.changes_requested_discussion = True
    self.changes_requested_discussion_reason = reason
    for t in self.team:
        send_email(t.email_address, 'proposal_rejected_discussion', {
            'user': t,
            'proposal': self,
            'proposal_url': make_url(f'/proposals/{self.id}'),
            'admin_note': reason
        })
# mark a request changes as resolve for a proposal with a DISCUSSION status
def resolve_changes_discussion(self):
    """Clear the changes-requested flag on a DISCUSSION proposal."""
    if self.status != ProposalStatus.DISCUSSION:
        raise ValidationException("Proposal does not have a DISCUSSION status")
    if not self.changes_requested_discussion:
        raise ValidationException("Proposal does not have changes requested")
    self.changes_requested_discussion = False
    self.changes_requested_discussion_reason = None
# state: status DISCUSSION -> (LIVE)
def accept_proposal(self, with_funding):
    """Accept a DISCUSSION proposal, making it LIVE (optionally funded).

    Also sets date_published/stage because publish() is no longer
    triggered by the user in this flow.
    """
    self.validate_publishable()
    # specific validation
    if not self.status == ProposalStatus.DISCUSSION:
        raise ValidationException(f"Proposal must have a DISCUSSION status to approve or reject")
    self.status = ProposalStatus.LIVE
    self.date_approved = datetime.datetime.now()
    self.accepted_with_funding = with_funding
    # also update date_published and stage since publish() is no longer called by user
    self.date_published = datetime.datetime.now()
    self.stage = ProposalStage.WIP
    if with_funding:
        self.fully_fund_contibution_bounty()
    for t in self.team:
        if with_funding:
            admin_note = 'Congratulations! Your proposal has been accepted with funding from the Zcash Foundation.'
            send_email(t.email_address, 'proposal_approved', {
                'user': t,
                'proposal': self,
                'proposal_url': make_url(f'/proposals/{self.id}'),
                'admin_note': admin_note
            })
        else:
            admin_note = '''
            We've chosen to list your proposal on ZF Grants, but we won't be funding your proposal at this time.
            Your proposal can still receive funding from the community in the form of tips if you have set a tip address for your proposal.
            If you have not yet done so, you can do this from the actions dropdown at your proposal.
            '''
            send_email(t.email_address, 'proposal_approved_without_funding', {
                'user': t,
                'proposal': self,
                'proposal_url': make_url(f'/proposals/{self.id}'),
                'admin_note': admin_note
            })
def update_proposal_with_funding(self):
    """Retroactively mark the proposal as funded and fill its bounty."""
    self.accepted_with_funding = True
    self.fully_fund_contibution_bounty()
# state: status APPROVE -> LIVE, stage PREVIEW -> FUNDING_REQUIRED
def publish(self):
    """Publish an APPROVED proposal, making it LIVE and work-in-progress."""
    self.validate_publishable()
    # specific validation
    if not self.status == ProposalStatus.APPROVED:
        raise ValidationException(f"Proposal status must be approved")
    self.date_published = datetime.datetime.now()
    self.status = ProposalStatus.LIVE
    self.stage = ProposalStage.WIP
def set_contribution_bounty(self, bounty: str):
    """Set the foundation-matched bounty amount (a decimal string)."""
    # do not allow changes on funded/WIP proposals
    if self.is_funded:
        raise ValidationException("Cannot change contribution bounty on fully-funded proposal")
    # wrap in Decimal so it throws for non-decimal strings
    self.contribution_bounty = str(Decimal(bounty))
    db.session.add(self)
    db.session.flush()
# NOTE(review): name has a typo ("contibution") but is part of the public
# interface — renaming would break callers.
def fully_fund_contibution_bounty(self):
    """Set the contribution bounty to the full proposal target."""
    self.set_contribution_bounty(self.target)
def cancel(self):
    """Cancel a LIVE proposal and notify the team and contributors."""
    if self.status != ProposalStatus.LIVE:
        raise ValidationException("Cannot cancel a proposal until it's live")
    self.stage = ProposalStage.CANCELED
    db.session.add(self)
    db.session.flush()
    # Send emails to team & contributors
    for u in self.team:
        send_email(u.email_address, 'proposal_canceled', {
            'proposal': self,
            'support_url': make_url('/contact'),
        })
    for u in self.contributors:
        send_email(u.email_address, 'contribution_proposal_canceled', {
            'proposal': self,
            'refund_address': u.settings.refund_address,
            'account_settings_url': make_url('/profile/settings?tab=account')
        })
def follow(self, user, is_follow):
    """Add (is_follow=True) or remove *user* from this proposal's followers."""
    mutate = self.followers.append if is_follow else self.followers.remove
    mutate(user)
    db.session.flush()
def like(self, user, is_liked):
    """Record (is_liked=True) or withdraw *user*'s like on this proposal."""
    likes = self.likes
    (likes.append if is_liked else likes.remove)(user)
    db.session.flush()
def send_follower_email(self, type: str, email_args={}, url_suffix=""):
    """Email every follower with template *type*, merging in *email_args*.

    NOTE(review): ``email_args={}`` is a mutable default; it is only
    spread into the payload (never mutated), so it is safe but fragile.
    """
    for u in self.followers:
        send_email(
            u.email_address,
            type,
            {
                "user": u,
                "proposal": self,
                "proposal_url": make_url(f"/proposals/{self.id}{url_suffix}"),
                **email_args,
            },
        )
@hybrid_property
def contributed(self):
    """Total confirmed, non-staking contributions, as a decimal string."""
    contributions = ProposalContribution.query \
        .filter_by(proposal_id=self.id, status=ContributionStatus.CONFIRMED, staking=False) \
        .all()
    funded = reduce(lambda prev, c: prev + Decimal(c.amount), contributions, 0)
    return str(funded)
@hybrid_property
def amount_staked(self):
    """Total confirmed staking contributions, as a decimal string."""
    contributions = ProposalContribution.query \
        .filter_by(proposal_id=self.id, status=ContributionStatus.CONFIRMED, staking=True) \
        .all()
    amount = reduce(lambda prev, c: prev + Decimal(c.amount), contributions, 0)
    return str(amount)
@hybrid_property
def funded(self):
    """Effective funding (contributions x matching + bounty), capped at target.

    Returned as a decimal string rounded down to 3 places.
    """
    target = Decimal(self.target)
    # apply matching multiplier
    funded = Decimal(self.contributed) * Decimal(1 + self.contribution_matching)
    # apply bounty
    if self.contribution_bounty:
        funded = funded + Decimal(self.contribution_bounty)
    # if funded > target, just set as target
    if funded > target:
        return str(target.quantize(Decimal('.001'), rounding=ROUND_DOWN))
    return str(funded.quantize(Decimal('.001'), rounding=ROUND_DOWN))
@hybrid_property
def is_staked(self):
    # NOTE(review): hard-coded True — staking appears to be disabled;
    # callers (set_pending_when_ready, is_funded) still consult this flag.
    return True
@hybrid_property
def is_funded(self):
    """True when staked and effective funding has reached the target."""
    return self.is_staked and Decimal(self.funded) >= Decimal(self.target)
@hybrid_property
def is_failed(self):
    """True when a published LIVE proposal failed, was canceled, or missed its deadline."""
    if not self.status == ProposalStatus.LIVE or not self.date_published:
        return False
    if self.stage == ProposalStage.FAILED or self.stage == ProposalStage.CANCELED:
        return True
    deadline = self.date_published + datetime.timedelta(seconds=self.deadline_duration)
    passed = deadline < datetime.datetime.now()
    return passed and not self.is_funded
@hybrid_property
def current_milestone(self):
    """First unpaid milestone; the last one if all paid; None if none exist."""
    if self.milestones:
        for ms in self.milestones:
            if ms.stage != MilestoneStage.PAID:
                return ms
        return self.milestones[-1]  # return last one if all PAID
    return None
@hybrid_property
def contributors(self):
    """Unique users with confirmed contributions (deduplicated by user id)."""
    d = {c.user.id: c.user for c in self.contributions if c.user and c.status == ContributionStatus.CONFIRMED}
    return d.values()
@hybrid_property
def authed_follows(self):
    """True when the currently authenticated user follows this proposal."""
    # Local import avoids a circular dependency with grant.utils.auth.
    from grant.utils.auth import get_authed_user
    authed = get_authed_user()
    if not authed:
        return False
    res = (
        db.session.query(proposal_follower)
        .filter_by(user_id=authed.id, proposal_id=self.id)
        .count()
    )
    if res:
        return True
    return False
@hybrid_property
def authed_liked(self):
    """True when the currently authenticated user has liked this proposal."""
    # Local import avoids a circular dependency with grant.utils.auth.
    from grant.utils.auth import get_authed_user
    authed = get_authed_user()
    if not authed:
        return False
    res = (
        db.session.query(proposal_liker)
        .filter_by(user_id=authed.id, proposal_id=self.id)
        .count()
    )
    if res:
        return True
    return False
@hybrid_property
def get_tip_jar_view_key(self):
    """Expose the tip-jar view key only to members of the proposal team."""
    from grant.utils.auth import get_authed_user
    viewer = get_authed_user()
    # Non-team members (including anonymous viewers) get nothing.
    return self.tip_jar_view_key if viewer in self.team else None
# make a LIVE_DRAFT proposal by copying the relevant fields from an existing proposal
@staticmethod
def make_live_draft(proposal):
    """Clone *proposal* (fields, team, milestones) into a LIVE_DRAFT copy."""
    live_draft_proposal = Proposal.create(
        title=proposal.title,
        brief=proposal.brief,
        content=proposal.content,
        target=proposal.target,
        payout_address=proposal.payout_address,
        status=ProposalStatus.LIVE_DRAFT
    )
    # Fields not accepted by create() are copied onto the clone directly.
    live_draft_proposal.tip_jar_address = proposal.tip_jar_address
    live_draft_proposal.changes_requested_discussion_reason = proposal.changes_requested_discussion_reason
    live_draft_proposal.rfp_opt_in = proposal.rfp_opt_in
    live_draft_proposal.team = proposal.team
    db.session.add(live_draft_proposal)
    Milestone.clone(proposal, live_draft_proposal)
    return live_draft_proposal
# port changes made in LIVE_DRAFT proposal to self and delete the draft
def consume_live_draft(self, author):
    """Merge the live draft's edits into this proposal and archive the draft.

    Returns True when a new revision was recorded, False when only
    untracked properties (team, addresses, rfp_opt_in) changed.

    Raises:
        ValidationException: if not in DISCUSSION, or the draft has no changes.
    """
    if self.status != ProposalStatus.DISCUSSION:
        raise ValidationException("Proposal is not open for public review")
    live_draft = self.live_draft
    revision_changes = ProposalRevision.calculate_proposal_changes(self, live_draft)
    if len(revision_changes) == 0:
        if live_draft.rfp_opt_in == self.rfp_opt_in \
                and live_draft.payout_address == self.payout_address \
                and live_draft.tip_jar_address == self.tip_jar_address \
                and live_draft.team == self.team:
            raise ValidationException("Live draft does not appear to have any changes")
        else:
            # cover special cases where properties not tracked in revisions have changed:
            self.rfp_opt_in = live_draft.rfp_opt_in
            self.payout_address = live_draft.payout_address
            self.tip_jar_address = live_draft.tip_jar_address
            self.team = live_draft.team
            self.live_draft = None
            db.session.add(self)
            db.session.delete(live_draft)
            return False
    # if this is the first revision, create a base revision that's a snapshot of the original proposal
    if len(self.revisions) == 0:
        base_draft = self.make_live_draft(self)
        base_draft.status = ProposalStatus.ARCHIVED
        base_draft.invites = []
        db.session.add(base_draft)
        base_revision = ProposalRevision(
            author=author,
            proposal_id=self.id,
            proposal_archive_id=base_draft.id,
            changes=json.dumps([]),
            revision_index=0
        )
        self.revisions.append(base_revision)
    revision_index = len(self.revisions)
    revision = ProposalRevision(
        author=author,
        proposal_id=self.id,
        proposal_archive_id=live_draft.id,
        changes=json.dumps(revision_changes),
        revision_index=revision_index
    )
    # Copy every revision-tracked (and untracked) field from the draft.
    self.title = live_draft.title
    self.brief = live_draft.brief
    self.content = live_draft.content
    self.target = live_draft.target
    self.payout_address = live_draft.payout_address
    self.tip_jar_address = live_draft.tip_jar_address
    self.rfp_opt_in = live_draft.rfp_opt_in
    self.team = live_draft.team
    self.invites = []
    self.live_draft = None
    self.revisions.append(revision)
    db.session.add(self)
    # copy milestones
    Milestone.clone(live_draft, self)
    # archive live draft
    live_draft.status = ProposalStatus.ARCHIVED
    live_draft.invites = []
    db.session.add(live_draft)
    return True
class ProposalSchema(ma.Schema):
    """Full public serializer for Proposal objects."""

    class Meta:
        model = Proposal
        # Fields to expose
        fields = (
            "stage",
            "status",
            "date_created",
            "date_approved",
            "date_published",
            "reject_reason",
            "title",
            "brief",
            "proposal_id",
            "target",
            "contributed",
            "is_staked",
            "is_failed",
            "funded",
            "content",
            "updates",
            "milestones",
            "current_milestone",
            "team",
            "payout_address",
            "deadline_duration",
            "contribution_matching",
            "contribution_bounty",
            "invites",
            "rfp",
            "rfp_opt_in",
            "arbiter",
            "accepted_with_funding",
            "is_version_two",
            "authed_follows",
            "followers_count",
            "authed_liked",
            "likes_count",
            "tip_jar_address",
            "tip_jar_view_key",
            "changes_requested_discussion",
            "changes_requested_discussion_reason",
            "live_draft_id",
            "kyc_approved",
            "funded_by_zomg"
        )

    # Dates are serialized as unix timestamps; ids via explicit getters.
    date_created = ma.Method("get_date_created")
    date_approved = ma.Method("get_date_approved")
    date_published = ma.Method("get_date_published")
    proposal_id = ma.Method("get_proposal_id")
    is_version_two = ma.Method("get_is_version_two")
    tip_jar_view_key = ma.Method("get_tip_jar_view_key")
    live_draft_id = ma.Method("get_live_draft_id")
    funded_by_zomg = ma.Method("get_funded_by_zomg")
    updates = ma.Nested("ProposalUpdateSchema", many=True)
    team = ma.Nested("UserSchema", many=True)
    milestones = ma.Nested("MilestoneSchema", many=True)
    current_milestone = ma.Nested("MilestoneSchema")
    invites = ma.Nested("ProposalTeamInviteSchema", many=True)
    rfp = ma.Nested("RFPSchema", exclude=["accepted_proposals"])
    arbiter = ma.Nested("ProposalArbiterSchema", exclude=["proposal"])

    def get_funded_by_zomg(self, obj):
        # Coerce the nullable column to a strict boolean (None -> False).
        if obj.funded_by_zomg is None:
            return False
        elif obj.funded_by_zomg is False:
            return False
        else:
            return True

    def get_proposal_id(self, obj):
        return obj.id

    def get_date_created(self, obj):
        return dt_to_unix(obj.date_created)

    def get_date_approved(self, obj):
        return dt_to_unix(obj.date_approved) if obj.date_approved else None

    def get_date_published(self, obj):
        return dt_to_unix(obj.date_published) if obj.date_published else None

    def get_is_version_two(self, obj):
        return True if obj.version == '2' else False

    def get_tip_jar_view_key(self, obj):
        # Delegates to the model property, which is team-member gated.
        return obj.get_tip_jar_view_key

    def get_live_draft_id(self, obj):
        return obj.live_draft.id if obj.live_draft else None


# Shared serializer instances.
proposal_schema = ProposalSchema()
proposals_schema = ProposalSchema(many=True)

# Reduced field set used when listing a user's own proposals.
user_fields = [
    "proposal_id",
    "status",
    "title",
    "brief",
    "target",
    "is_staked",
    "funded",
    "contribution_matching",
    "date_created",
    "date_approved",
    "date_published",
    "reject_reason",
    "changes_requested_discussion_reason",
    "team",
    "accepted_with_funding",
    "is_version_two",
    "authed_follows",
    "authed_liked"
]
user_proposal_schema = ProposalSchema(only=user_fields)
user_proposals_schema = ProposalSchema(many=True, only=user_fields)
class ProposalUpdateSchema(ma.Schema):
    """Serializer for proposal progress updates."""

    class Meta:
        model = ProposalUpdate
        # Fields to expose
        fields = (
            "update_id",
            "date_created",
            "proposal_id",
            "title",
            "content"
        )

    date_created = ma.Method("get_date_created")
    proposal_id = ma.Method("get_proposal_id")
    update_id = ma.Method("get_update_id")

    def get_update_id(self, obj):
        return obj.id

    def get_proposal_id(self, obj):
        return obj.proposal_id

    def get_date_created(self, obj):
        # Unix timestamp for API consumers.
        return dt_to_unix(obj.date_created)


proposal_update_schema = ProposalUpdateSchema()
proposals_update_schema = ProposalUpdateSchema(many=True)
class ProposalRevisionSchema(ma.Schema):
    """Serializer for proposal revision history entries."""

    class Meta:
        model = ProposalRevision
        # Fields to expose
        fields = (
            "revision_id",
            "date_created",
            "author",
            "proposal_id",
            "proposal_archive_id",
            "changes",
            "revision_index"
        )

    revision_id = ma.Method("get_revision_id")
    date_created = ma.Method("get_date_created")
    changes = ma.Method("get_changes")
    author = ma.Nested("UserSchema")

    def get_revision_id(self, obj):
        return obj.id

    def get_date_created(self, obj):
        return dt_to_unix(obj.date_created)

    def get_changes(self, obj):
        # Changes are stored as a JSON string; decode for the API.
        return json.loads(obj.changes)


proposal_revision_schema = ProposalRevisionSchema()
proposals_revisions_schema = ProposalRevisionSchema(many=True)
class ProposalTeamInviteSchema(ma.Schema):
    """Serializer for team invites, without the proposal itself."""

    class Meta:
        model = ProposalTeamInvite
        fields = (
            "id",
            "date_created",
            "address",
            "accepted"
        )

    date_created = ma.Method("get_date_created")

    def get_date_created(self, obj):
        return dt_to_unix(obj.date_created)


proposal_team_invite_schema = ProposalTeamInviteSchema()
proposal_team_invites_schema = ProposalTeamInviteSchema(many=True)
class InviteWithProposalSchema(ma.Schema):
    """Team-invite serializer that also embeds the full proposal."""

    class Meta:
        model = ProposalTeamInvite
        fields = (
            "id",
            "date_created",
            "address",
            "accepted",
            "proposal"
        )

    date_created = ma.Method("get_date_created")
    proposal = ma.Nested("ProposalSchema")

    def get_date_created(self, obj):
        return dt_to_unix(obj.date_created)


invite_with_proposal_schema = InviteWithProposalSchema()
invites_with_proposal_schema = InviteWithProposalSchema(many=True)
class ProposalContributionSchema(ma.Schema):
    """Public serializer for contributions; anonymizes private donors."""

    class Meta:
        model = ProposalContribution
        # Fields to expose
        fields = (
            "id",
            "proposal",
            "user",
            "status",
            "tx_id",
            "amount",
            "date_created",
            "addresses",
            "is_anonymous",
            "private"
        )

    proposal = ma.Nested("ProposalSchema")
    user = ma.Nested("UserSchema", default=anonymous_user)
    date_created = ma.Method("get_date_created")
    addresses = ma.Method("get_addresses")
    is_anonymous = ma.Method("get_is_anonymous")

    def get_date_created(self, obj):
        return dt_to_unix(obj.date_created)

    def get_addresses(self, obj):
        # Omit 'memo' and 'sprout' for now
        # NOTE: Add back in 'sapling' when ready
        addresses = blockchain_get('/contribution/addresses', {'contributionId': obj.id})
        return {
            'transparent': addresses['transparent'],
        }

    def get_is_anonymous(self, obj):
        # Anonymous when unattributed or explicitly marked private.
        return not obj.user_id or obj.private

    @post_dump
    def stub_anonymous_user(self, data):
        # NOTE(review): `A and B or C` parses as `(A and B) or C`, so a
        # private contribution is anonymized even when 'user' was excluded
        # from the dump — confirm this is intended before changing.
        if 'user' in data and data['user'] is None or data['private']:
            data['user'] = anonymous_user
        return data


proposal_contribution_schema = ProposalContributionSchema()
proposal_contributions_schema = ProposalContributionSchema(many=True)
# Variants that drop expensive/redundant nested fields per context.
user_proposal_contribution_schema = ProposalContributionSchema(exclude=['user', 'addresses'])
user_proposal_contributions_schema = ProposalContributionSchema(many=True, exclude=['user', 'addresses'])
proposal_proposal_contribution_schema = ProposalContributionSchema(exclude=['proposal', 'addresses'])
proposal_proposal_contributions_schema = ProposalContributionSchema(many=True, exclude=['proposal', 'addresses'])
class AdminProposalContributionSchema(ma.Schema):
    """Admin-only contribution serializer; exposes refund/staking details."""

    class Meta:
        model = ProposalContribution
        # Fields to expose
        fields = (
            "id",
            "proposal",
            "user",
            "status",
            "tx_id",
            "amount",
            "date_created",
            "addresses",
            "refund_address",
            "refund_tx_id",
            "staking",
            "private",
        )

    proposal = ma.Nested("ProposalSchema")
    user = ma.Nested("UserSchema")
    date_created = ma.Method("get_date_created")
    addresses = ma.Method("get_addresses")

    def get_date_created(self, obj):
        return dt_to_unix(obj.date_created)

    def get_addresses(self, obj):
        # Admins see the full address payload from the blockchain service.
        return blockchain_get('/contribution/addresses', {'contributionId': obj.id})


admin_proposal_contribution_schema = AdminProposalContributionSchema()
admin_proposal_contributions_schema = AdminProposalContributionSchema(many=True)
class ProposalArbiterSchema(ma.Schema):
    """Serializer for a proposal's arbiter assignment."""

    class Meta:
        model = ProposalArbiter
        fields = (
            "id",
            "user",
            "proposal",
            "status"
        )

    user = ma.Nested("UserSchema")  # , exclude=['arbiter_proposals'] (if UserSchema ever includes it)
    # Exclude 'arbiter' to avoid infinite nesting with ProposalSchema.
    proposal = ma.Nested("ProposalSchema", exclude=['arbiter'])


user_proposal_arbiter_schema = ProposalArbiterSchema(exclude=['user'])
user_proposal_arbiters_schema = ProposalArbiterSchema(many=True, exclude=['user'])
|
from django import template
from badge_embed import WidgetStore
from django.template.base import Template
from django.template.context import Context
from django.utils.html import escape
register = template.Library()
def get_widget(name, **kwargs):
    """
    Fetches widget code from the WidgetStore, substitutes values received
    as kwargs into the widget template, and returns the rendered code.

    Raises:
        Exception: when *name* is not registered in WidgetStore.widgets.
    """
    widget = WidgetStore.widgets.get(name, None)
    if not widget:
        raise Exception('Widget "{}" not registered or improperly registered'.format(name))
    # Render the stored template source with kwargs as its context.
    final_code = Template(widget).render(Context(kwargs))
    return final_code
@register.simple_tag
def preivew_widget(name, **kwargs):
    # Template tag returning raw (unescaped) widget HTML for previewing.
    # NOTE(review): "preivew" is a typo, but renaming the tag would break
    # any template using {% preivew_widget %} — keep until templates migrate.
    return get_widget(name, **kwargs)
@register.simple_tag
def widget_code(name, **kwargs):
    # Template tag returning HTML-escaped widget source, for copy/paste embeds.
    return escape(get_widget(name, **kwargs))
|
import fetchurls
import sys
from urllib.parse import urlparse

# Two-level crawler: collect all links on the start page (argv[1]), then
# all links on each of those pages, de-duplicate, and append the result to
# a file (argv[2], default 'crawlerdata.txt').

if len(sys.argv) < 2:
    sys.exit(0)

print('fetching_base_url')
url = urlparse(sys.argv[1])
# Site root (scheme + host) used by fetchurls to resolve relative links.
# BUG FIX: removed the stray chained assignment `base=a=...` that bound an
# unused `a` later shadowed by the loop variable.
base = url.scheme + "://" + url.netloc

# First level: links on the start page itself.
i = fetchurls.fetching(base, sys.argv[1])
# Second level: links found on each first-level page.
j = []
for link in i:
    j = j + fetchurls.fetching(base, link)
print('fetching_done')

# De-duplicate the union of both levels (order is not preserved).
j = list(set(i + j))

out_path = 'crawlerdata.txt' if len(sys.argv) < 3 else sys.argv[2]
# Context manager guarantees the file is closed even if a write fails
# (the original left the handle open on error).
with open(out_path, 'a') as f:
    for k in j:
        f.write(str(k) + '\n')
|
import operator
from functools import reduce
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.urls import reverse
from django.db.models import Count, Q
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext as _
from django.views.generic import CreateView, ListView, UpdateView, View
from hitcount.views import HitCountDetailView
from qa.models import (Answer, AnswerComment, AnswerVote, Question,
QuestionComment, QuestionVote, UserQAProfile)
from taggit.models import Tag, TaggedItem
from .forms import QuestionForm
from .mixins import AuthorRequiredMixin, LoginRequired
from .utils import question_score
# Message support is optional: enabled only when django.contrib.messages is
# installed AND QA_SETTINGS['qa_messages'] is truthy.
# NOTE(review): a missing 'qa_messages' key raises KeyError, which is NOT
# caught here — only AttributeError (missing QA_SETTINGS entirely) is.
try:
    qa_messages = 'django.contrib.messages' in settings.INSTALLED_APPS and\
        settings.QA_SETTINGS['qa_messages']
except AttributeError:  # pragma: no cover
    qa_messages = False

if qa_messages:
    from django.contrib import messages

"""Dear maintainer:
Once you are done trying to 'optimize' this routine, and have realized what a
terrible mistake that was, please increment the following counter as a warning
to the next guy:
total_hours_wasted_here = 2
"""
class AnswerQuestionView(LoginRequired, View):
    """
    View to select an answer as the satisfying answer to the question,
    validating that only the user who created the question is allowed
    to make that change.
    """
    model = Answer

    def post(self, request, answer_id):
        answer = get_object_or_404(self.model, pk=answer_id)
        if answer.question.user != request.user:
            raise ValidationError(
                "Sorry, you're not allowed to close this question.")
        # Clear any previously accepted answer, then accept this one.
        answer.question.answer_set.update(answer=False)
        answer.answer = True
        answer.save()
        try:
            points = settings.QA_SETTINGS['reputation']['ACCEPT_ANSWER']
        except KeyError:
            points = 0
        # Reward the answer's author with the configured reputation points.
        qa_user = UserQAProfile.objects.get(user=answer.user)
        qa_user.modify_reputation(points)
        next_url = request.POST.get('next', '')
        # BUG FIX: `next_url is not ''` compared object identity with a
        # literal (undefined, SyntaxWarning on CPython 3.8+); use truthiness.
        if next_url:
            return redirect(next_url)
        return redirect(reverse('qa_index'))
class CloseQuestionView(LoginRequired, View):
    """View to
    mark the question as closed, validating that only the user who
    created the question is allowed to make that change.
    """
    model = Question

    def post(self, request, question_id):
        question = get_object_or_404(self.model, pk=question_id)
        if question.user != request.user:
            raise ValidationError(
                "Sorry, you're not allowed to close this question.")
        if question.closed:
            raise ValidationError("Sorry, this question is already closed")
        question.closed = True
        question.save()
        next_url = request.POST.get('next', '')
        # BUG FIX: `next_url is not ''` compared object identity with a
        # literal (undefined, SyntaxWarning on CPython 3.8+); use truthiness.
        if next_url:
            return redirect(next_url)
        return redirect(reverse('qa_index'))
class QuestionIndexView(ListView):
    """CBV to render the index view
    """
    model = Question
    paginate_by = 10
    context_object_name = 'questions'
    template_name = 'qa/index.html'
    ordering = '-pub_date'

    def get_context_data(self, *args, **kwargs):
        """Add unanswered/reward question lists, counts, and popular tags."""
        context = super(
            QuestionIndexView, self).get_context_data(*args, **kwargs)
        # Unanswered questions, newest first, with answer/comment counts.
        noans = Question.objects.order_by('-pub_date').filter(
            answer__isnull=True).select_related('user')\
            .annotate(num_answers=Count('answer', distinct=True),
                      num_question_comments=Count('questioncomment',
                                                  distinct=True))
        context['totalcount'] = Question.objects.count()
        context['anscount'] = Answer.objects.count()
        # Separate paginator for the unanswered tab ('noans_page' param).
        paginator = Paginator(noans, 10)
        page = self.request.GET.get('noans_page')
        context['active_tab'] = self.request.GET.get('active_tab', 'latest')
        tabs = ['latest', 'unans', 'reward']
        # Fall back to 'latest' for unknown tab names.
        context['active_tab'] = 'latest' if context['active_tab'] not in\
            tabs else context['active_tab']
        try:
            noans = paginator.page(page)
        except PageNotAnInteger:
            noans = paginator.page(1)
        except EmptyPage:  # pragma: no cover
            noans = paginator.page(paginator.num_pages)
        context['totalnoans'] = paginator.count
        context['noans'] = noans
        context['reward'] = Question.objects.order_by('-reward').filter(
            reward__gte=1)[:10]
        # Ten most recent tags actually attached to questions.
        question_contenttype = ContentType.objects.get_for_model(Question)
        items = TaggedItem.objects.filter(content_type=question_contenttype)
        context['tags'] = Tag.objects.filter(
            taggit_taggeditem_items__in=items).order_by('-id').distinct()[:10]
        return context

    def get_queryset(self):
        """Prefetch authors and annotate answer/comment counts."""
        queryset = super(QuestionIndexView, self).get_queryset()\
            .select_related('user')\
            .annotate(num_answers=Count('answer', distinct=True),
                      num_question_comments=Count('questioncomment',
                                                  distinct=True))
        return queryset
class QuestionsSearchView(QuestionIndexView):
    """
    Display a ListView page inherited from the QuestionIndexView filtered by
    the search query and sorted by the different elements aggregated.
    """

    def get_queryset(self):
        """Filter questions matching every search word in title OR description."""
        result = super(QuestionsSearchView, self).get_queryset()
        query = self.request.GET.get('word', '')
        if query:
            query_list = query.split()
            result = result.filter(
                reduce(operator.and_,
                       (Q(title__icontains=q) for q in query_list)) |
                reduce(operator.and_,
                       (Q(description__icontains=q) for q in query_list)))
        return result

    def get_context_data(self, *args, **kwargs):
        context = super(
            QuestionsSearchView, self).get_context_data(*args, **kwargs)
        # BUG FIX: `.count` placed the bound method in the context instead of
        # calling it; made consistent with QuestionIndexView's `.count()`.
        context['totalcount'] = Question.objects.count()
        context['anscount'] = Answer.objects.count()
        context['noans'] = Question.objects.order_by('-pub_date').filter(
            answer__isnull=True)[:10]
        context['reward'] = Question.objects.order_by('-reward').filter(
            reward__gte=1)[:10]
        return context
class QuestionsByTagView(ListView):
    """View to call all the questions classified under one specific tag.
    """
    model = Question
    paginate_by = 10
    context_object_name = 'questions'
    template_name = 'qa/index.html'

    def get_queryset(self, **kwargs):
        return Question.objects.filter(tags__slug=self.kwargs['tag'])

    def get_context_data(self, *args, **kwargs):
        context = super(
            QuestionsByTagView, self).get_context_data(*args, **kwargs)
        context['active_tab'] = self.request.GET.get('active_tab', 'latest')
        tabs = ['latest', 'unans', 'reward']
        context['active_tab'] = 'latest' if context['active_tab'] not in\
            tabs else context['active_tab']
        # BUG FIX: `.count` placed the bound method in the context instead of
        # calling it; made consistent with QuestionIndexView's `.count()`.
        context['totalcount'] = Question.objects.count()
        context['anscount'] = Answer.objects.count()
        context['noans'] = Question.objects.order_by('-pub_date').filter(
            tags__name__contains=self.kwargs['tag'], answer__isnull=True)[:10]
        context['reward'] = Question.objects.order_by('-reward').filter(
            tags__name__contains=self.kwargs['tag'],
            reward__gte=1)[:10]
        # NOTE(review): this counts the sliced list, so it is capped at 10.
        context['totalnoans'] = len(context['noans'])
        return context
class CreateQuestionView(LoginRequired, CreateView):
    """
    View to handle the creation of a new question
    """
    template_name = 'qa/create_question.html'
    message = _('Thank you! your question has been created.')
    form_class = QuestionForm

    def form_valid(self, form):
        """
        Create the required relation
        """
        # Attach the posting user before the form saves the instance.
        form.instance.user = self.request.user
        return super(CreateQuestionView, self).form_valid(form)

    def get_success_url(self):
        # Flash a success message only when messages support is enabled.
        if qa_messages:
            messages.success(self.request, self.message)
        return reverse('qa_index')
class UpdateQuestionView(LoginRequired, AuthorRequiredMixin, UpdateView):
    """
    Updates the question
    """
    template_name = 'qa/update_question.html'
    model = Question
    pk_url_kwarg = 'question_id'
    fields = ['title', 'description', 'tags']

    def get_success_url(self):
        # Redirect back to the edited question's detail page.
        question = self.get_object()
        return reverse('qa_detail', kwargs={'pk': question.pk})
class CreateAnswerView(LoginRequired, CreateView):
    """
    View to create new answers for a given question
    """
    template_name = 'qa/create_answer.html'
    model = Answer
    fields = ['answer_text']
    message = _('Thank you! your answer has been posted.')

    def form_valid(self, form):
        """
        Creates the required relationship between answer
        and user/question
        """
        form.instance.user = self.request.user
        # Question id comes from the URL, not the form.
        form.instance.question_id = self.kwargs['question_id']
        return super(CreateAnswerView, self).form_valid(form)

    def get_success_url(self):
        if qa_messages:
            messages.success(self.request, self.message)
        return reverse('qa_detail', kwargs={'pk': self.kwargs['question_id']})
class UpdateAnswerView(LoginRequired, AuthorRequiredMixin, UpdateView):
    """
    Updates the question answer
    """
    template_name = 'qa/update_answer.html'
    model = Answer
    pk_url_kwarg = 'answer_id'
    fields = ['answer_text']

    def get_success_url(self):
        # Redirect to the parent question's detail page.
        answer = self.get_object()
        return reverse('qa_detail', kwargs={'pk': answer.question.pk})
class CreateAnswerCommentView(LoginRequired, CreateView):
    """
    View to create new comments for a given answer
    """
    template_name = 'qa/create_comment.html'
    model = AnswerComment
    fields = ['comment_text']
    message = _('Thank you! your comment has been posted.')

    def form_valid(self, form):
        """
        Creates the required relationship between answer
        and user/comment
        """
        form.instance.user = self.request.user
        # Answer id comes from the URL, not the form.
        form.instance.answer_id = self.kwargs['answer_id']
        return super(CreateAnswerCommentView, self).form_valid(form)

    def get_success_url(self):
        if qa_messages:
            messages.success(self.request, self.message)
        # Resolve the answer's parent question to redirect to its page.
        question_pk = Answer.objects.get(
            id=self.kwargs['answer_id']).question.pk
        return reverse('qa_detail', kwargs={'pk': question_pk})
class CreateQuestionCommentView(LoginRequired, CreateView):
    """
    View to create new comments for a given question
    """
    template_name = 'qa/create_comment.html'
    model = QuestionComment
    fields = ['comment_text']
    message = _('Thank you! your comment has been posted.')

    def form_valid(self, form):
        """
        Creates the required relationship between question
        and user/comment
        """
        form.instance.user = self.request.user
        # Question id comes from the URL, not the form.
        form.instance.question_id = self.kwargs['question_id']
        return super(CreateQuestionCommentView, self).form_valid(form)

    def get_success_url(self):
        if qa_messages:
            messages.success(self.request, self.message)
        return reverse('qa_detail', kwargs={'pk': self.kwargs['question_id']})
class UpdateQuestionCommentView(LoginRequired,
                                AuthorRequiredMixin, UpdateView):
    """Allow the author of a question comment to edit it."""

    template_name = 'qa/create_comment.html'
    model = QuestionComment
    pk_url_kwarg = 'comment_id'
    fields = ['comment_text']

    def get_success_url(self):
        # Send the user back to the question the comment hangs off of.
        comment = self.get_object()
        return reverse('qa_detail',
                       kwargs={'pk': comment.question.pk})
class UpdateAnswerCommentView(UpdateQuestionCommentView):
    """Allow the author of an answer comment to edit it.

    Reuses the question-comment update view, swapping only the model and
    the way the parent question is resolved (via the comment's answer).
    """

    model = AnswerComment

    def get_success_url(self):
        comment = self.get_object()
        return reverse('qa_detail',
                       kwargs={'pk': comment.answer.question.pk})
class QuestionDetailView(HitCountDetailView):
    """
    View to call a question and to render all the details about that question.
    """
    model = Question
    template_name = 'qa/detail_question.html'
    context_object_name = 'question'
    slug_field = 'slug'
    # Hit counting can be disabled via QA_SETTINGS['count_hits'];
    # default to counting when the key is absent.
    try:
        count_hit = settings.QA_SETTINGS['count_hits']
    except KeyError:
        count_hit = True

    def get_context_data(self, **kwargs):
        """Add the question's answers and a slice of its comments to the context."""
        answers = self.object.answer_set.all().order_by('pub_date')
        context = super(QuestionDetailView, self).get_context_data(**kwargs)
        # NOTE(review): ordered ascending by pub_date, so this slice is the
        # five *oldest* comments despite the 'last_comments' name — confirm intent.
        context['last_comments'] = self.object.questioncomment_set.order_by(
            'pub_date')[:5]
        # Materialize answers with user/profile joins and a per-answer
        # comment count, avoiding per-row queries in the template.
        context['answers'] = list(answers.select_related(
            'user').select_related(
            'user__userqaprofile')
            .annotate(answercomment_count=Count('answercomment')))
        return context

    def get(self, request, **kwargs):
        """Redirect to the canonical slug URL when the requested slug is stale."""
        my_object = self.get_object()
        slug = kwargs.get('slug', '')
        if slug != my_object.slug:
            kwargs['slug'] = my_object.slug
            return redirect(reverse('qa_detail', kwargs=kwargs))
        else:
            return super(QuestionDetailView, self).get(request, **kwargs)

    def get_object(self):
        # Thin pass-through override; no extra logic beyond the parent lookup.
        question = super(QuestionDetailView, self).get_object()
        return question
class ParentVoteView(View):
    """Base class to create a vote for a given model (question/answer).

    Subclasses set ``model`` (Question or Answer) and ``vote_model``
    (the matching vote table).  POSTing creates, flips or withdraws the
    requesting user's vote and keeps the target's vote counters, the
    author's reputation points and the question reward in sync.
    """
    model = None
    vote_model = None

    def get_vote_kwargs(self, user, vote_target):
        """
        This takes the user and the vote and adjusts the kwargs
        depending on the used model.

        Raises ValidationError for any model other than Question/Answer.
        """
        object_kwargs = {'user': user}
        if self.model == Question:
            target_key = 'question'
        elif self.model == Answer:
            target_key = 'answer'
        else:
            raise ValidationError('Not a valid model for votes')
        object_kwargs[target_key] = vote_target
        return object_kwargs

    def post(self, request, object_id):
        """Register, flip or withdraw a vote on the target object."""
        vote_target = get_object_or_404(self.model, pk=object_id)
        if vote_target.user == request.user:
            raise ValidationError(
                'Sorry, voting for your own answer is not possible.')

        upvote = request.POST.get('upvote', None) is not None
        object_kwargs = self.get_vote_kwargs(request.user, vote_target)
        vote, created = self.vote_model.objects.get_or_create(
            defaults={'value': upvote},
            **object_kwargs)
        if created:
            # First vote by this user on this target.
            vote_target.user.userqaprofile.points += 1 if upvote else -1
            if upvote:
                vote_target.positive_votes += 1
            else:
                vote_target.negative_votes += 1
        elif vote.value == upvote:
            # Same vote cast again: treat it as an undo.
            vote.delete()
            vote_target.user.userqaprofile.points += -1 if upvote else 1
            if upvote:
                vote_target.positive_votes -= 1
            else:
                vote_target.negative_votes -= 1
        else:
            # Opposite vote: flip it (a 2-point swing for the author).
            vote_target.user.userqaprofile.points += 2 if upvote else -2
            vote.value = upvote
            vote.save()
            if upvote:
                vote_target.positive_votes += 1
                vote_target.negative_votes -= 1
            else:
                vote_target.negative_votes += 1
                vote_target.positive_votes -= 1

        vote_target.user.userqaprofile.save()
        if self.model == Question:
            vote_target.reward = question_score(vote_target)
        if self.model == Answer:
            vote_target.question.reward = question_score(
                vote_target.question)
            vote_target.question.save()
        vote_target.save()

        # BUG FIX: the original used ``next_url is not ''`` -- an identity
        # comparison against a string literal whose outcome depends on
        # interning (and a SyntaxWarning on modern CPython).  Use a plain
        # truthiness test for the intended "was a redirect target given?".
        next_url = request.POST.get('next', '')
        if next_url:
            return redirect(next_url)
        return redirect(reverse('qa_index'))
class AnswerVoteView(ParentVoteView):
    """
    Concrete vote view bound to Answer / AnswerVote.
    """
    model = Answer
    vote_model = AnswerVote
class QuestionVoteView(ParentVoteView):
    """
    Concrete vote view bound to Question / QuestionVote.
    """
    model = Question
    vote_model = QuestionVote
def profile(request, user_id):
    """Render the public QA profile page for the given user id."""
    account = get_user_model().objects.get(id=user_id)
    qa_profile = UserQAProfile.objects.get(user=account)
    # The template expects the profile object under the 'user' key.
    return render(request, 'qa/profile.html', {'user': qa_profile})
|
from django.db.models import *
from django.contrib.auth.models import User
# Course that students can enroll in.
class Course(Model):
    name = CharField('課程名稱', max_length=32)
    enroll_password = CharField('選課密碼', max_length=32)
    teacher = ForeignKey(User, on_delete=CASCADE)  # teacher who opened the course

    def __str__(self):
        # "<id>: <name>(<teacher first name>)"
        return f"{self.id}: {self.name}({self.teacher.first_name})"
# A student's enrollment in a course.
class Enroll(Model):
    stu = ForeignKey(User, on_delete=CASCADE)       # enrolled student
    course = ForeignKey(Course, on_delete=CASCADE)  # chosen course
    seat = IntegerField('座號', default=0)
    remark_score = IntegerField('心得成績', default=0)

    def __str__(self):
        # "<id>: <course name>-<seat>-<student first name>"
        return f"{self.id}: {self.course.name}-{self.seat}-{self.stu.first_name}"
# An assignment belonging to a course.
class Assignment(Model):
    title = CharField('作業名稱', max_length=255)
    desc = TextField('作業說明', null=True, default=None)
    course = ForeignKey(Course, on_delete=CASCADE, related_name='assignments')
    created = DateTimeField('建立時間', auto_now_add=True)

    def __str__(self):
        # "<id>:<course name>:<title>"
        return f"{self.id}:{self.course.name}:{self.title}"
import os
# Build a custom storage path for an uploaded assignment attachment.
def work_attach(instance, filename):
    # Keep only the uploaded file's extension and store the file under
    # assignment/<assignment id>/<username><ext>.
    extension = os.path.splitext(filename)[1]
    return f"assignment/{instance.assignment.id}/{instance.user.username}{extension}"
# A student's submitted work for an assignment.
class Work(Model):
    assignment = ForeignKey(Assignment, CASCADE, related_name='works')
    user = ForeignKey(User, CASCADE, related_name='works')
    memo = TextField('心得', default='')
    attachment = FileField('附件', upload_to=work_attach, null=True, blank=True)
    created = DateTimeField(auto_now_add=True)
    score = IntegerField('成績', default=0)

    def save(self, *args, **kwargs):
        """Delete the previously stored attachment when it is replaced.

        On an update (a row with this id already exists) compare the old
        attachment with the new one and remove the stale file from
        storage, so re-uploads do not leak orphaned files.
        """
        # BUG FIX: the original used a bare ``except:`` which silently
        # swallowed *every* error (including programming errors and
        # KeyboardInterrupt).  Only the "first save, no previous row"
        # case should be ignored.
        try:
            original = Work.objects.get(id=self.id)
        except Work.DoesNotExist:
            original = None
        if original is not None and original.attachment != self.attachment:
            original.attachment.delete(save=False)
        super().save(*args, **kwargs)

    def __str__(self):
        return "{}:({}){}-{}".format(
            self.id,
            self.assignment.course.name,
            self.assignment.title,
            self.user.first_name,
        )
from myapp.factories.celery import create_celery
from myapp.factories.application import create_application

# Module-level Celery object, built against the application produced by
# the project factory.  NOTE(review): create_application presumably
# returns the web app whose configuration the Celery instance reuses --
# confirm in myapp.factories.
celery = create_celery(create_application())
|
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
def user_email_confirmation(recipient_email, email_subject, email_body, notify_admins=False):
    """Send a confirmation email, optionally BCC-ing all staff users.

    ``recipient_email`` may be a single address (str) or a list of
    addresses.  Nothing is sent when it is empty/falsy.
    """
    admin_users_email = []
    if notify_admins:
        admin_users_email = [u.email for u in User.objects.filter(is_staff=True)]
    if recipient_email:
        email_object = EmailMessage()
        email_object.subject = email_subject
        email_object.body = email_body
        email_object.from_email = "info@jvbwellness.com"
        # BUG FIX: EmailMessage.to must be a list/tuple of addresses.
        # Assigning a bare string bypasses the constructor's type check
        # and the string is then iterated character by character when the
        # message is sent.  Wrap single addresses in a list.
        if isinstance(recipient_email, str):
            email_object.to = [recipient_email]
        else:
            email_object.to = recipient_email
        email_object.bcc = admin_users_email
        email_object.send()
__author__ = 'chenglp'
class Json2HTML(object):
    """Convert parsed JSON (nested dicts/lists) into colorized, indented HTML.

    Braces, keys and values are wrapped in <span style=...> tags and each
    line ends with ``</br>`` (kept byte-for-byte for output compatibility,
    although ``<br/>`` would be the valid form).
    """

    def __init__(self):
        # BUG FIX: state and configuration used to live on the *class*,
        # so set_conf() on one instance mutated the shared dicts and the
        # render state (depth/html_data) leaked between all instances.
        # Everything is now per-instance.
        self.key_value_conf = {}  # reserved, currently unused
        self.type_config = {}     # optional per-type overrides
        self.depth = 0            # current nesting level
        self.html_data = ""       # accumulated HTML output
        self.indent = 4           # spaces per nesting level
        self.color_config = {
            '{': 'color: #ff0000',
            '[': 'color: #ff0000',
            ':': 'color: #ff00ff',
            'key': 'color: #0000ff',
            'value': 'color: #000088',
        }

    def set_conf(self, type_config=None, indent=4, color_config=None, **kwargs):
        """Override the indent width and merge color/type overrides."""
        self.indent = indent
        if type_config:
            self.type_config.update(type_config)
        if color_config:
            self.color_config.update(color_config)

    def __add_indent(self):
        # One indentation step per nesting level.
        self.html_data += ' ' * (self.indent * self.depth)

    def __dict_start(self):
        self.__add_indent()
        self.html_data += "<span style='%s'>{</span></br>" % self.color_config['{']
        self.depth += 1

    def __dict_end(self):
        self.depth -= 1
        self.__add_indent()
        self.html_data += "<span style='%s'>}</span>,</br>" % self.color_config['{']

    def __list_start(self):
        self.__add_indent()
        self.html_data += "<span style='%s'>[</span></br>" % self.color_config['[']
        self.depth += 1

    def __list_end(self):
        self.depth -= 1
        self.__add_indent()
        self.html_data += "<span style='%s'>]</span>,</br>" % self.color_config['[']

    def __add_key(self, key):
        self.__add_indent()
        self.html_data += "<span style='%s'>%s</span>:" % (
            self.color_config['key'], self.add_type_tip(key))

    def add_type_tip(self, value):
        """Return a display form of *value* (strings quoted, bools as 1/0)."""
        if isinstance(value, str):
            return '"%s"' % value
        if isinstance(value, bool):
            # Kept from the original: truthy -> "1" (str), falsy -> 0 (int);
            # both render identically through %s.
            return "1" if value else 0
        return value

    def __add_value(self, value):
        self.html_data += " "
        self.html_data += "<span style='%s'>%s</span>,</br>" % (
            self.color_config['value'], self.add_type_tip(value))

    def __add_list_item(self, value):
        self.__add_indent()
        self.html_data += "<span style='%s'>%s</span>,</br>" % (
            self.color_config['value'], self.add_type_tip(value))

    def __play_list(self, data):
        self.__list_start()
        for item in data:
            if isinstance(item, dict):
                self.html_data += '</br>'
                self.__play_dict(item)
            elif isinstance(item, list):
                self.html_data += '</br>'
                self.__play_list(item)
            else:
                self.__add_list_item(item)
        self.__list_end()

    def __play_dict(self, data):
        self.__dict_start()
        for key in data.keys():
            self.__add_key(key)
            value = data[key]
            if isinstance(value, dict):
                self.html_data += '</br>'
                self.__play_dict(value)
            elif isinstance(value, list):
                self.html_data += '</br>'
                self.__play_list(value)
            else:
                self.__add_value(value)
        self.__dict_end()

    def play(self, data):
        """Render *data* (dict or list) to HTML; any other type yields ''."""
        self.html_data = ""
        self.depth = 0  # robustness: recover if a prior render raised mid-way
        if isinstance(data, dict):
            self.__play_dict(data)
        if isinstance(data, list):
            self.__play_list(data)
        return self.html_data
|
from tools.primes import primes_under

# Project-Euler-49-style search: find 4-digit primes that are digit
# permutations of one another and form an arithmetic progression.
thousands_prime = primes_under(10000)
# NOTE(review): the boolean-mask indexing below implies primes_under
# returns a NumPy array -- confirm in tools.primes.
thousands_prime = thousands_prime[thousands_prime > 999]
# ^replace this with your sieve and then eliminate every prime under 1000
prime_digits = {}  # prime -> set of its distinct digit characters
# Primes bucketed by how many distinct digits they contain.
# NOTE(review): the len==2 bucket stays empty because only primes with
# more than 2 distinct digits are stored below.
primes = [[], [], [], [], []]
for i in thousands_prime:
    a = set([d for d in str(i)])
    if len(a) > 2:
        prime_digits[i] = a
        primes[len(a)].append(i)
print("done with hashing")
groups = []
# Collect ordered triples (n1 < n2 < n3 by position) sharing one digit set.
for ndigit in [2, 3, 4]:
    pr = primes[ndigit]
    for a, n1 in enumerate(pr):
        for b, n2 in enumerate(pr[a+1:]):
            # pr[a+b+2:] starts just after n2's absolute position.
            for n3 in pr[a+b+2:]:
                if prime_digits[n1] == prime_digits[n2] and prime_digits[n2] == prime_digits[n3]:
                    groups.append([n1, n2, n3])
    print("done with digits:", ndigit)
# Report the triples that form an arithmetic progression.
for group in groups:
    group = sorted(group)
    if group[1] - group[0] == group[2] - group[1]:
        print(group)
|
from math import sin, cos, tan, radians

print('===== EXERCICIO 018 =====')
print('- Faça um programa que leia um ângulo qualquer e mostre na tela o valor do seno, cosseno e tangente desse ângulo -')

# Read the angle once and convert it to radians a single time.
angulo = int(input('Qual o ângulo? '))
rad = radians(angulo)
seno = sin(rad)
cosseno = cos(rad)
tangente = tan(rad)
print(f"O angulo de {angulo} tem o SENO de {seno:.2f}, COSSENO de {cosseno:.2f} e TANGENTE de {tangente:.2f}")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.