# "text stringlengths 8 6.05M" / "|---|" — dataset-export table-header artifact,
# commented out so it cannot be mistaken for code.
## DAVID ROTHBLATT
## SOFT DEV
## Mr Zamansky
## Fall 2015
from flask import Flask, render_template, request, session, redirect, url_for
from random import randrange
import auth
app = Flask(__name__)
@app.route("/")
@app.route("/home")
def home():
    """Root and /home URLs simply forward to the about page."""
    return redirect(url_for("about"))
@app.route("/about")
@app.route("/about/")
def about():
    """Render the static about page."""
    return render_template("about.html")
@app.route("/secret")
@app.route("/secret/")
def secret():
    """Show the secret page to logged-in users; everyone else sees the login page with an error."""
    if 'username' not in session:
        return render_template("login.html", err="You are not logged in!")
    return render_template("secret.html")
@app.route("/login", methods=["GET","POST"])
@app.route("/login/", methods=["GET","POST"])
def login():
    """GET: show the login form. POST: check credentials via auth and start a session."""
    if request.method == "GET":
        return render_template("login.html")
    else:
        uname = request.form['username']
        pword = request.form['password']
        button = request.form['button']
        # the cancel button just redisplays an empty login form
        if button=="cancel":
            return render_template('login.html')
        if auth.authenticate(uname, pword):
            # only set the session key on first login
            if 'username' not in session:
                session['username'] = uname
            return redirect(url_for("secret"))
        else:
            err = "INVALID USERNAME OR PASSWORD!!"
            return render_template("login.html", err = err)
@app.route("/logout")
@app.route("/logout/")
def logout():
    """Clear the login session and return to the about page.

    BUG FIX: the original `del session['username']` raised KeyError (HTTP 500)
    when /logout was visited without being logged in; `pop` with a default is
    a no-op in that case.
    """
    session.pop('username', None)
    return redirect(url_for("about"))
if __name__ == "__main__":
    # NOTE(review): debug=True and a hard-coded secret_key are acceptable for a
    # class project but must not be used in production.
    app.debug = True
    app.secret_key = "cronut"
    app.run(host = '0.0.0.0', port = 8000)
|
import pandas as pd
import numpy as np
import sys
# command-line paths: train features, train labels, test features, output file
# NOTE(review): indices start at sys.argv[3] — presumably argv[1:3] are consumed
# by a launcher script; confirm against the invocation.
trainxcsv = sys.argv[3]
trainycsv = sys.argv[4]
testcsv = sys.argv[5]
anscsv = sys.argv[6]
train_X = pd.read_csv(trainxcsv)
train_Y = pd.read_csv(trainycsv,header = None)
test_X = pd.read_csv(testcsv)
# cap the census sampling-weight column to limit the influence of outliers
train_X['fnlwgt'] = train_X['fnlwgt'].clip(0,800000)
test_X['fnlwgt'] = test_X['fnlwgt'].clip(0,800000)
def stander(x):
    """Standardize every column of DataFrame ``x`` in place to zero mean / unit std.

    Columns with zero standard deviation are left untouched. Returns ``x``.
    """
    for col in x.columns:
        mu = x[col].mean()
        sigma = x[col].std()
        if sigma != 0:
            x[col] = x[col].map(lambda v: (v - mu) / sigma)
    return x
def pro(x):
    """Binarize a capital gain/loss value: 1 for strictly positive, else 0."""
    return 1 if x > 0 else 0
def normalval(x, mu, cov):
    """Evaluate the multivariate normal density N(mu, cov) at point ``x``.

    :param x: pandas Series, point to evaluate at
    :param mu: pandas Series, distribution mean
    :param cov: pandas DataFrame, covariance matrix
    :return: scalar density value
    """
    # pinv tolerates (near-)singular covariance matrices
    cov_inv = pd.DataFrame(np.linalg.pinv(cov.values), cov.columns)
    # normalization constant sqrt((2*pi)^k * |det(cov)|)
    vl = np.sqrt(((2*np.pi) ** x.shape[0]) * abs(np.linalg.det(cov)))
    vr1 = np.dot((x - mu), cov_inv)
    vr2 = (x - mu).to_numpy()
    vr = np.exp(-0.5*np.dot(vr1, vr2))
    # BUG FIX: the Gaussian density DIVIDES the exponential by the
    # normalization constant; the original multiplied (vr*vl).
    value = vr/vl
    return value
def pgm(x, y):
    """Fit a two-class Gaussian generative model with a shared (pooled) covariance.

    Mutates ``x`` by attaching the labels as a 'pred' column, then returns the
    linear decision boundary (w, b) such that sigmoid(w.x + b) = P(class 0 | x).

    CLEANUP: removed dead locals pc0/pc1/std0/std1 that were computed but never
    used by the original implementation.

    :param x: feature DataFrame (one row per sample); modified in place
    :param y: 0/1 labels aligned with the rows of ``x``
    :return: tuple (w, b) — weight vector and scalar bias
    """
    x['pred'] = y.values
    # class-conditional means (label column removed again)
    mu0 = x[x['pred'] == 0].mean()
    mu0 = mu0.drop(['pred'])
    mu1 = x[x['pred'] == 1].mean()
    mu1 = mu1.drop(['pred'])
    # per-class covariances with the label row/column dropped
    cov0 = x[x['pred'] == 0].cov().drop(['pred'],axis=1).drop(['pred'])
    cov1 = x[x['pred'] == 1].cov().drop(['pred'],axis=1).drop(['pred'])
    # pooled covariance, weighted by class sizes
    cov = (cov0*x[x['pred']==0].shape[0] + cov1*x[x['pred']==1].shape[0])/(x.shape[0])
    # pinv tolerates singular pooled covariance
    cov_inv = np.linalg.pinv(cov.values)
    w = np.dot(mu0-mu1, cov_inv)
    b = -0.5*np.dot(np.dot(mu0, cov_inv), mu0) +0.5*np.dot(np.dot(mu1, cov_inv), mu1) + np.log(x[x['pred']==0].shape[0]/x[x['pred']==1].shape[0])
    return w,b
# combine train and test so both share the same standardization statistics
conx = pd.concat((train_X, test_X))
# binarize capital gain/loss instead of standardizing raw dollar amounts
conx['capital_gain'] = conx['capital_gain'].map(pro)
conx['capital_loss'] = conx['capital_loss'].map(pro)
conx = stander(conx)
# split back into the original train/test partitions
train_X = conx.iloc[0:train_X.shape[0],:]
test_X = conx.iloc[train_X.shape[0]::,:]
w,b = pgm(train_X,train_Y)
predict = pd.DataFrame()
ids = []
values = []
# linear discriminant score; the sigmoid of it is P(class 0 | x)
z = np.dot(test_X, w) + b
pc0x = 1/(1 + np.exp(-z))
for i in range(len(pc0x)):
    ids.append(i+1)
    # probability of class 0 above 0.5 -> predict label 0, else label 1
    if pc0x[i] > 0.5:
        values.append(0)
    else:
        values.append(1)
predict['id'] = ids
predict['label'] = values
predict.to_csv(anscsv,index=False)
#!/usr/bin/python2.7
import dbus
import gobject
from dbus.mainloop.glib import DBusGMainLoop
import subprocess
# MAC address of the bluetooth device
DEV_MAC = '00:1A:7D:DA:71:13'
# attach the GLib main loop so D-Bus signal callbacks can fire
dbus_loop = DBusGMainLoop()
bus = dbus.SystemBus(mainloop=dbus_loop)
man = bus.get_object('org.bluez', '/')
print man
iface = dbus.Interface(man, 'org.bluez.Manager')
adapterPath = iface.DefaultAdapter()
# D-Bus proxy for the headset device object
# NOTE(review): BlueZ object paths use '_' between MAC octets, but DEV_MAC
# still contains ':' here — confirm this path actually resolves.
outdevice = bus.get_object('org.bluez', adapterPath + "/dev_" + DEV_MAC)
def cb(iface=None, mbr=None, path=None):
    # react only to Headset signals coming from our device's object path
    if ("org.bluez.Headset" == iface and path.find(DEV_MAC) > -1):
        print 'iface: %s' % iface
        print 'mbr: %s' % mbr
        print 'path: %s' % path
        print "\n"
        print "matched"
        if mbr == "Connected":
            print "Connected :)"
        elif mbr == "Disconnected":
            print "Disconnected :("
# BUG FIX: the callback defined above is named `cb`; the original passed the
# undefined name `cd`, which raised NameError at registration time.
outdevice.connect_to_signal("Connected", cb, interface_keyword="iface", member_keyword="mbr", path_keyword="path")
outdevice.connect_to_signal("Disconnected", cb, interface_keyword="iface", member_keyword="mbr", path_keyword="path")
# hand control to the GLib main loop; callbacks run from here on
loop = gobject.MainLoop()
loop.run()
|
from django.contrib import admin
from .models import *
# Register your models here.
# Expose all SRS-app models in the Django admin (classes come from `.models import *`).
# NOTE(review): `university` and `course` are lowercase — presumably the model
# classes really are named this way; confirm against models.py.
admin.site.register(university)
admin.site.register(Student)
admin.site.register(Faculty)
admin.site.register(course)
admin.site.register(Attendance)
admin.site.register(SRS)
admin.site.register(SRS_Question)
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
# URL routes for the pictures app; numeric capture groups (\d+) are passed as
# positional ids to the views.
urlpatterns=[
    url(r'^$',views.pics,name='pics'),
    url(r'^single_pic/(\d+)',views.single_pic,name='single_pic'),
    # url(r'^single_car/<art_id>', views.single_car, name='single-car'),
    url(r'^search/',views.search_results,name = 'search_results'),
    url(r'^editprofile/$',views.edit_profile,name='editprofile'),
    url(r'^location/(\d+)',views.viewPics_by_location,name='locationpic'),
    url(r'^category/(\d+)',views.viewPics_by_category, name = 'categorypic'),
    url(r'^newpost/$',views.new_post,name='newpost'),
    url(r'^logout/$',views.logout_request,name="logout"),
    url(r'^accounts/profile/$',views.profile,name='profile'),
]
# serve uploaded media through Django only during development
if settings.DEBUG:
    urlpatterns+=static(settings.MEDIA_URL,document_root = settings.MEDIA_ROOT)
|
from __future__ import annotations
import sys
import threading
import time
from typing import Union, Optional, Dict
import serial
import _thread
import queue
import RPi.GPIO as GPIO
from LoRaUI import LoRaUI
# from Protocol import Protocol
# often used strings
from Protocol import Protocol
from Util import LoopingThread
AT_OK = b'AT,OK'      # standard success answer from the LoRa module
LINEBREAK = b'\r\n'   # terminator of every uart command and answer
# block for x seconds to wait for the answer to a command
wait_secs_for_uart_answer = 5
# sleep for x seconds to send next command
wait_secs_to_next_cmd = 0.5
# send hello every x seconds
hello_secs = 10
class LoRaError(Exception):
    """Base exception for LoRa controller failures."""
    pass
class CmdAndAnswers:
    """Bundle a raw AT command with the answer line(s) it is expected to produce.

    The command is normalized to end with LINEBREAK and the expected answers
    are normalized to a tuple; ``callback`` (if given) is invoked by the writer
    loop with each actual answer.
    """

    def __init__(self, cmd: bytes, answers: Union[tuple[bytes, ...], bytes], callback=None):
        # guarantee a trailing LINEBREAK on the command
        if cmd.endswith(LINEBREAK):
            self.cmd = cmd
        else:
            self.cmd = cmd + LINEBREAK
        # wrap a single expected answer into a one-element tuple
        if isinstance(answers, tuple):
            self.answers = answers
        else:
            self.answers = (answers,)
        self.callback = callback
class LoRaController:
    """Drives a LoRa serial module via AT commands and bridges it to the GUI and
    protocol layers through thread-safe queues.

    Relies on module-level globals created in the main script: ``ser`` (serial
    port), ``win`` (GUI) and ``protocol`` (AODV protocol machine).
    """

    def __init__(self, address: bytes = b'0004'):
        # node address used for AT+ADDR during setup
        self.address = address
        # per-category logging switches, all enabled by default
        self.log_bools: Dict[str, bool] = {}
        self.log_cmds = ["all", "debug-in", "debug-out", "log-in", "log-out", "info"]
        for x in self.log_cmds:
            self.log_bools[x] = True
        # only write to the GUI once it exists (flag set from the main script)
        self.gui_active = False
        # lock to use for synchronized sequential job-queueing
        self.lock = threading.RLock()
        # Create queues for loop / thread communication
        self.cmd_out = queue.Queue()   # commands to be written to uart
        self.cmd_in = queue.Queue()    # answers read back from uart
        self.msg_in = queue.Queue()    # incoming 'LR' payloads for the protocol

    def get_message_queue(self):
        """
        Returns the queue to which incoming messages (starting with 'LR') will be put
        """
        return self.msg_in

    def break_queues(self):
        # unblock all waiting threads by feeding the sentinel into every queue
        for q in [self.cmd_out, self.cmd_in, self.msg_in]:
            q.put(b'break')

    def send_message(self, msg: bytes, address: Union[bytes, str]):
        """
        Writes to cmd_out queue to send a message. Includes the following commands:
        AT+DEST='address' -> AT+SEND=len('msg') -> msg\n
        :param msg: message to send
        :type msg: bytes
        :param address: address to send message to
        :type address: Union[bytes, str]
        """
        # if address was passed as a string, encode it as ascii
        if isinstance(address, str):
            if address.isascii():
                address = address.encode('ascii')
            else:
                self.display_protocol('error', f'Address {address}) was not ASCII-encoded, discarded.')
                return
        # define callback for when the message was sent TODO: may be impractical
        def print_msg_sent(answer):
            # NOTE(review): `answer` comes from cmd_in as bytes, so comparing it
            # with the str 'AT,SENDED' can never be True — confirm intent.
            if answer == 'AT,SENDED':
                self.display_protocol('msg-out', str(msg), str(address))
        # queue commands in order, so that there is no interruption by other threads
        send_cmds = list()
        send_cmds.append(CmdAndAnswers(b'AT+DEST=' + address, AT_OK))
        send_cmds.append(CmdAndAnswers(b'AT+SEND=' + str(len(msg)).encode('ascii'), AT_OK))
        send_cmds.append(CmdAndAnswers(msg, (b'AT,SENDING', b'AT,SENDED'), print_msg_sent))
        self.to_out_queue(send_cmds)

    def to_out_queue(self, cmds_and_answers: list[CmdAndAnswers]):
        """
        Writes (multiple) commands to cmd_out queue (synchronized)
        :param cmds_and_answers: commands to write to uart and answers to expect
        :type cmds_and_answers: CmdAndAnswers
        """
        # synchronize (for sequential puts)
        with self.lock:
            for elem in cmds_and_answers:
                # put command and expected answer(s) into queue
                self.cmd_out.put(elem)

    def write_msg_out_loop(self):
        """ write the commands in cmd_out queue to uart, then wait for and handle the answers in cmd_in """
        # get next command and expected answer(s) (as instance of CmdAndAnswers)
        cmd_and_answers = self.cmd_out.get(True)
        # break loop on command
        if cmd_and_answers == b'break':
            return
        # make sure cmd_and_answers is of type CmdAndAnswers
        if not isinstance(cmd_and_answers, CmdAndAnswers):
            raise TypeError('Did not use correct class to represent command')
        # write command to uart
        ser.write(cmd_and_answers.cmd)
        # debug log outgoing commands
        self.display_protocol('debug-out', str(cmd_and_answers.cmd[:-2]))
        # for each expected answer
        for elem in cmd_and_answers.answers:
            try:
                # get the actual answer (and strip off LINEBREAK)
                answer = self.cmd_in.get(True, timeout=wait_secs_for_uart_answer)
                # break loop on command
                if answer == b'break':
                    break
                # debug log answer
                self.display_protocol('debug-in', str(answer))
                # if actual answer is not the expected answer (should not happen), raise error
                if elem is not None and answer != elem:
                    self.handle_errors(b'"' + cmd_and_answers.cmd + b'" was not answered with "' +
                                       elem + b'", but instead with "' + answer + b'"')
                # if cmd_and_answers contains a callback, call it with the actual answer
                if cmd_and_answers.callback:
                    cmd_and_answers.callback(answer)
            except queue.Empty:
                self.handle_errors(
                    b'Got no answer to command "' + bytes(cmd_and_answers.cmd[:-2]) + b'"')

    def read_uart_to_protocol_loop(self):
        """ Reads from serial port and hands out the result according to the context. """
        # start with an empty string to put the message in
        msg = b''
        # block until there is something received
        msg += ser.read()
        # block to read until LINEBREAK (can naturally be sent if LR, so read until content_length, see below)
        while not msg.endswith(LINEBREAK):
            msg += ser.read()
        # remove LINEBREAK
        msg = msg[:-2]
        # handle actual messages from outside
        if msg.startswith(b'LR'):
            # if message incomplete, read rest
            msg_arr = msg.split(b',', 3)
            # third comma-separated field is the payload length as hex
            expected_length = int(msg_arr[2].decode('ascii'), base=16)
            if len(msg_arr[3]) < expected_length:
                # reattach LINEBREAK which apparently was part of message
                msg += LINEBREAK
                # read remaining bytes of content (minus the LINEBREAK in message)
                msg += ser.read(expected_length - (len(msg_arr[3]) + 2))
                # remove following LINEBREAK from input
                ser.read(2)
            # handle_incoming_msg(msg)
            self.msg_in.put(msg)
            # debug log incoming message
            self.display_protocol('debug-in', str(msg))
        # handle possible errors
        elif msg.startswith(b'AT,ERR') or msg.startswith(b'ERR'):
            self.handle_errors(msg)
        # handle answers to commands (put them in a queue). 'Vendor' just to deal properly with AT+RST
        elif msg.startswith(b'Vendor') or msg.startswith(b'AT'):
            self.cmd_in.put(msg)
        # log everything else
        else:
            self.display_protocol('log-in', f'Ignored message: {msg}')

    def handle_errors(self, err_msg: bytes):
        """Central error sink: resets the module on CPU_BUSY, otherwise logs."""
        if err_msg == b'ERR:CPU_BUSY':
            # lock cmd_out, so nothing new will be put in
            with self.lock:
                # display the error
                self.display_protocol('error', f'Got: "{err_msg.decode("ascii")}". Reset module.')
                # break writing loop out of waiting for an answer
                self.cmd_in.put(b'break')
                # reset the module
                self.do_setup()
        else:
            self.display_protocol('error', err_msg.decode('ascii'))

    def display_protocol(self, cmd: str, msg: Union[str, int, bytes], address: Optional[str] = None,
                         state: Optional[str] = None) -> Optional[int]:
        """
        Protocol machine for communication from aodv-protocol to GUI. The 'msg' is displayed according
        to the 'cmd'. However, if the 'cmd' is ['msg-lost','msg-sent','msg-ack'], 'msg' should only consist of the
        display_id, which identifies the message to update the state on.\n
        :param cmd: what type of message is msg: (msg-lost, msg-sent, msg_ack, info, debug, error, msg)
        :type cmd: str
        :param msg: message to display or display_id of lost message
        :type msg: str
        :param address: optional parameter that has to be given when cmd is 'msg' or 'msg-state' and contains the
        address of the sender, which needs to be decimal and in range from 1 to 20 (inclusive)
        :type address: str
        :param state: optional parameter that has to be given when cmd is 'msg-state' and contains the address
        of the sender
        :type state: str
        """
        # if flag to write to gui is not set, don't do that
        if not self.gui_active:
            return
        result = None
        try:
            if isinstance(msg, bytes):
                if msg.isascii():
                    msg = msg.decode('ascii')
                else:
                    raise ValueError('Message is non ascii, will not be displayed')
            if address and int(address) not in range(1, 21):
                win.write_error(f'Got information concerning an address {address}, which is not in valid range.')
            if cmd == 'msg-state':
                # update the state of the message as LOST
                if address:
                    win.update_message_state(address, msg, state)
            elif cmd == 'msg-in':
                result = win.write_to_messages(msg, address, False)
            elif cmd == 'msg-out':
                result = win.write_to_messages(msg, address, True)
            elif cmd in self.log_cmds[1:-1]:
                # if either 'all' is True or the kind of logging is False, do not display
                if self.log_bools.get(self.log_cmds[0]) and self.log_bools.get(cmd):
                    splt_cmd = cmd.split('-')
                    kind = splt_cmd[0]
                    is_out = True if splt_cmd[1] == 'out' else False
                    win.write_to_logs(msg, is_out, header=kind.capitalize())
            elif cmd == 'info':
                if self.log_bools.get(self.log_cmds[0]) and self.log_bools.get(cmd):
                    win.write_info(msg)
            elif cmd == 'error':
                if self.log_bools.get(self.log_cmds[0]):
                    win.write_error(msg)
            else:
                raise ValueError(f'Command {cmd} unknown')
        except (TypeError, ValueError) as e:
            win.write_error(f'Display protocol violated:'
                            f'\nErrormessage: {str(e)}'
                            f'\nParameters: cmd={cmd}, msg={msg}, address={address}, state={state}')
        return result

    def send_via_protocol(self, msg: str, address: str):
        """GUI send hook: validate the user's input and hand it to the protocol."""
        display_id = self.display_protocol('msg-out', msg, address)
        if address == 'FFFF':
            self.display_protocol('msg-state', display_id, address=address, state='ERROR: address "FFFF" invalid')
            self.display_protocol('error', f'User tried to send Message ({msg}) to address "FFFF", discarded.')
            return
        elif not msg.isascii():
            self.display_protocol('msg-state', display_id, address=address, state='ERROR: Non-ascii')
            self.display_protocol('error', f'Message ({msg}) was not ASCII-encoded, discarded.')
            return
        else:
            protocol.send_s_t_r(
                dest_addr=address,
                payload=msg.encode('ascii'),
                display_id=display_id
            )

    def handle_user_commands(self, cmd: str, address: str):
        """GUI command hook: print the routing table or toggle logging switches."""
        if cmd == 'table':
            table = ''
            for y in protocol.routes.keys():
                table += f'{y}: {protocol.routes.get(y)}\n'
            print(table)
            self.display_protocol('info', f"Printed table to console.")
        # handle logging commands
        elif cmd in self.log_cmds:
            self.log_bools[cmd] = not self.log_bools.get(cmd)
            self.display_protocol('info', f'Displaying of "{cmd}" now {"ON" if self.log_bools[cmd] else "OFF"}')
        # handle shortcuts for logging commands
        elif cmd in ['debug', 'log']:
            # set both versions of cmd (-in, -out) to their collective counterpart
            self.log_bools[f'{cmd}-in'] = self.log_bools[f'{cmd}-out'] = \
                not self.log_bools.get(f'{cmd}-in') and self.log_bools.get(f'{cmd}-out')
            # log the change
            self.display_protocol(
                'info', f'Displaying of "{cmd}s" now {"ON" if self.log_bools[f"{cmd}-in"] else "OFF"}'
            )
        else:
            self.display_protocol('error', f'Unknown user command: {cmd}')

    def do_setup(self):
        """Hardware-reset the module, then queue the full AT configuration sequence."""
        self.reset_module()
        setup_cmd_list = list()
        # Test cmd
        setup_cmd_list.append(CmdAndAnswers(b'AT', AT_OK))
        # Reset module
        setup_cmd_list.append(CmdAndAnswers(b'AT+RST', (AT_OK, b'Vendor:Himalaya')))
        # Set config string
        setup_cmd_list.append(CmdAndAnswers(b'AT+CFG=433000000,20,9,10,4,1,0,0,0,0,3000,8,10',
                                            AT_OK))  # AT+CFG=433000000,5,9,7,4,1,0,0,0,0,3000,8,10
        # Set address
        setup_cmd_list.append(CmdAndAnswers(b'AT+ADDR=' + self.address, AT_OK))
        # Set Destination
        setup_cmd_list.append(CmdAndAnswers(b'AT+DEST=FFFF', AT_OK))
        # Activate modules receive mode
        setup_cmd_list.append(CmdAndAnswers(b'AT+RX', AT_OK, lambda x: self.display_protocol('info', 'Setup Done.')))
        self.to_out_queue(setup_cmd_list)

    def reset_module(self):
        """Pulse GPIO 18 to hardware-reset the LoRa module and flush the uart buffers."""
        print("resetting lora module...")
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(18, GPIO.OUT)
        GPIO.output(18, GPIO.HIGH)
        time.sleep(1)
        GPIO.output(18, GPIO.LOW)
        GPIO.cleanup()
        time.sleep(1)
        ser.reset_input_buffer()
        ser.reset_output_buffer()
def input_address():
    """Prompt until the user enters a valid node address and return it as ASCII bytes.

    A valid address is exactly 4 ASCII decimal digits whose numeric value is
    in range 1..20 (e.g. '0004').

    BUG FIX: the original called int() before checking the characters, so any
    non-numeric input crashed with ValueError instead of re-prompting.
    """
    while True:
        input_addr = input('Address: ')
        # validate digits-only before int() so the conversion cannot raise
        if (len(input_addr) == 4
                and input_addr.isascii()
                and input_addr.isdecimal()
                and int(input_addr) in range(1, 21)):
            return input_addr.encode('ascii')
        print('Wrong address format.')
def input_logging_bool(kind: str) -> bool:
    """Ask a y/n console question for the given log *kind*; True means yes."""
    while True:
        choice = input(f'Print {kind} messages to "Logs and Errors" (y/n): ').lower()
        if choice in ['y', 'n']:
            return choice == 'y'
        print('Please only enter "y" for yes or "n" for no')
if __name__ == '__main__':
    # let user input address
    in_address = input_address()
    # setup uart
    ser = serial.Serial(
        port='/dev/ttyS0',
        baudrate=115200,
        parity=serial.PARITY_NONE,
        stopbits=serial.STOPBITS_ONE,
        bytesize=serial.EIGHTBITS
    )
    # make sure uart is open
    try:
        if ser.is_open:
            ser.close()
        ser.open()
    except serial.SerialException:
        print("Error opening serial port. Probably already open.", file=sys.stderr)
        sys.exit()
    lora_controller = LoRaController(
        address=in_address
    )
    # create protocol-machine
    protocol = Protocol(
        address=in_address.decode('ascii'),
        msg_in=lora_controller.get_message_queue(),
        msg_out=lora_controller.send_message,
        to_display=lora_controller.display_protocol
    )
    # create GUI
    win = LoRaUI(
        on_send=lora_controller.send_via_protocol,
        on_cmd=lora_controller.handle_user_commands,
        title=f'AODV_Light for Node {in_address.decode("ascii")}')
    # schedule setup in cmd_out queue
    lora_controller.do_setup()
    # start loop threads
    prot_thread = LoopingThread('Protocol_Loop', protocol.protocol_loop)
    # _thread.start_new_thread(protocol.protocol_loop, ())
    read_thread = LoopingThread('Read_uart_Loop', lora_controller.read_uart_to_protocol_loop)
    # _thread.start_new_thread(lora_controller.read_uart_to_protocol_loop, ())
    write_thread = LoopingThread('Write_uart_Loop', lora_controller.write_msg_out_loop)
    # _thread.start_new_thread(lora_controller.write_msg_out_loop, ())
    threads = [prot_thread, read_thread, write_thread]
    for thread in threads:
        thread.start()
    # set flag to activate gui
    lora_controller.gui_active = True
    # catch main thread in GUI-Loop, so program ends when window closes
    win.mainloop()
    # stop logging to avoid Exceptions:
    lora_controller.gui_active = False
    for thread in threads:
        thread.stop()
    # break all thread-blocks after window closes
    lora_controller.break_queues()
    # close uart
    ser.cancel_read()  # should not throw an error according to docs, but does (TypeError)
    ser.cancel_write()
    ser.close()
    for thread in threads:
        # NOTE(review): Thread.isAlive() was removed in Python 3.9 — if
        # LoopingThread subclasses threading.Thread, this should be is_alive()
        if thread.isAlive():
            thread.join()
|
# -*- coding: utf-8 -*-
import signal
import sys
def signal_term_handler(signal, frame):
    # log the signal and terminate cleanly (Python 2 print syntax)
    print 'got SIGTERM'
    sys.exit(0)
signal.signal(signal.SIGTERM, signal_term_handler)
signal.signal(signal.SIGABRT, signal_term_handler)
# BUG FIX: SIGBREAK exists only on Windows; guard it so the script also runs
# on POSIX systems instead of dying with AttributeError at import time.
if hasattr(signal, 'SIGBREAK'):
    signal.signal(signal.SIGBREAK, signal_term_handler)
# idle forever; the handlers above are the only exit paths
while True:
    pass
from datetime import date
from onegov.ballot import Candidate, List, ListResult
from onegov.ballot import CandidateResult
from onegov.ballot import ProporzElection
from onegov.ballot import ElectionResult
def test_candidate_percentages(session):
    """Check per-entity and per-district percentage bookkeeping of a proporz election."""
    election = ProporzElection(
        title='Election',
        domain='federation',
        date=date(2015, 6, 14),
        number_of_mandates=1
    )
    session.add(election)
    session.flush()
    # Add four entities/two districts
    election_result_1 = ElectionResult(
        name='1',
        district='1',
        entity_id=1,
        counted=True,
        eligible_voters=2000,
        received_ballots=1015,
        blank_ballots=10,
        invalid_ballots=5,
        blank_votes=80,
        invalid_votes=120
    )
    election_result_2 = ElectionResult(
        name='2',
        district='1',
        entity_id=2,
        counted=True,
        eligible_voters=2000,
        received_ballots=1005,
        blank_ballots=3,
        invalid_ballots=2,
        blank_votes=8,
        invalid_votes=1
    )
    # entity 3 is deliberately still uncounted
    election_result_3 = ElectionResult(
        name='3',
        district='2',
        entity_id=3,
        counted=False,
        eligible_voters=500,
    )
    # entity 4 is counted but received zero ballots
    election_result_4 = ElectionResult(
        name='4',
        district='2',
        entity_id=4,
        counted=True,
        eligible_voters=200,
        received_ballots=0,
        blank_ballots=0,
        invalid_ballots=0,
        blank_votes=0,
        invalid_votes=0
    )
    election.results.append(election_result_1)
    election.results.append(election_result_2)
    election.results.append(election_result_3)
    election.results.append(election_result_4)
    session.flush()
    # Add 5 lists
    list_1 = List(
        number_of_mandates=1,
        list_id='1',
        name='1'
    )
    list_2 = List(
        list_id='2',
        name='2'
    )
    list_3 = List(
        list_id='3',
        name='3'
    )
    list_4 = List(
        list_id='4',
        name='4'
    )
    list_5 = List(
        list_id='5',
        name='5'
    )
    election.lists.append(list_1)
    election.lists.append(list_2)
    election.lists.append(list_3)
    election.lists.append(list_4)
    election.lists.append(list_5)
    session.flush()
    # Add the list results to the first entity
    election_result_1.list_results.append(
        ListResult(
            list_id=list_1.id,
            votes=52,
        )
    )
    election_result_1.list_results.append(
        ListResult(
            list_id=list_2.id,
            votes=11
        )
    )
    election_result_1.list_results.append(
        ListResult(
            list_id=list_3.id,
            votes=20
        )
    )
    election_result_1.list_results.append(
        ListResult(
            list_id=list_4.id,
            votes=1
        )
    )
    election_result_1.list_results.append(
        ListResult(
            list_id=list_5.id,
            votes=0
        )
    )
    # Add only two list results to the second entity.
    election_result_2.list_results.append(
        ListResult(
            list_id=list_1.id,
            votes=20
        )
    )
    election_result_2.list_results.append(
        ListResult(
            list_id=list_5.id,
            votes=5
        )
    )
    # Add only one list results to the last entity
    election_result_4.list_results.append(
        ListResult(
            list_id=list_1.id,
            votes=10
        )
    )
    session.flush()
    # Add 5 candidates
    candidate_1 = Candidate(
        elected=True,
        candidate_id='1',
        family_name='1',
        first_name='1',
    )
    candidate_2 = Candidate(
        elected=False,
        candidate_id='2',
        family_name='2',
        first_name='2',
    )
    candidate_3 = Candidate(
        elected=False,
        candidate_id='3',
        family_name='3',
        first_name='3',
    )
    candidate_4 = Candidate(
        elected=False,
        candidate_id='4',
        family_name='4',
        first_name='4',
    )
    candidate_5 = Candidate(
        elected=False,
        candidate_id='5',
        family_name='5',
        first_name='5',
    )
    election.candidates.append(candidate_1)
    election.candidates.append(candidate_2)
    election.candidates.append(candidate_3)
    election.candidates.append(candidate_4)
    election.candidates.append(candidate_5)
    session.flush()
    # Add the candidate results to the first entity
    election_result_1.candidate_results.append(
        CandidateResult(
            candidate_id=candidate_1.id,
            votes=50,
        )
    )
    election_result_1.candidate_results.append(
        CandidateResult(
            candidate_id=candidate_2.id,
            votes=10
        )
    )
    election_result_1.candidate_results.append(
        CandidateResult(
            candidate_id=candidate_3.id,
            votes=20
        )
    )
    election_result_1.candidate_results.append(
        CandidateResult(
            candidate_id=candidate_4.id,
            votes=1
        )
    )
    election_result_1.candidate_results.append(
        CandidateResult(
            candidate_id=candidate_5.id,
            votes=0
        )
    )
    # Add only two candidate results to the second entity.
    election_result_2.candidate_results.append(
        CandidateResult(
            candidate_id=candidate_1.id,
            votes=30
        )
    )
    election_result_2.candidate_results.append(
        CandidateResult(
            candidate_id=candidate_5.id,
            votes=5
        )
    )
    # Add only one candidate results to the last entity
    election_result_4.candidate_results.append(
        CandidateResult(
            candidate_id=candidate_1.id,
            votes=10
        )
    )
    session.flush()

    def round_(n, z):
        # helper: percentage of n out of z, rounded to 2 decimals
        return round(100 * n / z, 2)
    # total candidate votes per entity / per district, used as denominators
    tot = {t.entity_id: t.votes for t in election.votes_by_entity.all()}
    tot_d = {t.district: t.votes for t in election.votes_by_district.all()}
    print(tot)
    print(tot_d)
    assert candidate_1.percentage_by_entity == {
        1: {'votes': 50, 'counted': True, 'percentage': round_(50, tot[1])},
        2: {'votes': 30, 'counted': True, 'percentage': round_(30, tot[2])},
        3: {'votes': 0, 'counted': False, 'percentage': 0.0},
        4: {'votes': 10, 'counted': True, 'percentage': 100.0}
    }
    assert candidate_2.percentage_by_entity == {
        1: {'votes': 10, 'counted': True, 'percentage': round_(10, tot[1])},
        2: {'votes': 0, 'counted': True, 'percentage': 0.0},
        3: {'votes': 0, 'counted': False, 'percentage': 0.0},
        4: {'votes': 0, 'counted': True, 'percentage': 0.0}
    }
    assert candidate_3.percentage_by_entity == {
        1: {'votes': 20, 'counted': True, 'percentage': round_(20, tot[1])},
        2: {'votes': 0, 'counted': True, 'percentage': 0.0},
        3: {'votes': 0, 'counted': False, 'percentage': 0.0},
        4: {'votes': 0, 'counted': True, 'percentage': 0.0}
    }
    assert candidate_4.percentage_by_entity == {
        1: {'votes': 1, 'counted': True, 'percentage': round_(1, tot[1])},
        2: {'votes': 0, 'counted': True, 'percentage': 0.0},
        3: {'votes': 0, 'counted': False, 'percentage': 0.0},
        4: {'votes': 0, 'counted': True, 'percentage': 0.0}
    }
    assert candidate_5.percentage_by_entity == {
        1: {'votes': 0, 'counted': True, 'percentage': 0.0},
        2: {'votes': 5, 'counted': True, 'percentage': round_(5, tot[2])},
        3: {'votes': 0, 'counted': False, 'percentage': 0.0},
        4: {'votes': 0, 'counted': True, 'percentage': 0.0}
    }
    assert candidate_1.percentage_by_district == {
        '1': {'votes': 80, 'counted': True,
              'entities': [1, 2], 'percentage': round_(80, tot_d['1'])},
        '2': {'votes': 0, 'counted': False,
              'entities': [3, 4], 'percentage': 0.0}
    }
    assert candidate_2.percentage_by_district == {
        '1': {'votes': 0, 'counted': True,
              'entities': [1, 2], 'percentage': 0.0},
        '2': {'votes': 0, 'counted': False,
              'entities': [3, 4], 'percentage': 0.0}
    }
    assert candidate_3.percentage_by_district == {
        '1': {'votes': 0, 'counted': True,
              'entities': [1, 2], 'percentage': 0.0},
        '2': {'votes': 0, 'counted': False,
              'entities': [3, 4], 'percentage': 0.0}
    }
    assert candidate_4.percentage_by_district == {
        '1': {'votes': 0, 'counted': True,
              'entities': [1, 2], 'percentage': 0.0},
        '2': {'votes': 0, 'counted': False,
              'entities': [3, 4], 'percentage': 0.0}
    }
    assert candidate_5.percentage_by_district == {
        '1': {'votes': 5, 'counted': True,
              'entities': [1, 2], 'percentage': round_(5, tot_d['1'])},
        '2': {'votes': 0, 'counted': False,
              'entities': [3, 4], 'percentage': 0.0}
    }
|
from rv.api import m
def test_spectravoice(read_write_read_synth):
    """Round-trip a SpectraVoice module through write/read and verify every persisted field."""
    mod: m.SpectraVoice = read_write_read_synth("spectravoice").module
    assert mod.flags == 0x49
    assert mod.name == "SpectraVoice"
    # full per-harmonic arrays (fixtures defined below this function)
    assert mod.harmonic_freqs.values == EXPECTED_HARMONIC_FREQS
    assert mod.harmonic_volumes.values == EXPECTED_HARMONIC_VOLUMES
    assert mod.harmonic_widths.values == EXPECTED_HARMONIC_WIDTHS
    assert mod.harmonic_types.values == EXPECTED_HARMONIC_TYPES
    assert mod.volume == 219
    assert mod.panning == -77
    assert mod.attack == 234
    assert mod.release == 324
    assert mod.polyphony_ch == 21
    assert mod.mode == mod.Mode.lq_mono
    assert not mod.sustain
    assert mod.spectrum_resolution == 4
    # currently-selected harmonic and its editable parameters
    assert mod.harmonic == 10
    assert mod.h_freq_hz == 14729
    assert mod.h_volume == 224
    assert mod.h_width == 94
    assert mod.h_type == mod.HarmonicType.overtones2
# Fixture values expected back from the serialized "spectravoice" project,
# one entry per harmonic slot (16 slots).
EXPECTED_HARMONIC_FREQS = [
    17916,
    7063,
    5426,
    7235,
    18002,
    10594,
    775,
    20586,
    10853,
    16279,
    14729,
    4479,
    12231,
    3876,
    19036,
    21275,
]
EXPECTED_HARMONIC_VOLUMES = [
    114,
    203,
    245,
    42,
    191,
    102,
    243,
    195,
    184,
    59,
    224,
    144,
    213,
    182,
    21,
    238,
]
EXPECTED_HARMONIC_WIDTHS = [
    99,
    242,
    70,
    26,
    245,
    9,
    43,
    10,
    219,
    135,
    94,
    235,
    66,
    124,
    114,
    16,
]
EXPECTED_HARMONIC_TYPES = [
    m.SpectraVoice.HarmonicType.org2,
    m.SpectraVoice.HarmonicType.rect,
    m.SpectraVoice.HarmonicType.overtones4,
    m.SpectraVoice.HarmonicType.sin,
    m.SpectraVoice.HarmonicType.overtones4,
    m.SpectraVoice.HarmonicType.overtones4,
    m.SpectraVoice.HarmonicType.triangle1,
    m.SpectraVoice.HarmonicType.overtones1,
    m.SpectraVoice.HarmonicType.org4,
    m.SpectraVoice.HarmonicType.rect,
    m.SpectraVoice.HarmonicType.overtones2,
    m.SpectraVoice.HarmonicType.overtones3,
    m.SpectraVoice.HarmonicType.org4,
    m.SpectraVoice.HarmonicType.overtones4,
    m.SpectraVoice.HarmonicType.org1,
    m.SpectraVoice.HarmonicType.overtones1,
]
|
import api
api = api.api(True,False,False,True)
print api.waitForButtonPress()
|
# --------------------------------------------------------------------
import os
# --------------------------------------------------------------------
# Oracle db
# Full TNS descriptor with failover (BASIC method) across two SCAN listeners.
dsn_tns = """(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (COMMUNITY = tcp.world) (PROTOCOL = TCP) (HOST = aepw04-bulwscan.e-ssi.net)(PORT = 1521))
(ADDRESS = (COMMUNITY = tcp.world) (PROTOCOL = TCP) (HOST = aepw04-shwdscan.e-ssi.net)(PORT = 1521))
)
(CONNECT_DATA =
(SERVICE_NAME = aepw04_clt.world)
(FAILOVER_MODE =
(TYPE = SELECT)
(RETRIES = 1000)
(DELAY = 5)
(METHOD = BASIC)
)
)
)""" # Triple " allows multine line string here. Also, nothing needs to be escaped within them
#db = cx_Oracle.connect ('C23833', 'V1lacsek123!', 'aepw04-shwdscan.e-ssi.net:1521/aepw04_clt.world') # Connect method 1
# NOTE(review): cx_Oracle is never imported in this chunk — this raises
# NameError unless the import exists elsewhere in the file; confirm.
# SECURITY NOTE: credentials are hard-coded in source; move them to env/config.
db = cx_Oracle.connect ('C23833', 'V1lacsek123!', dsn_tns) # Connect method 2
csr = db.cursor ()
csr.execute ('SELECT * FROM CSABA_SRC')
# iterating the cursor fetches row by row
for row in csr:
    print (row) # Comes back as a list
    print (row[0]) # First column
csr.close ()
db.close ()
# --------------------------------------------------------------------
# timed demo of prepared statement + bind variable + batched fetch
# NOTE(review): 'time' is not imported in this chunk either — confirm the
# import exists in the full file.
start = time.time ()
db2 = cx_Oracle.connect ('C23833', 'V1lacsek123!', dsn_tns) # Connect method 2
csr2 = db2.cursor ()
tbl_name_len = 10
csr2.arraysize = 10 # Allocate memory for 10 rows fetched at a time. Batches of 10 rows will be returned
csr2.prepare ("select * from all_tables where owner = 'C23833' and length (table_name) > :len")
csr2.execute (None, {'len': tbl_name_len}) # Bind variable. must be a dictionary object
res = csr2.fetchall()
for row in res:
    print (row[1])
csr2.close ()
db2.close ()
elapsed = (time.time() - start)
print ("Elapsed:", elapsed, "seconds")
# --------------------------------------------------------------------
os.system ("pause") # Windows-only: keep the console window open
|
from flask import Flask, request, jsonify, send_file
from utils import Config
from wrapper import SmartNews
import json
def init_model():
    """Build the summarization model with the pinned checkpoint path."""
    cfg = Config()
    cfg.test_from = '../models/model_step_148000.pt'
    return SmartNews(cfg)
app = Flask(__name__)
# load the model once at startup; it is shared by all requests
model = init_model()
@app.route('/hello', methods=['GET'])
def hello_world():
    """Health-check endpoint returning a constant greeting."""
    return 'Hello, World!'
@app.route('/summarize', methods=['POST'])
def summarize():
    """Accept an uploaded text file, run the summarizer and return analytics + summary as JSON.

    SECURITY FIX: the client-supplied filename is stripped to its basename
    before being interpolated into filesystem paths, preventing path
    traversal (e.g. an upload named '../../etc/passwd').
    """
    import os  # local import: only needed for filename sanitization
    verbose = int(request.form['verbose'])
    file_handle = request.files['upfile']
    file_name = os.path.basename(file_handle.filename)
    file_handle.save(f'../input_raw_text/{file_name}')
    analytics = model.summarize(file_name, verbose=verbose)
    # the model writes its summary next to the input; read it back
    with open(f'../pred/result_{file_name}', 'r') as f:
        summarized_text = f.read()
    res = {
        'analytics': analytics,
        'summarized_text': summarized_text
    }
    return jsonify(res)
if __name__ == '__main__':
    # listen on all interfaces (default Flask port 5000)
    app.run(host= '0.0.0.0')
|
# @Title: 访问所有点的最小时间 (Minimum Time Visiting All Points)
# @Author: 2464512446@qq.com
# @Date: 2019-12-17 10:16:27
# @Runtime: 48 ms
# @Memory: 11.5 MB
class Solution:
    """LeetCode 1266: minimum time to visit all points in the given order.

    Diagonal, horizontal and vertical steps each cost 1 second, so the time
    between two points is their Chebyshev distance.
    """

    def minTimeToVisitAllPoints(self, points):
        """Sum the Chebyshev distances over consecutive point pairs."""
        total = 0
        for (ax, ay), (bx, by) in zip(points, points[1:]):
            total += max(abs(ax - bx), abs(ay - by))
        return total
|
from lid_driven_cavity_problem.residual_function import pure_python_residual_function, \
numba_residual_function, cython_residual_function, numpy_residual_function, \
cpp_residual_function, cpp_omp_residual_function
from lid_driven_cavity_problem.staggered_grid import Graph
import numpy as np
def test_residual_function():
    """All optimized residual implementations must agree with the pure-Python
    reference on a small 3x3 lid-driven-cavity grid."""
    # 9 cells, each with the same (P, U, V) = (123.0, 0.002, -0.02) unknowns.
    unknowns = np.tile([123.0, 0.002, -0.02], 9)
    graph = Graph(
        1.0,
        1.0,
        3,
        3,
        0.01,
        1.0,
        1.0,
        100.0,
        initial_P=100.0,
        initial_U=0.001,
        initial_V=-0.01,
    )
    expected = pure_python_residual_function.residual_function(unknowns, graph)
    candidates = (
        numpy_residual_function.residual_function,
        numba_residual_function.residual_function,
        cython_residual_function.residual_function,
        cpp_residual_function.residual_function,
        cpp_omp_residual_function.residual_function,
    )
    for candidate in candidates:
        assert np.allclose(candidate(unknowns, graph), expected, rtol=1e-6, atol=1e-4)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-01-10 06:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: update verbose_name on two Config fields.

    The verbose_name values are \\u-escaped Chinese labels; they are runtime
    strings and must stay exactly as generated.
    """
    dependencies = [
        ('nova', '0067_config'),
    ]
    operations = [
        migrations.AlterField(
            model_name='config',
            name='comment',
            field=models.CharField(blank=True, max_length=500, null=True, verbose_name='\u8bf4\u660e'),
        ),
        migrations.AlterField(
            model_name='config',
            name='config_value',
            field=models.CharField(blank=True, max_length=500, null=True, verbose_name='\u914d\u7f6e\u53c2\u6570\u503c'),
        ),
    ]
|
from flask import Flask, jsonify
import requests
# Module-level Flask instance (named "application" so WSGI servers can find it).
application = Flask(__name__)
@application.route("/api/getmvplate/<string:plate>",methods=["GET"])
def extractplates(plate):
    """Proxy the plate string to the remote OpenCPU `extractplates` R function
    and return its raw JSON response text."""
    data = [('txt', plate),]
    # A timeout keeps a stalled OpenCPU server from hanging this worker forever
    # (requests.post blocks indefinitely by default).
    results = requests.post('http://159.203.187.227/ocpu/user/bando/library/mpesaoptim/R/extractplates/json', data=data, timeout=30)
    return jsonify(results.text)
@application.route("/api/countcars/<string:plate1>/<string:plate2>",methods=["GET"])
def platediffs(plate1,plate2):
    """Proxy two plate strings to the remote OpenCPU `carbetween` R function
    and return its raw JSON response text."""
    data = [('car1',plate1 ), ('car2', plate2),]
    # Same rationale as extractplates: never block forever on the remote host.
    results = requests.post('http://159.203.187.227/ocpu/user/bando/library/mpesaoptim/R/carbetween/json', data=data, timeout=30)
    return jsonify(results.text)
if __name__ == "__main__":
    # Listen on all interfaces (default Flask port) when run directly.
    application.run(host='0.0.0.0')
|
#!/usr/bin/env python
import asyncio
from typing import Callable
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.application import Application
from prompt_toolkit.clipboard.pyperclip import PyperclipClipboard
from prompt_toolkit.document import Document
from prompt_toolkit.layout.processors import BeforeInput, PasswordProcessor
from prompt_toolkit.completion import Completer
from hummingbot.client.ui.layout import (
create_input_field,
create_log_field,
create_output_field,
create_search_field,
generate_layout,
create_timer,
create_process_monitor,
create_trade_monitor
)
from hummingbot.client.ui.interface_utils import start_timer, start_process_monitor, start_trade_monitor
from hummingbot.client.ui.style import load_style
import logging
# Monkey patching here as _handle_exception gets the UI hanged into Press ENTER screen mode
def _handle_exception_patch(self, loop, context):
if "exception" in context:
logging.getLogger(__name__).error(f"Unhandled error in prompt_toolkit: {context.get('exception')}",
exc_info=True)
Application._handle_exception = _handle_exception_patch  # install the monkey patch globally
class HummingbotCLI:
    """Full-screen prompt_toolkit UI: input prompt, output pane, log pane,
    search field, and timer/process/trade monitor widgets.

    Background refresh tasks are scheduled on the current asyncio loop at
    construction time; `run()` starts the application itself.
    """
    def __init__(self,
                 input_handler: Callable,
                 bindings: KeyBindings,
                 completer: Completer):
        self.search_field = create_search_field()
        self.input_field = create_input_field(completer=completer)
        self.output_field = create_output_field()
        self.log_field = create_log_field(self.search_field)
        self.timer = create_timer()
        self.process_usage = create_process_monitor()
        self.trade_monitor = create_trade_monitor()
        self.layout = generate_layout(self.input_field, self.output_field, self.log_field, self.search_field, self.timer, self.process_usage, self.trade_monitor)
        # add self.to_stop_config to know if cancel is triggered
        self.to_stop_config: bool = False
        self.live_updates = False
        self.bindings = bindings
        self.input_handler = input_handler
        self.input_field.accept_handler = self.accept
        self.app = Application(layout=self.layout, full_screen=True, key_bindings=self.bindings, style=load_style(),
                               mouse_support=True, clipboard=PyperclipClipboard())
        # settings
        self.prompt_text = ">>> "
        self.pending_input = None
        self.input_event = None
        self.hide_input = False
        # start ui tasks
        loop = asyncio.get_event_loop()
        loop.create_task(start_timer(self.timer))
        loop.create_task(start_process_monitor(self.process_usage))
        loop.create_task(start_trade_monitor(self.trade_monitor))
    async def run(self):
        """Run the prompt_toolkit application until exit() is called."""
        await self.app.run_async()
    def accept(self, buff):
        """Buffer accept handler: stash the entered text, wake any prompt()
        waiter, echo the line (unless hidden), and forward it to the
        command handler."""
        self.pending_input = self.input_field.text.strip()
        if self.input_event:
            self.input_event.set()
        try:
            if self.hide_input:
                output = ''
            else:
                output = '\n>>>  {}'.format(self.input_field.text,)
                self.input_field.buffer.append_to_history()
        except BaseException as e:
            output = str(e)
        self.log(output)
        self.input_handler(self.input_field.text)
    def clear_input(self):
        """Drop any stashed-but-unconsumed input."""
        self.pending_input = None
    def log(self, text: str, save_log: bool = True):
        """Write `text` to the output pane; silent when live updates are on."""
        if save_log:
            if self.live_updates:
                self.output_field.log(text, silent=True)
            else:
                self.output_field.log(text)
        else:
            self.output_field.log(text, save_log=False)
    def change_prompt(self, prompt: str, is_password: bool = False):
        """Swap the input field's prompt text, masking input for passwords."""
        self.prompt_text = prompt
        processors = []
        if is_password:
            processors.append(PasswordProcessor())
        processors.append(BeforeInput(prompt))
        self.input_field.control.input_processors = processors
    async def prompt(self, prompt: str, is_password: bool = False) -> str:
        """Show `prompt`, wait for the next accepted line, and return it.

        Passwords are echoed to the log as asterisks, never in clear text.
        """
        self.change_prompt(prompt, is_password)
        self.app.invalidate()
        self.input_event = asyncio.Event()
        await self.input_event.wait()
        temp = self.pending_input
        self.clear_input()
        self.input_event = None
        if is_password:
            masked_string = "*" * len(temp)
            self.log(f"{prompt}{masked_string}")
        else:
            self.log(f"{prompt}{temp}")
        return temp
    def set_text(self, new_text: str):
        """Replace the input field contents, cursor at the end."""
        self.input_field.document = Document(text=new_text, cursor_position=len(new_text))
    def toggle_hide_input(self):
        """Flip echo suppression for the next accepted line(s)."""
        self.hide_input = not self.hide_input
    def exit(self):
        """Terminate the prompt_toolkit application."""
        self.app.exit()
|
from .whitelistedhashesmodel import WhitelistedHashesModel
from .votingdatamodel import VotingDataModel
# noinspection PyUnresolvedReferences
from pystratis.api.global_responsemodels import PollViewModel
__all__ = ['WhitelistedHashesModel', 'VotingDataModel', 'PollViewModel']
|
#-*- coding: utf-8 -*-
from django.db import models
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
class News(models.Model):
    """A news article reachable at a slug-based detail URL, newest first."""
    title = models.CharField(max_length=60, verbose_name = 'Заголовок')
    slug = models.SlugField(max_length=60, verbose_name = 'Адрес URL')
    body = models.TextField(max_length=500, verbose_name='Текст')
    image = models.ImageField(upload_to='media/news', verbose_name="Изображение", blank = True)
    author = models.ForeignKey(User, verbose_name="Автор")
    date_time = models.DateField(default=timezone.now, verbose_name="Дата создания")
    # BUGFIX: pass the callable `timezone.now`, not the call `timezone.now()`.
    # Calling it here evaluates once at import time, so every new row would
    # silently share the server-start timestamp as its default.
    published_date = models.DateTimeField(default=timezone.now, verbose_name="Дата публикации")
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        return reverse('detail', args=[str(self.slug)])
    class Meta:
        ordering = ['-published_date']
        verbose_name="Новость"
        verbose_name_plural = "Новости"
|
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the landing page."""
    template_name = 'index.html'
    return render_template(template_name)
@app.route('/login', methods=['POST'])
def login():
    """Handle the login form submission and acknowledge receipt.

    methods=['POST'] already guarantees request.method == 'POST', so the
    previous inner guard was dead code and has been removed.
    """
    username = request.form['username']
    password = request.form['password']
    # SECURITY FIX: the old debug print wrote the plaintext password to the
    # server console/logs (a credential leak). Log the username only.
    print('login attempt: {}'.format(username))
    return 'Datos enviados'
# Tally one occurrence of `key`; dict.get() folds the first-seen and
# already-seen branches into a single statement.
counts[key] = counts.get(key, 0) + 1
|
from sklearn import linear_model
import checks as c
import visualizations as viz
import regClass as rc
class OLS(rc.REG):
    """Object which performs ordinary least squares regression, checks assumptions, and makes plots."""
    def check_model(self):
        """Checks assumptions of OLS regression. Inherits 4 from regression base class and adds 3 additional tests."""
        rc.REG.check_model(self)
        # Linearity check: dependent vs. independent variables
        self.linCheck = c.linCheck(self.independentVar, self.dependentVar)
        self.linCheck.check()
        # Normality check on the residuals
        self.normCheck = c.normCheck(self.residuals)
        self.normCheck.check()
        # Homoskedasticity check: residual variance vs. independent variables
        self.homoskeCheck = c.homoskeCheck(self.residuals, self.independentVar)
        self.homoskeCheck.check()
    def plot_results(self):
        """Creates the base regression plots as well as a qq-Plot."""
        rc.REG.plot_results(self)
        # qq-plot of residuals against the normal distribution
        viz.plot_qq(self.residuals).plot()
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from myapp import views
from rest_framework import routers
# The DefaultRouter auto-generates list/detail routes for each ViewSet.
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'objects', views.ObjectViewSet)
# NOTE(review): 'categorys' (sic) is kept as-is — renaming the URL would
# break existing clients.
router.register(r'categorys', views.CategoryViewSet)
urlpatterns = [
    # url(r'^users/$', views.user_list),
    # url(r'^users/(?P<pk>[0-9]+)$', views.user_detail),
    url(r'^', include(router.urls)),
    url(r'^index/$', views.index, name="index"),
    url(r'^userapiview/$', views.UserApiView.as_view()),
]
# urlpatterns = format_suffix_patterns(urlpatterns)
import numpy as np
import cv2
print(cv2.__version__)
# IMREAD_UNCHANGED keeps the image exactly as stored (including any alpha channel).
image = cv2.imread("AVR_pinmap.png",cv2.IMREAD_UNCHANGED)
if image is None:
    # BUGFIX: cv2.imread does not raise on a missing/corrupt file — it
    # silently returns None, which made cv2.imshow fail confusingly.
    raise FileNotFoundError("AVR_pinmap.png could not be read")
cv2.imshow("Moon",image)
cv2.waitKey(0)  # block until a key is pressed
cv2.destroyAllWindows()
|
from .filters import MSAnd
from .instructions import SY
from .layer import DisplayPriority
class Lookup:
    """One chart-symbology lookup entry: a rule set plus the drawing
    instructions to apply when the rules match."""
    def __init__(self, id='', table=None, display=None, comment='',
                 instruction=None, rules=None,
                 display_priority=DisplayPriority.NotSet):
        # Mutable defaults are created per-instance here, never in the signature.
        if rules is None:
            rules = MSAnd()
        if instruction is None:
            instruction = []
        self.id = id
        self.table = table
        self.display = display
        self.comment = comment
        self.instruction = instruction
        self.rules = rules
        self.display_priority = display_priority
    @property
    def rot_field(self):
        # First SY instruction with a rotation field wins; implicitly None otherwise.
        for command in self.instruction:
            if isinstance(command, SY) and command.rot_field:
                return command.rot_field
    def get_expression(self, fields):
        """Render the rule set as a mapserver EXPRESSION clause ('' if no rules)."""
        if self.rules:
            return 'EXPRESSION ({})'.format(self.rules.to_expression(fields))
        return ''
    def get_styleitems(self, chartsymbols, feature_name, geom_type, fields):
        """Collect style strings per geometry type from every instruction.

        An instruction may return a single string (goes under `geom_type`)
        or a dict mapping geometry types to style strings.
        """
        style = {
            'POINT': [],
            'LINE': [],
            'POLYGON': [],
        }
        for command in self.instruction:
            styleitem = command(chartsymbols, feature_name, geom_type, fields)
            if isinstance(styleitem, str):
                style[geom_type].append(styleitem)
            else:
                for style_type, style_str in styleitem.items():
                    style[style_type].append(style_str)
        return style
    def add_instruction(self, instruction):
        """Append one drawing instruction to this lookup."""
        self.instruction.append(instruction)
    def __getitem__(self, key):
        # Dict-style access onto attributes; missing attribute -> KeyError.
        try:
            return getattr(self, key)
        except AttributeError as e:
            raise KeyError(e)
    def __add__(self, other):
        # Promote to a collection so lookups compose with collections.
        return LookupCollection([self]) + other
    def __matmul__(self, other):
        return LookupCollection([self]) @ other
    def __iter__(self):
        return iter(LookupCollection([self]))
class LookupCollection(list):
    """A list of Lookup objects with element-wise composition operators."""
    __slots__ = ()
    def __init__(self, seq, *, id=''):
        # Dicts in `seq` are expanded into Lookup kwargs; Lookups pass through.
        super().__init__(Lookup(id=id, **lookup)
                         if isinstance(lookup, dict)
                         else lookup
                         for lookup in seq)
        if not all(isinstance(item, Lookup) for item in self):
            raise TypeError('LookupCollection can only contain Lookups')
    def add_instruction(self, instruction):
        """Append one instruction to every lookup in the collection."""
        for lookup in self:
            lookup.add_instruction(instruction)
    def __matmul__(self, other):
        # Cross-product merge: combine each pair whose table/display are
        # compatible (equal, or unset on either side).
        return self.__class__(
            Lookup(l.id + r.id,
                   l.table or r.table,
                   l.display or r.display,
                   l.comment + r.comment,
                   l.instruction + r.instruction,
                   l.rules & r.rules,
                   max(l.display_priority, r.display_priority),
                   )
            for l in self
            for r in other
            if (r.table is None or l.table is None or r.table == l.table)
            and (r.display is None or l.display is None
                 or r.display == l.display)
        )
    def __add__(self, other):
        # Plain concatenation, re-wrapped (also re-validates via __init__).
        return self.__class__(list.__add__(self, other))
|
import tkinter as tk
# Last known cursor position, updated on every press/motion event.
prev = [0, 0]
# True while the left mouse button is held down (drawing in progress).
isMouseDown = False
root = tk.Tk()
canvas = tk.Canvas(root, width=500, height=500)
canvas.pack()
def mousemove(event):
    """While the left button is held, extend the stroke to the cursor;
    always remember the latest position."""
    x, y = event.x, event.y
    if isMouseDown:
        canvas.create_line(prev[0], prev[1], x, y)
    prev[0], prev[1] = x, y
def mousedown(event):
    """Begin a stroke: flag the button as pressed and anchor the start point."""
    global isMouseDown
    isMouseDown = True
    prev[0], prev[1] = event.x, event.y
def mouseup(event):
    """End the current stroke so mousemove stops drawing."""
    global isMouseDown
    isMouseDown = False
# Wire the events: press starts a stroke, release ends it, motion draws.
canvas.bind('<Button-1>' , mousedown)
canvas.bind('<ButtonRelease-1>' , mouseup)
canvas.bind('<Motion>', mousemove)
root.mainloop()
|
import collections
import re
import numpy as np
from janome.tokenizer import Tokenizer
from janome.analyzer import Analyzer
from janome.tokenfilter import POSStopFilter
def CountWord(tweets: list, keyword: str = None, stopword_list: set = None) -> dict:
    """Count noun frequencies over a list of tweets and print TF/IDF/TF-IDF tables.

    Only nouns ('名詞') of length > 1 that are purely alphabetic are counted.
    If `keyword` is given, its nouns are added to `stopword_list` (which is
    mutated in place). Returns {lower-cased word: raw frequency}.
    """
    tweet_list = [tweet["text"] for tweet in tweets]
    all_tweet = "\n".join(tweet_list)
    # Drop conjunctions, symbols, particles and auxiliary verbs during analysis.
    token_filters = [POSStopFilter(['接続詞', '記号', '助詞', '助動詞'])]
    t = Analyzer(tokenizer=Tokenizer(), token_filters=token_filters)
    c = collections.Counter(token.base_form for token in t.analyze(all_tweet)
                            if token.part_of_speech.startswith('名詞') and len(token.base_form) > 1 and token.base_form.isalpha())
    if keyword:  # exclude the words contained in the keyword itself
        ck = [token.base_form for token in t.analyze(
            keyword) if token.part_of_speech.startswith('名詞') and len(token.base_form) > 1 and token.base_form.isalpha()]
    freq_dict = {}
    mc = c.most_common()
    if stopword_list:
        if keyword:
            for w in ck:
                stopword_list.add(w)
        for elem in mc:
            if elem[0].lower() not in stopword_list:
                freq_dict[elem[0].lower()] = elem[1]
    else:
        for elem in mc:
            freq_dict[elem[0].lower()] = elem[1]
    tf_dict = CalcTF(freq_dict)
    # IDF computation: re-tokenize per tweet to get the per-document word lists.
    split_tweet_list = []
    for tweet in tweet_list:
        l = [token.base_form for token in t.analyze(tweet)
             if token.part_of_speech.startswith('名詞') and len(token.base_form) > 1 and token.base_form.isalpha()]
        split_tweet_list.append(l)
    idf_list = CalcIDF(split_tweet_list)
    tf_idf_list = CalcTFIDF(tf_dict, idf_list)
    PrintWordsTF(tf_dict)
    PrintWordsIDF(idf_list)
    PrintWordsTFIDF(tf_idf_list)
    return freq_dict
def CalcTF(freq_dict) -> dict:
    """Normalize raw counts into term frequencies, rounded to 6 decimals."""
    total = sum(freq_dict.values())
    return {word: round(count / total, 6) for word, count in freq_dict.items()}
def getIDF(word, document):
    """Smoothed inverse document frequency: log2(N / (df + 1)),
    where df is the number of documents containing `word`."""
    n_samples = len(document)
    df = np.sum(np.array([int(word in d) for d in document], dtype="float32")) + 1
    return np.log2(n_samples / df)
def CalcIDF(in_corpus):
    """Score every distinct word in the corpus by IDF.

    Returns [(lower-cased word, idf)] sorted ascending, i.e. common first.
    """
    vocab = set(w for s in in_corpus for w in s)
    word_idf_dict = {w.lower(): getIDF(w, in_corpus) for w in vocab}
    return sorted(word_idf_dict.items(), key=lambda item: item[1])
def CalcTFIDF(tf_dict, idf_list):
    """Combine TF and IDF scores; words absent from tf_dict are skipped.

    Returns [(word, tf*idf)] sorted descending by score.
    """
    tf_idf_dict = {}
    for word, idf_val in idf_list:
        if word in tf_dict:
            tf_idf_dict[word] = tf_dict[word] * idf_val
    return sorted(tf_idf_dict.items(), key=lambda kv: kv[1], reverse=True)
def PrintWordsTF(tf_dict, target=10):
    """Print the first `target` entries of tf_dict as a ranked TF table."""
    print("\n{0:3}. {1:10} - {2}".format("rank", "TF (頻度)", "word"))
    rank = 0
    for word, tf in tf_dict.items():
        if rank >= target:
            break
        rank += 1
        print("{0:3}. {1:10} - {2}".format(rank, tf, word))
def PrintWordsIDF(idf_list: list, target=10):
    """Print the first `target` entries of idf_list as a ranked IDF table."""
    print("\n{0:3}. {1:10} - {2}".format("rank", "IDF(レア度)", "word"))
    rank = 0
    for word, idf in idf_list:
        if rank >= target:
            break
        rank += 1
        print("{0:3}. {1:10} - {2}".format(rank, round(idf, 6), word))
def PrintWordsTFIDF(tf_idf_list: list, target=30):
    """Print the first `target` entries of tf_idf_list as a ranked TF-IDF table."""
    print("\n{0:3}. {1:10} - {2}".format("rank", "TF-IDF", "word"))
    rank = 0
    for word, score in tf_idf_list:
        if rank >= target:
            break
        rank += 1
        print("{0:3}. {1:10} - {2}".format(rank, round(score, 6), word))
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression #The class sklearn.linear_model.LinearRegression
# will be used to perform linear and
# polynomial regression and make predictions accordingly.
# Single-feature demo: x must be 2-D (n_samples, n_features) for sklearn.
x = np.array([5, 15, 25, 35, 45, 55]).reshape((-1, 1))
y = np.array([[5], [20], [14], [32], [22], [38]])
model = LinearRegression().fit(x,y) # .fit() computes the optimal coefficients
predictions = model.predict(x)  # NOTE(review): computed but unused below
r_sq = model.score(x, y) # R^2 = coefficient of determination
print('R^2:', r_sq)
print("="*40)
# intercept_ (b0) and coef_ (b1) of the fitted line
print("b0=", model.intercept_, "\n", "b1=", model.coef_)
print("="*40)
# Scatter the data and overlay the fitted line in red.
plt.scatter(x, y)
plt.plot(x, model.predict(x) , "r-")
plt.xlabel("Input X")
plt.ylabel("output Y")
plt.title("linear regression")
plt.show()
|
#import sys
#input = sys.stdin.readline
def main():
    """Competitive-programming solution: reach (X, Y) from the origin using
    moves of exactly K steps each (each move changes |x|+|y| by K in parity
    terms). Prints -1 when unreachable, otherwise the move count followed by
    each intermediate position.

    NOTE(review): the logic below mirrors a case analysis on (X+Y) mod K and
    (X-Y) mod K — presumably +1/+2/+3 extra moves respectively; confirm
    against the original problem statement.
    """
    K = int( input())
    X, Y = map( int, input().split())
    # Each K-step move preserves the parity of x+y when K is even, so an
    # odd-sum target is unreachable.
    if K%2 == 0:
        if (X+Y)%2 == 1:
            print(-1)
            return
    abX = abs(X)
    abY = abs(Y)
    # a/b are the signs of movement along x/y toward the target.
    a, b = 1, 1
    if X < 0:
        a = -1
    if Y < 0:
        b = -1
    # Announce the total number of moves before emitting the path.
    if (X+Y)%K == 0:
        print(abX//K+abY//K+1)
    elif (X-Y)%K == 0:
        print(abX//K+abY//K+2)
    else:
        print(abX//K+abY//K+3)
    # Walk in full-K strides along x, then along y, printing each stop.
    x, y = 0, 0
    for _ in range(abX//K):
        x += a*K
        print(x,y)
    for _ in range(abY//K):
        y += b*K
        print(x,y)
    # Finish with the 1-3 remaining (possibly diagonal) moves.
    if (X+Y)%K == 0:
        print(X,Y)
        return
    if (X-Y)%K == 0:
        print(x+a*K, y)
        print(X,Y)
        return
    print(X,y)
    print(X,Y)
if __name__ == '__main__':
    main()
|
import sys
import os
from collections import defaultdict
from text_preprocess import txt_preprocesser
import math
import json
"""KMAMIN 62182275 KRISHAN AMIN"""
class bayesian_classifier:
    """Multinomial naive-Bayes artist classifier over song lyrics."""
    def trainNaiveBayes(self,addition):
        """Build the count tables from the preprocessed lyrics corpus.

        Returns a dict with: cdc (docs per artist), cwc (word counts per
        artist), wc (vocabulary), cwt (total words per artist), clist
        (the set of artists). `addition` is forwarded to the preprocessor.
        """
        trainlist = []
        with open('corpus_data/preprocessedf_corpus.json') as corpus:
            corpus = json.loads(corpus.read().encode('latin-1'))
        for artist,songlist in corpus.items():
            for song in songlist:
                d = {}
                d['artist'] = artist
                d['lyrics'] = song['lyrics']
                trainlist.append(d)
        preprocesser = txt_preprocesser()
        class_doc_counts = defaultdict(int)
        class_wc = defaultdict(lambda: defaultdict(int))
        word_counts = {}
        classes = {}
        class_wtotals = defaultdict(int)
        for element in trainlist:
            artist = element['artist']
            classes[(artist)] = 1
            word_list = preprocesser.process(element['lyrics'],addition)
            class_doc_counts[artist] += 1
            for word in word_list:
                class_wc[artist][word] += 1
                word_counts[word] = 1
                class_wtotals[artist] += 1
        outputdict = {"cdc":class_doc_counts,'cwc':class_wc,'wc':word_counts,'cwt':class_wtotals,'clist':classes}
        """with open('nb_train.json','w') as ofile:
            json.dump(outputdict,ofile,indent=4)
            ofile.close()"""
        return outputdict
    def testNaiveBayes(self,addition):
        """Score the document at addition[1] against every artist and print
        the top five (artist, score) pairs."""
        preprocesser = txt_preprocesser() # declare preprocessor
        # lcount = 0
        # tcount = 0 used for accuracy readnigs
        # accuracy = 0
        td = self.trainNaiveBayes(addition[0])
        """with open('nb_train.json',) as ifile:
            td = json.loads(ifile.read())"""
        # TRAIN
        cdc = td['cdc'] # docs in true and lie
        cwc = td['cwc'] # word counts in true | lie
        word_counts = td['wc'] # word counts overall
        cwt = td['cwt'] # total words in true / false
        class_list = td['clist']
        class_score = defaultdict(int)
        for el in class_list:
            class_score[el] = 0
        # RETURN ALL NECC COUNTS
        numdocs = sum(val for key,val in cdc.items())
        with open(addition[1]) as file:
            wlist = preprocesser.process(file.read(),addition)
        for artist,score in class_score.items():
            for word in wlist:
                # Laplace-style smoothing on the per-artist word probability.
                score += math.log((1+ cwc[artist].get(word,0)) / (cwt[artist]))
            # lie score += log( p(lie) * count(word | lie) / (vocabsize + vocabsize of lies))
            # NOTE(review): this MULTIPLIES the log-likelihood sum by the
            # prior; textbook NB would ADD log(prior) instead — confirm intent.
            score *= cdc[artist]/numdocs # add the P(lie)
            class_score[artist] = score
        sorted_results = sorted(class_score.items(), key=lambda kv: kv[1], reverse=True)
        for key, value in sorted_results[:5]:
            print(str(key) + "\t" + str(value))
def main():
    # argv[1:] is forwarded as `addition`: presumably [preprocess-mode,
    # input-file, ...] — confirm against txt_preprocesser.process.
    classifier = bayesian_classifier()
    #classifier.trainNaiveBayes()
    classifier.testNaiveBayes(sys.argv[1:])
if __name__ == "__main__":
    main()
|
import json
import uuid
import sample
import sys
# BUGFIX: the open() handles were never closed; `with` guarantees cleanup.
with open('sample_map.json', 'r') as _f:
    sample_map = json.load(_f)
with open('entity_map.json', 'r') as _f:
    entity_map = json.load(_f)
class Intent():
    """One Dialogflow intent: the intent frame plus its generated training phrases."""
    def __init__(self, name, sample_frame):
        self.id = str(uuid.uuid4())
        self.name = name
        self.sample_frame = sample_frame
        self.frame = None
        self.phrases = []
        self.build()
        self.build_phrases(sample_frame)
    def build(self):
        """Create the intent frame skeleton, then fill in its parameters."""
        self.frame = {
            "id": self.id,
            "name": self.name,
            "displayName": self.name,
            "action": "CSC_BOT",
            "responses": [
                {
                    "resetContexts": False,
                    "affectedContexts": [],
                    "parameters": [],
                    "messages": [
                        {
                            "type": 0,
                            "lang": "en",
                            "speech": []
                        }
                    ],
                    "defaultResponsePlatforms": {},
                    "speech": []
                }
            ]
        }
        self.build_parameters()
    def build_phrases(self, sample_frame):
        """Expand each rephrase (plus the frame itself) into sampled phrases."""
        # BUGFIX: this used to call phrases.append(key), relying on the global
        # loop variable leaked from the __main__ block (NameError anywhere
        # else); `sample_frame` is that same value. list() also keeps us from
        # mutating the shared sample_map entry on every construction.
        phrases = list(sample_map[sample_frame]['rephrases'])
        phrases.append(sample_frame)
        samples = []
        vals_per_question = 10  # entity fillings generated per phrase template
        for phrase in phrases:
            for i in range(vals_per_question):
                s = sample.Sample(phrase, self.name)
                s.populate_entities(entity_map)
                samples.append(s)
        for s in samples:
            self.build_phrase(s)
    def build_phrase(self, s):
        """Convert one populated Sample into a Dialogflow usersays entry."""
        phrase = {
            "id": str(uuid.uuid4()),
            "data": [],
            "isTemplate": False,
            "count": 0
        }
        index = 0
        # go through each entity and add phrase part to data
        for entity in s.entities:
            if entity['start'] > index:
                # plain text between the previous entity and this one
                phrase['data'].append({
                    "text": s.text[index:entity['start']],
                    "userDefined": False
                })
                index += len(s.text[index:entity['start']])
            # if number, use sys.number class
            if entity['entity'] == 'NUMBER':
                phrase['data'].append({
                    "text": entity['value'],
                    "userDefined": False,
                    "alias": "number",
                    "meta": "@sys.number"
                })
            else: # not number, use defined entity type
                phrase['data'].append({
                    "text": entity['value'],
                    "userDefined": False,
                    "alias": entity['entity'],
                    "meta": "@{}".format(entity['entity'])
                })
            index += len(entity['value'])
        # add remaining string
        if index < len(s.text):
            phrase['data'].append({
                "text": s.text[index:],
                "userDefined": False
            })
        self.phrases.append(phrase)
    def build_parameters(self):
        """Build the parameters for this intent"""
        for key in entity_map:
            if key in self.sample_frame:
                parameter = {
                    "id": str(uuid.uuid4()),
                    "required": True,
                    "name": entity_map[key]['entity_type'],
                    "dataType": "@{}".format(entity_map[key]['entity_type']),
                    "value": "${}".format(entity_map[key]['entity_type']),
                    "isList": False
                }
                self.frame['responses'][0]['parameters'].append(parameter)
    def write_to_file(self):
        """Dump the intent frame and its usersays phrases as Dialogflow JSON."""
        with open("./CSC466Bot/intents/{}.json".format(self.name), 'w+') as f_out:
            json.dump(self.frame, f_out)
        with open("./CSC466Bot/intents/{}_usersays_en.json".format(self.name), 'w+') as f_phrases_out:
            json.dump(self.phrases, f_phrases_out)
if __name__ == '__main__':
    # Build and export one Dialogflow intent per sample frame.
    for key in sample_map:
        intent_name = sample_map[key]['intent']
        intent = Intent(intent_name, key)
        intent.write_to_file()
|
### preparing data for training variants of CRF models (e.g. semi-markov) on the command line ###
import io, argparse
def read_data(file):
    """Read one prediction block (first token of each line) up to a blank line.

    Returns the collected tokens when a blank line terminates the block, or
    None when the file ends without one (i.e. no more complete blocks).
    """
    pred = []
    for raw in file:
        toks = raw.strip('\n').split()
        if not toks:
            return pred
        pred.append(toks[0])
    return None
### Gathering data ###
def gather_data(input, train, dev, test): # *_tgt files
    """Read the train/dev/test *_tgt files under `input` and label each word.

    Each line holds one word segmented into morphs by '!'. Every character is
    labelled S (single-char morph), B (begin), M (middle) or E (end).

    Returns ((train_dict, dev_dict, test_dict), train_words, dev_words,
    test_words), where each dict maps word -> label string.
    """
    training_dict = {}
    dev_dict = {}
    test_dict = {}
    input_files = [train, dev, test]
    dictionaries = (training_dict, dev_dict, test_dict)
    train_words = []
    dev_words = []
    test_words = []
    counter = 0
    for file in input_files:
        # BUGFIX: read from the `input` parameter instead of the global
        # `args.input` — the function was ignoring its own argument, so it
        # only worked when run from the command line. (Also removed the
        # unused `data` and `w_dict` locals.)
        with io.open(input + file, encoding = 'utf-8') as f:
            for line in f:
                toks = line.strip().split()
                morphs = (''.join(c for c in toks)).split('!')
                word = ''.join(m for m in morphs)
                if file == train:
                    train_words.append(word)
                if file == dev:
                    dev_words.append(word)
                if file == test:
                    test_words.append(word)
                label = ''
                for morph in morphs:
                    if len(morph) == 1:
                        label += 'S'
                    else:
                        label += 'B'
                        for i in range(len(morph)-2):
                            label += 'M'
                        label += 'E'
                dictionaries[counter][word] = label
        counter += 1
    return dictionaries, train_words, dev_words, test_words
### Computing features ###
def features(word_dictonary, original_words, delta):
    """Compute per-character CRF features for each word.

    Each word is wrapped as '[' + word + ']'; for every character we emit
    right/left substring features up to length `delta`, plus the character's
    start position. Labels come from word_dictonary (offset by 1 to skip
    the '[' sentinel). Returns (X, Y, words).
    """
    X = [] # list (learning set) of list (word) of dics (chars), INPUT for crf
    Y = [] # list (learning set) of list (word) of labels (chars), INPUT for crf
    words = [] # list (learning set) of list (word) of chars
    for word in original_words:
        word_plus = '[' + word + ']' # <w> and <\w> replaced with [ and ]
        word_list = [] # container of the dic of each character in a word
        word_label_list = [] # container of the label of each character in a word
        for i in range(len(word_plus)):
            char_dic = {} # dic of features of the actual char
            for j in range(delta):
                char_dic['right_' + word_plus[i:i + j + 1]] = 1
            for j in range(delta):
                if i - j - 1 < 0: break
                char_dic['left_' + word_plus[i - j - 1:i]] = 1
            char_dic['pos_start_' + str(i)] = 1 # extra feature: left index of the letter in the word
            # char_dic['pos_end_' + str(len(word) - i)] = 1 # extra feature: right index of the letter in the word
            # if word_plus[i] in ['a', 's', 'o']: # extra feature: stressed characters (discussed in the report)
            # char_dic[str(word_plus[i])] = 1
            word_list.append(char_dic)
            if word_plus[i] == '[': word_label_list.append('[') # labeling start and end
            elif word_plus[i] == ']': word_label_list.append(']')
            else: word_label_list.append(word_dictonary[word][i-1]) # labeling chars
        X.append(word_list)
        Y.append(word_label_list)
        temp_list_word = [char for char in word_plus]
        words.append(temp_list_word)
    return (X, Y, words)
def output_features(data, labels_list):
    """Render per-character feature dicts into the textual row format expected
    by command-line CRF trainers.

    data: list (words) of lists (characters) of feature-name dicts.
    labels_list: matching per-character label lists.
    Returns a list of words, each a list of [label, feat, feat, ...] rows.
    """
    output = []
    for i in range(len(data)):
        # BUGFIX: removed `a += 1` — `a` was never defined in this scope, so
        # the first non-empty call raised NameError (the counter was unused).
        features = data[i]
        labels = labels_list[i]
        word_out = []
        for z in range(len(features)):
            out = [labels[z]]
            for f in features[z]:
                parts = f.split('_')
                new_f = []
                if parts[0] in ['right', 'left']:
                    # e.g. 'right_ab' -> 'right[2]=ab'
                    new_f.append(parts[0])
                    new_f.append('[')
                    new_f.append(len(parts[1]))
                    new_f.append(']')
                    new_f.append('=')
                    new_f.append(parts[1])
                if parts[0] in ['pos']:
                    # e.g. 'pos_start_3' -> 'pos_start=3'
                    new_f.append('pos')
                    new_f.append('_start')
                    new_f.append('=')
                    new_f.append(parts[-1])
                out.append(''.join(str(t) for t in new_f))
            word_out.append(out)
        output.append(word_out)
    return output
### Output feature data for training CRF models on the command line ###
def generate(dictionaries, train_words, dev_words, test_words, delta):
    """Compute character features for all three splits and format them for
    command-line CRF training. Returns (train_out, dev_out, test_out)."""
    training_dict, dev_dict, test_dict = dictionaries
    splits = (
        (training_dict, train_words),
        (dev_dict, dev_words),
        (test_dict, test_words),
    )
    outputs = []
    for word_dict, words in splits:
        X, Y, _ = features(word_dict, words, delta)
        outputs.append(output_features(X, Y))
    return tuple(outputs)
def reconstruct(pred_labels, src):
    """Rebuild morph segmentations from predicted label sequences.

    `src` lists the original (space-separated-character) words; `pred_labels`
    holds one blank-line-separated label block per word, bracketed by the
    '[' / ']' sentinels. Labels are S/B/M/E; the word is sliced into morphs
    whose lengths match the normalized label tokens. Returns a list of
    morph lists, one per word.
    """
    words = []
    with io.open(src, encoding = 'utf-8') as f:
        for line in f:
            toks = line.strip().split()
            words.append(''.join(c for c in toks))
    data = []
    a = 0
    # NOTE(review): `a` is never used below; kept as-is in this doc pass.
    with io.open(pred_labels, encoding = 'utf-8') as f:
        pred = read_data(f)
        while pred is not None:
            data.append(pred)
            pred = read_data(f)
    pred_list = []
    for idx in range(len(data)):
        pred = data[idx]
        word = words[idx]
        # Strip the '[' and ']' sentinels, then split the label string at
        # each morph end marker 'E'.
        labels = ''.join(w for w in pred[1 : -1])
        labels = labels.split('E')
        if '' in labels:
            labels.remove('')
        # Normalize each fragment back into per-morph tokens ('S' for
        # single-character morphs, '...E' otherwise).
        new_labels = []
        for tok in labels:
            # print(tok, word)
            if 'S' not in tok:
                tok += 'E'
                new_labels.append(tok)
            else:
                c = tok.count('S')
                if c == len(tok):
                    for z in range(c):
                        new_labels.append('S')
                else:
                    tok = tok.split('S')
                    new_tok = []
                    for z in tok:
                        if z == '':
                            new_labels.append('S')
                        else:
                            new_labels.append(z + 'E')
        # Slice the word into morphs whose lengths match the label tokens.
        morphs = []
        for i in range(len(new_labels)):
            tok = new_labels[i]
            l = len(tok)
            if i == 0:
                morphs.append(word[0 : l])
            else:
                pre = len(''.join(z for z in new_labels[ : i]))
                morphs.append(word[pre: pre + l])
        # print(pred, labels, new_labels, word, morphs)
        pred_list.append(morphs)
    return pred_list
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type = str, help = 'input path')
    parser.add_argument('--lang', type = str, help = 'target language')
    parser.add_argument('--split', type = str, help = '1, 2, 3, etc')
    parser.add_argument('--state', type = str, help = 'g (generate data for training)/ r (reconstruct data for evaluation')
    parser.add_argument('--d', type = int, default = 4)
    parser.add_argument('--m', help = 'model type')
    args = parser.parse_args()
    lang = args.lang
    n = args.split
    # Per-split target-file names: <lang>_{train,dev,test}_tgt_<split>.
    train_f = lang + '_train_tgt_' + n
    dev_f = lang + '_dev_tgt_' + n
    test_f = lang + '_test_tgt_' + n
    if args.state == 'g':
        # Generate: label the corpora and write one feature row per character,
        # words separated by blank lines (the format read_data expects back).
        dictionaries, train_words, dev_words, test_words = gather_data(args.input, train_f, dev_f, test_f)
        training_dict, dev_dict, test_dict = dictionaries
        train_out, dev_out, test_out = generate(dictionaries, train_words, dev_words, test_words, args.d)
        with io.open(args.input + lang + '_train_' + args.m + '_' + n, 'w', encoding = 'utf-8') as f:
            for tok in train_out:
                for z in tok:
                    f.write('\t'.join(c for c in z) + '\n')
                f.write('\n')
        with io.open(args.input + lang + '_dev_' + args.m + '_' + n, 'w', encoding = 'utf-8') as f:
            for tok in dev_out:
                for z in tok:
                    f.write('\t'.join(c for c in z) + '\n')
                f.write('\n')
        with io.open(args.input + lang + '_test_' + args.m + '_' + n, 'w', encoding = 'utf-8') as f:
            for tok in test_out:
                for z in tok:
                    f.write('\t'.join(c for c in z) + '\n')
                f.write('\n')
    if args.state == 'r':
        # Reconstruct: turn predicted label files back into '!'-joined morphs,
        # written one space-separated-character word per line.
        words = args.input + lang + '_test_src_' + n
        pred_labels = args.input + lang + '_test_' + args.m + '_labels_' + n
        predictions = reconstruct(pred_labels, words)
        with io.open(args.input + lang + '_test_pred_' + args.m + '_' + n, 'w', encoding = 'utf-8') as f:
            for tok in predictions:
                tok = '!'.join(m for m in tok)
                tok = list(tok)
                f.write(' '.join(c for c in tok) + '\n')
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """South forward migration: create the CounselingAgency table."""
        # Adding model 'CounselingAgency'
        db.create_table(u'hud_api_replace_counselingagency', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('agcid', self.gf('django.db.models.fields.CharField')(max_length=9)),
            ('adr1', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('adr2', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('city', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('email', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('fax', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('nme', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('phone1', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('statecd', self.gf('django.db.models.fields.CharField')(max_length=2)),
            ('weburl', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('zipcd', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('agc_ADDR_LATITUDE', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('agc_ADDR_LONGITUDE', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('languages', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('services', self.gf('django.db.models.fields.CharField')(max_length=1500)),
            ('parentid', self.gf('django.db.models.fields.CharField')(max_length=9)),
            ('county_nme', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('phone2', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('mailingadr1', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('mailingadr2', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('mailingcity', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('mailingzipcd', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('mailingstatecd', self.gf('django.db.models.fields.CharField')(max_length=2)),
            ('state_NME', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('state_FIPS_CODE', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('faithbased', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('colonias_IND', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('migrantwkrs_IND', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('agc_STATUS', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('agc_SRC_CD', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('counslg_METHOD', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'hud_api_replace', ['CounselingAgency'])
def backwards(self, orm):
    """Reverse this migration: drop the CounselingAgency table."""
    # Deleting model 'CounselingAgency'
    db.delete_table(u'hud_api_replace_counselingagency')
# South's frozen ORM snapshot: the model definitions as they existed when
# this migration was created.  South uses it to reconstruct the schema for
# forwards/backwards runs; application code never imports it.
models = {
    u'hud_api_replace.counselingagency': {
        'Meta': {'object_name': 'CounselingAgency'},
        'adr1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'adr2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'agc_ADDR_LATITUDE': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
        'agc_ADDR_LONGITUDE': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
        'agc_SRC_CD': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'agc_STATUS': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'agcid': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
        'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'colonias_IND': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'counslg_METHOD': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'county_nme': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'email': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'faithbased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'fax': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'languages': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'mailingadr1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'mailingadr2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'mailingcity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'mailingstatecd': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
        'mailingzipcd': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
        'migrantwkrs_IND': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'nme': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'parentid': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
        'phone1': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
        'phone2': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
        'services': ('django.db.models.fields.CharField', [], {'max_length': '1500'}),
        'state_FIPS_CODE': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'state_NME': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'statecd': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
        'weburl': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'zipcd': ('django.db.models.fields.CharField', [], {'max_length': '10'})
    }
}
# Apps whose models are frozen in this migration.
complete_apps = ['hud_api_replace']
|
import urllib, urllib2, json
class Salary:
    """Fetch the raw YouTube money-calculator response for a channel from
    socialblade.com.

    NOTE(review): Python 2 only (`urllib2`, `urllib.urlencode`); porting to
    Python 3 requires `urllib.request` / `urllib.parse`.
    """
    def __init__(self, username):
        # POST the channel name as the 'query' parameter to the endpoint.
        url = 'https://socialblade.com/js/class/youtube-money-calculator'
        param = { 'query' : username }
        self.req = urllib2.Request(url, urllib.urlencode(param))
        # Desktop-browser UA plus origin/referer headers -- presumably to
        # avoid bot rejection by the endpoint (unverified).
        self.req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')
        self.req.add_header('origin', 'https://socialblade.com/js/class/youtube-money-calculator')
        self.req.add_header('referer', 'https://socialblade.com/js/class/youtube-money-calculator')
    def get(self):
        # Perform the prepared request and return the raw response body.
        data = urllib2.urlopen(self.req)
        return data.read()
|
from morepath import redirect
from onegov.org import _
from onegov.org import OrgApp
from onegov.org.forms import ManageUserGroupForm
from onegov.org.layout import UserGroupCollectionLayout
from onegov.org.layout import UserGroupLayout
from onegov.core.elements import Link
from onegov.core.security import Secret
from onegov.user import UserGroup
from onegov.user import UserGroupCollection
def get_usergroup_form_class(model, request):
    """Return the user-group form class configured on the app.

    Falls back to :class:`ManageUserGroupForm` when the org settings do
    not define ``usergroup_form_class``.
    """
    org_settings = request.app.settings.org
    return getattr(org_settings, 'usergroup_form_class', ManageUserGroupForm)
@OrgApp.html(
    model=UserGroupCollection,
    template='user_groups.pt',
    permission=Secret
)
def view_user_groups(self, request, layout=None):
    """Render the list of all user groups (requires Secret permission)."""
    layout = layout or UserGroupCollectionLayout(self, request)
    return {
        'title': _('User groups'),
        'groups': self.query().all(),
        'layout': layout,
    }
@OrgApp.form(
    model=UserGroupCollection,
    name='new',
    template='form.pt',
    permission=Secret,
    form=get_usergroup_form_class
)
def add_user_group(self, request, form, layout=None):
    """Create a new user group from the submitted form, or render it."""
    if form.submitted(request):
        group = self.add(name=form.name.data)
        form.update_model(group)
        request.success(_('Added a new user group'))
        return redirect(request.link(group))

    layout = layout or UserGroupCollectionLayout(self, request)
    layout.breadcrumbs.append(Link(_('New user group'), '#'))

    return {
        'title': _('New user group'),
        'form': form,
        'layout': layout,
    }
@OrgApp.html(
    model=UserGroup,
    template='user_group.pt',
    permission=Secret
)
def view_user_group(self, request, layout=None):
    """Show the details page of a single user group."""
    layout = layout or UserGroupLayout(self, request)
    return {
        'title': self.name,
        'layout': layout,
    }
@OrgApp.form(
    model=UserGroup,
    name='edit',
    template='form.pt',
    permission=Secret,
    form=get_usergroup_form_class
)
def edit_user_group(self, request, form, layout=None):
    """Edit an existing user group; on submit, save and redirect."""
    if form.submitted(request):
        form.update_model(self)
        request.success(_('Your changes were saved'))
        return redirect(request.link(self))

    # Pre-populate the form from the model on the initial (error-free) GET.
    if not form.errors:
        form.apply_model(self)

    layout = layout or UserGroupLayout(self, request)
    layout.breadcrumbs.append(Link(_('Edit user group'), '#'))

    return {
        'title': _('Edit user group'),
        'form': form,
        'layout': layout,
    }
@OrgApp.view(
    model=UserGroup,
    request_method='DELETE',
    permission=Secret
)
def delete_user_group(self, request):
    """Delete this user group (CSRF-protected)."""
    request.assert_valid_csrf_token()
    collection = UserGroupCollection(request.session)
    collection.delete(self)
|
#DONE
#https://www.reddit.com/r/dailyprogrammer/comments/bqy1cf/20190520_challenge_378_easy_the_havelhakimi/
#returns a list that has no zeroes in it
def eliminateZero(arr):
    """Return a new list containing the elements of *arr* without zeros."""
    return [value for value in arr if value != 0]
def sortDesc(arr):
    """Return the elements of *arr* as a new list in descending order."""
    return sorted(arr, reverse=True)
def lengthCheck(num, arr):
    """Return True when *arr* holds fewer than *num* elements."""
    return num > len(arr)
def frontReduction(num, arr):
    """Return a copy of *arr* with 1 subtracted from its first *num* entries.

    If *num* exceeds the list length, every entry is decremented.

    Bug fix: the original aliased the input (``newArr = arr``) and mutated
    the caller's list despite the comment promising a *returned* list; the
    decrement now happens on a copy.
    """
    newArr = list(arr)  # copy so the caller's list is untouched
    for i in range(min(num, len(newArr))):
        newArr[i] -= 1
    return newArr
# Havel-Hakimi algorithm.
def hh(arr):
    """Return True when *arr* is a realizable degree sequence (everyone
    could be telling the truth), False when someone must be lying."""
    seq = arr
    while True:
        seq = eliminateZero(seq)
        if not seq:
            return True
        seq = sortDesc(seq)
        top = seq.pop(0)
        # Fewer remaining entries than the claimed degree: impossible.
        if lengthCheck(top, seq):
            return False
        seq = frontReduction(top, seq)
# --- manual smoke tests --------------------------------------------------
# Commented-out one-off checks of each helper are kept below for reference;
# the live prints exercise hh() on the challenge inputs.
#main test
# test zero elim function [WORKS]
#arr = [0] #[1, 0, 2, 3, 4, 0, 8]
#arr = eliminateZero(arr)
#print(arr)
# test sortDesc function [WORKS]
#arr = [1, 2, 5, 4, 6, 3, 3]
#arr = sortDesc(arr)
#print(arr)
# test lengthCheck function [WORKS]
#arr = [] #[5, 5, 5, 5, 5]
#print(lengthCheck(2, arr))
#test frontReduction function [WORKS]
#arr = [1] #[5, 4, 3, 2, 1]
#print(frontReduction(0, arr))
#test hh function [WORKS]
# Prints True when the degree sequence is realizable, False otherwise.
print(hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]))
print(hh([4, 2, 0, 1, 5, 0]))
print(hh([3, 1, 2, 3, 1, 0]))
print(hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17, 0, 3, 16]))
print(hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6, 4, 7, 12]))
print(hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7, 12, 3]))
print(hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15, 5, 1]))
print(hh([2, 2, 0]))
print(hh([3, 2, 1]))
print(hh([1, 1]))
print(hh([1]))
print(hh([]))
|
import argparse
import asyncio
import logging
import os
from telegram2mqtt.gateway import Gateway
# Logging verbosity used when --loglevel is not supplied.
DEFAULT_LOGLEVEL = 'INFO'
# Accepted values for the --loglevel CLI option (standard logging levels).
LOGLEVEL_CHOICES = [
    'DEBUG',
    'INFO',
    'WARNING',
    'ERROR',
    'CRITICAL',
]
# Telegram bot API token; may come from the environment instead of -t.
TELEGRAM_API_TOKEN = os.getenv('TELEGRAM_API_TOKEN', None)
def main():
    """Parse CLI arguments, configure logging and run the gateway loop.

    Bug fix: the original called ``exit(parser.print_usage())`` when the
    token was missing; ``print_usage()`` returns None, so the process
    exited with status 0 even though it failed.  ``parser.error`` prints
    the usage plus a message and exits with status 2.
    """
    # ArgumentParser
    parser = argparse.ArgumentParser(prog='telegram2mqtt', description='Telegram to Mqtt gateway')
    parser.add_argument('-t', '--token', metavar='TOKEN', type=str,
                        default=TELEGRAM_API_TOKEN, help='Telegram api token. Default from TELEGRAM_API_TOKEN env var.')
    parser.add_argument('--loglevel', metavar='LEVEL', type=str,
                        default=DEFAULT_LOGLEVEL, choices=LOGLEVEL_CHOICES,
                        help="Log level. Default: '{}'".format(DEFAULT_LOGLEVEL))
    kwargs = vars(parser.parse_args())
    if not kwargs['token']:
        # Prints usage + message to stderr and exits with status 2.
        parser.error('a Telegram API token is required (use -t or set TELEGRAM_API_TOKEN)')
    # Log config
    loglevel = kwargs.pop('loglevel')
    logging.basicConfig(level=loglevel, format=' %(levelname)-8s %(name)s %(message)s')
    # Gateway: the remaining kwargs (the token) are passed straight through.
    gateway = Gateway(**kwargs)
    loop = asyncio.get_event_loop()
    loop.create_task(gateway.start())
    loop.run_forever()


if __name__ == '__main__':
    main()
|
# -*- coding:UTF-8 -*-
from rest_framework.routers import DefaultRouter
from . import views
# DRF router exposing the goods app's viewsets as REST endpoints.
router=DefaultRouter()
app_name='goods'
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9 and the
# old spelling was removed in later releases -- confirm the pinned DRF
# version before upgrading.
router.register('firstClass',views.FirstClassView,base_name='firstClass')
# router.register('secondClass',views.SecondClassView,base_name='secondClass')
router.register('thirdClass',views.ThirdClassView,base_name='thirdClass')
router.register('firstProperty',views.FirstPropertyView,base_name='firstProperty')
router.register('secondProperty',views.SecondPropertyView,base_name='secondProperty')
router.register('sizeGroup',views.SizeGroupView,base_name='sizeGroup')
router.register('sizeDesc',views.SizeDescView,base_name='sizeDesc')
router.register('sizeGroupClass',views.SizeGroupClassView,base_name='sizeGroupClass')
router.register('itemsDesc',views.ItemsDescView,base_name='itemsDesc')
router.register('goodDetail',views.GoodDetailView,base_name='goodDetail')
router.register('goodSearch',views.GoodSearchView,base_name='goodSearch')
# NOTE: registered under a basename different from its URL prefix.
router.register('SearchHistory',views.SearchHistoryView,base_name='goodSearchHistory')
urlpatterns=router.urls
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import os
from torch.autograd import Variable
import argparse
import numpy as np
from torch.optim.lr_scheduler import *
import csv
from model.resnet import resnet101
from dataset.DogCat import DogCat
# --- CLI -----------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--num_workers', type=int, default=2)
parser.add_argument('--batchSize', type=int, default=64)
parser.add_argument('--nepoch', type=int, default=1, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--gpu', type=str, default='7', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
opt = parser.parse_args()
print(opt)
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu

# Test-time preprocessing: resize to the network input size and apply the
# ImageNet normalisation expected by the pretrained ResNet.
transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
testset = DogCat('./data/test1', transform=transform_test, train=False, test=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=opt.batchSize, shuffle=False, num_workers=opt.num_workers)

# ResNet-101 with a 2-way head, weights restored from the checkpoint.
model = resnet101(pretrained=True)
model.fc = nn.Linear(2048, 2)
model.load_state_dict(torch.load('ckp/model.pth'))
model.cuda()
model.eval()

results = []
with torch.no_grad():
    for image, label in testloader:
        image = Variable(image.cuda())
        out = model(image)
        label = label.numpy().tolist()
        _, predicted = torch.max(out.data, 1)
        predicted = predicted.data.cpu().numpy().tolist()
        # Bug fix: the original used ";".join(str(j)), which joins the
        # *characters* of the number (e.g. 10 -> "1;0").  Store the
        # predicted class as a plain string instead.
        results.extend([[i, str(j)] for (i, j) in zip(label, predicted)])

# Write (id, label) rows to ./deploy/eval.csv.
eval_csv = os.path.join(os.path.expanduser('.'), 'deploy', 'eval.csv')
with open(eval_csv, 'w', newline='') as f:
    writer = csv.writer(f, delimiter=',')
    writer.writerow(("id", "label"))
    for x in results:
        writer.writerow(x)
from __future__ import division
import pandas as pd
import numpy as np
import networkx as nx
import geopandas as gp
from osmnx.utils import make_str
from shapely.geometry import LineString, Point
from osmnx_simplify_overwrite import simplify_graph
from weighted_betweenness import probit_assignment
def prepare_centroids_network2(centroid, network):
    '''
    Take transport network and centroids shapefiles as inputs
    then returns a geodataframe of the transport network with
    indicated centroid nodes

    NOTE(review): near-duplicate of ``prepare_centroids_network``; the only
    difference is that this variant also carries the 'road' attribute into
    the node table.  Consider merging the two.

    Parameters
    ------------
    centroid: str
        string of centroid shapefile's address+filename
    network: str
        string of network shapefile's address+name
    Returns
    ------------
    gdf_points: GeoDataFrame
        geodataframe (Points) of centroids shapefile
    gdf_node_pos: GeoDataFrame
        geodataframe (Points) of nodes obtained from all links in the network shapefile.
        The IsCentroid information is attached if a node is the closest node from a centroid.
    gdf: GeoDataFrame
        geodataframe (LineString) of the original network, containing information about the start
        node (FNODE) and end node (TNODE). The IsCentroid information is attached as well.
    '''
    #read the centroid shapefile into geodataframe
    gdf_points = gp.read_file(centroid)
    #read the network shapefile into geodataframe
    gdf = prepare_gdf_network(network)
    #take all nodes from the network geodataframe into dataframe
    # NOTE(review): DataFrame.append was removed in pandas 2.0; this module
    # requires pandas < 2.0 as written.
    df_node_pos = gdf[['Start_pos', 'FNODE_', 'road']].rename(columns={'Start_pos': 'pos', 'FNODE_': 'Node', 'road': 'road' }).append(
        gdf[['End_pos', 'TNODE_', 'road']].rename(columns={'End_pos': 'pos', 'TNODE_': 'Node', 'road': 'road' }))
    #drop all duplicate nodes
    df_node_pos = df_node_pos.drop_duplicates(subset='Node')
    #change the column name
    df_node_pos.columns = ['geometry', 'Node', 'road']
    #add column of POINT type for the geometry
    df_node_pos['pointgeo'] = [Point(xy) for xy in df_node_pos.geometry]
    #reindex the dataframe
    df_node_pos.index = range(len(df_node_pos))
    #save the longitude (x) and latitude(y) separately
    xy = np.array(df_node_pos['geometry'].tolist())
    x = [xy[i,0] for i in range(len(xy))]
    y = [xy[i,1] for i in range(len(xy))]
    df_node_pos['x'] = x
    df_node_pos['y'] = y
    #create geodataframe of the network points from dataframe
    gdf_node_pos = gp.GeoDataFrame(df_node_pos, crs=gdf.crs, geometry=df_node_pos.pointgeo)
    gdf_node_pos['osmid'] = gdf_node_pos.index
    #reference the Node ID of the network to the centroids by selecting the nearest node from the centroid points
    # (O(centroids x nodes) distance scan -- fine for small networks)
    gdf_points['Node'] = gdf_points.geometry.apply(lambda g: gdf_node_pos.iloc[gdf_node_pos.distance(g).idxmin()].Node)
    OD = gdf_points['Node'].tolist()
    gdf_node_pos['IsCentroid'] = gdf_node_pos.Node.apply(lambda g: 1 if g in OD else 0)
    #adding Centroid information to the gdf
    # IsCentroid ends up 0, 1 or 2: the number of link endpoints that are centroids
    gdf['IsCentroid1'] = gdf.TNODE_.apply(lambda g: 1 if g in OD else 0)
    gdf['IsCentroid2'] = gdf.FNODE_.apply(lambda g: 1 if (g in OD) else 0)
    gdf['IsCentroid'] = gdf['IsCentroid1'] + gdf['IsCentroid2']
    del gdf['IsCentroid1']
    del gdf['IsCentroid2']
    #create unique osmid for the network LineString GeoDataFrame
    # (offset by 10000 -- presumably to keep edge ids distinct from node ids)
    gdf['osmid'] = gdf.index.map(lambda x: x + 10000)
    return gdf_points, gdf_node_pos, gdf
# Public API of this module.  Consistency fix: `prepare_centroids_network2`
# (the variant that carries the 'road' attribute) was defined in this module
# but missing from the export list; it is added here.  `_last_list_element`
# stays private by design.
__all__ = ['prepare_gdf_network',
           'prepare_centroids_network',
           'prepare_centroids_network2',
           'gdf_to_simplified_multidigraph',
           'multigraph_to_graph',
           'graph_to_df',
           'prepare_adm_background',
           'create_link_capacity']
def prepare_gdf_network(network):
    '''
    Converting transport network shapefile into GeoDataFrame

    Parameters
    ------------
    network: str
        string of network shapefile's address+filename
    Returns
    ------------
    gdf: GeoDataFrame
        geodataframe of network with linestring, coordinate of start position, and
        coordinate of end position recorded

    Raises an Exception when the shapefile lacks a 'length' attribute or
    contains non-LineString geometries.
    '''
    # Load network shapefile into GeoDataFrame
    gdf = gp.read_file(network)
    # !!! Add column capacity for min max cut flow algorithm
    # gdf['capacity'] = gdf['RD_CLASS']
    # shapefile needs to include minimal: geometry linestring and the length computed (e.g. in QGIS)
    if 'length' not in gdf.columns:
        raise Exception('Shapefile is invalid: length not in attributes:\n{}'.format(gdf.columns))
    if not gdf.geometry.map(lambda x: type(x) == LineString).all():
        # Bug fix: report the rows that are NOT LineStrings -- the original
        # mask selected the *valid* rows, so the error listed the wrong
        # geometries.
        s_invalid_geo = gdf.geometry[gdf.geometry.map(lambda x: type(x) != LineString)]
        raise Exception('Shapefile is invalid: geometry not all linestring \n{}'.format(s_invalid_geo))
    # Compute the start- and end-position based on linestring
    gdf['Start_pos'] = gdf.geometry.apply(lambda x: x.coords[0])
    gdf['End_pos'] = gdf.geometry.apply(lambda x: x.coords[-1])
    # Create Series of unique nodes and their associated position
    # NOTE(review): Series.append was removed in pandas 2.0; this module
    # requires pandas < 2.0 as written.
    s_points = gdf.Start_pos.append(gdf.End_pos).reset_index(drop=True)
    s_points = s_points.drop_duplicates()
    # Add index of start and end node of linestring to geopandas DataFrame:
    # each unique coordinate's index in s_points becomes the node id.
    df_points = pd.DataFrame(s_points, columns=['Start_pos'])
    df_points['FNODE_'] = df_points.index
    gdf = pd.merge(gdf, df_points, on='Start_pos', how='inner')
    df_points = pd.DataFrame(s_points, columns=['End_pos'])
    df_points['TNODE_'] = df_points.index
    gdf = pd.merge(gdf, df_points, on='End_pos', how='inner')
    return gdf
def prepare_centroids_network(centroid, network):
    '''
    Take transport network and centroids shapefiles as inputs
    then returns a geodataframe of the transport network with
    indicated centroid nodes
    Parameters
    ------------
    centroid: str
        string of centroid shapefile's address+filename
    network: str
        string of network shapefile's address+name
    Returns
    ------------
    gdf_points: GeoDataFrame
        geodataframe (Points) of centroids shapefile
    gdf_node_pos: GeoDataFrame
        geodataframe (Points) of nodes obtained from all links in the network shapefile.
        The IsCentroid information is attached if a node is the closest node from a centroid.
    gdf: GeoDataFrame
        geodataframe (LineString) of the original network, containing information about the start
        node (FNODE) and end node (TNODE). The IsCentroid information is attached as well.
    '''
    #read the centroid shapefile into geodataframe
    gdf_points = gp.read_file(centroid)
    #read the network shapefile into geodataframe
    gdf = prepare_gdf_network(network)
    #take all nodes from the network geodataframe into dataframe
    # NOTE(review): DataFrame.append was removed in pandas 2.0; this module
    # requires pandas < 2.0 as written.
    df_node_pos = gdf[['Start_pos', 'FNODE_']].rename(columns={'Start_pos': 'pos', 'FNODE_': 'Node' }).append(
        gdf[['End_pos', 'TNODE_']].rename(columns={'End_pos': 'pos', 'TNODE_': 'Node' }))
    #drop all duplicate nodes
    df_node_pos = df_node_pos.drop_duplicates(subset='Node')
    #change the column name
    df_node_pos.columns = ['geometry', 'Node']
    #add column of POINT type for the geometry
    df_node_pos['pointgeo'] = [Point(xy) for xy in df_node_pos.geometry]
    #reindex the dataframe
    df_node_pos.index = range(len(df_node_pos))
    #save the longitude (x) and latitude(y) separately
    xy = np.array(df_node_pos['geometry'].tolist())
    x = [xy[i,0] for i in range(len(xy))]
    y = [xy[i,1] for i in range(len(xy))]
    df_node_pos['x'] = x
    df_node_pos['y'] = y
    #create geodataframe of the network points from dataframe
    gdf_node_pos = gp.GeoDataFrame(df_node_pos, crs=gdf.crs, geometry=df_node_pos.pointgeo)
    gdf_node_pos['osmid'] = gdf_node_pos.index
    #reference the Node ID of the network to the centroids by selecting the nearest node from the centroid points
    # (O(centroids x nodes) distance scan -- fine for small networks)
    gdf_points['Node'] = gdf_points.geometry.apply(lambda g: gdf_node_pos.iloc[gdf_node_pos.distance(g).idxmin()].Node)
    OD = gdf_points['Node'].tolist()
    gdf_node_pos['IsCentroid'] = gdf_node_pos.Node.apply(lambda g: 1 if g in OD else 0)
    #adding Centroid information to the gdf
    # IsCentroid ends up 0, 1 or 2: the number of link endpoints that are centroids
    gdf['IsCentroid1'] = gdf.TNODE_.apply(lambda g: 1 if g in OD else 0)
    gdf['IsCentroid2'] = gdf.FNODE_.apply(lambda g: 1 if (g in OD) else 0)
    gdf['IsCentroid'] = gdf['IsCentroid1'] + gdf['IsCentroid2']
    del gdf['IsCentroid1']
    del gdf['IsCentroid2']
    #create unique osmid for the network LineString GeoDataFrame
    # (offset by 10000 -- presumably to keep edge ids distinct from node ids)
    gdf['osmid'] = gdf.index.map(lambda x: x + 10000)
    return gdf_points, gdf_node_pos, gdf
def gdf_to_simplified_multidigraph(gdf_node_pos, gdf, undirected = True, simplify=True, sums=['length']):
    '''
    Simplifying transport network (in GeoDataFrame format) by removing all nodes which are
    neither intersections, end/start nodes, nor centroids. This reduces the computation time
    needed to conduct any analysis later.
    Parameters
    ------------
    gdf_node_pos: GeoDataFrame
        geodataframe (Points) of nodes obtained from all links in the network shapefile.
        The IsCentroid information is attached if a node is the closest node from a centroid.
    gdf: GeoDataFrame
        geodataframe (LineString) of the original transport network.
        WARNING: mutated in place -- its Start/End and FNODE/TNODE columns
        are swapped (inplace rename below) and left swapped on return.
    undirected: bool
        if True, the resulting graph is converted to undirected before returning.
    simplify: bool
        if True, the graph is run through the overwritten OSMNX simplify_graph.
    sums: list
        attribute names summed during simplification (passed to simplify_graph).
        NOTE(review): mutable default argument; it is only passed through here.
    Returns
    ------------
    G2: MultiGraph, MultiDiGraph
        Multi(Di)Graph Networkx object of simplified transport network. Multi(Di)Graph network type
        is chosen because the graph simplification algorithm only works with this type of network.
    '''
    #create a MultiDiGraph object
    G2 = nx.MultiDiGraph(crs=gdf.crs)
    #create nodes on the MultiDiGraph
    for index, row in gdf_node_pos.iterrows():
        c = {'x': row.x, 'y': row.y, 'IsCentroid' : row.IsCentroid, 'ID' : row.Node, 'osmid': row.osmid}
        G2.add_node(row.Node, **c)
    #create bidirectional edges on top of the MultiDiGraph nodes
    #based on the FNODE and TNODE information of the transport network GeoDataFrame
    for index, row in gdf.iterrows():
        dict_row = row.to_dict()
        if 'geometry' in dict_row: del dict_row['geometry']
        G2.add_edge(u=dict_row['FNODE_'], v=dict_row['TNODE_'], **dict_row)
    #swap the FNODE and the TNODE since we want to make bidirectional graph
    gdf.rename(columns={'Start_pos': 'End_pos',
                        'End_pos': 'Start_pos',
                        'FNODE_': 'TNODE_',
                        'TNODE_': 'FNODE_', }, inplace=True)
    #do the iteration again (now adding each edge in the reverse direction)
    for index, row in gdf.iterrows():
        dict_row = row.to_dict()
        if 'geometry' in dict_row: del dict_row['geometry']
        G2.add_edge(u=dict_row['FNODE_'], v=dict_row['TNODE_'], **dict_row)
    #simplify the MultiDiGraph using OSMNX's overwritten function
    if simplify:
        G2 = simplify_graph(G_=G2, sums=sums)
    #make a name
    G2.graph['name'] = 'graph'
    if undirected:
        G2 = G2.to_undirected()
    return G2
def multigraph_to_graph(G):
    '''
    Change Multi(Di)Graph object to Graph. Graph is undirected, simple graph type without parallel edges
    (while Multi(Di)Graph may have parallel edges). This code removes duplicate edges,
    keeping the duplicate with the greatest 'length' attribute.
    Parameters
    ------------
    G: MultiGraph, MultiDiGraph
        Multi(Di)Graph Networkx object of simplified transport network
    Returns
    ------------
    G2_new_tograph: Graph
        Graph Networkx object
    '''
    #create arbitrary Graph object
    G2_new_tograph = nx.Graph()
    #create dummy Graph as a mean to indicate duplicated edges
    G_dummy = nx.Graph()
    #transform the nodes into Graph, preserving all attributes
    for u,v in G.nodes(data=True):
        G2_new_tograph.add_node(u, **v)
        G_dummy.add_node(u, **v)
    #transform the edges into Graph, preserving all attributes
    # NOTE(review): the `c` membership test plus the inner G_dummy scan make
    # this O(E^2) in the number of edges -- acceptable for small networks.
    c = []
    for u,v,data in G.edges(data=True):
        d = (u,v)
        #check if the edge that connects (u,v) exists in the graph
        # NOTE(review): only the (u,v) orientation is recorded; duplicates
        # reported as (v,u) would slip through -- presumably G.edges yields
        # parallel edges in a consistent orientation.  Confirm if G changes.
        if not d in c:
            G2_new_tograph.add_edge(u,v,**data)
            G_dummy.add_edge(u,v,**data)
            c.append(d)
        #else replace the old edge with the new edge if the new edge has longer length
        else:
            for edge in G_dummy.edges(data=True):
                e = (edge[0], edge[1])
                if e == d:
                    if data['length'] > edge[2]['length']:
                        G2_new_tograph.remove_edge(u,v)
                        G2_new_tograph.add_edge(u,v,**data)
                        G_dummy.remove_edge(u,v)
                        G_dummy.add_edge(u,v,**data)
    return G2_new_tograph
def graph_to_df(G2_simplified):
    '''
    Change Graph Networkx object back to GeoDataFrame. This helps for visualization purpose,
    as GeoDataFrame has more flexibility in displaying the transport network.
    Parameters
    ------------
    G2_simplified: Graph
        (Simplified) Graph Networkx object
    Returns
    ------------
    gdf_edges: GeoDataFrame
        GeoDataFrame (Linestring) of the Graph Networkx object

    NOTE(review): `gdf_nodes` is assembled below but never returned or used
    in the result -- dead work unless a caller relied on side effects
    (none are visible here).
    NOTE(review): `G_save.node[...]` is the networkx 1.x/2.0-2.3 attribute;
    it was removed in networkx 2.4 (use `G.nodes`).  Confirm the pinned
    networkx version before upgrading.
    '''
    #get undirected Graph from MultiDiGraph
    G2_simplified3 = G2_simplified.copy()
    #create a copy for safer operation
    G_save = G2_simplified3.copy()
    #create dictionaries of nodes from the undirected Graph
    nodes = {node:data for node, data in G_save.nodes(data=True)}
    #create GeoDataFrame of nodes
    gdf_nodes = gp.GeoDataFrame(nodes).T
    #change the CRS (coordinate reference system) into EPSG:4326
    gdf_nodes.crs = {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84'}
    #define the geometry attribute of the GeoDataFrame
    gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1)
    gdf_nodes = gdf_nodes.drop(['x', 'y'], axis=1)
    #ensure the osmid is in integer
    gdf_nodes['osmid'] = gdf_nodes['osmid'].astype(np.int64)
    #stringify all non-geometry columns (fill missing values first)
    for col in [c for c in gdf_nodes.columns if not c == 'geometry']:
        gdf_nodes[col] = gdf_nodes[col].fillna('').map(make_str)
    #create list of edges
    edges = []
    for u, v, data in G_save.edges(data=True):
        edge_details = data
        edge_details['FNODE_'] = u
        edge_details['TNODE_'] = v
        # if edge doesn't already have a geometry attribute, create one now
        # (straight segment between the two node positions)
        if not 'geometry' in data:
            point_u = Point((G_save.node[u]['x'], G_save.node[u]['y']))
            point_v = Point((G_save.node[v]['x'], G_save.node[v]['y']))
            edge_details['geometry'] = LineString([point_u, point_v])
        edges.append(edge_details)
    #create GeoDataFrame of edges
    gdf_edges = gp.GeoDataFrame(edges)
    #change the CRS into EPSG:4326
    gdf_edges.crs = {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84'}
    #if any element of the GeoDataFrame contains more than one information (due to the graph simplification)
    for i in gdf_edges.columns:
        #select only one element which is most relevant (the last one)
        gdf_edges[i] = gdf_edges[i].apply(lambda x: _last_list_element(x))
    #delete all irrelevant columns
    del gdf_edges['End_pos']
    # del gdf_edges['FNODE_']
    # del gdf_edges['TNODE_']
    # del gdf_edges['osmid']
    del gdf_edges['Start_pos']
    # del gdf_edges['capacity']
    return gdf_edges
def _last_list_element(x):
#utility for multidigraph_to_shapefile function
#take the last element of a list
if type(x) == list:
x = x[-1]
return x
def prepare_adm_background(adm_csv, adm_shp, csv_column_list):
    '''
    Preparing geospatial administrative area background in GeoDataFrame. Merge various socioeconomic
    information from another csv. The csv and the shp should have at least one column with identical values
    to map to each other.
    Parameters
    ------------
    adm_csv: str
        string of socioeconomic csv file address+filename
        (must contain a 'Code' column matching the shapefile's 'HASC_2')
    adm_shp: str
        string of administrative area csv file address+filename
    csv_column_list: list
        list of string of column names from the adm_csv file that want to be added into the
        resulting GeoDataFrame
    Returns
    ------------
    district_gdf2: GeoDataFrame
        GeoDataFrame (Polygon) of administrative area and its corresponding socioeconomic data
    '''
    #read district data statistics
    district_df = pd.read_csv(adm_csv)
    #read adm level 2 district shapefile
    district_gdf = gp.read_file(adm_shp)
    #extract only the intended columns
    district_df2 = district_df[csv_column_list]
    #change the 'code' column into 'HASC_2' so that it can be merged with the shp file
    # NOTE(review): inplace rename on a column selection may emit pandas'
    # SettingWithCopyWarning; harmless here since district_df is not reused.
    district_df2.rename(columns={'Code':'HASC_2'}, inplace=True)
    #combine gdf and df (inner merge on the shared HASC_2 key)
    district_gdf2 = pd.merge(district_gdf, district_df2, on='HASC_2')
    return district_gdf2
def create_link_capacity(G, item1, item2='length', calctype='multiplication'):
    '''
    Preparing capacity of a link for unimodal transport network (i.e. road or waterway or railway separately).
    This function (currently) only perform simple multiplication or division between the two items
    in order to generate capacity attribute.
    Parameters
    ------------
    G: Graph
        Networkx Graph object of a unimodal transport network
    item1: str
        string of the Graph's data attribute that want to be used as the first component of capacity calculation
    item2: str
        string of the Graph's data attribute that want to be used as the secoond component of capacity calculation
    calctype: str
        'multiplication' (default) or 'division'
    Returns
    ------------
    G1: Graph
        Networkx Graph object of a unimodal transport network with capacity attribute embedded in its edges
    Raises
    ------------
    ValueError
        for an unknown calctype (bug fix: the original silently left
        `capacity` unbound in that case, producing a NameError or a stale
        value from the previous iteration).
    '''
    if calctype not in ('multiplication', 'division'):
        raise ValueError("calctype must be 'multiplication' or 'division', got {!r}".format(calctype))

    def _scalar(value):
        # Simplified edges may carry list-valued attributes; use the
        # smallest element so capacity is limited by the weakest segment.
        return min(value) if type(value) == list else value

    capacity_dict = {}
    G1 = G.copy()
    for u, v, data in G.edges(data=True):
        component1 = _scalar(data[item1])
        component2 = _scalar(data[item2])
        if calctype == 'multiplication':
            capacity = component1 * component2
        else:  # 'division'
            capacity = component1 / component2
        capacity_dict[(u, v)] = capacity
    # NOTE(review): this is the networkx 1.x calling convention
    # (G, name, values); networkx >= 2.0 expects (G, values, name=...).
    # Confirm the pinned networkx version before upgrading.
    nx.set_edge_attributes(G1, 'lcapacity', capacity_dict)
    return G1
import os
import sys
import unittest
class TestAutoloads(unittest.TestCase):
def setUp(self):
# Force reload of testconfig module to prevent side-effects from
# previous tests
try:
del sys.modules['testconfig']
except KeyError:
pass
def tearDown(self):
# Remove any environment variables that we set
keys_to_delete = [key for key in os.environ if key.startswith('NOSE_TESTCONFIG')]
for key in keys_to_delete:
print('*** Deleting key: %s' % key)
del os.environ[key]
def test_ini_file(self):
os.environ['NOSE_TESTCONFIG_AUTOLOAD_INI'] = 'examples/example_cfg.ini'
from testconfig import config
self.assertEqual(config['myapp_servers']['main_server'], "'10.1.1.1'")
def test_json_file(self):
os.environ['NOSE_TESTCONFIG_AUTOLOAD_JSON'] = 'examples/example_cfg.json'
from testconfig import config
self.assertEqual(config['myapp']['servers']['main_server'], '10.1.1.1')
|
import logging
import re
from datetime import datetime
from decimal import Decimal
from .exceptions import ExchangeRateDataError
from urllib import parse
import requests
from requests.exceptions import HTTPError
# Regular expression used to distinguish between multiple URLs: lazily
# captures each scheme://... chunk, terminated (via lookahead) by the end
# of the string or the start of the next scheme; trailing spaces/commas
# between URLs are consumed.  Case-insensitive.
URL_DETECTION_RE = re.compile(
    r'([a-z0-9]+?:\/\/.*?)[\s,]*(?=$|[a-z0-9]+?:\/\/)', re.I)
# flake8: noqa: C901
def parse_placeholders(tx: object, body: str, title: str = None):
    """Substitute ``{attribute}`` placeholders in *body* and *title* with
    the corresponding attribute values of the transaction object *tx*.

    The special ``{amount_in_XXX}`` placeholder converts ``tx.amount`` to
    the fiat currency ``XXX`` via :func:`currency_converter` (the exchange
    rate is fetched at most once per call).

    Bug fix: the original crashed with ``TypeError`` whenever *title* was
    ``None`` (its default), because it passed ``None`` straight into
    ``re.search`` during placeholder detection; ``None`` entries are now
    skipped.

    Returns the tuple ``(body, title)`` after substitution; a ``None``
    title stays ``None``.
    """
    amount_in_fiat = None
    fiat = None
    msg = [body, title]

    # URL-decode the messages in case the parameters arrived encoded.
    for i, text in enumerate(msg):
        if text is not None:
            msg[i] = parse.unquote(text)

    def _found(pattern, text):
        # re.search that tolerates a missing (None) body/title.
        return text is not None and re.search(pattern, text) is not None

    # Loop through the public attributes of the transaction object.
    for prop in dir(tx):
        # we don't care about private and protected attributes
        if prop.startswith("_"):
            continue
        prop_re = f"{{{prop}}}"
        # Skip attributes that appear in neither message, unless an
        # {amount_in_...} placeholder may still need processing.
        if not any(_found(f".*{prop_re}.*", m) for m in msg) and not any(
            _found("{amount_in_(...)}", m) for m in msg
        ):
            continue
        attr = getattr(tx, prop)
        if attr is None:
            logging.debug(
                f"PaymentProvider: {tx.payment_provider} TXID: {tx.tx_id} "
                f'Status: The placeholder "{prop_re}" was specified, but no data was found.'
                f" Network error or user error. Action: Continuing"
            )
            continue
        for i in range(len(msg)):
            if msg[i] is None:
                continue
            if re.search(f".*{prop_re}.*", msg[i]):
                # Swap the value in for the {placeholder}; strip trailing
                # zeros and the dot so decimal amounts render cleanly.
                msg[i] = re.sub(
                    prop_re, str(attr).rstrip("0").rstrip("."), msg[i]
                )
            if (
                prop == "amount"
                and re.search("{amount_in_(...)}", msg[i]) is not None
            ):
                # Convert the amount to fiat if necessary.
                fiat = (
                    re.search("{amount_in_(...)}", msg[i]).group(1)
                    if fiat is None
                    else fiat.lower()
                )
                prop_fiat = f"{{amount_in_{fiat}}}"
                fiat = fiat.upper()
                # No need to make multiple requests for both body and title.
                if amount_in_fiat is None:
                    amount_in_fiat_unformatted = currency_converter(
                        tx.amount, tx.currency, fiat, tx.timestamp
                    )
                    amount_in_fiat = "{0:.2f}".format(amount_in_fiat_unformatted)
                msg[i] = re.sub(prop_fiat, amount_in_fiat, msg[i])
    return msg[0], msg[1]  # body, title
def currency_converter(
    amount: Decimal, from_currency: str, to_currency: str, timestamp: datetime = None
):
    """Convert *amount* between currencies using cryptocompare's hourly
    historical data at *timestamp* (defaults to now).

    The exchange rate is the midpoint of the hour's high and low.

    Raises
    ------
    ExchangeRateDataError
        when the API responds with an error payload.
    requests.exceptions.HTTPError, KeyError
        propagated unchanged from the HTTP request / response parsing.
        Bug fix: the original caught these and re-raised the bare
        exception *classes* (``raise HTTPError``), discarding the status
        code, message and traceback context; the original exceptions now
        propagate intact.
    """
    tx_datetime = timestamp if timestamp is not None else datetime.now()
    # The API expects a unix timestamp.
    tx_datetime = tx_datetime.timestamp()
    uri = "https://min-api.cryptocompare.com"
    path = "/data/histohour"
    queryStr = (
        f"?fsym={from_currency}&tsym={to_currency}&limit=1&e=CCCAGG&toTs={tx_datetime}"
    )
    url = uri + path + queryStr
    r = requests.get(url)
    r.raise_for_status()
    payload = r.json()
    if payload["Response"] == "Error":
        raise ExchangeRateDataError(payload["Message"])
    # Data[1] is the most recent of the two hourly data points requested.
    hdp = payload["Data"][1]
    avgPrice = Decimal((hdp["high"] + hdp["low"]) / 2)
    return avgPrice * amount
def split_urls(urls):
    """
    Takes a string containing URLs separated by comma's and/or spaces and
    returns a list.  Non-string input yields an empty list.
    """
    try:
        results = URL_DETECTION_RE.findall(urls)
    except TypeError:
        # non-string input (e.g. None) -> no URLs
        results = []
    if len(results) > 0 and results[len(results) - 1][-1] != urls[-1]:
        # we always want to save the end of url URL if we can; This handles
        # cases where there is actually a comma (,) at the end of a single URL
        # that would have otherwise got lost when our regex passed over it.
        # NOTE(review): group(1) is None when the string ends without
        # whitespace/comma, which would raise AttributeError here -- this
        # branch presumably only runs when such a terminator exists; confirm
        # with a fuzz test before relying on it.
        results[len(results) - 1] += \
            re.match(r'.*?([\s,]+)?$', urls).group(1).rstrip()
    return results
|
from MyUtilities import PrimeNumbers,PowNMod,EuclidianExtended
from PrimitiveRoots import PrimitiveRoots
import random
import time
class ElgamalCriptography:
    """Character-wise ElGamal cipher.

    Public key: (e_1, e_2, p) with e_2 = e_1^d mod p; private key: d.
    Each character M is encrypted independently as the pair
    (C_1, C_2) = (e_1^r mod p, e_2^r * M mod p) for a fresh random r.
    """
    def __init__(self, base=0):
        # `base` is subtracted from each code point before encryption and
        # added back after decryption.
        self.base = base
        self.generateKeys()
    def generateKeys(self):
        """Interactively generate a key pair (prompts for the prime size)."""
        prime_length = int(input("How many digits of prime required ? : "))
        PN = PrimeNumbers()
        self.p = PN.GetPrime(prime_length,3)
        PR = PrimitiveRoots()
        start_time = time.time()
        self.primitive_roots,self.coprime_list = PR.getPrimitiveRoots(self.p)
        print("primitive Roots:")
        print(self.primitive_roots)
        end_time = time.time()
        print("_____________")
        print("time taken in primitive root generation (sec): %1.3f"%(end_time - start_time))
        # BUG FIX: random.choice replaces randint(0, len(seq)), whose
        # *inclusive* upper bound could index one past the end (IndexError).
        e_1 = random.choice(self.primitive_roots)
        d = random.choice(self.coprime_list)
        while d < 1 or d > (self.p - 1):
            # BUG FIX: re-draw a new candidate each iteration.  The original
            # never re-sampled the index inside this loop, so an invalid d
            # spun forever.
            print("d", d)
            d = random.choice(self.coprime_list)
        e_2 = PowNMod(e_1, d, self.p)
        self.publiC_key = (e_1, e_2, self.p)
        self.private_key = d
        print("keys Generated")
    def encrypt(self, plain_text):
        """Encrypt a string; returns a list of (C_1, C_2) pairs."""
        cipherList = []
        base = self.base
        print("Encrypting text")
        for pChar in plain_text:
            M = ord(pChar) - base
            # Fresh random r per character (same randint bound fix as above).
            r = random.choice(self.coprime_list)
            C_1 = PowNMod(self.publiC_key[0], r, self.p)
            C_2 = (PowNMod(self.publiC_key[1], r, self.p) * M) % self.p
            cipherList.append((C_1, C_2))
        return cipherList
    def decrypt(self, cipher_text):
        """Decrypt a list of (C_1, C_2) pairs back into a string."""
        EE = EuclidianExtended()
        base = self.base
        plainText = ""
        for cChar in cipher_text:
            C_1, C_2 = cChar
            # M = C_2 * (C_1^d)^-1 mod p
            C_1_inv = EE.GetInv(PowNMod(C_1, self.private_key, self.p), self.p)
            plainChar = (C_2 * C_1_inv) % self.p
            plainText += chr(base + plainChar)
        return plainText
    def test(self):
        """Round-trip demo: encrypt and decrypt a fixed message."""
        plain_text = "Hello World"
        cipher = self.encrypt(plain_text)
        decrypted_text = self.decrypt(cipher)
        print(f"plain Text : {plain_text}")
        print(f"Public Key : {self.publiC_key}")
        print(f"private key : {self.private_key}")
        print(f"cipher text : {cipher}")
        print(f"decrypted Text : {decrypted_text}")
if __name__ == "__main__":
    # Key generation prompts on stdin; test() round-trips "Hello World".
    cipher_demo = ElgamalCriptography(base=0)
    cipher_demo.test()
"""django0002 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.conf.urls.static import static
from django.urls import path
from django.conf import settings
from app01 import views
# Route table for the site.  Order matters: Django uses the first match.
# NOTE(review): legacy regex `url()` entries are mixed with `path()` routes;
# consider migrating the remaining url() calls to path()/re_path().
urlpatterns = [
    url(r'admin/',admin.site.urls),
    path('login/', views.login),
    # Site root shows the registration page.
    url(r'^$', views.register),
    path('home/',views.home,name='home'),
    path('qidian/',views.qidian,name="qidian"),
    path('refresh/',views.qidian_refresh,name="refresh"),
    path('twitter/',views.twitter,name="twitter"),
    path('weibo/',views.weibo_net,name="weibo"),
    path('hupu/',views.hupu,name="hupu"),
    path('cat/',views.cat,name ='cat'),
    path('write/',views.write,name='write'),
    path('jlu_new/',views.jlu_new),
    path('jlu/',views.jlu),
    url(r'mdeditor/', include('mdeditor.urls')),
    # `path()` does not take a regex, so the r-prefix here is purely cosmetic;
    # <article_id> is a plain string converter passed to the view.
    path(r'article/<article_id>/',views.article)
]
if settings.DEBUG:
    # static files (images, css, javascript, etc.)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# Converting to fuzzy sets
import numpy as np
def mem1(x, hb, lb):
    """Descending linear membership: 1 at lb, falling to 0 at hb."""
    span = hb - lb
    return float((hb - x) / span)
def mem2(x, hb, lb):
    """Ascending linear membership: 0 at lb, rising to 1 at hb."""
    span = hb - lb
    return float((x - lb) / span)
def preprocess(num, fname='iris.csv'):
    """Fuzzify columns 1..num-1 of ``fname`` into low/medium/high sets.

    Set boundaries are derived from each attribute's mean and standard
    deviation; membership inside the overlap regions is linear (mem1/mem2).
    The fuzzified matrix (3 columns per attribute, samples as rows) is
    printed and written to foo.csv.

    ``fname`` was hard-coded in the original; it is now a keyword argument
    with the same default, so existing calls are unaffected.
    """
    data = np.genfromtxt(fname, delimiter=',')
    data = data[1:]  # drop the header row
    attribute = [data[:, i] for i in range(1, num)]
    n_attr = len(attribute)
    dev = [np.std(a) for a in attribute]    # standard deviation per attribute
    mean = [np.mean(a) for a in attribute]  # mean per attribute
    # Boundaries for the low set ...
    low_lb = [np.amin(a) for a in attribute]
    low_hb = [mean[i] - (dev[i] / 2) + (mean[i] * 0.05) for i in range(n_attr)]
    # ... the medium set ...
    med_lb = [mean[i] - (dev[i] / 2) - (mean[i] * 0.05) for i in range(n_attr)]
    med_hb = [mean[i] + (dev[i] / 2) + (mean[i] * 0.05) for i in range(n_attr)]
    # ... and the high set.
    high_lb = [(mean[i] + (dev[i] / 2)) - (mean[i] * 0.05) for i in range(n_attr)]
    high_hb = [np.amax(a) for a in attribute]
    low = []
    med = []
    high = []
    for i in range(n_attr):
        ai_low = []
        ai_med = []
        ai_high = []
        for x in attribute[i]:
            if x >= low_lb[i] and x < med_lb[i]:
                # Entirely inside the low set.
                ai_low.append(1)
                ai_med.append(0)
                ai_high.append(0)
            elif x >= med_lb[i] and x <= low_hb[i]:
                # Low/medium overlap: linear cross-over.
                ai_low.append(mem1(x, low_hb[i], med_lb[i]))
                ai_med.append(mem2(x, low_hb[i], med_lb[i]))
                ai_high.append(0)
            elif x > low_hb[i] and x < high_lb[i]:
                # Entirely inside the medium set.
                ai_low.append(0)
                ai_med.append(1)
                ai_high.append(0)
            elif x <= med_hb[i] and x >= high_lb[i]:
                # Medium/high overlap: linear cross-over.
                ai_low.append(0)
                ai_med.append(mem1(x, med_hb[i], high_lb[i]))
                ai_high.append(mem2(x, med_hb[i], high_lb[i]))
            else:
                # Entirely inside the high set.
                ai_low.append(0)
                ai_med.append(0)
                ai_high.append(1)
        low.append(ai_low)
        med.append(ai_med)
        high.append(ai_high)
    fuzzified = []  # final matrix: low, med, high rows per attribute
    for i in range(len(low)):
        fuzzified.append(low[i])
        fuzzified.append(med[i])
        fuzzified.append(high[i])
    fuzzified = np.array(fuzzified)
    fuzzified = fuzzified.transpose()
    # BUG FIX: the original used a Python 2 print *statement* here
    # (`print attribute`), which is a SyntaxError on Python 3.
    print(fuzzified)
    np.savetxt("foo.csv", fuzzified, delimiter=",")
if __name__ == "__main__":
    # iris.csv has five columns: an id column plus four measured attributes.
    preprocess(5)
|
import serial

# Serial-port settings for the attached Arduino.
arduinoComPort = "COM7"
baudRate = 9600
ser = serial.Serial(arduinoComPort, baudRate, timeout=1)
#
# main loop to read data from the Arduino, then display it
#
while True:
    #
    # ask for a line of data from the serial port; ".decode()" converts the
    # data from an "array of bytes" to a string
    #
    lineOfData = ser.readline().decode()
    #
    # check if data was received
    #
    if len(lineOfData) > 0:
        # BUG FIX: the original called ser.readline() a *second* time here and
        # printed that, silently discarding the line it had just read (every
        # other line was dropped).  Print the line we already have.
        print(lineOfData)
        # To parse four comma-separated integers from the line instead:
        # a, b, c, d = (int(x) for x in lineOfData.split(','))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 18:50:52 2018
@author: ddeng
"""
import pandas as pd
import pdb
import os
import numpy as np
from sklearn.svm import SVC, LinearSVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import VotingClassifier
from sklearn import metrics
from time import time
from scipy.stats import skew, kurtosis, mode
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
from my_utils import plot_confusion_matrix, Dataset,cv_on_SVC_feat_selection
# Candidate feature groups and the time-interval windows over which features
# are aggregated (units not stated here — presumably frames/seconds; confirm
# against my_utils.Dataset).
feat_type = ['head','pose','gaze_angle','FAUs']
time_interval = [5, 10, 20, 30, 50, 70, 90, 120, 150, 200]
def cv_on_SVC(gender, feature, time_int):
    """Run SVC with feature selection on the chosen gender split.

    gender   -- 'F' (female), 'M' (male) or 'A' (both sets concatenated).
    feature  -- feature group name passed to Dataset (see feat_type).
    time_int -- aggregation window passed to Dataset (see time_interval).

    Returns the dev-set prediction probabilities produced by
    cv_on_SVC_feat_selection.
    """
    dataset = Dataset(feat_type=feature, time_interval=time_int)
    female_data = dataset.load_my_data('F')
    male_data = dataset.load_my_data('M')
    if gender=='F':
        X_train, y_train = female_data['train']['data'], female_data['train']['target']
        X_dev, y_dev, dev_index = female_data['dev']['data'], female_data['dev']['target'], female_data['dev']['index']
    elif gender=='M':
        X_train, y_train = male_data['train']['data'], male_data['train']['target']
        X_dev, y_dev, dev_index = male_data['dev']['data'], male_data['dev']['target'], male_data['dev']['index']
    elif gender=='A':
        X_train = np.concatenate((female_data['train']['data'], male_data['train']['data']), axis=0)
        y_train = np.concatenate((female_data['train']['target'], male_data['train']['target']), axis=0)
        X_dev = np.concatenate((female_data['dev']['data'], male_data['dev']['data']), axis=0)
        y_dev = np.concatenate((female_data['dev']['target'], male_data['dev']['target']), axis=0)
        dev_index= female_data['dev']['index']
        dev_index.extend(male_data['dev']['index'])
    else:
        # BUG FIX: an unknown gender previously fell through and crashed
        # below with NameError on X_train; fail fast with a clear message.
        raise ValueError("gender must be one of 'F', 'M' or 'A', got %r" % (gender,))
    # Scale features to [0, 1] using statistics from the training split only.
    scaler = preprocessing.MinMaxScaler()
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_dev = scaler.transform(X_dev)
    y_pred_probs = cv_on_SVC_feat_selection(X_train, y_train, X_dev, y_dev, dev_index)
    # BUG FIX: the result was previously computed but never returned.
    return y_pred_probs
|
from grpc import insecure_channel
import requests
import time
from canoser import Uint64
from libra.account import Account
from libra.account_address import Address
from libra.account_resource import AccountState, AccountResource
from libra.account_config import AccountConfig
from libra.transaction import RawTransaction, SignedTransaction, Script, TransactionPayload
from libra.trusted_peers import ConsensusPeersConfig
from libra.ledger_info import LedgerInfo
from libra.get_with_proof import verify
from libra.proto.admission_control_pb2 import SubmitTransactionRequest, AdmissionControlStatusCode
from libra.proto.admission_control_pb2_grpc import AdmissionControlStub
from libra.proto.get_with_proof_pb2 import UpdateToLatestLedgerRequest
# Known Libra networks: admission-control endpoint plus the HTTP faucet host.
NETWORKS = {
    'testnet': {
        'host': "ac.testnet.libra.org",
        'port': 8000,
        'faucet_host': "faucet.testnet.libra.org",
    },
}
class LibraError(Exception):
    """Base class for all errors raised by this Libra client."""


class AccountError(LibraError):
    """The queried account does not exist or its state blob is empty."""


class TransactionError(LibraError):
    """A submitted transaction was rejected by the node."""


class TransactionTimeoutError(LibraError):
    """Timed out (or expired) while waiting for a transaction to commit."""


class LibraNetError(LibraError):
    """Invalid or unsupported network configuration."""
class Client:
    """Client for a Libra admission-control node.

    Queries account state, transactions and events over gRPC — verifying
    every response against a trusted validator set — and submits signed
    transactions.  On testnet it can also mint coins via the HTTP faucet.
    """
    def __init__(self, network="testnet", validator_set_file=None, faucet_file=None):
        """Connect to a named network; only 'testnet' is currently supported."""
        if network == "mainnet":
            raise LibraNetError("Mainnet is not supported currently")
        if network != "testnet":
            raise LibraNetError(f"Unknown network: {network}")
        self.host = NETWORKS[network]['host']
        self.port = NETWORKS[network]['port']
        self.do_init(validator_set_file, faucet_file)
    def do_init(self, validator_set_file=None, faucet_file=None):
        """Shared setup used by both __init__ and the `new` constructor."""
        self.init_validators(validator_set_file)
        self.init_grpc()
        self.init_faucet_account(faucet_file)
        self.verbose = True
    def init_grpc(self):
        """Open the (insecure) gRPC channel to the admission-control node."""
        #TODO: should check under ipv6, add [] around ipv6 host
        self.channel = insecure_channel(f"{self.host}:{self.port}")
        self.stub = AdmissionControlStub(self.channel)
    def init_faucet_account(self, faucet_file):
        """On testnet use the HTTP faucet service; otherwise load a faucet keypair."""
        if self.is_testnet():
            self.faucet_host = NETWORKS['testnet']['faucet_host']
            self.faucet_account = None
        else:
            self.faucet_account = Account.gen_faucet_account(faucet_file)
    def is_testnet(self):
        """True when this client points at the public testnet host."""
        return self.host == NETWORKS['testnet']['host']
    def init_validators(self, validator_set_file):
        """Load the validator set used to verify ledger responses."""
        if self.is_testnet() and validator_set_file is None:
            validator_set_file = ConsensusPeersConfig.testnet_file_path()
        if validator_set_file is None:
            raise LibraError("Validator_set_file is required except testnet.")
        self.validator_verifier = ConsensusPeersConfig.parse(validator_set_file)
    @classmethod
    def new(cls, host, port, validator_set_file, faucet_file=None):
        """Alternate constructor for an arbitrary host/port (bypasses NETWORKS)."""
        ret = cls.__new__(cls)
        ret.host = host
        if isinstance(port, str):
            port = int(port)
        if port <=0 or port > 65535:
            raise LibraNetError("port must be between 1 and 65535")
        ret.port = port
        ret.do_init(validator_set_file, faucet_file)
        return ret
    def get_account_blob(self, address):
        """Fetch the raw account-state blob plus the ledger version it came from."""
        address = Address.normalize_to_bytes(address)
        request = UpdateToLatestLedgerRequest()
        item = request.requested_items.add()
        item.get_account_state_request.address = address
        resp = self.update_to_latest_ledger(request)
        blob = resp.response_items[0].get_account_state_response.account_state_with_proof.blob
        version = resp.ledger_info_with_sigs.ledger_info.version
        return (blob, version)
    def get_account_state(self, address):
        """Deserialize the account state; raises AccountError when the blob is empty."""
        blob, version = self.get_account_blob(address)
        if len(blob.__str__()) == 0:
            #TODO: bad smell
            raise AccountError("Account state blob is empty.")
        return AccountState.deserialize(blob.blob)
    def get_account_resource(self, address):
        """Return the AccountResource stored under the given address."""
        state = self.get_account_state(address)
        return state.get_resource()
    def get_sequence_number(self, address):
        """Current sequence number of the account; 0 if the account doesn't exist."""
        try:
            state = self.get_account_resource(address)
            return state.sequence_number
        except AccountError:
            return 0
    def get_balance(self, address):
        """Balance in micro-libra; 0 if the account doesn't exist."""
        try:
            state = self.get_account_resource(address)
            return state.balance
        except AccountError:
            return 0
    def update_to_latest_ledger(self, request):
        """Send the gRPC request and verify the response proofs before returning."""
        resp = self.stub.UpdateToLatestLedger(request)
        verify(self.validator_verifier, request, resp)
        return resp
    def get_latest_ledger_info(self):
        """Return the latest verified LedgerInfo (an empty request suffices)."""
        request = UpdateToLatestLedgerRequest()
        resp = self.update_to_latest_ledger(request)
        return resp.ledger_info_with_sigs.ledger_info
    def _get_time_diff(self):
        """Seconds between the local clock and the latest ledger timestamp."""
        from datetime import datetime
        info = self.get_latest_ledger_info()
        localtime = datetime.now().timestamp()
        # Ledger timestamps are in microseconds.
        return localtime - info.timestamp_usecs / 1000_000
    def get_latest_transaction_version(self):
        """Version (height) of the most recent committed transaction."""
        return self.get_latest_ledger_info().version
    def _get_txs(self, start_version, limit=1, fetch_events=False):
        """Build and send a get-transactions request; returns (request, response)."""
        request = UpdateToLatestLedgerRequest()
        item = request.requested_items.add()
        item.get_transactions_request.start_version = start_version
        item.get_transactions_request.limit = limit
        item.get_transactions_request.fetch_events = fetch_events
        return (request, self.update_to_latest_ledger(request))
    def get_transactions_proto(self, start_version, limit=1, fetch_events=False):
        """Raw protobuf transactions plus their per-version events."""
        request, resp = self._get_txs(start_version, limit, fetch_events)
        txnp = resp.response_items[0].get_transactions_response.txn_list_with_proof
        return (txnp.transactions, txnp.events_for_versions)
    def get_transactions(self, start_version, limit=1):
        """Deserialized SignedTransaction objects starting at start_version."""
        transactions, _ = self.get_transactions_proto(start_version, limit, False)
        return [SignedTransaction.deserialize(x.signed_txn) for x in transactions]
    def get_transaction(self, start_version):
        """Single deserialized transaction at the given version."""
        return self.get_transactions(start_version)[0]
    def get_account_transaction_proto(self, address, sequence_number, fetch_events=False):
        """Look up one account transaction by sequence number.

        Returns (signed_transaction_with_proof, ledger_timestamp_usecs).
        """
        address = Address.normalize_to_bytes(address)
        request = UpdateToLatestLedgerRequest()
        item = request.requested_items.add()
        itemreq = item.get_account_transaction_by_sequence_number_request
        itemreq.account = address
        itemreq.sequence_number = sequence_number
        itemreq.fetch_events = fetch_events
        resp = self.update_to_latest_ledger(request)
        usecs = resp.ledger_info_with_sigs.ledger_info.timestamp_usecs
        transaction = resp.response_items[0].get_account_transaction_by_sequence_number_response
        return (transaction.signed_transaction_with_proof, usecs)
    # Returns events specified by `access_path` with sequence number in range designated by
    # `start_seq_num`, `ascending` and `limit`. If ascending is true this query will return up to
    # `limit` events that were emitted after `start_event_seq_num`. Otherwise it will return up to
    # `limit` events in the reverse order. Both cases are inclusive.
    def get_events(self, address, path, start_sequence_number, ascending=True, limit=1):
        address = Address.normalize_to_bytes(address)
        request = UpdateToLatestLedgerRequest()
        item = request.requested_items.add()
        item.get_events_by_event_access_path_request.access_path.address = address
        item.get_events_by_event_access_path_request.access_path.path = path
        item.get_events_by_event_access_path_request.start_event_seq_num = start_sequence_number
        item.get_events_by_event_access_path_request.ascending = ascending
        item.get_events_by_event_access_path_request.limit = limit
        resp = self.update_to_latest_ledger(request)
        return resp.response_items[0].get_events_by_event_access_path_response.events_with_proof
    def get_events_sent(self, address, start_sequence_number, ascending=True, limit=1):
        """Events from the account's 'sent' event stream."""
        path = AccountConfig.account_sent_event_path()
        return self.get_events(address, path, start_sequence_number, ascending, limit)
    def get_events_received(self, address, start_sequence_number, ascending=True, limit=1):
        """Events from the account's 'received' event stream."""
        path = AccountConfig.account_received_event_path()
        return self.get_events(address, path, start_sequence_number, ascending, limit)
    def get_latest_events_sent(self, address, limit=1):
        """Most recent 'sent' events (queries backwards from Uint64 max)."""
        return self.get_events_sent(address, 2**64-1, False, limit)
    def get_latest_events_received(self, address, limit=1):
        """Most recent 'received' events (queries backwards from Uint64 max)."""
        return self.get_events_received(address, 2**64-1, False, limit)
    def mint_coins(self, address, micro_libra, is_blocking=False):
        """Mint micro-libra to an address; returns the mint's sequence number."""
        if self.faucet_account:
            tx = self.mint_coins_with_faucet_account(address, micro_libra, is_blocking)
            return tx.raw_txn.sequence_number
        else:
            return self.mint_coins_with_faucet_service(address, micro_libra, is_blocking)
    def mint_coins_with_faucet_account(self, receiver_address, micro_libra, is_blocking=False):
        """Mint by submitting a mint script signed with the local faucet key."""
        script = Script.gen_mint_script(receiver_address, micro_libra)
        payload = TransactionPayload('Script', script)
        return self.submit_payload(self.faucet_account, payload, is_blocking=is_blocking)
    def mint_coins_with_faucet_service(self, receiver, micro_libra, is_blocking=False):
        """Mint through the testnet HTTP faucet service."""
        url = "http://{}?amount={}&address={}".format(self.faucet_host, micro_libra, receiver)
        resp = requests.post(url)
        if resp.status_code != 200:
            raise IOError(
                "Failed to send request to faucet service: {}".format(self.faucet_host)
            )
        sequence_number = int(resp.text)
        if is_blocking:
            # The faucet returns the *next* sequence number, hence the -1.
            self.wait_for_transaction(AccountConfig.association_address(), sequence_number-1)
        return sequence_number
    def wait_for_transaction(self, address, sequence_number, expiration_time=Uint64.max_value):
        """Poll (1s interval, 50 tries max) until the transaction is stored.

        Returns True when the stored transaction emitted events, False when
        it emitted none; raises TransactionTimeoutError on expiry/timeout.
        """
        max_iterations = 50
        if self.verbose:
            print("waiting", flush=True)
        while max_iterations > 0:
            time.sleep(1)
            max_iterations -= 1
            transaction, usecs = self.get_account_transaction_proto(address, sequence_number, True)
            if transaction.HasField("events"):
                if self.verbose:
                    print("transaction is stored!")
                if len(transaction.events.events) == 0:
                    if self.verbose:
                        print("no events emitted")
                    return False
                else:
                    return True
            else:
                # Ledger timestamps are microseconds; compare in seconds.
                if expiration_time <= (usecs // 1000_000):
                    raise TransactionTimeoutError("Transaction expired.")
                if self.verbose:
                    print(".", end='', flush=True)
        raise TransactionTimeoutError("wait_for_transaction timeout.")
    def transfer_coin(self, sender_account, receiver_address, micro_libra,
            max_gas=140_000, unit_price=0, is_blocking=False, txn_expiration=100):
        """Transfer micro-libra from sender_account to receiver_address."""
        script = Script.gen_transfer_script(receiver_address,micro_libra)
        payload = TransactionPayload('Script', script)
        return self.submit_payload(sender_account, payload, max_gas, unit_price,
            is_blocking, txn_expiration)
    def create_account(self, sender_account, fresh_address):
        """Create a brand-new on-chain account at fresh_address."""
        script = Script.gen_create_account_script(fresh_address)
        payload = TransactionPayload('Script', script)
        return self.submit_payload(sender_account, payload)
    def rotate_authentication_key(self, sender_account, public_key):
        """Rotate the sender's authentication key to `public_key`."""
        script = Script.gen_rotate_auth_key_script(public_key)
        payload = TransactionPayload('Script', script)
        return self.submit_payload(sender_account, payload)
    def submit_payload(self, sender_account, payload,
            max_gas=140_000, unit_price=0, is_blocking=False, txn_expiration=100):
        """Wrap a payload in a signed transaction and submit it; returns the txn."""
        sequence_number = self.get_sequence_number(sender_account.address)
        #TODO: cache sequence_number
        raw_tx = RawTransaction.new_tx(sender_account.address, sequence_number,
            payload, max_gas, unit_price, txn_expiration)
        signed_txn = SignedTransaction.gen_from_raw_txn(raw_tx, sender_account)
        request = SubmitTransactionRequest()
        request.signed_txn.signed_txn = signed_txn.serialize()
        self.submit_transaction(request, raw_tx, is_blocking)
        return signed_txn
    def submit_transaction(self, request, raw_tx, is_blocking):
        """Submit; when is_blocking, additionally wait for the txn to be stored."""
        resp = self.submit_transaction_non_block(request)
        if is_blocking:
            address = bytes(raw_tx.sender)
            sequence_number = raw_tx.sequence_number
            expiration_time = raw_tx.expiration_time
            self.wait_for_transaction(address, sequence_number, expiration_time)
        return resp
    def submit_transaction_non_block(self, request):
        """Submit without waiting; raises TransactionError on any rejection."""
        resp = self.stub.SubmitTransaction(request)
        status = resp.WhichOneof('status')
        if status == 'ac_status':
            if resp.ac_status.code == AdmissionControlStatusCode.Accepted:
                return resp
            else:
                raise TransactionError(f"Status code: {resp.ac_status.code}")
        elif status == 'vm_status':
            raise TransactionError(resp.vm_status.__str__())
        elif status == 'mempool_status':
            raise TransactionError(resp.mempool_status.__str__())
        else:
            raise TransactionError(f"Unknown Error: {resp}")
        # NOTE(review): unreachable — every branch above returns or raises.
        raise AssertionError("unreacheable")
|
"""
Пробуем создать утилиту, которая будет отслеживать редисок, которые скручивают километраж на мажинах и их перепродают
"""
import urllib3
import json
import requests
# NOTE(review): hard-coded ria.com API key committed to source — move it to
# an environment variable or config file and rotate the key.
my_kay = "mnrOlvNOeTnNJHyFrkjk6RFZ0NfDkftYxlO2cD1t"
def url_search(search_url: str) -> str:
    """Build the full ria.com search-API request line from a query string.

    The literal YOUR_API_KEY placeholder is substituted later by
    create_json_with_data.
    """
    return f"https://developers.ria.com/auto/search?api_key=YOUR_API_KEY&{search_url}"
def take_search_string(url_from_site: str, key: str) -> str:
    """Translate a search URL copied from the site into the API's query format."""
    response = requests.get(
        f'https://developers.ria.com/new_to_old?api_key={key}&{url_from_site}'
    )
    payload = json.loads(response.text)
    return payload['string']
def create_json_with_data(input_url: str, api_key: str, json_name: str):
    """
    Substitute the real API key into a prepared request URL, perform the GET
    request and save the decoded JSON response to ``<json_name>.json``.
    """
    http = urllib3.PoolManager()
    request_url = input_url.replace('YOUR_API_KEY', api_key)
    response = http.request('GET', request_url)
    data_from_api = json.loads(response.data.decode('utf-8'))
    with open(f'{json_name}.json', 'w') as f:
        json.dump(data_from_api, f)
def find_id(json_name: str) -> list:
    """
    Read a previously saved search-API response file and collect all advert
    ids: new-car ids first, followed by ids of used cars.
    """
    # BUG FIX: `json.load(open(...))` leaked the file handle; `with` closes it.
    with open(json_name) as f:
        all_search_data = json.load(f)
    # Ids of new-car adverts.
    new_cars = all_search_data['result']['search_result']['ids']
    # Entries for used cars; each entry carries its id in the 'id' field.
    old_cars = all_search_data['result']['search_result_common']['data']
    output_cars = list(new_cars)
    output_cars.extend(entry['id'] for entry in old_cars)
    return output_cars
def read_from_id(list_with_ids: list):
    """Download and save the full advert info for every id in the list."""
    info_url = 'https://developers.ria.com/auto/info?api_key=YOUR_API_KEY&auto_id={}'
    for advert_id in list_with_ids:
        create_json_with_data(info_url.format(advert_id), my_kay, f'{advert_id}')
def main():
    """Search Mazda 6 adverts, save the results, then fetch each advert's details."""
    site_query = 'categories.main.id=1&indexName=auto&brand.id[0]=47&model.id[0]=393'
    api_query = take_search_string(site_query, my_kay)
    create_json_with_data(url_search(api_query), my_kay, 'Mazda6')
    read_from_id(find_id('Mazda6.json'))
# BUG FIX: the original guard read `if __name__ == main():`, which *called*
# main() at import time and then compared __name__ against its return value
# (None) — so main ran on import and never via the guard.
if __name__ == "__main__":
    main()
|
from django import forms
from itens.models import Filme, Serie, Livro
from .models import Avaliacao, Comentario
class FazerAvaliacaoFilme(forms.ModelForm):
    """Rating form for a movie; relational fields are filled by the view."""
    class Meta:
        model = Avaliacao
        fields = ['avaliacao', 'valor', 'user_id', 'filme', 'tipo']
        labels = {'user_id': '', 'filme': '', 'tipo': ''}
    def __init__(self, *args, **kwargs):
        super(FazerAvaliacaoFilme, self).__init__(*args, **kwargs)
        # Everything the user types is required; view-supplied fields are not.
        for name in self.fields:
            self.fields[name].required = name not in ('user_id', 'filme', 'tipo')
class FazerAvaliacaoLivro(forms.ModelForm):
    """Rating form for a book; relational fields are filled by the view."""
    class Meta:
        model = Avaliacao
        fields = ['avaliacao', 'valor', 'user_id', 'livro', 'tipo']
        labels = {'user_id': '', 'livro': '', 'tipo': ''}
    def __init__(self, *args, **kwargs):
        super(FazerAvaliacaoLivro, self).__init__(*args, **kwargs)
        # Everything the user types is required; view-supplied fields are not.
        for name in self.fields:
            self.fields[name].required = name not in ('user_id', 'livro', 'tipo')
class FazerAvaliacaoSerie(forms.ModelForm):
    """Rating form for a TV series; relational fields are filled by the view."""
    class Meta:
        model = Avaliacao
        fields = ['avaliacao', 'valor', 'user_id', 'serie', 'tipo']
        labels = {'user_id': '', 'serie': '', 'tipo': ''}
    def __init__(self, *args, **kwargs):
        super(FazerAvaliacaoSerie, self).__init__(*args, **kwargs)
        # Everything the user types is required; view-supplied fields are not.
        for name in self.fields:
            self.fields[name].required = name not in ('user_id', 'serie', 'tipo')
class NovoComentario(forms.ModelForm):
    """Comment form pre-bound to a rating and a user via hidden inputs."""
    class Meta:
        model = Comentario
        fields = ['avaliacao', 'comentario', 'user_id']
        labels = {'comentario': 'Novo Comentário'}
    def __init__(self, avaliacao, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        hidden_initials = {'avaliacao': avaliacao, 'user_id': user}
        for name, initial in hidden_initials.items():
            self.fields[name].initial = initial
            self.fields[name].widget = forms.HiddenInput()
from django.shortcuts import render
from prices.models import Consultant
def home(request):
    """Render the landing page listing all consultants ordered by id."""
    consultants = Consultant.objects.order_by('id')
    return render(request, 'landing_page.html', {'consultants': consultants})
|
#!/usr/bin/env python
"""This script plots the mean of the identified gains. The gains must be
precomputed. It currently does not include trials from Subject 9."""
# builtin
import os
import argparse
# external
import numpy as np
import matplotlib.pyplot as plt
# local
import utils
# Shared directory configuration (e.g. 'figures_dir') resolved once at import.
PATHS = utils.config_paths()
def main(event, structure):
    """Plot mean identified gains for each perturbation speed.

    event     -- event name present in the data (e.g. 'Longitudinal Perturbation').
    structure -- controller structure name ('joint isolated' or 'full').

    Saves one mean-gain figure per speed and a combined gains-vs-speed
    figure under the configured figures directory.  Subject 9 is excluded.
    """
    # Build file-system-safe directory names from the human-readable labels.
    file_name_safe_event = '-'.join(event.lower().split(' '))
    file_name_safe_structure = '-'.join(structure.split(' '))
    plot_dir = utils.mkdir(os.path.join(PATHS['figures_dir'],
                                        'identification-results',
                                        file_name_safe_event,
                                        file_name_safe_structure))
    # Do not include subject 9 in the means because of the odd ankle joint
    # torques.
    similar_trials = utils.build_similar_trials_dict(bad_subjects=[9])
    mean_gains_per_speed = {}
    for speed, trial_numbers in similar_trials.items():
        all_gains = utils.aggregate_gains(trial_numbers,
                                          utils.Trial.sensors,
                                          utils.Trial.controls,
                                          utils.Trial.num_cycle_samples,
                                          file_name_safe_event,
                                          file_name_safe_structure,
                                          scale_by_mass=True)
        mean_gains = all_gains.mean(axis=0)
        var_gains = all_gains.var(axis=0)
        mean_gains_per_speed[speed] = mean_gains
        markers = utils.mark_if_sig_diff_than(all_gains)
        fig, axes = utils.plot_joint_isolated_gains(
            utils.Trial.sensors, utils.Trial.controls, mean_gains,
            gains_std=np.sqrt(var_gains), mass=1.0, mark=markers)
        fig.set_size_inches((14.0, 14.0))
        fig.savefig(os.path.join(plot_dir, 'mean-gains-' + speed + '.png'),
                    dpi=300)
        plt.close(fig)
    # Overlay all three speeds on one 2x3 grid of axes.
    fig, axes = plt.subplots(2, 3, sharex=True)
    linestyles = ['-', '--', ':']
    speeds = ['0.8', '1.2', '1.6']
    for speed, linestyle in zip(speeds, linestyles):
        # NOTE(review): var_gains here is left over from the *last* speed of
        # the loop above, so all three curves share that std band — confirm
        # this is intentional.
        fig, axes = utils.plot_joint_isolated_gains(utils.Trial.sensors,
                                                    utils.Trial.controls,
                                                    mean_gains_per_speed[speed],
                                                    gains_std=np.sqrt(var_gains),
                                                    axes=axes,
                                                    linestyle=linestyle)
    axes[0, 0].legend().set_visible(False)
    right_labels = ['Right ' + speed + ' [m/s]' for speed in speeds]
    left_labels = ['Left ' + speed + ' [m/s]' for speed in speeds]
    leg = axes[1, 0].legend(list(sum(zip(right_labels, left_labels), ())),
                            loc='best', fancybox=True, fontsize=8)
    leg.get_frame().set_alpha(0.75)
    fig.savefig(os.path.join(plot_dir, 'mean-gains-vs-speed.png'), dpi=300)
    plt.close(fig)
if __name__ == "__main__":
    desc = "Identify Controller"
    parser = argparse.ArgumentParser(description=desc)
    msg = ("A valid event name in the data, likely: "
           "'Longitudinal Perturbation', 'First Normal Walking', "
           "or 'Second Normal Walking'.")
    parser.add_argument('-e', '--event', type=str, help=msg,
                        default='Longitudinal Perturbation')
    # BUG FIX: help text said 'join isolated' while the actual accepted value
    # (and the default below) is 'joint isolated'.
    msg = ("The desired controller structure: 'joint isolated' or 'full'.")
    parser.add_argument('-s', '--structure', type=str, help=msg,
                        default='joint isolated')
    args = parser.parse_args()
    main(args.event, args.structure)
|
class Solution:
    """KMP substring search over lowercase ASCII text using a DFA.

    dp[state][code] is the next automaton state after consuming the
    character with code point ``code`` (97..122, i.e. 'a'..'z') in ``state``.
    """
    dp = []
    def kmp(self, text, pattten) -> bool:
        """Return True if ``pattten`` occurs in ``text`` (lowercase a-z only).

        Note: the return annotation was corrected from int to bool — search()
        has always returned True/False.
        """
        self.build(pattten)
        return self.search(text)
    def build(self, patten):
        """Construct the KMP DFA transition table for ``patten``."""
        length = len(patten)
        # BUG FIX: the original wrote `[] * length`, which always evaluates
        # to [] — misleading dead code; the table is filled by append below.
        self.dp = []
        for _ in range(length):
            self.dp.append({code: 0 for code in range(97, 123)})
        self.dp[0][ord(patten[0])] = 1
        # `history` tracks the restart state (longest proper prefix-suffix).
        history = 0
        for i in range(1, length):
            for code in range(97, 123):
                if ord(patten[i]) == code:
                    self.dp[i][code] = i + 1
                else:
                    self.dp[i][code] = self.dp[history][code]
            history = self.dp[history][ord(patten[i])]
    def search(self, text):
        """Run ``text`` through the DFA; True once the accept state is reached."""
        end = len(self.dp)
        status = 0
        for ch in text:
            status = self.dp[status][ord(ch)]
            if status == end:
                return True
        return False
# Quick demo: "abdc" is not a substring of "abcabababc", so this prints False.
s = Solution()
res = s.kmp("abcabababc","abdc")
print(res)
|
"""
LARPER - Let's Authenticate Resources Per Each Request
Design
======
larper provides the following ways to start an LDAP directory session:
* UserSession.connect(request)
* RegistrarSession.connect(request)
* AdminSession.connect(request)
UserSession
-----------
Once one has obtained a directory session, one can search or
update a person in the phonebook.
Search results are larper.Person objects.
People have larper.SystemId objects as well as profile photos.
RegistrarSession
-----------------
With a registrar session, one can add new users to the system.
AdminSession
------------
With an admin session, one can delete users from the system.
"""
import os
import re
from time import time
import ldap
from ldap.dn import explode_dn, escape_dn_chars
from ldap.filter import filter_format
from ldap.modlist import addModlist, modifyModlist
from django.conf import settings
from django.core import signing
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db.models import Q
import commonware.log
from statsd import statsd
from users.models import UserProfile
log = commonware.log.getLogger('i.larper')
def get_password(request):
    """Return the clear-text password stored (signed) in the session, if any.

    Not sure if this and store_password belong here...
    """
    signed = request.session.get('PASSWORD')
    if signed:
        return signing.loads(signed).get('password')
def store_password(request, password):
    """Sign ``password`` (clear text) and stash it in the Django session.

    request  - Django web request
    password - A clear text password
    """
    request.session['PASSWORD'] = signing.dumps({'password': password})
class NO_SUCH_PERSON(Exception):
    """Raised when a search by unique_id fails."""


class INCONCEIVABLE(Exception):
    """Raised when something that should not happen, happens.

    If this happens often, this Exception might not mean what you think
    it means.
    """
CONNECTIONS_KEY = 'larper_conns'
READ = 0
WRITE = 1
MOZILLA_IRC_SERVICE_URI = 'irc://irc.mozilla.org/'
KNOWN_SERVICE_URIS = [
MOZILLA_IRC_SERVICE_URI,
]
PEEP_SRCH_FLTR = '(&(objectClass=mozilliansPerson)(|(cn=*%s*)(mail=*%s*)))'
IRC_SRCH_FLTR = """(&(objectClass=mozilliansLink)(mozilliansServiceID=*%s*)
(mozilliansServiceURI=irc://irc.mozilla.org/))"""
NONVOUCHED_SRCH_FLTR = """(&(objectClass=mozilliansPerson)(|(cn=*%s*)
(mail=*%s*))(&(!(mail=*@mozilla*))
(!(mozilliansVouchedBy=*))))"""
NONVOUCHED_EMAIL_SRCH_FLTR = """(&(|(mail=*%s*)(uid=*%s*))
(&(!(mail=*@mozilla*))
(!(mozilliansVouchedBy=*))))"""
class UserSession(object):
"""
A directory session for the currenly logged in user.
Data access to the directory is mediated on a
user by user, request by request basis. A person object
may be missing, or search results may be empty if the
current viewer of a directory doesn't have permissions
to see certain people.
"""
    def __init__(self, request):
        # The request carries both the auth credentials (session) and the
        # per-request LDAP connection cache used by _ensure_conn.
        self.request = request
    def _ensure_conn(self, mode):
        """
        Return a bound LDAP connection for this user, creating and caching
        it on the request when needed.

        mode - One of READ or WRITE. Pass WRITE
        if any of the LDAP operations will include
        adding, modifying, or deleting entries.
        """
        dn, password = self.dn_pass()
        if not hasattr(self.request, CONNECTIONS_KEY):
            # One cache per request: [READ conns, WRITE conns], keyed by DN.
            self.request.larper_conns = [{}, {}]
        if dn not in self.request.larper_conns[mode]:
            # Writes go to the sync provider; reads go to a consumer replica.
            if mode == WRITE:
                server_uri = settings.LDAP_SYNC_PROVIDER_URI
            else:
                server_uri = settings.LDAP_SYNC_CONSUMER_URI
            conn = ldap.initialize(server_uri)
            conn.bind_s(dn, password)
            self.request.larper_conns[mode][dn] = conn
        return self.request.larper_conns[mode][dn]
def dn_pass(self):
"""
Returns a tuple of LDAP distinguished name and password
for use during authentication.
Subclasses of UserSession should override this method
if they don't auth against the user in the session.
"""
unique_id = self.request.user.unique_id
password = get_password(self.request)
if unique_id and password:
return (Person.dn(unique_id), password)
else:
# Should never happen
if unique_id == None:
raise Exception("No unique id on the request.user object")
elif password == None:
raise Exception("No password in the session")
else:
raise Exception("unique_id [%s] password length [%d]" %\
(unique_id, len(password)))
def search(self, query, nonvouched_only=False):
"""
General purpose 'quick' search. Returns a list of
larper.Person objects.
"""
encoded_q = query.encode('utf-8')
if nonvouched_only:
peep_esc_q = filter_format(NONVOUCHED_SRCH_FLTR, (encoded_q, encoded_q))
else:
peep_esc_q = filter_format(PEEP_SRCH_FLTR, (encoded_q, encoded_q))
irc_esc_q = filter_format(IRC_SRCH_FLTR, (encoded_q,))
people = self._people_search(peep_esc_q)
irc_nicks = self._irc_search(irc_esc_q)
people += self._people_from_irc_results_search(irc_nicks)
return self._populate_people_results(people)
def search_by_name(self, query):
"""
Searches against the full_name field for people. Returns
same type of data as search.
"""
q = filter_format("(cn=*%s*)", (query.encode('utf-8'),))
return self._populate_people_results(self._people_search(q))
def search_by_email(self, query, nonvouched_only=False):
"""
Searches against the email fields for people. Returns
same type of data as search.
"""
encoded_q = query.encode('utf-8')
if nonvouched_only:
q = filter_format(NONVOUCHED_EMAIL_SRCH_FLTR,
(encoded_q, encoded_q,))
else:
q = filter_format("(|(mail=*%s*)(uid=*%s*))",
(encoded_q, encoded_q,))
return self._populate_people_results(self._people_search(q))
def get_by_unique_id(self, unique_id, use_master=False):
"""
Retrieves a person from LDAP with this unique_id.
Raises NO_SUCH_PERSON if unable to find them.
use_master can be set to True to force reading from master
where stale data isn't acceptable.
"""
q = filter_format("(uniqueIdentifier=%s)", (unique_id,))
results = self._people_search(q, use_master)
msg = 'Unable to locate %s in the LDAP directory'
if not results:
raise NO_SUCH_PERSON(msg % unique_id)
elif len(results) == 1:
_dn, attrs = results[0]
# Pending users will detect the existance of another
# person, but there won't be any data besides uniqueIdentifier
if 'sn' not in attrs:
raise NO_SUCH_PERSON(msg % unique_id)
else:
return Person.new_from_directory(attrs)
else:
msg = 'Multiple people found for %s. This should never happen.'
raise INCONCEIVABLE(msg % unique_id)
def profile_photo(self, unique_id, use_master=False):
"""
Retrieves a person's profile photo. Returns
jpeg binary data.
"""
attrs = self._profile_photo_attrs(unique_id, use_master)
if 'jpegPhoto' in attrs:
return attrs['jpegPhoto'][0]
return False
def profile_service_ids(self, person_unique_id, use_master=False):
"""
Returns a dict that contains remote system ids.
Keys for dict include:
* MOZILLA_IRC_SERVICE_URI
Values are a SystemId object for that service.
use_master can be set to True to force reading from master
where stale data isn't acceptable.
"""
services = {}
if use_master:
conn = self._ensure_conn(WRITE)
else:
conn = self._ensure_conn(READ)
search_filter = '(mozilliansServiceURI=*)'
rs = conn.search_s(Person.dn(person_unique_id),
ldap.SCOPE_SUBTREE,
search_filter)
for r in rs:
_dn, attrs = r
sysid = SystemId(person_unique_id,
attrs['uniqueIdentifier'][0].decode('utf-8'),
attrs['mozilliansServiceURI'][0].decode('utf-8'),
service_id=attrs['mozilliansServiceID'][0]\
.decode('utf-8'))
services[attrs['mozilliansServiceURI'][0]] = sysid
return services
def _profile_photo_attrs(self, unique_id, use_master=False):
"""Returns dict that contains the jpegPhoto key or None."""
if use_master:
conn = self._ensure_conn(WRITE)
else:
conn = self._ensure_conn(READ)
search_filter = filter_format("(uniqueIdentifier=%s)", (unique_id,))
rs = conn.search_s(settings.LDAP_USERS_GROUP, ldap.SCOPE_SUBTREE,
search_filter, ['jpegPhoto'])
for r in rs:
_dn, attrs = r
if 'jpegPhoto' in attrs:
return attrs
return {}
def update_person(self, unique_id, form):
"""
Updates a person's LDAP directory record
based on phonebook.forms.ProfileForm.
Method always uses master.
"""
conn = self._ensure_conn(WRITE)
dn = Person.dn(unique_id)
person = self.get_by_unique_id(unique_id)
form['unique_id'] = person.unique_id
if 'email' not in form:
form['email'] = person.username
newp = Person.form_to_profile_attrs(form)
modlist = modifyModlist(person.ldap_attrs(), newp,
ignore_oldexistent=1)
if modlist:
conn.modify_s(dn, modlist)
services = self.profile_service_ids(unique_id)
oldservs = dict((k, v.ldap_attrs()) for k, v in services.iteritems())
newservs = SystemId.form_to_service_ids_attrs(form)
for service_uri in KNOWN_SERVICE_URIS:
newserv = newservs[service_uri]
if service_uri in oldservs:
oldserv = oldservs[service_uri]
newserv['uniqueIdentifier'][0] = oldserv['uniqueIdentifier'][0]
sys_id_dn = SystemId.dn(unique_id,
oldserv['uniqueIdentifier'][0])
if newserv['mozilliansServiceID'][0]:
modlist = modifyModlist(oldserv, newserv)
if modlist:
conn.modify_s(sys_id_dn, modlist)
else:
conn.delete_s(sys_id_dn)
else:
sys_id_dn = SystemId.dn(unique_id,
newserv['uniqueIdentifier'][0])
if newserv['mozilliansServiceID'][0]:
modlist = addModlist(newserv)
if modlist:
conn.add_s(sys_id_dn, modlist)
return True
def update_profile_photo(self, unique_id, form):
"""
Adds or Updates a person's profile photo.
unique_id
form - An instance of phonebook.forms.ProfileForm
Safe to call if no photo has been uploaded by the user.
Method always uses master.
"""
if 'photo' in form and form['photo']:
photo = form['photo'].file.read()
elif form.get('photo_delete'):
photo = None
else:
return False
conn = self._ensure_conn(WRITE)
dn = Person.dn(unique_id)
attrs = self._profile_photo_attrs(unique_id)
if photo:
new_attrs = dict(jpegPhoto=photo)
elif attrs.get('jpegPhoto'):
new_attrs = dict(**attrs)
del new_attrs['jpegPhoto']
else: # If no photo exists for this user, we don't bother trying to
# delete it.
return False
# Person record will always exist, so we only do a mod
modlist = modifyModlist(attrs, new_attrs,
ignore_oldexistent=bool(photo))
if modlist:
conn.modify_s(dn, modlist)
def _people_search(self, search_filter, use_master=False):
"""
use_master can be set to True to force reading from master
where stale data isn't acceptable.
"""
if use_master:
conn = self._ensure_conn(WRITE)
else:
conn = self._ensure_conn(READ)
return conn.search_s(settings.LDAP_USERS_GROUP, ldap.SCOPE_SUBTREE,
search_filter, Person.search_attrs)
def _irc_search(self, search_filter, use_master=False):
"""
Searches for SystemIDs based on IRC nickname.
use_master can be set to True to force reading from master
where stale data isn't acceptable.
"""
if use_master:
conn = self._ensure_conn(WRITE)
else:
conn = self._ensure_conn(READ)
return conn.search_s(settings.LDAP_USERS_GROUP, ldap.SCOPE_SUBTREE,
search_filter, SystemId.search_attrs)
def _people_from_irc_results_search(self, irc_results, use_master=False):
"""
Searches for SystemIDs based on IRC nickname.
use_master can be set to True to force reading from master
where stale data isn't acceptable.
"""
if use_master:
conn = self._ensure_conn(WRITE)
else:
conn = self._ensure_conn(READ)
uniq_ids = []
for result in irc_results:
dn, attrs = result
parts = ldap.dn.explode_dn(dn)
# ['uniqueIdentifier=13173391.34', 'uniqueIdentifier=7f3a67u000',
# 'ou=people', 'dc=mozillians', 'dc=org']
if len(parts) > 1:
subparts = parts[1].split('=')
# ['uniqueIdentifier', '7f3a67u000001']
if len(subparts) == 2:
# 7f3a67u000001
uniq_ids.append(subparts[1])
if not uniq_ids:
return []
# "(uniqueIdentifier=7f3a67u00001)(uniqueIdentifier=7f3a67u00002)"
frags = ["(uniqueIdentifier=%s)" % x for x in uniq_ids]
dn_filter_frag = ''.join(frags)
base_filter = '(&(objectClass=mozilliansPerson)(|%s))'
search_filter = base_filter % dn_filter_frag
return conn.search_s(settings.LDAP_USERS_GROUP, ldap.SCOPE_SUBTREE,
search_filter, Person.search_attrs)
def _populate_people_results(self, results):
"""
Given LDAP search results, method sorts and ensures
unique set of results.
"""
people = []
cache = {}
for result in results:
dn, attrs = result
if 'sn' not in attrs:
continue
p = Person.new_from_directory(attrs)
if not p or p.unique_id in cache:
continue
else:
cache[p.unique_id] = True
people.append(p)
return people
def __str__(self):
return "<larper.UserSession for %s>" % self.request.user.username
@staticmethod
def connect(request):
"""
Open (or reuse) a connection to the phonebook directory.
Request must contain an authenticated user.
Data requests will be authorized based on the current
users rights.
Connection pooling, master/slave routing, and other low
level details will automagically work.
"""
return UserSession(request)
@staticmethod
def disconnect(request):
"""
Releases all connections to the LDAP directory, including:
* UserSession instances
* AdminSession instances
* RegistrarSession instances
"""
if hasattr(request, CONNECTIONS_KEY):
# Each mode (read/write)
conns = request.larper_conns
for i in range(len(conns)):
for dn in conns[i].keys():
conns[i][dn].unbind()
del request.larper_conns[i][dn]
class Person(object):
    """
    A person has a couple required attributes and then lots of optional
    profile details. Data is populated based on what the current request's
    user should see. If a property is None, it may be because

    * the profile's property doesn't have any data or
    * the viewer doesn't have permission to see this property

    Required Properties
    -------------------
    * unique_id - A stable id that is randomly generated
    * username - Email address used for authentication
    * full_name - A person's full name
    * last_name - A person's last name

    Optional Properties
    -------------------
    * first_name - A person's first name
    * biography - A person's bio
    * voucher_unique_id - The unique_id of the Mozillian who vouched for them.

    Photo
    -----
    Photo access is done separately to improve data access performance.
    For a user's photo, see larper.UserSession.profile_photo and
    update_profile_photo.
    """
    # LDAP attributes every mozilliansPerson entry is expected to carry.
    required_attrs = ['uniqueIdentifier', 'uid', 'cn', 'sn']
    optional_attrs = ['givenName', 'description', 'mail', 'telephoneNumber',
                      'mozilliansVouchedBy']
    # Attributes requested by people searches.
    search_attrs = required_attrs + optional_attrs
    # Attributes fetched separately as binary data.
    binary_attrs = ['jpegPhoto']

    def __init__(self, unique_id, username,
                 first_name=None, last_name=None,
                 full_name=None,
                 biography=None,
                 voucher_unique_id=None):
        self.unique_id = unique_id
        self.username = username
        self.first_name = first_name
        self.last_name = last_name
        self.full_name = full_name
        # NOTE(review): if first_name is None this renders as "None <sn>";
        # new_from_directory always passes a string (possibly '').
        self.display_name = '%s %s' % (first_name, last_name)
        self.biography = biography
        self.voucher_unique_id = voucher_unique_id

    def __str__(self):
        # NOTE(review): returns a unicode object (Python 2); this behaves
        # like __unicode__ rather than a byte-string __str__.
        return u'%s %s' % (self.first_name, self.last_name)

    @staticmethod
    def new_from_directory(ldap_attrs):
        """
        Given a dictionary of LDAP search result attributes, this
        method returns a larper.Person instance.
        """
        # givenName is optional in LDAP, but required by our API
        given_name = ldap_attrs.get('givenName', [''])
        # Positional args map to: unique_id, username, first_name,
        # last_name, full_name (cn).
        p = Person(ldap_attrs['uniqueIdentifier'][0].decode('utf-8'),
                   ldap_attrs['uid'][0].decode('utf-8'),
                   given_name[0].decode('utf-8'),
                   ldap_attrs['sn'][0].decode('utf-8'),
                   ldap_attrs['cn'][0].decode('utf-8'))
        if 'description' in ldap_attrs:
            p.biography = ldap_attrs['description'][0].decode('utf-8')
        if 'mozilliansVouchedBy' in ldap_attrs:
            # Stored as the voucher's full dn; reduce to their unique_id.
            voucher = ldap_attrs['mozilliansVouchedBy'][0].decode('utf-8')
            p.voucher_unique_id = Person.unique_id(voucher)
        return p

    def get_profile(self):
        """Retrieve the Django UserProfile for this Person.

        This is full of hacks because all the Mozillians servers are throwing
        ObjectDoesNotExist errors (even in production) if we try a straight-up
        `User.objects.get(email=self.username)`. This method now exhaustively
        tries to get a User object from the database. If it doesn't find one,
        or finds one without a UserProfile, we make one on the spot, trying
        our best to fill things in sanely. FML.

        See: https://bugzilla.mozilla.org/show_bug.cgi?id=698699

        TODO: Remove this as soon as possible. It's insane.
        """
        # Match on either email or username; take at most one row.
        user = (User.objects.filter(Q(email=self.username) |
                                    Q(username=self.username)))[:1]
        if user:
            # Yes, sometimes the User exists but the UserProfile doesn't.
            # See: https://bugzilla.mozilla.org/show_bug.cgi?id=699234
            try:
                profile = user[0].get_profile()
            except ObjectDoesNotExist, e:
                statsd.incr('user.errors.profile_doesnotexist')
                log.warning(e)
                profile = UserProfile.objects.create(user=user[0])
        else:
            statsd.incr('user.errors.doesnotexist')
            log.warning('No user with email %s' % self.username)
            # Create a shell account; auth happens against LDAP anyway.
            user = User(username=self.username, email=self.username)
            user.set_unusable_password()
            user.save()
            profile = user.get_profile()
        return profile

    def ldap_attrs(self):
        """
        Converts this person object into a dict compatible
        with the low level ldap libraries.
        """
        objectclass = ['inetOrgPerson', 'person', 'mozilliansPerson']
        full_name = u'%s %s' % (self.first_name, self.last_name)
        # NOTE(review): self-assignment below is a no-op; compare
        # form_to_profile_attrs, which encodes to utf-8 at this point.
        full_name = full_name
        attrs = dict(objectclass=objectclass,
                     uniqueIdentifier=[self.unique_id],
                     uid=[self.username],
                     sn=[self.last_name],
                     cn=[full_name],
                     displayName=[full_name],
                     mail=[self.username])
        # Optional
        if self.first_name:
            attrs['givenName'] = [self.first_name]
        if self.biography:
            attrs['description'] = [self.biography]
        # TODO - deal with this somewhere else?
        if self.voucher_unique_id:
            attrs['mozilliansVouchedBy'] = [Person.dn(self.voucher_unique_id)]
        return attrs

    @staticmethod
    def form_to_profile_attrs(form):
        """
        Creates a profile dict compatible with the low level ldap libraries
        from a form dictionary.

        Form must contain the following keys:
        * unique_id
        * username
        * first_name
        * last_name
        """
        objectclass = ['inetOrgPerson', 'person', 'mozilliansPerson']
        full_name = u'%s %s' % (form['first_name'], form['last_name'])
        full_name = full_name.encode('utf-8')
        attrs = dict(objectclass=objectclass,
                     uniqueIdentifier=[form['unique_id'].encode('utf-8')],
                     uid=[form['email'].encode('utf-8')],
                     sn=[form['last_name'].encode('utf-8')],
                     cn=[full_name],
                     displayName=[full_name],
                     mail=[form['email'].encode('utf-8')])
        if 'password' in form:
            attrs['userPassword'] = [form['password'].encode('utf-8')]
        # A [None] value presumably tells modifyModlist to delete the
        # attribute when the form field is cleared — TODO confirm.
        if 'first_name' in form and form['first_name']:
            attrs['givenName'] = [form['first_name'].encode('utf-8')]
        else:
            attrs['givenName'] = [None]
        if 'biography' in form and form['biography']:
            attrs['description'] = [form['biography'].encode('utf-8')]
        else:
            attrs['description'] = [None]
        return attrs

    @staticmethod
    def unique_id(dn):
        """Extract the uniqueIdentifier value from an LDAP dn.

        Raises INVALID_PERSON_DN when the dn has no such component.
        """
        # NOTE(review): this staticmethod shares its name with the
        # instance attribute self.unique_id; it is only usable as
        # Person.unique_id(dn).
        dn_parts = explode_dn(dn)
        reg = re.compile('uniqueIdentifier=(.*)', re.IGNORECASE)
        for part in dn_parts:
            matcher = reg.match(part)
            if matcher:
                return matcher.groups()[0]
        raise INVALID_PERSON_DN(dn)

    @staticmethod
    def dn(unique_id):
        """Format the LDAP distinguished name for a person's unique_id."""
        params = (escape_dn_chars(unique_id), settings.LDAP_USERS_GROUP)
        return 'uniqueIdentifier=%s,%s' % params
class SystemId(object):
    """
    Represents a connection between a person and
    a remote system.

    Required Properties
    -------------------
    * person_unique_id - Person who owns this system id
    * unique_id - internal stable id for this service id
    * service_uri - A URI which commonly identifies a remote system
    * service_id - username, email, or whatever is used in the
      remote system as an ID.

    KISS: Although many URIs could signify a remote system, we should not
    have several URIs for a service which would only have one auth
    credentials. Example: G+, Google docs, and Gmail would only have one
    URI - http://google.com. Youtube (a Google property) would have
    it's own URI, since it has seperate username.
    """
    # LDAP attributes requested by SystemId searches.
    search_attrs = ['uniqueIdentifier', 'mozilliansServiceURI',
                    'mozilliansServiceID']

    def __init__(self, person_unique_id, unique_id, service_uri, service_id):
        self.person_unique_id = person_unique_id
        self.unique_id = unique_id
        self.service_uri = service_uri
        self.service_id = service_id

    def ldap_attrs(self):
        """
        Converts this SystemId object into a dict compatible
        with the low level ldap libraries.
        """
        attrs = dict(objectclass=['mozilliansLink'],
                     uniqueIdentifier=[self.unique_id],
                     mozilliansServiceURI=[self.service_uri],
                     mozilliansServiceID=[self.service_id])
        return attrs

    @staticmethod
    def form_to_service_ids_attrs(form):
        """
        Creates a dict keyed on service URI. Each value is a dict of
        remote-system-id attributes compatible with the low level ldap
        libraries, built from a form dictionary.
        (Docstring fixed: this returns a dict, not a list.)

        See phonebook.forms.ProfileForm for full list of fields.
        """
        known_service_fields = [
            ('irc_nickname', MOZILLA_IRC_SERVICE_URI),
        ]
        attrs_list = {}
        for field, uri in known_service_fields:
            system_id = form[field].encode('utf-8')
            system_unique_id = form['%s_unique_id' % field].encode('utf-8')
            if not system_unique_id:
                # First save of this service id: mint a stable identifier.
                system_unique_id = str(time())
            if not system_id:
                # An empty form value signals deletion (see update_person).
                system_id = None
            attrs = dict(objectclass=['mozilliansLink'],
                         uniqueIdentifier=[system_unique_id],
                         # BUG FIX: was hard-coded to MOZILLA_IRC_SERVICE_URI;
                         # use the loop's uri so any future entries added to
                         # known_service_fields map to the correct service.
                         mozilliansServiceURI=[uri],
                         mozilliansServiceID=[system_id])
            attrs_list[uri] = attrs
        return attrs_list

    @staticmethod
    def dn(person_unique_id, unique_id):
        """
        Formats an LDAP distinguished name for a remote system id,
        nested under the owning person's dn.
        """
        params = (escape_dn_chars(unique_id), Person.dn(person_unique_id))
        return 'uniqueIdentifier=%s,%s' % params
class INVALID_PERSON_DN(Exception):
    """Raised when a function expecting a valid person DN receives an
    invalid one — probably missing a uniqueIdentifier component."""
# Increase length of random uniqueIdentifiers as size of Mozillians
# community enters the low millions ;)
UUID_SIZE = 5  # bytes drawn from os.urandom; hex-encoded to 10 characters
class RegistrarSession(UserSession):
    """A directory session bound to the shared registrar account."""

    def __init__(self, request):
        UserSession.__init__(self, request)

    def dn_pass(self):
        """Returns registrar dn and password."""
        return (settings.LDAP_REGISTRAR_DN, settings.LDAP_REGISTRAR_PASSWORD)

    def create_person(self, form):
        """
        Creates a new user account in the LDAP directory.

        form - An instance of phonebook.forms.RegistrationForm

        Returns a string which is the unique_id of the new user.
        Method always uses master.
        """
        # Mint a random hex identifier for the new entry.
        unique_id = os.urandom(UUID_SIZE).encode('hex')
        form['unique_id'] = unique_id
        attrs = Person.form_to_profile_attrs(form)
        conn = self._ensure_conn(WRITE)
        conn.add_s(Person.dn(unique_id), addModlist(attrs))
        return unique_id

    @staticmethod
    def connect(request):
        """
        Open (or reuse) a connection to the phonebook directory.

        Data requests will be authorized based on the shared
        system's registrar account.

        Connection pooling, master/slave routing, and other low
        level details will automagically work.
        """
        return RegistrarSession(request)
class AdminSession(UserSession):
    """A directory session bound to the shared LDAP admin account."""

    def __init__(self, request):
        UserSession.__init__(self, request)

    def dn_pass(self):
        """Returns administrator dn and password."""
        return (settings.LDAP_ADMIN_DN, settings.LDAP_ADMIN_PASSWORD)

    def delete_person(self, unique_id):
        """
        Completely removes a user's data from the LDAP directory.

        Note: Does not un-vouch any Mozillians for whom this user
        has vouched.

        Method always uses master.
        """
        conn = self._ensure_conn(WRITE)
        person_dn = Person.dn(unique_id)
        # Child entries (SystemId links, etc.) must be removed before
        # the person entry itself can be deleted.
        children = conn.search_s(person_dn,
                                 ldap.SCOPE_ONELEVEL,
                                 '(objectclass=*)')
        for child_dn, _attrs in children:
            conn.delete_s(child_dn)
        conn.delete_s(person_dn)
        return self

    @staticmethod
    def connect(request):
        """
        Open (or reuse) a connection to the phonebook directory.

        Data requests will be authorized based on the shared
        system's admin account.

        Connection pooling, master/slave routing, and other low
        level details will automagically work.
        """
        return AdminSession(request)
def change_password(unique_id, oldpass, password):
"""Changes a user's password."""
dn = Person.dn(unique_id)
conn = ldap.initialize(settings.LDAP_SYNC_PROVIDER_URI)
try:
conn.bind_s(dn, oldpass)
conn.passwd_s(dn, None, password)
log.debug("Changed %s password" % dn)
return True
except Exception, e:
log.error("Password change failed %s", e)
return False
finally:
conn.unbind()
def set_password(username, password):
    """
    Resets a user's LDAP password.

    .. warning:
       *Careful!* This function has the capability to change
       anyone's password. It should only be used for
       un-authenticated users from the reset-password email
       flow.

       *If the user is authenticated*, then
       *use the change_password method above*.
    """
    conn = ldap.initialize(settings.LDAP_SYNC_PROVIDER_URI)
    try:
        # Bind as admin — this path runs before the user is authenticated.
        conn.bind_s(settings.LDAP_ADMIN_DN,
                    settings.LDAP_ADMIN_PASSWORD)
        flt = filter_format("(uid=%s)", (username,))
        matches = conn.search_s(settings.LDAP_USERS_GROUP,
                                ldap.SCOPE_SUBTREE, flt)
        for dn, _attrs in matches:
            conn.passwd_s(dn, None, password)
            log.info("Resetting %s password" % dn)
    finally:
        conn.unbind()
def _return_all():
    """Return all LDAP records, provided no LIMITs are set."""
    conn = ldap.initialize(settings.LDAP_SYNC_PROVIDER_URI)
    conn.bind_s(settings.LDAP_ADMIN_DN, settings.LDAP_ADMIN_PASSWORD)
    # Every account has an '@' in its mail/uid, so this matches everyone.
    at_sign = '@'.encode('utf-8')
    flt = filter_format('(|(mail=*%s*)(uid=*%s*))', (at_sign, at_sign))
    return conn.search_s(settings.LDAP_USERS_GROUP, ldap.SCOPE_SUBTREE, flt)
def get_user_by_email(email):
    """Given an email address, return an ldap record."""
    conn = ldap.initialize(settings.LDAP_SYNC_PROVIDER_URI)
    conn.bind_s(settings.LDAP_ADMIN_DN, settings.LDAP_ADMIN_PASSWORD)
    needle = email.encode('utf-8')
    flt = filter_format('(|(mail=*%s*)(uid=*%s*))', (needle, needle))
    matches = conn.search_s(settings.LDAP_USERS_GROUP, ldap.SCOPE_SUBTREE,
                            flt)
    if matches:
        return matches[0]
def get_user_by_uid(uid):
    """Given a uniqueIdentifier, return an ldap record."""
    conn = ldap.initialize(settings.LDAP_SYNC_PROVIDER_URI)
    conn.bind_s(settings.LDAP_ADMIN_DN, settings.LDAP_ADMIN_PASSWORD)
    flt = filter_format('(uniqueIdentifier=%s)', (uid,))
    matches = conn.search_s(settings.LDAP_USERS_GROUP, ldap.SCOPE_SUBTREE,
                            flt, Person.search_attrs)
    if matches:
        return matches[0]
def record_vouch(voucher, vouchee):
    """Updates a *Pending* account to *Mozillian* status.

    voucher - The unique_id of the Mozillian who will vouch
    vouchee - The unique_id of the Pending user who is being vouched for
    """
    conn = ldap.initialize(settings.LDAP_SYNC_PROVIDER_URI)
    conn.bind_s(settings.LDAP_ADMIN_DN, settings.LDAP_ADMIN_PASSWORD)
    # The voucher's full dn is recorded on the vouchee's entry.
    change = [(ldap.MOD_ADD, 'mozilliansVouchedBy', [Person.dn(voucher)])]
    conn.modify_s(Person.dn(vouchee), change)
    return True
def get_service_data(uid):
    """
    Returns a dict that contains remote system ids.
    Keys for dict include:
    * MOZILLA_IRC_SERVICE_URI

    Values are a SystemId object for that service.
    Returns an empty dict when the person has no entry at all.
    """
    services = {}
    conn = ldap.initialize(settings.LDAP_SYNC_PROVIDER_URI)
    conn.bind_s(settings.LDAP_ADMIN_DN, settings.LDAP_ADMIN_PASSWORD)
    try:
        results = conn.search_s(Person.dn(uid), ldap.SCOPE_SUBTREE,
                                '(mozilliansServiceURI=*)')
    except ldap.NO_SUCH_OBJECT:
        # Person entry doesn't exist; nothing to report.
        return services
    for _dn, attrs in results:
        raw_uri = attrs['mozilliansServiceURI'][0]
        services[raw_uri] = SystemId(
            uid,
            attrs['uniqueIdentifier'][0].decode('utf-8'),
            raw_uri.decode('utf-8'),
            service_id=attrs['mozilliansServiceID'][0].decode('utf-8'))
    return services
|
"""
@Author : Laura
@File : hr_selenium.py
@Time : 2020/3/16 15:43
"""
# encoding: utf-8 #指定编码格式
from selenium import webdriver #selenium 模块中导入指定部分webdriver(类)
import time #导入时间模块
import unittest
chrome_driver=r"E:\Program Files\python 3.8.2\Lib\site-packages\selenium\chromedriver.exe"
browser = webdriver.Chrome(executable_path=chrome_driver) # 实例化浏览器,executable_path指定了chromedriver所在的路径,程序运行时就会到该路径下启动chrome浏览器
url_1 = "http://dev.emhes.cn/"
browser.get(url_1) #获取url-1
time.sleep(2) #等待2s
# print("浏览器最大化")
# browser.maximize_window() #将浏览器最大化显示time.sleep(2)
#
# print("设置浏览器宽480、高800显示")
# browser.set_window_size(480, 800) #参数数字为像素点
# browser.maximize_window()
#
# print("浏览器后退/前进")
# url_2 = "https://www.runoob.com/python3/python3-module.html"
# browser.get(url_2) #获取url-2
# browser.back() #后退到url_1
# time.sleep(1)
# # browser.forward() #前进到url_2
#
# browser.find_element_by_id("kw").send_keys("selenium") #id定位输入框,并输入值“selenium”
# browser.find_element_by_id("su").click() #id定位搜索按钮,并点击
# time.sleep(3)
# browser.quit() #关闭整个窗口 broswer.close() 关闭当前窗口
# browser.quit() #关闭整个窗口 broswer.close() 关闭当前窗口
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
"""
Twitter's wrapper around the optparse module for doing more stateless builder-style
options parsing.
Typical usage:
from twitter.common import options
base_parser = options.parser()
my_opts = [
options.Option(...),
options.Option(...)
]
my_foo_opts = [
options.Option(...),
options.Option(...)
]
group = base_parser.new_group('foo')
group.add_option(*my_foo_opts)
parser = base_parser
.options(my_opts)
.groups([group])
.interspersed_arguments(True)
.usage("blah blah blah"))
values, rargs = parser.parse()
"""
__author__ = 'Brian Wickman'
import copy
import sys
import inspect
import types
from optparse import (
OptionParser,
OptionValueError,
Option,
OptionGroup,
Values,
NO_DEFAULT
)
from .twitter_option import TwitterOption
def parser():
    """Return a new, empty TwitterOptionParser (builder entry point)."""
    return TwitterOptionParser()
def new_group(name):
    """Return a new TwitterOptionGroup with the given display name."""
    return TwitterOptionGroup(name)
# Short alias for new_group, exported below for convenience.
group = new_group

# Explicit public API of this module.
__all__ = [
    'parser',
    'new_group',
    'group',  # alias for new_group
    'Option',
    'TwitterOption',
    'Values'
]
class TwitterOptionGroup(object):
    """A named, ordered collection of options used to build help groups."""

    def __init__(self, name):
        self._name = name
        self._option_list = []

    def add_option(self, *option):
        """Append the given option(s) to the end of this group."""
        for single_option in option:
            self._option_list.append(single_option)

    def prepend_option(self, *option):
        """Insert the given option(s) at the front of this group."""
        # Rebind (rather than splice in place) to match original semantics.
        rebuilt = list(option)
        rebuilt.extend(self._option_list)
        self._option_list = rebuilt

    def options(self):
        """Return the group's option list, in insertion order."""
        return self._option_list

    def name(self):
        """Return the group's display name."""
        return self._name

    @staticmethod
    def format_help(group, header=None):
        """No-op placeholder; help formatting is handled elsewhere."""
        return None
class TwitterOptionParser(object):
    """
    Wrapper for builder-style stateless options parsing.

    Every builder method (usage, options, groups, values,
    interspersed_arguments) returns a deep copy with the change
    applied; the receiving parser is never mutated.
    """
    class InvalidParameters(Exception): pass
    class InvalidArgument(Exception): pass

    # Full set of attributes duplicated by _copy(); keep in sync with
    # __init__.
    ATTRS = [ '_interspersed_arguments', '_usage', '_options', '_groups', '_values' ]

    def __init__(self):
        self._interspersed_arguments = False
        self._usage = ""
        self._options = []
        self._groups = []
        self._values = Values()

    def interspersed_arguments(self, i_a=None):
        """ Enable/disable interspersed arguments. """
        # Getter when called without an argument; builder setter otherwise.
        if i_a is None:
            return self._interspersed_arguments
        me = self._copy()
        me._interspersed_arguments = i_a
        return me

    def usage(self, new_usage=None):
        """ Get/set usage. """
        if new_usage is None:
            return self._usage
        me = self._copy()
        me._usage = new_usage
        return me

    def options(self, merge_options=None):
        """ Get/add options. """
        if merge_options is None:
            return self._options
        me = self._copy()
        me._options.extend(merge_options)
        return me

    def groups(self, merge_groups=None):
        """ Get/add groups. """
        if merge_groups is None:
            return self._groups
        me = self._copy()
        me._groups.extend(merge_groups)
        return me

    def values(self, merge_values=None):
        """ Get/update default/parsed values. """
        if merge_values is None:
            return self._values
        me = self._copy()
        TwitterOptionParser._merge_values(me._values, merge_values)
        return me

    @staticmethod
    def _merge_values(values1, values2):
        # Copy every attribute from values2 onto values1, skipping
        # optparse's NO_DEFAULT sentinel.
        for attr in values2.__dict__:
            if getattr(values2, attr) != NO_DEFAULT:
                setattr(values1, attr, getattr(values2, attr))

    def _copy(self):
        # Deep-copy each builder attribute onto a fresh parser instance.
        c = TwitterOptionParser()
        for attr in TwitterOptionParser.ATTRS:
            setattr(c, attr, copy.deepcopy(getattr(self, attr)))
        return c

    def _init_parser(self):
        # Materialize a real optparse.OptionParser from the builder state.
        parser = OptionParser(add_help_option=False, usage=self.usage())
        parser.allow_interspersed_args = self.interspersed_arguments()
        for op in self.options():
            # Options are deep-copied so optparse cannot mutate our state.
            parser.add_option(copy.deepcopy(op))
        for gr in self.groups():
            real_group = parser.add_option_group(gr.name())
            for op in gr.options():
                real_group.add_option(copy.deepcopy(op))
        return parser

    # There is enough special-casing that we're doing to muck with the optparse
    # module that it might be worthwhile in writing our own, sigh.
    def parse(self, argv=None):
        """ Parse argv. If argv=None, use sys.argv[1:]. """
        parser = self._init_parser()
        inherit_values = copy.deepcopy(self.values())
        if isinstance(inherit_values, dict):
            inherit_values = Values(inherit_values)
        if argv is None:
            argv = sys.argv[1:]
        values, leftover = parser.parse_args(args=argv)
        # Drop unset (None) attributes so they don't clobber inherited
        # defaults during the merge below.
        for attr in copy.copy(values.__dict__):
            if getattr(values, attr) is None:
                delattr(values, attr)
        TwitterOptionParser._merge_values(inherit_values, values)
        return inherit_values, leftover

    def print_help(self):
        parser = self._init_parser()
        parser.print_help()

    def error(self, message):
        parser = self._init_parser()
        parser.error(message)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        return False
|
import pandas
import numpy
import random
import math
from statsmodels import api as sm
class Tree(object):
    """A binary decision-tree node; every field starts out unset (None)."""

    def __init__(self):
        # left/right children, node payload, split feature/constraint,
        # remaining candidate features, and the node's gini score.
        for attr in ('left', 'right', 'data', 'feature',
                     'constraint', 'featuresLeftToTry', 'gini'):
            setattr(self, attr, None)
def createTestForLogit():
    """Build a perfectly separable train/test pair for logit smoke tests."""
    # should be no errors here, perfect regression set up
    def _frame(g_values, half, n):
        return pandas.DataFrame({
            'G': pandas.Series(g_values),
            'A': pandas.Series([0] * half + [2] * half),
            'NUMFM': pandas.Series(numpy.random.randint(0, 10, n)),
            'Survived': pandas.Series([0] * half + [1] * half)})

    inSample = _frame(range(100), 50, 100)
    outOfSample = _frame(range(25, 75), 25, 50)
    return inSample, outOfSample
def createTestForGini():
    """Deterministic fixture where a Gini split should pick A first."""
    # in this example, A should be split off first, then NUMFM, and we
    # should be 100% correct for these four out of sample observations
    numfm_half = [0] * 41 + [1] * 9
    inSample = pandas.DataFrame({
        'G': pandas.Series([0] * 100),
        'A': pandas.Series([0] * 50 + [1] * 50),
        'NUMFM': pandas.Series(numfm_half + numfm_half),
        'Survived': pandas.Series([0] * 40 + [1] * 10 + [1] * 40 + [0] * 10)})
    outOfSample = pandas.DataFrame({
        'G': pandas.Series([0] * 4),
        'A': pandas.Series([0, 0, 1, 1]),
        'NUMFM': pandas.Series([0, 1, 0, 1]),
        'Survived': pandas.Series([0, 1, 1, 0])})
    return inSample, outOfSample
def createTestDataForNearestNeighbors():
    """Deterministic fixture for k-NN; k must be 3 for 100% accuracy."""
    # nearest neighbors should be 100% correct for these three out of
    # sample observations. k must be three for this to work.
    inSample = pandas.DataFrame({
        'G': pandas.Series([0] * 6),
        'A': pandas.Series([0, 0, 1, 1, .5, .5]),
        'NUMFM': pandas.Series([0, 1, 0, 1, .5, .5]),
        'Survived': pandas.Series([0, 0, 1, 1, 0, 1])})
    outOfSample = pandas.DataFrame({
        'G': pandas.Series([0] * 3),
        'A': pandas.Series([0.25, 0.75, 0.25]),
        'NUMFM': pandas.Series([0.25, 0.25, 0.75]),
        'Survived': pandas.Series([0, 1, 0])})
    return inSample, outOfSample
# This function creates random data to analyze and model
def createData():
    """Generate 20 random rows with G, A, NUMFM and Survived columns."""
    n = 20
    # Same draw order as before: G, A, NUMFM, Survived.
    columns = {'G': pandas.Series(numpy.random.randint(0, 2, n)),
               'A': pandas.Series(numpy.random.randint(0, 100, n)),
               'NUMFM': pandas.Series(numpy.random.randint(0, 6, n)),
               'Survived': pandas.Series(numpy.random.randint(0, 2, n))}
    return pandas.DataFrame(columns)
def createStaticData():
    """Return a fixed (inSampleData, outOfSampleData) pair of DataFrames.

    Hard-coded values make results reproducible from run to run, unlike
    createData() which draws fresh random samples.
    """
    gData = [1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0,1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1,0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    aData = [66, 83, 46, 21, 4, 66, 36, 49, 78, 59, 47, 56, 28, 6, 42, 69, 76,85, 58, 7, 76, 46, 65, 16, 79, 79, 7, 96, 39, 60, 73, 25, 53, 52,32, 35, 18, 53, 22, 54, 4, 46, 89, 41, 39, 80, 78, 28, 85, 86, 13,67, 20, 3, 4, 86, 95, 25, 87, 28]
    # NOTE(review): numfmData is a tuple (no brackets), unlike the other lists;
    # pandas.Series accepts either, so behavior is unchanged.
    numfmData = 3, 5, 0, 1, 5, 5, 4, 5, 4, 1, 4, 3, 4, 4, 2, 2, 1, 1, 1, 3, 4, 1, 2,4, 1, 0, 0, 1, 2, 5, 3, 1, 5, 1, 2, 2, 4, 1, 2, 2, 4, 1, 5, 0, 0, 1,5, 3, 2, 1, 5, 0, 3, 1, 5, 2, 5, 0, 5, 5
    SurvivedData = [1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1,1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1,1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0]
    preFrameData = {'G' : pandas.Series(gData) , 'A' : pandas.Series(aData), 'NUMFM' : pandas.Series(numfmData), 'Survived' : pandas.Series(SurvivedData)}
    inSampleData = pandas.DataFrame(preFrameData)
    # Out-of-sample columns follow the same layout as the in-sample columns above.
    gDataOOS = [1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1,
    1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0,
    0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0,
    0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1,
    1, 1, 0, 0, 1, 1, 1, 0]
    aDataOOS = [31, 3, 81, 87, 64, 51, 45, 5, 98, 25, 8, 11, 97, 10, 20, 56, 40,
    18, 46, 35, 32, 53, 99, 68, 97, 37, 82, 6, 63, 76, 29, 11, 95, 59,
    57, 89, 56, 30, 10, 85, 72, 45, 81, 15, 62, 91, 71, 51, 56, 52, 70,
    17, 58, 29, 11, 83, 22, 42, 78, 4, 18, 28, 62, 64, 30, 22, 36, 31,
    56, 20, 69, 42, 5, 90, 94, 49, 85, 48, 68, 69, 44, 42, 76, 15, 61,
    96, 20, 43, 11, 92, 0, 71, 81, 99, 3, 81, 31, 93, 41, 79]
    numfmDataOOS = [4, 2, 4, 3, 2, 0, 3, 2, 2, 2, 2, 5, 2, 4, 2, 0, 5, 3, 4, 1, 0, 0, 1,3, 1, 1, 3, 5, 3, 1, 5, 4, 0, 4, 2, 0, 0, 3, 1, 4, 3, 1, 4, 4, 0, 2,1, 2, 1, 1, 2, 4, 5, 3, 2, 4, 1, 5, 1, 4, 4, 4, 2, 0, 1, 3, 1, 1, 5,5, 0, 5, 0, 2, 2, 5, 1, 4, 5, 4, 2, 1, 4, 0, 0, 2, 4, 1, 2, 3, 4, 0,3, 2, 1, 5, 5, 0, 2, 2]
    SurvivedDataOOS = [1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1,
    0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1,
    1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1,
    1, 1, 1, 0, 0, 1, 0, 1]
    outOfSampleData = pandas.DataFrame({'G' : pandas.Series(gDataOOS) , 'A' : pandas.Series(aDataOOS), 'NUMFM' : pandas.Series(numfmDataOOS), 'Survived' : pandas.Series(SurvivedDataOOS)})
    return inSampleData, outOfSampleData
# This function returns an in sample data set and an out of sample data set from an original, whole data set
# The parameter percentInSample tells us how much of the original data set to pour into the new in sample data set
def getInAndOutSample(data, percentInSample):
    """Randomly split `data` into in-sample and out-of-sample DataFrames.

    percentInSample is the fraction (0..1) of rows assigned to the in-sample
    set; the remaining rows become the out-of-sample set.
    """
    inSampleLength = int(math.floor(percentInSample*len(data)))
    # NOTE(review): xrange and DataFrame.ix are Python-2 / old-pandas APIs;
    # on Python 3 / pandas >= 1.0 these would be range and .loc/.iloc.
    inSampleIndices = random.sample(xrange(len(data)), inSampleLength)
    inSampleData = data.ix[inSampleIndices]
    outOfSampleData = data.drop(inSampleIndices)
    return inSampleData, outOfSampleData
# Euclidean distance between two observations over the requested feature columns.
def getEuclideanDistance(features, currentOutOfSampleDataPoint, currentInSampleDataPoint):
    """Return sqrt(sum of squared per-feature differences) between two data points."""
    return math.sqrt(sum(
        (currentOutOfSampleDataPoint[feature] - currentInSampleDataPoint[feature]) ** 2
        for feature in features
    ))
# This function runs a nearest neighbors model and returns an error rate. We loop through all of the out of sample observations,
# and for each one, calculate the euclidean distance to each in sample observation. We then take the k closest observations and average their
# target variable to reach out prediction.
def runNearestNeighbors(inSampleData, outOfSampleData, k, alterOutOfSampleData=False):
    """k-nearest-neighbors classifier; returns the out-of-sample error rate.

    NOTE(review): alterOutOfSampleData is accepted for signature consistency
    with the other model runners but is not used in this implementation.
    """
    numIncorrectPredictions = 0.0
    features = inSampleData.columns.drop('Survived')
    for outOfSampleIndex in outOfSampleData.index: # loop through out of sample observations
        euclideanDistancesAndData = []
        # NOTE(review): DataFrame.ix was removed in pandas 1.0; .loc/.iloc is the modern equivalent.
        currentOutOfSampleDataPoint = outOfSampleData.ix[outOfSampleIndex]
        for inSampleIndex in inSampleData.index: # loop through in sample observations
            currentInSampleDataPoint = inSampleData.ix[inSampleIndex]
            euclideanDistance = getEuclideanDistance(features, currentOutOfSampleDataPoint, currentInSampleDataPoint) # calculate the distance between the current in and out of sample observations
            euclideanDistancesAndData.append({'EuclideanDistance' : euclideanDistance , 'InSampleDataPoint' : currentInSampleDataPoint}) # append the above distance calculation
        # NOTE(review): the lambda parameter `k` shadows the function parameter `k` here (harmless but confusing).
        euclideanDistancesAndData = sorted(euclideanDistancesAndData, key=lambda k : k['EuclideanDistance']) # sort the dictionary of in sample observations and their distances to the current out of sample observation
        targetVars = [ distanceAndDataPoint['InSampleDataPoint']['Survived'] for distanceAndDataPoint in euclideanDistancesAndData[:k]] # find the closest k in sample observations
        averageTarget = numpy.mean(targetVars)
        # If the average target is greater than 0.5, then we predict a value of 1 to be most likely, which corresponds to alive. Otherwise we predict a value of 0.
        prediction = 1 if averageTarget > .5 else 0
        if prediction != currentOutOfSampleDataPoint['Survived']: # check if our prediction is correct and increment numIncorrectPredictions if it is not
            numIncorrectPredictions += 1
    return numIncorrectPredictions / len(outOfSampleData)
def runNearestNeighborsKernelSmoothing(inSampleData, outOfSampleData, alterOutOfSampleData=False):
    """Stub: kernel-smoothed nearest neighbors is not implemented yet.

    Always returns 1 (i.e. a 100% error rate) so callers that treat the
    return value as an error rate still get a number.
    """
    # TO DO - still need to write this function
    return 1
def runLogisticRegression(inSampleData, outOfSampleData, alterOutOfSampleData=False):
    """Fit a logistic regression in sample and return the out-of-sample error rate.

    NOTE(review): `sm` (presumably statsmodels.api) must be imported elsewhere in
    this file; the import is not visible in this chunk. alterOutOfSampleData is
    accepted for signature consistency but unused here.
    """
    features = inSampleData.columns.drop('Survived')
    logit = sm.Logit(inSampleData['Survived'], inSampleData[features])
    result = logit.fit()
    outOfSamplePredictions = result.predict(outOfSampleData[features])
    # threshold predicted probabilities at 0.5 to get 0/1 class labels
    transformedOutOfSamplePredictions = [ 1 if prediction > 0.5 else 0 for prediction in outOfSamplePredictions]
    # reset the index so positional access below lines up with the prediction list
    outOfSampleData = outOfSampleData.reset_index()
    incorrectPredictionTracker = [ 1 if transformedOutOfSamplePredictions[i] != outOfSampleData['Survived'][i] else 0 for i in range(len(transformedOutOfSamplePredictions))]
    return sum(incorrectPredictionTracker)/float(len(incorrectPredictionTracker))
# Gini impurity of a node's data: 1 - p_alive^2 - p_dead^2.
def getNodeGini(decisionTree):
    """Return the gini impurity of the 'Survived' labels held at this tree node."""
    survived = decisionTree.data['Survived']
    p_alive = sum(survived) / float(len(survived))
    p_dead = 1.0 - p_alive
    return 1.0 - p_alive * p_alive - p_dead * p_dead
# Evaluate one candidate split (feature, contraint) and return the best split
# seen so far. The incumbent `lowestGini` is replaced only when the weighted
# child gini both beats it and improves on the node's own gini by more than
# giniImprovementRequirement.
def getNextStepGini(lowestGini, decisionTree, feature, giniImprovementRequirement, contraint):
    """Return the (possibly updated) best-split record after trying this split."""
    data = decisionTree.data
    leftData = data[data[feature] < contraint]
    rightData = data[data[feature] > contraint]
    # a split that empties either branch is unusable — keep the incumbent
    if len(leftData) == 0 or len(rightData) == 0:
        return lowestGini
    leftAlive = sum(leftData['Survived']) / float(len(leftData['Survived']))
    rightAlive = sum(rightData['Survived']) / float(len(rightData['Survived']))
    leftWeight = len(leftData) / float(len(data))
    rightWeight = 1 - leftWeight
    leftGini = 1 - leftAlive ** 2 - (1 - leftAlive) ** 2
    rightGini = 1 - rightAlive ** 2 - (1 - rightAlive) ** 2
    # sample-weighted gini of the hypothetical next level
    averageGini = leftGini * leftWeight + rightGini * rightWeight
    improvedEnough = decisionTree.gini - averageGini > giniImprovementRequirement
    if improvedEnough and averageGini < lowestGini['Gini']:
        lowestGini = {
            'Gini': averageGini,
            'LeftGini': leftGini,
            'RightGini': rightGini,
            'Feature': feature,
            'Constraint': contraint,
        }
    return lowestGini
# Scan a handful of evenly spaced candidate constraints for `feature`,
# keeping whichever split yields the lowest acceptable gini impurity.
def tryNewFeature(lowestGini, decisionTree, feature, giniImprovementRequirement):
    """Return the best-split record after trying numSteps constraints for this feature."""
    numSteps = 10
    minValue = decisionTree.data[feature].min()
    maxValue = decisionTree.data[feature].max()
    # candidates march from the feature's min toward (but not including) its max
    for step in range(numSteps):
        candidate = minValue + (1.0 / numSteps) * step * (maxValue - minValue)
        lowestGini = getNextStepGini(lowestGini, decisionTree, feature, giniImprovementRequirement, candidate)
    return lowestGini
# This recursive function builds out our decision tree from the in sample data implanted in a tree root. This function just alters the variable decisionTree, it does not
# return anything.
def buildOutDecisionTree(decisionTree, giniImprovementRequirement):
    """Recursively grow `decisionTree` in place.

    A node is split only if some (feature, constraint) pair lowers the gini
    impurity by more than giniImprovementRequirement; otherwise it stays a leaf.
    """
    thisNodesGiniImpurity = getNodeGini(decisionTree) # Here we calculate the gini impurity of the current node
    decisionTree.gini = thisNodesGiniImpurity
    lowestGini = { 'Gini' : float('inf'), 'LeftGini' : None, 'RightGini' : None, 'Feature' : None, 'Constraint' : None} # This dictionary tracks the best gini impurity we have found so far
    for feature in decisionTree.featuresLeftToTry: # filter through all of the features we have not yet used and search each one for a way to beat the current lowest gini impurity
        lowestGini = tryNewFeature(lowestGini, decisionTree, feature, giniImprovementRequirement)
    if lowestGini['Gini'] < float('inf'): # if we found at least one gini impurity smaller enough than the gini impurity of the current node, then create left and right branches
        decisionTree.feature = lowestGini['Feature']
        decisionTree.constraint = lowestGini['Constraint']
        decisionTree.left = Tree()
        decisionTree.right = Tree()
        # each feature is used at most once along any root-to-leaf path
        decisionTree.left.featuresLeftToTry = decisionTree.right.featuresLeftToTry = decisionTree.featuresLeftToTry.drop(lowestGini['Feature'])
        decisionTree.left.data = decisionTree.data[decisionTree.data[lowestGini['Feature']] < lowestGini['Constraint']]
        # NOTE(review): rows exactly equal to the constraint land in neither branch (both comparisons are strict)
        decisionTree.right.data = decisionTree.data[decisionTree.data[lowestGini['Feature']] > lowestGini['Constraint']]
        decisionTree.left.gini = lowestGini['LeftGini']
        decisionTree.right.gini = lowestGini['RightGini']
        buildOutDecisionTree(decisionTree.left, giniImprovementRequirement) # recursively build out the left and right branches
        buildOutDecisionTree(decisionTree.right, giniImprovementRequirement)
# This functions predicts the target variable for a single observation and decision tree and returns if our prediction was correct or not
def predictTargetForObservation(decisionTree, outOfSampleObservation, alterOutOfSampleData=False):
    """Walk the tree for one observation.

    Returns the raw prediction when alterOutOfSampleData is True, otherwise a
    0/1 flag that is 1 when the prediction was incorrect.
    """
    if decisionTree.left == None or decisionTree.right == None: # then we are at a leaf and need to check our prediction now
        # leaf prediction: majority vote of the training rows at this leaf
        predictedTarget = round(numpy.mean(decisionTree.data['Survived']))
    elif outOfSampleObservation[decisionTree.feature] > decisionTree.constraint: # in this case we need to recurse through the right branch of the current node
        return predictTargetForObservation(decisionTree.right, outOfSampleObservation, alterOutOfSampleData)
    elif outOfSampleObservation[decisionTree.feature] <= decisionTree.constraint: # in this case we need to recurse through the left branch of the current node
        return predictTargetForObservation(decisionTree.left, outOfSampleObservation, alterOutOfSampleData)
    else: # We should never get here so I wanted to print an error in case we do
        # NOTE(review): reachable only if the feature comparison is neither > nor <=
        # (e.g. a NaN value); the 1/0 below then crashes deliberately.
        print 'We ran into an unexpected edge case!'
        return 1/0
    if alterOutOfSampleData:
        return predictedTarget
    else:
        observationIncorrectlyPredicted = int(predictedTarget != outOfSampleObservation['Survived'])
        return observationIncorrectlyPredicted
# This function runs the out of sample data through the decision tree and returns our error rate for predicting the target variable
def evaluateOutOfSampleDataThroughDecisionTree(decisionTree, outOfSampleData, alterOutOfSampleData=False):
    """Predict every out-of-sample row through the tree.

    Returns the frame with 'Survived' overwritten by the per-row result when
    alterOutOfSampleData is True, otherwise the fraction of incorrect predictions.
    """
    numIncorrectPredictions = 0
    outOfSampleData = outOfSampleData.reset_index()
    for rowIndex in range(len(outOfSampleData)): # here we loop through all observations in the out of sample data
        observationIncorrectlyPredictedOrTarget = predictTargetForObservation(decisionTree, outOfSampleData.ix[rowIndex], alterOutOfSampleData)
        # NOTE(review): chained indexing (column then .ix) may not write back on newer
        # pandas, and .ix itself was removed in pandas 1.0 — verify against the pandas version in use.
        outOfSampleData['Survived'].ix[rowIndex] = observationIncorrectlyPredictedOrTarget
        numIncorrectPredictions += observationIncorrectlyPredictedOrTarget # increment numIncorrectPredictions everytime we make another incorrect prediction
    if alterOutOfSampleData:
        return outOfSampleData
    else:
        return float(numIncorrectPredictions) / len(outOfSampleData)
# Entry point for the gini-impurity decision-tree model: train on the
# in-sample data, then score (or annotate) the out-of-sample data.
def runDecisionTreeGiniImpurity(inSampleData, outOfSampleData, giniImprovementRequirement, alterOutOfSampleData=False):
    """Build a decision tree from inSampleData and return the out-of-sample
    error rate (or, when alterOutOfSampleData is set, the annotated frame).

    giniImprovementRequirement is the minimum gini gain needed to keep
    splitting (the stopping criterion).
    """
    # seed the root of the tree with the training data and the usable features
    root = Tree()
    root.data = inSampleData
    root.featuresLeftToTry = inSampleData.columns.drop('Survived')
    # grow the tree recursively from the root
    buildOutDecisionTree(root, giniImprovementRequirement)
    # run the held-out data through the finished tree
    return evaluateOutOfSampleDataThroughDecisionTree(root, outOfSampleData, alterOutOfSampleData)
# This function returns the error rate for a single cross validation run. This function
# feeds into other functions nearly immediately depending on which model is specified by the user.
def getErrorRate(modelType, inSampleData, outOfSampleData, alterOutOfSampleData=False):
    """Dispatch to the model runner named by modelType and return its result."""
    if modelType == 'NearestNeighbors':
        k = 3
        return runNearestNeighbors(inSampleData, outOfSampleData, k, alterOutOfSampleData)
    elif modelType == 'NearestNeighborsKernelSmoothing':
        return runNearestNeighborsKernelSmoothing(inSampleData, outOfSampleData, alterOutOfSampleData) # TO DO - still need to write this function
    elif modelType == 'LogisticRegression':
        return runLogisticRegression(inSampleData, outOfSampleData, alterOutOfSampleData)
    elif modelType == 'DecisionTreeGiniImpurity':
        giniImprovementRequirement = .1
        return runDecisionTreeGiniImpurity(inSampleData, outOfSampleData, giniImprovementRequirement, alterOutOfSampleData)
    else:
        # deliberate crash (ZeroDivisionError) on an unknown model type
        print 'Did not input a valid model type! \n'
        return 1/0
def retrieveAndCleanData(fileName):
    """Load the Titanic CSV and return a numeric, NaN-free feature frame.

    - 'Sex' is encoded into an int 'Gender' column (female=0, male=1) and dropped.
    - 'Embarked' is dropped.
    - Missing ages are replaced by the median of the observed ages, stored in
      'AgeNoNan'; the original 'Age' column is dropped.
    """
    titanicData = pandas.read_csv(fileName)
    titanicData['Gender'] = titanicData['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
    titanicData = titanicData.drop(columns='Sex')
    titanicData = titanicData.drop(columns='Embarked')
    # BUGFIX: numpy.median returns NaN when the input contains NaNs, so the old
    # code imputed NaN back into every missing age; nanmedian ignores the NaNs.
    medianAge = numpy.nanmedian(titanicData['Age'])
    # fillna replaces only the missing entries, matching the old per-row loop
    titanicData['AgeNoNan'] = titanicData['Age'].fillna(medianAge)
    titanicData = titanicData.drop(columns='Age')
    return titanicData
def main():
    # This is our main function
    """Run either a Kaggle-output pass or a cross-validation pass for the chosen model."""
    percentInSample = 0.75 # The percent of data used as in sample data for each run
    numberOfCrossValidations = 10 # The number of randomized cross validations that we will run
    modelType = 'DecisionTreeGiniImpurity' # Choices of model type are DecisionTreeGiniImpurity, NearestNeighborsKernelSmoothing, NearestNeighbors, and LogisticRegression
    errorRates = []
    runType = 'KaggleOutput' # choices are CrossValidation, and KaggleOutput
    dataType = 'Actual' # Choices are Actual and Random
    if runType == 'KaggleOutput' and modelType == 'DecisionTreeGiniImpurity':
        ## TO DO - make KaggleOutput runType also work for modelTypes of nearest neighbors and logistic regression
        inSampleData = retrieveAndCleanData('InSampleData.csv')
        outOfSampleData = retrieveAndCleanData('OutOfSampleData.csv')
        # blank out the target so the model's predictions fill it in
        outOfSampleData['Survived'] = ''
        outOfSampleData = getErrorRate(modelType, inSampleData, outOfSampleData, alterOutOfSampleData=True)
        # keep only the prediction column(s) for the Kaggle submission file
        for feature in ['index','Pclass','SibSp','Parch','Fare','Gender','AgeNoNan']:
            outOfSampleData=outOfSampleData.drop(feature,1)
        outOfSampleData.to_csv('KaggleResult.csv')
        return outOfSampleData
    elif runType == 'CrossValidation':
        myData = retrieveAndCleanData('InSampleData.csv') if dataType == 'Actual' else createData() # createData Creates fake data to use before we get real data
        for crossValidation in xrange(numberOfCrossValidations):
            inSampleData, outOfSampleData = getInAndOutSample(myData, percentInSample) # Pick our in and out of sample data sets from the overall data set
            errorRate = getErrorRate(modelType, inSampleData, outOfSampleData)
            errorRates.append(errorRate)
        averageErrorRate = numpy.mean(errorRates)
        print 'The average error rate is: ', averageErrorRate
### NOTES: Error rate is 0.55 for LogisticRegression, 0.57 for NearestNeighbors, and 0.47 for DecisionTreeGiniImpurity for main static test case
|
from dataclasses import dataclass
from enum import Enum
from typing import Callable, Type, Union
from bot.bot import Bot, Event
from mypy_extensions import KwArg, VarArg
from typing_extensions import Protocol, runtime_checkable
@dataclass
class MessageEnv:
    """Per-message context handed to command handlers."""
    bot: Bot
    event: Event
    user_id: str
class BadArg(Exception):
    """Raised when an argument fails validation (see CustomParam.from_arg)."""
    pass
class ImproperlyConfigured(Exception):
    """Configuration error for bot handlers (raised elsewhere; usage not visible in this chunk)."""
    pass
@runtime_checkable
class CustomParam(Protocol):
    '''
    Any custom param type of the bot must implement this protocol.

    The from_arg and to_arg methods must be mutually inverse
    transformations. verbose_classname may be cls.__name__ or any other
    verbose classname for the help method.

    That is, they must satisfy the following rules:
    ```python
    arg: str
    CustomParam.from_arg(arg).to_arg() == arg

    param: CustomParam
    CustomParam.from_arg(param.to_arg()) == param
    ```

    Also from_arg must raise `BadArg` exception on validation error.
    '''
    @classmethod
    def verbose_classname(cls) -> str: ...

    @classmethod
    def from_arg(cls, arg: str) -> 'CustomParam': ...

    def to_arg(self) -> str: ...
# Any value a parsed handler argument may take.
ArgType = Union[str, int, float, bool, Enum, CustomParam]
# The corresponding *type objects*, used when declaring handler signatures.
ArgSigType = Union[
    Type[str], Type[int], Type[float], Type[bool],
    Type[Enum], Type[CustomParam],
]
# A command handler receives the message environment plus parsed positional
# and keyword args, and returns the reply text.
CommandHandler = Callable[[MessageEnv, VarArg(ArgType), KwArg(ArgType)], str]
# Low-level bot event handler.
Handler = Callable[[Bot, Event], None]
# A decorator over low-level handlers.
Decorator = Callable[[Handler], Handler]
|
import os
# ----------------------------------------------------------
def ReadDarksusy(fn):
    """Parse the first line of a DarkSusy result file into a dict.

    Returned keys: 'tests' (9-character per-test exclusion string), 'ds_oktot'
    (1 when the combined test field is 0, i.e. all tests pass), 'ds_unphys',
    'ds_accel', 'ds_cdm', 'ds_cdm2', 'ds_gminus2', and one 'ds_ok<name>' flag
    per individual test (1 = not excluded).
    """
    with open(fn) as f:
        lines = f.readlines()
    w = lines[0].split()
    par = {}
    w.pop(0); w.pop(0); w.pop(0)
    par['tests'] = w.pop(0)
    w.pop(0); w.pop(0)
    # BUGFIX: tokens are strings, so the original `w.pop(0) == 0` was always
    # False (str never equals int); convert before comparing.
    par['ds_oktot'] = int(int(w.pop(0)) == 0)
    w.pop(0); w.pop(0)
    par['ds_unphys'] = int(w.pop(0)) # always ok, i.e. 0 ?
    w.pop(0); w.pop(0)
    par['ds_accel'] = int(w.pop(0)) # just contains tests in binary form (9 bits), so not so useful
    w.pop(0); w.pop(0)
    # relic density fields; -2 marks an unparseable/missing value
    try:
        par['ds_cdm'] = float(w.pop(0))
    except (ValueError, IndexError):
        par['ds_cdm'] = -2.
    try:
        par['ds_cdm2'] = float(w.pop(0))
    except (ValueError, IndexError):
        par['ds_cdm2'] = -2
    w.pop(0); w.pop(0)
    par['ds_gminus2'] = float(w.pop(0))
    w.pop(0)
    testsH = ['C','G','Q','L','Z','h','N','bsgam','rho']
    for i in range(9):
        par['ds_ok'+testsH[i]] = int(par['tests'][i] == '0') # 0 means test is ok (not excluded)
    return par
# ----------------------------------------------------------
def ReadDarksusy2(fn):
    """Parse the first line of a DarkSusy result file into a dict; {} if `fn` is missing.

    Returned keys: 'tests' (9-character per-test string), 'ds_excl_combined'
    (0: all tests ok, 1: at least one fails), 'ds_unphys', 'ds_accel',
    'ds_cdm', 'ds_cdm2', 'ds_gminus2', per-test 'ds_excl_<name>' flags
    (1 = excluded), the same flags under the short name '<name>', and the
    combined flags 'ds_excl_combined_except_h' / 'ds_excl_combined_except_Ch'.
    """
    if not os.path.exists(fn):
        return {}
    with open(fn) as f:
        lines = f.readlines()
    w = lines[0].split()
    par = {}
    w.pop(0); w.pop(0); w.pop(0)
    par['tests'] = w.pop(0) # this is the entire 9-character test string (is used towards the bottom)
    w.pop(0); w.pop(0)
    par['ds_excl_combined'] = abs(int(w.pop(0))) # this trick ensures 0:all tests ok; 1:at least one test fails
    w.pop(0); w.pop(0)
    par['ds_unphys'] = int(w.pop(0)) # is (nearly?) always ok, i.e. 0
    w.pop(0); w.pop(0)
    par['ds_accel'] = int(w.pop(0)) # just contains tests in binary form (9 bits), so not so useful
    w.pop(0); w.pop(0)
    # relic density fields; -2 marks an unparseable/missing value
    try:
        par['ds_cdm'] = float(w.pop(0))
    except (ValueError, IndexError):
        par['ds_cdm'] = -2.
    try:
        par['ds_cdm2'] = float(w.pop(0))
    except (ValueError, IndexError):
        par['ds_cdm2'] = -2
    w.pop(0); w.pop(0)
    par['ds_gminus2'] = float(w.pop(0))
    w.pop(0)
    testsH = ['C','G','Q','L','Z','h','N','bsgam','rho']
    par['ds_excl_combined_except_h'] = 0
    par['ds_excl_combined_except_Ch'] = 0
    for i in range(9):
        flag = par['tests'][i]
        # CLEANUP: the original repeated each check twice (once as != '0',
        # once as == '1'); the != '0' form subsumes the other since the
        # combined flags are only ever raised, never cleared.
        if testsH[i] != 'h' and flag != '0':
            par['ds_excl_combined_except_h'] = 1
        if testsH[i] not in ('C', 'h') and flag != '0':
            par['ds_excl_combined_except_Ch'] = 1
        par['ds_excl_'+testsH[i]] = int(flag) # '1' means the test gives exclusion # 2012-06-11: swapped 0 and 1 for single-exclusions
        par[testsH[i]] = int(flag) # same as above, just shorter
    return par
# ----------------------------------------------------------
def Darksusy_these_bits_notexcluded(par, bits):
    '''
    Test the given exclusion bits (e.g. bsgam, N, h) in the darksusy output.

    par  : dict of per-test flags (0 = not excluded), e.g. from ReadDarksusy2
    bits : list of bit names, e.g. ['N','C','h']

    Returns 1 when every listed bit is 0 (not excluded), else 0.
    Unknown bit names are warned about and skipped.
    '''
    isok = 0  # NOTE(review): unused variable
    for bit in bits:
        if bit not in par.keys():
            # unknown bits are skipped, not fatal (Python 2 print statement)
            print "Warning::Darksusy_testbits bit '%s' not keys: %s" %(bit, str(par.keys()))
            continue
        if par[bit] != 0: return 0
    # have arrived here, all bits were ok, i.e. ==0, i.e. not excluded
    return 1
# ----------------------------------------------------------
|
# Copyright Hal Emmerich <SolidHal> 2020

### tap functions
# Tap pattern name that toggles tap-lock (mapped in `doublelayer` below).
taplock_key = "taplock"

# All internal layers should be integers
#layers on left hand
LEFT_PREFIX = 0 #left_prefix_layer
LEFT_CMD = 1 #"left_blank_cmd_layer"
SYMS = 2 #"symbols_layer"
#layers accessed by left hand prefix
RIGHT_PREFIX = 0 #right_prefix_layer
RIGHT_CMD = 1 #"right_blank_cmd_layer"
NUMS = 2 #"number_layer" # also includes the arrow keys
FN = 3 #"function_layer"
#layers on right hand
# NOTE(review): the assignments below repeat identical values from above; they
# appear to serve as in-place documentation of which layers each hand uses.
RIGHT_CMD = 1 #"right_blank_cmd_layer"
NUMS = 2 #"number_layer" # also includes the arrow keys
FN = 3 #"function_layer"
#layers accessed by right hand prefix
LEFT_CMD = 1 #"left_blank_cmd_layer"
SYMS = 2 #"symbols_layer"

#TODO
TFUNS = "tap_function_layer"
# All keys should be strings
# Keys are 5-bit finger patterns (one bit per finger); values are either key
# name strings or integer layer ids defined above.
left_prefix_layer = {
    #blank_tap
    0b00000 : [RIGHT_CMD],
    # modifiers/layers
    0b00001 : ['shift', 'win'],
    0b00010 : ['win'],
    0b00100 : ['shift'],
    0b01000 : [NUMS],
    0b10000 : ['ctrl'],
    0b10011 : [FN],
    # modifier combos
    0b01100 : ['shift', NUMS],
    0b01001 : ['shift', NUMS, 'win'],
    0b11000 : ['ctrl', NUMS], # since arrows are on the NUMS layer, we get ctrl + arrows here
    0b01010 : ['win', NUMS],
}
left_cmd_layer = {
    #blank_tap
    0b00000 : [],
    # specials
    0b11111 : ['space'],
    # letters
    0b00011 : ['a'],
    0b00110 : ['t'],
    0b00101 : ['e'],
    0b10001 : ['f'],
    0b10010 : ['x'], # mapped easily for cut
    0b00111 : ['d'],
    0b01110 : ['s'],
    0b11100 : ['z'],
    0b11001 : ['r'],
    0b01111 : ['w'],
    0b11110 : ['c'],
    0b11101 : ['g'],
    0b11011 : ['v'],
    0b01011 : ['b'],
    0b10100 : ['q'],
}
#order matters here, the blank_tap from cmd_layer wipes out the blank_tap from prefix_layer to avoid recursive circles
#we merge the base prefix layer and base cmd layer for each hand to form the default layer
left_blank_cmd_layer = {**left_prefix_layer, **left_cmd_layer}
# Right-hand counterparts of the left-hand layers above; same key encoding.
right_prefix_layer = {
    #blank_tap
    0b00000 : [LEFT_CMD],
    # modifiers/layers
    0b10000 : ['shift', 'win'],
    0b01000 : ['win'],
    0b00100 : ['shift'],
    0b00010 : [SYMS],
    0b00001 : ['ctrl'],
    # modifier combos
    0b00110 : ['shift', SYMS],
    0b00101 : ['ctrl', 'shift'], # for c, v copy/paste in terminals
}
right_cmd_layer = {
    #blank_tap
    0b00000 : [],
    # specials
    0b11111 : ['backspace'],
    0b00011 : ['tab'],
    0b10011 : ['esc'],
    # punctuation
    0b10010 : ['?'],
    0b10001 : [';'],
    0b01001 : ['.'],
    0b01110 : [','],
    # letters
    0b11000 : ['o'],
    0b01100 : ['i'],
    0b10100 : ['u'],
    0b11100 : ['n'],
    0b00111 : ['h'],
    0b11001 : ['p'],
    0b11110 : ['m'],
    0b01111 : ['y'],
    0b10111 : ['l'],
    0b11011 : ['j'],
    0b11010 : ['k'],
}
#order matters here, the blank_tap from cmd_layer wipes out the blank_tap from prefix_layer to avoid recursive circles
right_blank_cmd_layer = {**right_prefix_layer, **right_cmd_layer}
right_empty_map = {
    # empty - not exhaustive, but limited to "easier" taps
}
# Digits plus arrow keys (reached via the NUMS prefix).
number_layer = {
    0b10000 : ["1"],
    0b01000 : ["2"],
    0b11000 : ["3"],
    0b00100 : ["4"],
    0b10100 : ["5"],
    0b01100 : ["6"],
    0b11100 : ["7"],
    0b00010 : ["8"],
    0b10010 : ["9"],
    0b00111 : ["0"],
    0b11111 : ["up"],
    0b01111 : ["down"],
    0b10111 : ["left"],
    0b10011 : ["right"],
}
# Symbol keys (reached via the SYMS prefix).
symbols_layer = {
    0b00001 : ["`"],
    0b00010 : ["-"],
    0b00011 : ["="],
    0b00100 : ["["],
    0b00101 : ["]"],
    0b00110 : ["\\"],
    0b00111 : ["'"],
}
# Function keys (reached via the FN prefix).
function_layer = {
    0b10000 : ["f1"],
    0b01000 : ["f2"],
    0b11000 : ["f3"],
    0b00100 : ["f4"],
    0b10100 : ["f5"],
    0b01100 : ["f6"],
    0b11100 : ["f7"],
    0b00010 : ["f8"],
    0b10010 : ["f9"],
    0b00111 : ["f10"],
}
# Placeholder for tap-triggered functions (see TFUNS above); not populated yet.
tap_function_layer = {
}
# Layer-id -> layer-dict registries for each hand's prefix taps.
left_prefix_layers = {
    RIGHT_CMD : right_blank_cmd_layer,
    NUMS : number_layer,
    FN : function_layer,
}
right_prefix_layers = {
    LEFT_CMD : left_blank_cmd_layer,
    SYMS : symbols_layer,
}
# special dual tap macros
# Keys are 10-bit patterns: both hands' 5-bit patterns concatenated.
doublelayer = {
    0b0000000000 : "Zeros-- an impossible tap",
    0b1000000001 : "Pinkeys",
    0b0100000010 : "Rings",
    0b0010000100 : "Middles",
    0b0001001000 : "Pointers",
    0b0000110000 : "Thumbs",
    0b1111111111 : taplock_key
}
# hand = [ prefix_layer, cmd_layer, [layer dicts], prefix_layers ]
# NOTE(review): despite the original two-element description, each hand is a
# four-element list; confirm the exact meaning of the third element against
# the consumer of `left`/`right`.
left = [
    left_prefix_layer,
    left_cmd_layer,
    [left_blank_cmd_layer, symbols_layer],
    left_prefix_layers
]
right = [
    right_prefix_layer,
    right_cmd_layer,
    [right_blank_cmd_layer, number_layer, function_layer],
    right_prefix_layers
]
### reference
# Exhaustive, all-empty 5-bit tap maps kept as copy/paste templates (see
# generate_hand_map below); the right-hand map lists codes in bit-reversed
# order relative to the left.
full_left_hand_map = {
    0b00000 : [],
    0b00001 : [],
    0b00010 : [],
    0b00011 : [],
    0b00100 : [],
    0b00101 : [],
    0b00110 : [],
    0b00111 : [],
    0b01000 : [],
    0b01001 : [],
    0b01010 : [],
    0b01011 : [],
    0b01100 : [],
    0b01101 : [],
    0b01110 : [],
    0b01111 : [],
    0b10000 : [],
    0b10001 : [],
    0b10010 : [],
    0b10011 : [],
    0b10100 : [],
    0b10101 : [],
    0b10110 : [],
    0b10111 : [],
    0b11000 : [],
    0b11001 : [],
    0b11010 : [],
    0b11011 : [],
    0b11100 : [],
    0b11101 : [],
    0b11110 : [],
    0b11111 : [],
}
full_right_hand_map = {
    0b00000 : [],
    0b10000 : [],
    0b01000 : [],
    0b11000 : [],
    0b00100 : [],
    0b10100 : [],
    0b01100 : [],
    0b11100 : [],
    0b00010 : [],
    0b10010 : [],
    0b01010 : [],
    0b11010 : [],
    0b00110 : [],
    0b10110 : [],
    0b01110 : [],
    0b11110 : [],
    0b00001 : [],
    0b10001 : [],
    0b01001 : [],
    0b11001 : [],
    0b00101 : [],
    0b10101 : [],
    0b01101 : [],
    0b11101 : [],
    0b00011 : [],
    0b10011 : [],
    0b01011 : [],
    0b11011 : [],
    0b00111 : [],
    0b10111 : [],
    0b01111 : [],
    0b11111 : [],
}
def _reverseBits(code):
return int('{:05b}'.format(code)[::-1], 2)
def generate_hand_map():
    """Print skeleton left/right hand maps (all taps empty) for manual editing."""
    print("left_hand_map")
    for code in range(32):
        print(" 0b{:05b} : [],".format(code))
    print("right_hand_map")
    for code in range(32):
        # right-hand codes are the bit-mirrored left-hand codes
        print(" 0b{:05b} : [],".format(_reverseBits(code)))
|
from bs4 import BeautifulSoup
from file_handler.file_handler import FileHandler
class XMLHandler(FileHandler):
    """FileHandler subclass that parses its file contents with BeautifulSoup."""
    def preview(self):
        """Pretty-print the whole parsed document (Python 2 print statement)."""
        print self.get_xml().prettify()
    def get_xml(self):
        """Return the file contents (self.get(), from FileHandler) parsed with html.parser."""
        return BeautifulSoup(self.get(), 'html.parser')
    def extract(self, element):
        """Print `name: string` for every child of each `element` tag, one blank line per tag."""
        for row in self.get_xml().find_all(str(element)):
            for child in row.children:
                print '{0}: {1}'.format(child.name, child.string)
            print ''
if __name__ == '__main__':
    # Demo run against a local dataset; the path is machine-specific.
    xml = XMLHandler('/home/alissa/PycharmProjects/xml_handler/xml-files/nyc-social-media.xml')
    xml.extract('row')
|
class Stack(object):
    """A minimal LIFO stack backed by a Python list. | (C) @ Bofin Babu"""

    def __init__(self):
        # the top of the stack is the end of the list
        self.elements = []

    def push(self, item):
        """Place item on top of the stack."""
        self.elements.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.elements.pop()

    def is_empty(self):
        """Return True when the stack holds no items."""
        return len(self.elements) == 0

    def size(self):
        """Return the number of items currently on the stack."""
        return len(self.elements)
|
#! /usr/bin/env python
import rospy
import time
import actionlib
from my_turtlebot_actions.msg import record_odomFeedback, record_odomResult, record_odomAction
from std_srvs.srv import Empty
from odom_sub import getOdom
from geometry_msgs.msg import Twist
class Odom_server(object):
    """Action server that records odometry samples while the robot runs a maze."""
    # create messages that are used to publish feedback/result
    _feedback = record_odomFeedback()
    _result = record_odomResult()
    def __init__(self):
        # creates the action server
        self._as = actionlib.SimpleActionServer("/rec_odom_as", record_odomAction, self.goal_callback, False)
        self._as.start()
        self.rate = rospy.Rate(10)
        # tic/tok bracket the wall-clock duration of the current goal
        self.tic = time.time()
        self.tok = time.time()
    def goal_callback(self, goal):
        # Called when the action server receives a goal: sample odometry from the
        # module-level OdomClass (created in __main__) roughly every 0.1 s for up
        # to 42 s, stopping early once the robot's y position drops below -8.5
        # (i.e. it has exited the maze), then report the collected array.
        # helper variables
        r = rospy.Rate(1)  # NOTE(review): unused
        self.tic = time.time()
        self.tok = time.time()
        while self.tok - self.tic < 42:
            self.tok = time.time()
            #print(self.tok - self.tic)
            #print(OdomClass.data)
            self._result.result_odom_array.append(OdomClass.data)
            if OdomClass.data.pose.pose.position.y<-8.5:
                rospy.loginfo("Congradulation! exit maze!")
                break
            time.sleep(0.1)
        time.sleep(1)
        print('Mission time : %f' % (self.tok - self.tic))
        self._as.set_succeeded(self._result)
if __name__ == '__main__':
    rospy.init_node('record_odom_action_server_node')
    # OdomClass is intentionally module-level: Odom_server.goal_callback reads it.
    OdomClass = getOdom()
    Odom_server()
    rospy.spin()
|
from pathlib import Path
from setuptools import setup
# Pull __version__ out of wemail.py by scanning the text, avoiding an import
# (which could pull in runtime dependencies at build time).
wemail = Path(__file__).parent / "wemail.py"
with wemail.open("r") as f:
    for line in f:
        if line.startswith("__version__"):
            __version__ = line.partition("=")[-1].strip().strip('"').strip("'")
            break
# Long description shown on PyPI: README followed by the changelog.
changelog = (Path(__file__).parent / "CHANGELOG.txt").read_text()
readme = (Path(__file__).parent / "README.md").read_text()
long_desc = readme + "\n\n---\n\n" + changelog
extras_require = ["mistletoe"]
tests_require = ["pytest", "pytest-cov", "aiosmtpd"]
setup(
    name="wemail",
    version=__version__,
    author="Wayne Werner",
    author_email="wayne@waynewerner.com",
    url="https://github.com/waynew/wemail",
    py_modules=["wemail"],
    entry_points="""
    [console_scripts]
    wemail-old=wemail:do_it
    wemail=wemail:do_it_now
    """,
    long_description=long_desc,
    long_description_content_type="text/markdown",
    tests_require=tests_require,
    extras_require={
        "test": tests_require + extras_require,
        "build": ["wheel"],
        "html": extras_require,
    },
)
|
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from tsfresh.utilities.dataframe_functions import (
get_range_values_per_column,
impute_dataframe_range,
)
class PerColumnImputer(BaseEstimator, TransformerMixin):
    """
    Sklearn-compatible estimator that imputes DataFrames column by column,
    replacing every ``NaN`` and ``inf`` with an average/extreme value taken
    from the same column.  It is essentially a wrapper around
    :func:`~tsfresh.utilities.dataframe_functions.impute`.

    Replacement rules for each occurring ``inf`` or ``NaN``:

    * ``-inf`` -> column ``min``
    * ``+inf`` -> column ``max``
    * ``NaN``  -> column ``median``

    As with most sklearn estimators this is a two-step procedure: ``.fit``
    records the per-column min, max and median; ``.transform`` replaces the
    occurrences of ``NaN``/``inf`` using those recorded values.
    """

    def __init__(
        self,
        col_to_NINF_repl_preset=None,
        col_to_PINF_repl_preset=None,
        col_to_NAN_repl_preset=None,
    ):
        """
        Create a new PerColumnImputer, optionally with preset dictionaries
        that override the values computed by :meth:`fit`.

        :param col_to_NINF_repl_preset: column name -> ``-inf`` replacement value
        :type col_to_NINF_repl_preset: dict
        :param col_to_PINF_repl_preset: column name -> ``+inf`` replacement value
        :type col_to_PINF_repl_preset: dict
        :param col_to_NAN_repl_preset: column name -> ``NaN`` replacement value
        :type col_to_NAN_repl_preset: dict
        """
        # Fitted replacement dictionaries; remain None until fit() has run.
        self._col_to_NINF_repl = None
        self._col_to_PINF_repl = None
        self._col_to_NAN_repl = None
        self.col_to_NINF_repl_preset = col_to_NINF_repl_preset
        self.col_to_PINF_repl_preset = col_to_PINF_repl_preset
        self.col_to_NAN_repl_preset = col_to_NAN_repl_preset

    @staticmethod
    def _with_preset(X, computed, preset, preset_name):
        """Overlay *preset* onto the fitted *computed* dict after validating
        that every preset key is an actual column of *X*."""
        if preset is not None:
            if not set(X.columns) >= set(preset.keys()):
                raise ValueError(
                    "Preset dictionary '%s' contain more keys "
                    "than the column names in X" % preset_name
                )
            computed.update(preset)
        return computed

    def fit(self, X, y=None):
        """
        Record per-column min, max and median (see
        :func:`~tsfresh.utilities.dataframe_functions.get_range_values_per_column`),
        applying any preset dictionaries given at construction time on top.

        :param X: DataFrame to calculate min, max and median values on
        :type X: pandas.DataFrame
        :param y: Unneeded.
        :type y: Any
        :return: the fitted estimator
        :rtype: PerColumnImputer
        """
        if not isinstance(X, pd.DataFrame):
            X = pd.DataFrame(X)

        col_to_max, col_to_min, col_to_median = get_range_values_per_column(X)

        self._col_to_NINF_repl = self._with_preset(
            X, col_to_min, self.col_to_NINF_repl_preset, "col_to_NINF_repl_preset"
        )
        self._col_to_PINF_repl = self._with_preset(
            X, col_to_max, self.col_to_PINF_repl_preset, "col_to_PINF_repl_preset"
        )
        self._col_to_NAN_repl = self._with_preset(
            X, col_to_median, self.col_to_NAN_repl_preset, "col_to_NAN_repl_preset"
        )
        return self

    def transform(self, X):
        """
        Column-wise replace all ``NaN``, ``-inf`` and ``+inf`` in *X* with
        the values recorded by :meth:`fit`.

        :param X: DataFrame to impute
        :type X: pandas.DataFrame
        :return: imputed DataFrame
        :rtype: pandas.DataFrame
        :raise NotFittedError: if the transformer has not been fitted yet.
        """
        if not isinstance(X, pd.DataFrame):
            X = pd.DataFrame(X)

        fitted = (self._col_to_NINF_repl, self._col_to_PINF_repl, self._col_to_NAN_repl)
        if any(repl is None for repl in fitted):
            raise NotFittedError("PerColumnImputer is not fitted")

        return impute_dataframe_range(
            X, self._col_to_PINF_repl, self._col_to_NINF_repl, self._col_to_NAN_repl
        )
|
import datetime
from aiohttp import web
from dataclasses import asdict
from serv.json_util import json_dumps
from .config import db_block, web_routes
# List every student.
@web_routes.get("/api/student/list")
async def get_student_list(request):
    """Return all student rows as a JSON array."""
    with db_block() as db:
        db.execute("""
        SELECT sn AS stu_sn, no AS stu_no, name AS stu_name, gender, enrolled FROM student
        """)
        students = [asdict(row) for row in db]
    return web.Response(text=json_dumps(students), content_type="application/json")
# Fetch a single student's profile.
@web_routes.get(r"/api/student/{stu_sn:\d+}")  # fix: raw string; "\d" is an invalid escape sequence
async def get_student_profile(request):
    """Return the student with the given serial number as JSON, or 404."""
    stu_sn = request.match_info.get("stu_sn")
    with db_block() as db:
        db.execute("""
        SELECT sn AS stu_sn, no AS stu_no, name AS stu_name, gender, enrolled FROM student
        WHERE sn=%(stu_sn)s
        """, dict(stu_sn=stu_sn))
        record = db.fetch_first()
    if record is None:
        return web.HTTPNotFound(text=f"no such student: stu_sn={stu_sn}")
    data = asdict(record)
    return web.Response(text=json_dumps(data), content_type="application/json")
@web_routes.post("/api/student")
async def new_student(request):
    """Create a student from the JSON body; return it including the new sn."""
    student = await request.json()
    # Provide a default enrollment date when the client omits it.
    if not student.get('enrolled'):
        student['enrolled'] = datetime.date(1900, 1, 1)
    with db_block() as db:
        db.execute("""
        INSERT INTO student (no, name, gender, enrolled)
        VALUES(%(stu_no)s, %(stu_name)s, %(gender)s, %(enrolled)s) RETURNING sn;
        """, student)
        record = db.fetch_first()
        student["stu_sn"] = record.sn
    # fix: removed leftover debug print(student)
    return web.Response(text=json_dumps(student), content_type="application/json")
@web_routes.put(r"/api/student/{stu_sn:\d+}")  # fix: raw string; "\d" is an invalid escape sequence
async def update_student(request):
    """Overwrite the student identified by sn with the JSON body."""
    stu_sn = request.match_info.get("stu_sn")
    student = await request.json()
    # Provide a default enrollment date when the client omits it.
    if not student.get('enrolled'):
        student['enrolled'] = datetime.date(1900, 1, 1)
    student["stu_sn"] = stu_sn
    with db_block() as db:
        db.execute("""
        UPDATE student SET
        no=%(stu_no)s, name=%(stu_name)s, gender=%(gender)s, enrolled=%(enrolled)s
        WHERE sn=%(stu_sn)s;
        """, student)
    return web.Response(text=json_dumps(student), content_type="application/json")
@web_routes.delete(r"/api/student/{stu_sn:\d+}")  # fix: raw string; "\d" is an invalid escape sequence
async def delete_student(request):
    """Delete the student identified by sn; respond with an empty body."""
    stu_sn = request.match_info.get("stu_sn")
    with db_block() as db:
        db.execute("""
        DELETE FROM student WHERE sn=%(stu_sn)s;
        """, dict(stu_sn=stu_sn))
    return web.Response(text="", content_type="text/plain")
|
"Run the MEAPSoft Analyzer"
import chop
import subprocess
import glob
import re
# Path to the MEAPsoft jar, relative to the project root.
MEAP_PATH = "lib/MEAPsoft-2.0.beta/bin/MEAPsoft.jar"
def run(cmd):
    """Run *cmd* as a subprocess and block until it exits (status ignored)."""
    subprocess.call(cmd)
def analyze(src):
    """Chop *src*, run the MEAPsoft segmenter and feature extractor on each
    chunk, and return the list of .feat file paths produced."""
    feat_paths = []
    for chunk_path in chop.chopped(src):
        feat_path = chunk_path + ".feat"
        feat_paths.append(feat_path)
        # Onset segmentation.
        run(["java", "-cp", MEAP_PATH,
             "com.meapsoft.Segmenter",
             "-o", feat_path,
             "-d",  # "old-style" onset detector
             "-0",  # start at 0
             chunk_path])
        # Feature extraction for every segment.
        features = ["AvgMelSpec", "SpectralStability", "AvgTonalCentroid",
                    "AvgSpecFlatness", "AvgChroma", "AvgPitch", "AvgMFCC",
                    "RMSAmplitude"]
        feat_cmd = ["java", "-cp", MEAP_PATH, "com.meapsoft.FeatExtractor"]
        for feature in features:
            feat_cmd += ["-f", feature]
        feat_cmd.append(feat_path)
        run(feat_cmd)
    return feat_paths
def _ncols(key):
parentheticals = re.findall(r"\((\d+)\)", key)
if len(parentheticals) == 1:
return int(parentheticals[0])
elif len(parentheticals) > 1:
raise AssertionError("too many parentheticals")
return 1
def _load(meap):
    """Parse a MEAPsoft .feat file into a list of per-segment dicts.

    The first line is a tab-separated header; every remaining line is one
    segment.  Multi-column features (keys like "AvgMelSpec(40)") are kept as
    lists of floats; single-column features are unwrapped to a scalar.  Keys
    starting with '#' stay as raw strings.  Rows missing a single-column
    value are dropped.
    """
    # fix: the original leaked the file handle; use a context manager.
    with open(meap) as fh:
        lines = fh.read().split('\n')
    keys = lines[0].split('\t')
    segs = []
    for l in lines[1:]:
        valid = True
        d = {}
        cols = l.split('\t')
        idx = 0
        for key in keys:
            ncols = _ncols(key)
            key = key.strip()
            d[key] = cols[idx:idx + ncols]
            if not key.startswith('#'):
                d[key] = [float(x) for x in d[key]]
                if ncols == 1:
                    if len(d[key]) > 0:
                        d[key] = d[key][0]
                    else:
                        # Short row: this single-column feature is absent.
                        valid = False
            idx += ncols
        if valid:
            segs.append(d)
    return segs
def _combine(analyses, t=300):
out = []
for idx,analysis in enumerate(analyses):
for X in analysis:
X["onset_time"] += t*idx
out.append(X)
return out
def analysis(src):
    """Return the combined segment analysis for *src*, running the MEAPsoft
    pipeline only when no cached .feat files exist next to it."""
    feat_files = glob.glob(src + '*.feat')
    if not feat_files:
        feat_files = analyze(src)
    return _combine([_load(path) for path in feat_files])
if __name__ == '__main__':
    import sys
    # Analyze each file given on the command line and dump the raw segments.
    for p in sys.argv[1:]:
        # fix: print as a function works on both Python 2 and 3; the old
        # "print analysis(p)" statement is a SyntaxError on Python 3.
        print(analysis(p))
|
#=========================================================================
# pisa_sltiu_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits, sext, zext
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
    """Smoke test: r1 = 5 from the manager, then sltiu r3, r1, 4 -> 0
    (5 is not unsigned-less-than 4).

    The nop padding keeps each result well clear of the bypass network so
    only the basic datapath is exercised.
    """
    return """
    mfc0 r1, mngr2proc < 5
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    sltiu r3, r1, 4
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mtc0 r3, proc2mngr > 0
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
  """
#-------------------------------------------------------------------------
# gen_dest_byp_test
#-------------------------------------------------------------------------
def gen_dest_byp_test():
    """Destination-bypass tests for sltiu at decreasing nop delays.

    Each tuple is (nops, src, imm, expected_result).
    """
    cases = [
        (5, 1, 0, 0),
        (4, 1, 1, 0),
        (3, 0, 1, 1),
        (2, 2, 1, 0),
        (1, 2, 2, 0),
        (0, 1, 2, 1),
    ]
    return [gen_rimm_dest_byp_test(nops, "sltiu", src, imm, result)
            for nops, src, imm, result in cases]
#-------------------------------------------------------------------------
# gen_src_byp_test
#-------------------------------------------------------------------------
def gen_src_byp_test():
    """Source-bypass tests for sltiu at decreasing nop delays.

    Each tuple is (nops, src, imm, expected_result).
    """
    cases = [
        (5, 3, 2, 0),
        (4, 3, 3, 0),
        (3, 2, 3, 1),
        (2, 4, 3, 0),
        (1, 4, 4, 0),
        (0, 3, 4, 1),
    ]
    return [gen_rimm_src_byp_test(nops, "sltiu", src, imm, result)
            for nops, src, imm, result in cases]
#-------------------------------------------------------------------------
# gen_srcs_dest_test
#-------------------------------------------------------------------------
def gen_srcs_dest_test():
    """Test where the source register doubles as the destination."""
    return [gen_rimm_src_eq_dest_test("sltiu", 9, 8, 0)]
#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------
def gen_value_test():
    """Directed value tests: sltiu compares rs against the sign-extended
    16-bit immediate using an *unsigned* comparison, so e.g. imm 0x8000
    sign-extends to 0xffff8000, a large unsigned value."""
    return [
        gen_rimm_value_test( "sltiu", 0x00000000, 0x0000, 0 ),
        gen_rimm_value_test( "sltiu", 0x00000001, 0x0001, 0 ),
        gen_rimm_value_test( "sltiu", 0x00000003, 0x0007, 1 ),
        gen_rimm_value_test( "sltiu", 0x00000007, 0x0003, 0 ),
        gen_rimm_value_test( "sltiu", 0x00000000, 0x8000, 1 ),
        gen_rimm_value_test( "sltiu", 0x80000000, 0x0000, 0 ),
        gen_rimm_value_test( "sltiu", 0x80000000, 0x8000, 1 ),
        gen_rimm_value_test( "sltiu", 0x00000000, 0x7fff, 1 ),
        gen_rimm_value_test( "sltiu", 0x7fffffff, 0x0000, 0 ),
        gen_rimm_value_test( "sltiu", 0x7fffffff, 0x7fff, 0 ),
        gen_rimm_value_test( "sltiu", 0x80000000, 0x7fff, 0 ),
        gen_rimm_value_test( "sltiu", 0x7fffffff, 0x8000, 1 ),
        gen_rimm_value_test( "sltiu", 0x00000000, 0xffff, 1 ),
        gen_rimm_value_test( "sltiu", 0xffffffff, 0x0001, 0 ),
        gen_rimm_value_test( "sltiu", 0xffffffff, 0xffff, 0 ),
    ]
#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------
def gen_random_test():
    """100 random sltiu tests.

    Reference model: Bits comparison is unsigned, so comparing src against
    the sign-extended immediate reproduces the sltiu semantics.
    """
    asm_code = []
    # fix: range() works on both Python 2 and 3; xrange is Python-2-only.
    for i in range(100):
        src = Bits( 32, random.randint(0,0xffffffff) )
        imm = Bits( 16, random.randint(0,0xffff) )
        dest = Bits( 32, src < sext(imm,32) )
        asm_code.append( gen_rimm_value_test( "sltiu", src.uint(), imm.uint(), dest.uint() ) )
    return asm_code
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
@pytest.mark.parametrize( "name,test", [
    asm_test( gen_basic_test ),
    asm_test( gen_dest_byp_test ),
    asm_test( gen_src_byp_test ),
    asm_test( gen_srcs_dest_test ),
    asm_test( gen_value_test ),
    asm_test( gen_random_test ),
])
def test( name, test ):
    """Assemble each generator's program and run it to completion on the
    PISA instruction-set simulator (assertions live in the asm itself)."""
    sim = PisaSim( trace_en=True )
    sim.load( pisa_encoding.assemble( test() ) )
    sim.run()
|
from game.entity import Entity
from game.level import Level
from game.component import Component
from game.graphicsComponent import GraphicsComponent
from game.physicsComponent import PhysicsComponent
from game.playerInputComponent import PlayerInputComponent
|
#coding = utf-8
# refer to http://blog.csdn.net/bone_ace/article/details/46718683
class Node():
    """Binary-tree node holding *data* plus optional left/right children."""
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right
# Level-order (breadth-first) traversal: print node data top to bottom,
# left to right.
def lookup(root):
    """Print every node's data in level order starting from *root*."""
    from collections import deque  # fix: list.pop(0) was O(n) per dequeue
    queue = deque([root])
    while queue:
        current = queue.popleft()  # also fixes the "currrent" typo
        if current:
            print(current.data)
            if current.left:
                queue.append(current.left)
            if current.right:
                queue.append(current.right)
# Pre-order / depth-first traversal (DLR): node, left subtree, right subtree.
def preorder(root):
    """Print node data in pre-order (DLR)."""
    if root:
        print(root.data)
        preorder(root.left)
        preorder(root.right)
# In-order traversal (LDR): left subtree, node, right subtree.
def inorder(root):
    """Print node data in in-order (LDR)."""
    if root:
        inorder(root.left)
        print(root.data)
        inorder(root.right)
# Post-order traversal (LRD): left subtree, right subtree, node.
def backorder(root):
    """Print node data in post-order (LRD)."""
    if root:
        backorder(root.left)
        backorder(root.right)
        print(root.data)
'''
        1
       / \
      3   2
     / \ / \
    7  6 5  4
   /
  0
Level-order:      1 3 2 7 6 5 4
Preorder (DLR):   1 3 7 0 6 2 5 4
Inorder (LDR):    0 7 3 6 1 5 2 4
Postorder (LRD):  0 7 6 3 5 4 2 1
'''
if __name__ == '__main__':
    # Build the sample tree shown in the diagram above, then demonstrate
    # each traversal order.
    root = Node(1, Node(3, Node(7, Node(0)), Node(6)), Node(2, Node(5), Node(4)))
    lookup(root)
    print("====lookup end=======")
    preorder(root)
    print("====preorder end=======")
    inorder(root)
    print("====inorder end=======")
    backorder(root)
    print("====backorder end=======")
|
# Script for Hollerado - Don't Shake project
#
# The idea is to trace a path from The Barfly in Montreal to
# The Danforth Music Hall in Toronto, and to synchronize
# the video with the song "Don't Shake" by Hollerado.
#
# This script is just intended to be an illustration of
# the workflow, and a record of the scripting I did.
# It won't reproduce exactly what I did to generate the
# video, because lots of unreplicable work was done
# (e.g., probing various points along the route to see what
# data was available, and basing future design decisions
# on that).
#
#
# Query Google for route and set up pandas dataframe of GPS points
#
#
# Imports
from utils import *
from API_KEYS import API_KEY_DIRECTIONS, API_KEY_STREETVIEW
import pickle
# Point A and point B:
barfly = (45.517146, -73.579837)  # The Barfly, Montreal
danforth = (43.676533, -79.357132)  # The Danforth Music Hall, Toronto
# Get the route
gd = googlemaps.Client(key=API_KEY_DIRECTIONS)
directions_result = gd.directions(origin=barfly, destination=danforth, mode="driving")
# Save directions (commented out to not accidentally overwrite!)
# with open("barfly_to_danforth_route.p", "wb") as f:
#     pickle.dump(directions_result, f)
# Load directions
with open("barfly_to_danforth_route.p", "rb") as f:
    directions_result = pickle.load(f)
# Decode polyline (the directions) into dense sequence of GPS points
path_points = polyline.decode(directions_result[0]['overview_polyline']['points'])
# Interpolate between consecutive polyline vertices (hop_size=1 -> dense).
dense_points = [interpolate_points(pt[0], pt[1], hop_size=1) for pt in zip(path_points[:-1], path_points[1:])]
look_points_rough = [item for sequence in dense_points for item in sequence]
# Remove unnecessary points
look_points = clean_look_points(look_points_rough)
# Create an itinerary object and probe a 1000th of the frames:
pickle_filename = "bd_points.p"
# Save
# Don't accidentally recreate it! You only want to create this dataframe once.
# itin_bd = create_itinerary_df(look_points)
# Load
itin_bd = pd.read_pickle(pickle_filename)
# Probe a subset of the path
take_these_steps = range(0, itin_bd.shape[0], 1000) # Do a subset of every 1000th point?
take_these_steps = range(0, itin_bd.shape[0], 10) # Or every 10th point? (this one wins)
# Google can get angry at you if you probe too much. Maybe you're trying to copy their database! Ha.
# So, it's prudent to probe different subsets at intervals, and to save the output after you've
# probed a significant new chunk to make sure you don't lose data.
probe_itinerary_items(itin_bd, take_these_steps, API_KEY_STREETVIEW)
# Save your work:
# itin_bd.to_pickle(pickle_filename)
# Convert date field to numerical year and month columns
# (date strings look like "YYYY-MM"; empty strings become 0)
years = [date[:4] for date in itin_bd.date]
years_int = [int(y) if len(y) else 0 for y in years]
months = [date[5:] for date in itin_bd.date]
months_int = [int(y) if len(y) else 0 for y in months]
itin_bd["year"] = years_int
itin_bd["month"] = months_int
# Add distance in months from most common month of most common year (which turned out to be 2018-07)
months_before = (2018 - itin_bd.year) * 12 + (7 - itin_bd.month)
months_after = (itin_bd.year - 2018) * 12 + (itin_bd.month - 7)
itin_bd["dist_from_2018-07"] = np.max((months_before, months_after), axis=0)
#
#
# Information about song to be matched
#
#
# The following array was estimated using madmom from the audio file.
# That work isn't replicated here. Just the output.
# Beat onset times in seconds for "Don't Shake", one entry per detected beat.
beats = np.array([0.25, 0.86, 1.52, 2.14, 2.81, 3.42, 4.08, 4.69,
                  5.36, 5.96, 6.62, 7.24, 7.91, 8.52, 9.18, 9.8,
                  10.47, 11.07, 11.73, 12.35, 13.02, 13.63, 14.29, 14.91,
                  15.58, 16.19, 16.85, 17.47, 18.14, 18.74, 19.4, 20.01,
                  20.66, 21.29, 21.94, 22.57, 23.22, 23.84, 24.5, 25.11,
                  25.77, 26.39, 27.05, 27.68, 28.32, 28.95, 29.6, 30.22,
                  30.88, 31.5, 32.15, 32.78, 33.43, 34.05, 34.71, 35.33,
                  35.98, 36.61, 37.26, 37.89, 38.54, 39.16, 39.81, 40.45,
                  41.09, 41.72, 42.37, 43., 43.64, 44.28, 44.92, 45.55,
                  46.2, 46.83, 47.47, 48.11, 48.75, 49.38, 50.02, 50.66,
                  51.3, 51.94, 52.58, 53.22, 53.86, 54.49, 55.13, 55.77,
                  56.41, 57.04, 57.67, 58.32, 58.96, 59.59, 60.25, 60.86,
                  61.51, 62.14, 62.79, 63.42, 64.07, 64.69, 65.35, 65.97,
                  66.62, 67.25, 67.9, 68.52, 69.18, 69.79, 70.45, 71.09,
                  71.73, 72.36, 73., 73.63, 74.28, 74.91, 75.56, 76.19,
                  76.83, 77.46, 78.11, 78.75, 79.39, 80.02, 80.66, 81.3,
                  81.94, 82.57, 83.21, 83.85, 84.49, 85.12, 85.77, 86.4,
                  87.05, 87.67, 88.32, 88.95, 89.6, 90.23, 90.88, 91.51,
                  92.15, 92.79, 93.42, 94.06, 94.71, 95.34, 95.98, 96.62,
                  97.26, 97.9, 98.54, 99.17, 99.81, 100.45, 101.09, 101.72,
                  102.37, 103., 103.64, 104.28, 104.92, 105.56, 106.19, 106.83,
                  107.47, 108.11, 108.74, 109.39, 110.03, 110.65, 111.3, 111.94,
                  112.58, 113.21, 113.86, 114.49, 115.13, 115.76, 116.41, 117.05,
                  117.68, 118.32, 118.96, 119.59, 120.24, 120.87, 121.51, 122.15,
                  122.79, 123.43, 124.07, 124.7, 125.35, 125.98, 126.62, 127.26,
                  127.9, 128.53, 129.18, 129.81, 130.45, 131.08, 131.72, 132.36,
                  133., 133.64, 134.28, 134.91, 135.56, 136.19, 136.83, 137.46,
                  138.11, 138.74, 139.39, 140.02, 140.67, 141.3, 141.94, 142.57,
                  143.22, 143.85, 144.49, 145.13, 145.77, 146.41, 147.05, 147.68,
                  148.32, 148.95, 149.62, 150.23, 150.9, 151.51, 152.17, 152.78,
                  153.43, 154.07, 154.71, 155.34, 155.98, 156.62, 157.26, 157.89,
                  158.54, 159.16, 159.81, 160.45, 161.09, 161.73, 162.36, 163.,
                  163.64, 164.28, 164.92, 165.56, 166.2, 166.83, 167.47, 168.11,
                  168.75, 169.38, 170.02, 170.66, 171.3, 171.94, 172.58, 173.21,
                  173.86, 174.49, 175.12, 175.77, 176.41, 177.04, 177.68, 178.32,
                  178.96, 179.6, 180.24, 180.88, 181.52, 182.14, 182.8, 183.42,
                  184.07, 184.7, 185.34, 185.98, 186.62, 187.25, 187.9, 188.53,
                  189.18, 189.81, 190.45, 191.08, 191.73, 192.36, 193., 193.63,
                  194.28, 194.91, 195.56, 196.19, 196.83, 197.47, 198.11, 198.74,
                  199.39, 200.02, 200.67, 201.3, 201.94, 202.57, 203.22, 203.84,
                  204.49, 205.11, 205.77, 206.39])
# Interleave the midpoint of every beat pair to get a half-beat "tick" grid.
tmp_half_beats = beats[:-1] + (beats[1:] - beats[:-1]) / 2
halfbeats = np.array(sorted(list(tmp_half_beats) + list(beats)))
# Define a timeline object so that we can easily generate a movie
# that aligns to points in the music.
class timeline(object):
    """Frame-by-frame timeline mapping video frames to beat indices and
    picture filenames, with helpers to copy the frames into a numbered
    sequence and render the final video."""

    def __init__(self, duration, fps=24, new_stem="default_stem", base_path="./photos"):
        """Build an empty timeline of duration seconds at fps frames/sec."""
        self.timeline = pd.DataFrame(columns=['time', 'beatindex', 'filename'])
        self.timeline.time = np.arange(0, duration, 1.0 / fps)
        self.timeline.beatindex = np.zeros(self.timeline.shape[0]).astype(int)
        self.timeline = self.timeline.fillna("")
        self.fps = fps
        self.new_stem = new_stem
        self.base_path = base_path

    def set_beat_indices(self, beat_times_seconds):
        """Mark the frame nearest each beat time; beatindex becomes the
        cumulative beat count at every frame."""
        nearest_frames_to_beats = [np.argmin(np.abs(self.timeline.time - b)) for b in beat_times_seconds]
        cumulative_beat_index = np.zeros((self.timeline.shape[0],))
        cumulative_beat_index[nearest_frames_to_beats] = 1
        self.timeline["beatindex"] = np.cumsum(cumulative_beat_index).astype(int)

    def set_pic_to_beat(self, pic_filename, beat1, beat2=None):
        """Show one picture for every frame in beats [beat1, beat2)
        (defaults to a single beat)."""
        if beat2 is None:
            beat2 = beat1 + 1
        mask = (self.timeline.beatindex >= beat1) & (self.timeline.beatindex < beat2)
        # fix: assign through .loc; chained indexing may write to a copy
        # and raises SettingWithCopyWarning.
        self.timeline.loc[mask, "filename"] = pic_filename

    def set_continuous_pics_from_beat(self, pic_filenames, beat1, beat2):
        """Assign one picture per frame from beat1 up to beat2; returns the
        number of frames consumed."""
        start_index = self.timeline.index[self.timeline.beatindex == beat1].values[0]
        end_index = self.timeline.index[self.timeline.beatindex == beat2].values[0]
        range_len = end_index - start_index
        # fix: positional assignment via .iloc instead of chained indexing.
        filename_col = self.timeline.columns.get_loc('filename')
        self.timeline.iloc[start_index:end_index, filename_col] = pic_filenames[:range_len]
        return range_len

    def copy_images_in_timeline(self):
        """Copy each frame's source image to its numbered output name so
        ffmpeg can consume them as a sequence."""
        for ind in self.timeline.index:
            old_filename = self.timeline.loc[ind]['filename']
            new_filename = "{0}/{1}{2}.jpg".format(self.base_path, self.new_stem, ind)
            # fix: compare with != ; "is not ''" was an identity check on a
            # string literal (SyntaxWarning, implementation-dependent).
            if old_filename != '':
                print("{0} {1} {2}".format("cp", old_filename, new_filename))
                os.system("{0} {1} {2}".format("cp", old_filename, new_filename))

    def script_make_video(self):
        """Render the copied frame sequence to video and mux in the song."""
        video_filename = self.new_stem + "vid"
        sound_filename = self.new_stem + "vid_sound"
        make_video(self.new_stem, rate=self.fps, video_string=video_filename, picsize="640x640",
                   basepath=self.base_path)
        os.system(
            "ffmpeg -i {0}.mp4 -i \"/Users/jordan/Music/iTunes/iTunes Music/Hollerado/Born Yesterday/02 Don't Shake.wav\" -shortest {1}.mp4 -y".format(
                video_filename, sound_filename))
        print("Video should have been successfully made here: {0}".format(sound_filename))
def download_missing_items_for_timeline(timeline_obj, itinerary, stem="bd_1000s"):
    """Download every Street View image the timeline references but which is
    missing on disk.

    Collects the itinerary indices encoded in the missing filenames, asks the
    user for confirmation, then fetches them.  Returns the itinerary, or None
    if the user declines.
    """
    missing_ids = []
    for fn in timeline_obj.timeline["filename"].values:
        # Check if it exists.
        if not os.path.exists(fn):
            folder = os.path.dirname(fn)
            filename = os.path.basename(fn)
            basename, ext = os.path.splitext(filename)
            # The itinerary index is the numeric suffix after *stem*.
            index = int(basename.split(stem)[1])
            missing_ids += [index]
    print("For this route, there are {0} images to download.".format(len(missing_ids)))
    # NOTE(review): raw_input is Python 2 only, while the prints use the
    # function form -- confirm which interpreter this script targets.
    continue_opt = raw_input('Would you like to download them all Type yes to proceed; otherwise, program halts.\n')
    if continue_opt not in ['Yes', 'yes']:
        return
    download_pics_from_list(itinerary, API_KEY_STREETVIEW, stem, "640x640", redownload=False, index_filter=missing_ids)
    return itinerary
# Construct a plan for the song, deciding, for each range of sub-beats (2 ticks per beat),
# whether to assign one picture for the entire beat or one picture per frame of video.
# set a "pace" to skip more pictures at a time
def define_program():
    """Return the shot plan for the song.

    Returns four lists of half-beat indices (pictures held for 4 beats,
    2 beats, 1 beat, or swapped every frame), the leap_onsets list (points
    where the picture index jumps ahead), and a dict mapping half-beat ->
    pace (how many pictures to advance per step).
    """
    pic_per_4_beats = []
    pic_per_2_beats = []
    pic_per_1_beat = []
    pic_per_1_frame = []
    pace = {}
    # verse 1, first half = 0, 16
    pic_per_2_beats += [0, 14]
    pic_per_4_beats += range(2, 14, 4)
    pic_per_2_beats += range(16, 32, 2)
    # verse 1, second half = 32, 64
    pic_per_1_beat += range(32, 64)
    # verse 2 first half
    pic_per_1_frame += range(64, 68)
    pic_per_2_beats += range(68, 72, 2)
    pic_per_1_frame += range(72, 76)
    pic_per_1_beat += range(76, 80)
    pic_per_1_frame += range(80, 84)
    pic_per_2_beats += range(84, 88, 2)
    pic_per_1_frame += range(88, 92)
    pic_per_1_beat += range(92, 96)
    # verse 2 second half
    pic_per_1_frame += range(96, 100)
    pic_per_2_beats += range(100, 104, 2)
    pic_per_1_frame += range(104, 108)
    pic_per_1_beat += range(108, 112)
    pic_per_1_frame += range(112, 116)
    pic_per_2_beats += range(116, 120, 2)
    pic_per_1_frame += range(120, 128)
    pace[128] = 2
    # chorus = 128 to 192
    pic_per_1_frame += range(128, 192)
    pace[192] = 1
    # interlude
    pic_per_1_frame += range(192, 194)
    pic_per_4_beats += [194]
    pic_per_2_beats += [197, 199]
    pic_per_1_frame += range(200, 208)
    pic_per_1_frame += range(208, 210)
    pic_per_4_beats += [210]
    pic_per_2_beats += [213, 215]
    pic_per_1_frame += range(216, 220)
    pic_per_4_beats += [220]
    # verse 3
    pic_per_1_frame += range(224, 288)
    pace[288] = 2
    # chorus 2
    pic_per_1_frame += range(288, 352)
    pace[352] = 1
    # interlude 2
    pic_per_1_frame += range(352, 400)
    # bridge -- accelerate until whatever point is necessary to get to Toronto by the end
    pic_per_1_frame += range(400, 464)
    pace[400] = 8
    # interlude 3
    # super quiet, ritard.: (x = new pic, X = hard on this 16th) [xxxxx.x.x...x.XX], off the 401 onto local Toronto highway
    pic_per_2_beats += range(464, 468)
    pic_per_4_beats += [468, 470, 472]
    pic_per_2_beats += [476]
    pic_per_1_frame += range(478, 480)
    pace[478] = 2
    # chorus 3, 1.5 times longer
    pic_per_1_frame += range(480, 576)
    pace[480] = 2
    pace[572] = 1
    # outro
    pic_per_1_frame += range(576, 640)
    # extra snaps
    pic_per_4_beats += [640, 644]
    # At these points, leap a little further in the series of points.
    leap_onsets = [194, 197, 199, 200, 210, 213, 215, 216, 220, 224, 464, 466, 468, 470, 472, 474, 476, 478]
    return pic_per_4_beats, pic_per_2_beats, pic_per_1_beat, pic_per_1_frame, leap_onsets, pace
#
#
# Generate music video
#
#
# Keep only probes that returned imagery, then drop consecutive duplicate
# panoramas (Street View often returns the same pano for nearby points).
use_this_bd = itin_bd.loc[itin_bd.status == "OK"]
pano_id_changes = use_this_bd.pano_id.values[:-1] != use_this_bd.pano_id.values[1:]
unique_ids = use_this_bd.index[np.where(pano_id_changes)[0]]
# Denser sampling for the first 100 panoramas, sparser after that.
spaced_ids = list(unique_ids[range(0, 100, 2)]) + list(unique_ids[range(100, len(unique_ids), 3)])
itinerary_ids = spaced_ids
# Set up timeline object. Give it the beats. Get plan.
tl = timeline(208, 24)
# NOTE(review): [0] + halfbeats[1:] is list-plus-ndarray, i.e. elementwise
# addition of 0 (not concatenation) -- presumably concatenation was intended;
# the result happens to equal halfbeats[1:].  Confirm before changing.
tl.set_beat_indices([0] + halfbeats[1:])
# Start at picture 0. For each click of the timeline, assign the next filenames.
eligible_pic_ind = 0
pic_per_4_beats, pic_per_2_beats, pic_per_1_beat, pic_per_1_frame, leap_onsets, pace = define_program()
current_pace = 1
# PHASE 1:
# Assign all the pictures
for beat_i in range(0, 647):
    # Remember where we were at beat 480 (start of the final chorus) so the
    # tail can be re-run with a corrected starting picture (see below).
    if beat_i == 480:
        eligible_pic_ind_at_480 = eligible_pic_ind
    if np.mod(beat_i, 10) == 0:
        print("Beat {0}/646 ... pic_ind {1}/13564".format(beat_i, eligible_pic_ind))
    if beat_i in pace.keys():
        current_pace = pace[beat_i]
    if beat_i in leap_onsets:
        eligible_pic_ind += 50
    if beat_i in pic_per_2_beats:
        tl.set_pic_to_beat("./photos/bd_1000s{0}.jpg".format(itinerary_ids[eligible_pic_ind]), beat_i, beat_i + 2)
        eligible_pic_ind += 1 * current_pace
    elif beat_i in pic_per_4_beats:
        tl.set_pic_to_beat("./photos/bd_1000s{0}.jpg".format(itinerary_ids[eligible_pic_ind]), beat_i, beat_i + 4)
        eligible_pic_ind += 1 * current_pace
    elif beat_i in pic_per_1_beat:
        tl.set_pic_to_beat("./photos/bd_1000s{0}.jpg".format(itinerary_ids[eligible_pic_ind]), beat_i, beat_i + 1)
        eligible_pic_ind += 1 * current_pace
    elif beat_i in pic_per_1_frame:
        pic_filenames = ["./photos/bd_1000s{0}.jpg".format(itinerary_ids[ep_ind]) for ep_ind in
                         range(eligible_pic_ind, len(itinerary_ids))]
        pace_pic_filenames = [pic_filenames[i] for i in range(0, len(pic_filenames), current_pace)]
        range_len = tl.set_continuous_pics_from_beat(pace_pic_filenames, beat_i, beat_i + 1)
        eligible_pic_ind = eligible_pic_ind + range_len * current_pace
eligible_pic_ind_at_647 = eligible_pic_ind
# At beat 480, we want to make sure we are on track to finish at the end perfectly.
# So, we record the value of eligible_pic_ind at 480, and again at the end, and then
# re-run the above loop after resetting eligible_pic_ind forward sufficiently.
epis_needed_for_480_onward = eligible_pic_ind_at_647 - eligible_pic_ind_at_480
max_epi = len(itinerary_ids)
id_to_set_at_480 = max_epi - epis_needed_for_480_onward
# Values observed on the real data:
# >>> eligible_pic_ind_at_480
# 8175
# >>> eligible_pic_ind_at_647
# 10108
# >>> id_to_set_at_480
# 11631
eligible_pic_ind = id_to_set_at_480
# PHASE 2: redo beats 480..646 from the corrected picture index.
# NOTE(review): this duplicates the assignment body of the loop above.
for beat_i in range(480, 647):
    if np.mod(beat_i, 10) == 0:
        print("Beat {0}/646 ... pic_ind {1}/13564".format(beat_i, eligible_pic_ind))
    if beat_i in pace.keys():
        current_pace = pace[beat_i]
    if beat_i in leap_onsets:
        eligible_pic_ind += 50
    if beat_i in pic_per_2_beats:
        tl.set_pic_to_beat("./photos/bd_1000s{0}.jpg".format(itinerary_ids[eligible_pic_ind]), beat_i, beat_i + 2)
        eligible_pic_ind += 1 * current_pace
    elif beat_i in pic_per_4_beats:
        tl.set_pic_to_beat("./photos/bd_1000s{0}.jpg".format(itinerary_ids[eligible_pic_ind]), beat_i, beat_i + 4)
        eligible_pic_ind += 1 * current_pace
    elif beat_i in pic_per_1_beat:
        tl.set_pic_to_beat("./photos/bd_1000s{0}.jpg".format(itinerary_ids[eligible_pic_ind]), beat_i, beat_i + 1)
        eligible_pic_ind += 1 * current_pace
    elif beat_i in pic_per_1_frame:
        pic_filenames = ["./photos/bd_1000s{0}.jpg".format(itinerary_ids[ep_ind]) for ep_ind in
                         range(eligible_pic_ind, len(itinerary_ids))]
        pace_pic_filenames = [pic_filenames[i] for i in range(0, len(pic_filenames), current_pace)]
        range_len = tl.set_continuous_pics_from_beat(pace_pic_filenames, beat_i, beat_i + 1)
        eligible_pic_ind = eligible_pic_ind + range_len * current_pace
# Final steps: make sure all the pics are downloaded,
# copy pictures into proper sequence,
# then make the video using ffmpeg.
itin_bd_copy = download_missing_items_for_timeline(tl, itin_bd, stem="bd_1000s")
tl.copy_images_in_timeline()
tl.script_make_video()
# Preserve output!
itin_bd.to_pickle("new_pickled_filename.p")
|
import time

start = time.time()

# Compute 100! and sum its decimal digits (the answer is 648).
m = 1
# fix: range() works on both Python 2 and 3; xrange is Python-2-only.
for i in range(1, 101):
    m *= i
result = sum(map(int, str(m)))

spend = time.time() - start
# fix: function-call print works on both Python 2 and 3; the original
# py2-only print statement is a SyntaxError on Python 3.
print("The result is %s and take time is %f" % (result, spend))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# resources.py
# Author: gfcocos
import os,sys
from xml.etree import ElementTree as ET
import json
import re
import string
# Inputs
# Directories scanned for resources.
rs_paths = [
    "D:\\DiceApp\\res\\branches\\2014-1-17-91\\ResourcesOutput\\image\\ui_winlost",
    "D:\\DiceApp\\res\\branches\\2014-1-17-91\\ResourcesOutput\\image\\winlose"
]
# Output path/name of the generated resource.h.
h_file = './resource.h'#'D:\\DiceApp\src\\branches\\client\\2014-1-17-91\\client\\Classes\\ui\\resource.h'#cs_path + "/resource.h"
# Editor used to open the generated file.
editor_app = 'sublime_text'
dir_list = []
file_list = []
texture_list = []
# Resource names in discovery order.
resources_list = []
# Every directory visited during the walk.
search_paths_list = []
# Image (jpg, png) file names.
imgs_list = []
# .plist file names.
plists_list = []
# Directory maps: dirpath -> filenames / dirpath -> plist paths.
dirpath_filenames_dict = {}
dirpath_plists_dict = {}
# Log file (recreated on every run).
logfilename = './tmp/mylog.txt'
if os.path.isfile(logfilename):
    os.remove(logfilename)
logfile = open(logfilename,'w');
# Initialization
def init():
    """Walk every rs_path and fill the module-level resource containers."""
    # Collect all directory and file names.
    # rs_path = "../resource"
    # eg:
    # dirpaths =
    # ../resource
    # ../resource/config
    # ../resource/data
    # ../resource/font
    # ../resource/image
    # ../resource/image/areawindow
    # ../resource/image/areawindow/areawindow_ani
    # ../resource/image/areawindow/areawindow_ui
    # dirnames =
    # config
    # data
    # font
    # image
    # filenames =
    # ../resource/a.png
    # ../resource/b.png
    for rs_path in rs_paths:
        for dirpath, dirnames, filenames in os.walk(rs_path):
            dirpath = dirpath.replace('\\','/')
            search_paths_list.append(dirpath) # resource search directory
            for filename in filenames:
                resources_list.append(filename)
            #for dirname in dirnames:
            #    debug = ' '#print dirname
            if dirpath not in dirpath_filenames_dict:
                tmp = []
                tmp_plist = []
                for filename in filenames:
                    tmp.append(filename)
                    name, ext = os.path.splitext(filename)
                    if ext == '.plist':
                        tmp_plist.append(dirpath+'/'+filename)
                dirpath_filenames_dict[dirpath] = tmp #dirpath -> filenames
                if len(tmp_plist):
                    dirpath_plists_dict[dirpath] = tmp_plist #dirpath -> plists
    # Dump the dictionary containers (debug leftovers).
    #for key in dirpath_filenames_dict:
    #    debug = ' '#print key
    #    for filename in dirpath_filenames_dict[key]:
    #        debug = ' '#print filename
    for key in dirpath_plists_dict:
        if len(dirpath_plists_dict[key]):
            for plist in dirpath_plists_dict[key]:
                debug = ' '#print plist
    # Route each resource into imgs_list / plists_list by extension.
    for res in resources_list:
        name, ext = os.path.splitext(res)
        container = getListByExt(ext)
        if container != None:
            container.append(res)
    # debug
    logfile.write ("======================resouces file :========================\n")
    for resource in resources_list:
        logfile.write(resource+"\n")
    logfile.write ("======================img_lists:========================\n")
    for img in imgs_list:
        logfile.write(img+"\n")
    logfile.write ("======================plists_lists:========================\n")
    for plist in plists_list:
        logfile.write(plist+"\n")
    logfile.write("%s\n" % "======================search_paths_list:========================")
    for search_path in search_paths_list:
        logfile.write("%s\n" % search_path)
# Map a file extension to its variable-name prefix abbreviation.
def getPreByExt(ext):
    """Return the identifier prefix for an extension ("png" or ".png" form).

    Unknown or empty extensions map to "".  Note: when a dot is present
    anywhere in *ext*, only the first character is stripped (quirk kept
    from the original behavior).
    """
    if '.' in ext:
        ext = ext[1:]
    prefixes = {
        "png": "i",
        "jpg": "i",
        "plist": "p",
        "ttf": "t",
        "gif": "i",
        "db": "d",
        "json": "js",
        'atlas': 'a'
    }
    if not ext:
        return ""
    return prefixes.get(ext, "")
def getListByExt(ext):
    """Return the module-level list that collects files of this extension.

    ".png" files go to imgs_list and ".plist" files to plists_list; every
    other (or empty) extension yields None so the caller can skip the file.
    """
    if '.' in ext:
        ext = ext[1:]
    buckets = {
        "png": imgs_list,
        "plist": plists_list
    }
    if not ext:
        return None
    return buckets.get(ext)
def generate_h():
    """Write the auto-generated C++ header: one
    `static const char s<prefix>_<name>[] = "path";` per resource, plus a
    `sa_<key>[]` pointer array for each numbered frame sequence found in a
    .plist texture atlas.  Reads the module-level dicts/lists built by init().
    """
    # Start from a clean file.
    if os.path.isfile(h_file):
        os.remove(h_file)
    f = file(h_file, 'w')  # NOTE(review): Py2-only builtin `file`; use open() under Py3
    f.write("#ifndef _AUTO_RESOURCES_H_\n")
    f.write("#define _AUTO_RESOURCES_H_\n\n")
    # Generate the cocos search-path vector (kept disabled).
    #f.write("/* cocos查找目录 \n")
    #f.write("static const std::vector<std::string> searchPaths = {\n")
    #for dirname in search_paths_list:
    #    name = dirname[len(rs_path) + 1:]
    #    if len(name) > 0:
    #        f.write('\t"%s",\n' % name )
    #f.write("};\n\n*/\n")
    # Find the longest resource name so the generated '=' columns line up.
    max_length = 0
    for filename in resources_list:
        if max_length < len(filename):
            max_length = len(filename)
    res_format = 'static const char s%s_%s[]%s="%s";\n'
    # Emit the resource definitions, grouped per directory.
    f.write("// files\n")
    for dirpath in dirpath_filenames_dict:
        if len(dirpath_filenames_dict[dirpath]):
            f.write('\n// '+dirpath+'\n')
            f.write('//file\n')
            for filename in dirpath_filenames_dict[dirpath]:
                # Strip the directory part.
                filename = os.path.basename(filename)
                name, ext = os.path.splitext(filename)
                # Remove spaces (illegal in C identifiers).
                name = name.replace(' ', '')
                # Short type prefix (i=image, p=plist, ...).
                ext_pre = getPreByExt(ext)
                # Build padding so the generated columns align.
                spaces = ''
                spaces_num = max_length - len(name)
                for i in range(spaces_num):
                    spaces = spaces + ' '
                # Keep only the path from the "image" component onwards.
                pre = re.sub('.*image','image',dirpath)
                name_re = name.replace('-','_')
                name_2 = name_re.replace('.','_')
                f.write(res_format % (ext_pre,name_2,spaces,pre+'/'+filename))
        if dirpath in dirpath_plists_dict:
            f.write("//texture\n")
            for plist in dirpath_plists_dict[dirpath]:
                f.write('//%s\n'%plist)
                # Pull every frame name out of the .plist texture atlas.
                root = ET.parse(plist).getroot()
                tmp_array = {}
                varname = ''
                for elem in root[0][1]:
                    if elem.tag == "key":
                        image = elem.text
                        name, ext = os.path.splitext(image)
                        # Remove spaces.
                        name = name.replace(' ', '')
                        # Short type prefix.
                        ext_pre = getPreByExt(ext)
                        #f.write(res_format % (ext_pre, name, image))
                        # Padding for column alignment.
                        spaces = ''
                        spaces_num = max_length - len(name)
                        for i in range(spaces_num):
                            spaces = spaces + ' '
                        varname = 's%s_%s' % (ext_pre,name)
                        f.write(res_format % (ext_pre,name,spaces,image))
                        if re.match('\w+\d+.png',image): # numbered frame: animation or sequence type
                            key = re.sub('\d+.png|_\d+.png$','',image)
                            if key not in tmp_array:
                                tmp_array[key] = []
                            tmp_array[key].append(varname)
                # Emit one pointer array per frame sequence, ordered by frame number.
                for key in tmp_array:
                    f.write('static const char* sa_%s[]={'% key)
                    tmp = []
                    for val in tmp_array[key]:
                        index = re.sub('[^\d+]','',val)
                        i = string.atol(index)-1 # NOTE(review): fragile — concatenates every digit in the name
                        debug = ' '#print i
                        debug = ' '#print val
                        tmp.insert(i,val)
                    debug = ' '#print tmp
                    for i in range(len(tmp)):
                        if i%3 == 0:
                            f.write('\n\t')
                        f.write(tmp[i]+', ')
                    f.write('};\n')
    #if len():
    #for plist in dirpath_plists_dict[dirpath]:
    #    debug = ' '#print plist
    # fetch the textures listed in the plist
    #dirpath_plists_dict[dirpath]
    #    debug = ' '#print dirpath_plists_dict[dirpath]
    #for plist in dirpath_plists_dict[dirpath]:
    #    debug = ' '#print plist
    f.write("\n\n#endif //#ifndef _AUTO_RESOURCES_H_\n")
    f.close()
    # Open the freshly generated header in the configured editor.
    os.system(editor_app+' '+h_file)
# Script entry: scan the resource tree, then emit the auto-generated header.
init()
generate_h()
|
from django.contrib import admin
# Register your models here.
from .models import Stage, SubStage, Block, Level, ProgressEntry, ProgressInventoryEntry, ProgressEntryMedia, \
ProgressComment
class StageAdmin(admin.ModelAdmin):
    """Admin configuration for Stage: search by project/title, edit title+project."""
    search_fields = ['project', 'title']
    list_display = ('title', 'created_at', 'updated_at', 'project')
    fields = ('title', 'project')
class SubStageAdmin(admin.ModelAdmin):
    """Admin configuration for SubStage."""
    # NOTE(review): 'project' is not among the edited fields below — verify it
    # resolves on the SubStage model (a related lookup like 'stage__project'
    # may be intended).
    search_fields = ['project']
    list_display = ('title', 'stage', 'created_at', 'updated_at')
    fields = ('stage', 'title', 'inventory')
    # list_filter = ('user',)
class BlockAdmin(admin.ModelAdmin):
    """Admin configuration for Block (child of SubStage)."""
    # NOTE(review): verify 'project' resolves on the Block model.
    search_fields = ['project']
    list_display = ('title', 'created_at', 'updated_at', 'sub_stage')
    fields = ('title', 'sub_stage')
class LevelAdmin(admin.ModelAdmin):
    """Admin configuration for Level (child of Block)."""
    # NOTE(review): verify 'project' resolves on the Level model.
    search_fields = ['project']
    list_display = ('title', 'created_at', 'updated_at', 'block')
    fields = ('title', 'block')
class ProgressEntryAdmin(admin.ModelAdmin):
    """Admin configuration for ProgressEntry, filterable by approval state."""
    search_fields = ['block']
    list_display = ('block', 'is_approved')
    fields = ('block', 'is_approved')
    list_filter = ('is_approved',)
class ProgressInventoryEntryAdmin(admin.ModelAdmin):
    """Admin configuration for ProgressInventoryEntry; creator is read-only."""
    search_fields = ['progress_entry', 'inventory']
    list_display = ('progress_entry', 'inventory', 'quantity', 'last_updated', 'created_at', 'created_by')
    fields = ('progress_entry', 'inventory', 'quantity')
    readonly_fields = ('created_by',)
    # list_filter = ('is_approved',)
class ProgressEntryMediaAdmin(admin.ModelAdmin):
    """Admin configuration for media attached to a progress entry."""
    search_fields = ['progress_entry']
    list_display = ('media', 'progress_entry')
    fields = ('media', 'progress_entry')
    # list_filter = ('is_approved',)
class ProgressCommentAdmin(admin.ModelAdmin):
    """Admin configuration for ProgressComment; creator is read-only."""
    search_fields = ['progress_entry', 'inventory']
    list_display = ('progress_entry', 'comment', 'last_updated', 'created_at', 'created_by')
    readonly_fields = ('created_by',)
    fields = ('progress_entry', 'comment')
# Attach each ModelAdmin customisation to its model on the default admin site.
admin.site.register(Stage, StageAdmin)
admin.site.register(SubStage, SubStageAdmin)
admin.site.register(Block, BlockAdmin)
admin.site.register(Level, LevelAdmin)
admin.site.register(ProgressEntry, ProgressEntryAdmin)
admin.site.register(ProgressInventoryEntry, ProgressInventoryEntryAdmin)
admin.site.register(ProgressEntryMedia, ProgressEntryMediaAdmin)
admin.site.register(ProgressComment, ProgressCommentAdmin)
|
# Car-rental cost calculator: R$ 0.15 per km driven plus R$ 60.00 per day.
km_driven = float(input('Insira a quantidade de KM percorrido: '))
rental_days = float(input('Insira a quantidade de dias de aluguel: '))
km_cost = km_driven * 0.15
daily_cost = rental_days * 60
total_cost = km_cost + daily_cost
print(' o seu carro foi alugado por {:.0f} dias e rodou por {} Quilômetros o total a ser pago pelo aluguel é de R$: {:.2f}'.format(rental_days, km_driven, total_cost))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.dispatch.dispatcher import receiver
from django.db.models.signals import post_save
from django.utils.encoding import python_2_unicode_compatible
from django.core.validators import RegexValidator
from django.core.validators import URLValidator
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.conf import settings
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Automatically issue a DRF auth token for every newly created user."""
    # Only the initial save of a user should mint a token.
    if not created:
        return
    Token.objects.create(user=instance)
@python_2_unicode_compatible
class Device(models.Model):
    """A mobile device registered to a user, keyed by a unique hardware identifier."""
    # Three-letter platform codes stored in the CharField below.
    ANDROID = 'AND'
    IOS = 'IOS'
    WINDOWS_PHONE = 'WPH'
    DEVICE_PLATFORMS = (
        (ANDROID, 'Android'),
        (IOS, 'iOS'),
        (WINDOWS_PHONE, 'Windows Phone')
    )
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='User')
    platform = models.CharField(max_length=3, choices=DEVICE_PLATFORMS, default=ANDROID, verbose_name='Platform')
    device_identifier = models.CharField(max_length=64, unique=True, verbose_name='Identifier')

    def __str__(self):
        return '%s' % (self.device_identifier)

    class Meta:
        verbose_name = 'User device'
        verbose_name_plural = 'User devices'
@python_2_unicode_compatible
class Team(models.Model):
    """A basketball team with its conference, arena and presentation data."""
    EASTERN_CONFERENCE = 'EC'
    WESTERN_CONFERENCE ='WC'
    CONFERENCES = (
        (EASTERN_CONFERENCE, 'EASTERN'),
        (WESTERN_CONFERENCE, 'WESTERN')
    )
    name = models.CharField(max_length=64, verbose_name='Name')
    conference = models.CharField(max_length=2, choices=CONFERENCES, default=EASTERN_CONFERENCE, verbose_name='Conference')
    arena = models.CharField(max_length=64, verbose_name='Arena')
    foundation = models.DateField(verbose_name='Foundation')
    about_history = models.TextField(verbose_name='About/History')
    # Flag is stored as an image URL, validated but not downloaded.
    flag = models.TextField(validators=[URLValidator()], verbose_name='Flag')

    def thumbnail(self):
        """Render a small <img> preview of the flag for the admin list page."""
        return '<img src="%s" style="max-width: 40px; max-height: 40px;">' % self.flag
    # NOTE(review): allow_tags was deprecated in Django 1.9 and removed in 2.0;
    # use django.utils.html.format_html when upgrading.
    thumbnail.allow_tags = True
    thumbnail.short_description = 'Thumbnail'

    def __str__(self):
        return '%s' % (self.name)

    class Meta:
        verbose_name = 'Team'
        verbose_name_plural = 'Teams'
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import librosa
import numpy as np
import tensorflow as tf

from cnn import CNN
# Command-line flags (TF1-style): checkpoint location and input glob pattern.
FLAGS = tf.app.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", "data/ckpt",
                       "Model checkpoint file or directory containing a "
                       "model checkpoint file.")
tf.flags.DEFINE_string("input_files", "",
                       "Location/file pattern of audio file(s) to classify.")
def print_audio_class(audio_class, sess=None):
    """Print the predicted label for one classification result.

    Fix: the previous version created fresh tf.argmax/tf.equal graph nodes and
    ran up to three `.eval` calls per invocation just to compare an argmax,
    bloating the graph and slowing classification.  `audio_class` comes from
    `sess.run`, i.e. it is already a numpy array, so plain numpy suffices.

    Parameters
    ----------
    audio_class : array-like of shape (1, 3)
        Logits or probabilities for the classes (bee, cricket, noise).
    sess : unused
        Kept (now optional) for backward compatibility with existing callers.
    """
    predicted = int(np.argmax(np.asarray(audio_class), axis=1)[0])
    if predicted == 0:
        print('Audio is of a bee')
    elif predicted == 1:
        print('Audio is of a cricket')
    else:
        print('Audio is of noise')
# Inference script: restore the CNN checkpoint and classify each matching file.
model = CNN(32768, 3)
with tf.Session() as sess:
    # 1. Build and Restore Model
    model.build()
    sess.run(tf.global_variables_initializer())
    model.saver.restore(sess, tf.train.latest_checkpoint(FLAGS.checkpoint_path))
    filenames = []
    for file_pattern in FLAGS.input_files.split(","):
        filenames.extend(tf.gfile.Glob(file_pattern))
    print("Running classification on %d files matching %s" % (
        len(filenames), FLAGS.input_files))
    for filename in filenames:
        # 2. Load and Pre-process Audio
        audio_arr, sr = librosa.load(filename)
        # 3. Classify and Print result — only the first 32768 samples are fed in.
        audio_class = sess.run(model.logits,feed_dict={model.x: [audio_arr[:32768]], model.keep_prob: 1.0})
        print_audio_class(audio_class, sess)
|
from __future__ import print_function, unicode_literals, division
import re
def remove_newlines(s):
    """Replace every run of line breaks (and surrounding whitespace) in *s*
    with a single space.

    Bug fix: the old pattern "[\\n|\\r\\n|\\n\\r]" was a *character class*, so
    the alternation bars were literal and every '|' in the input was replaced
    too.  Since \\s already matches \\n and \\r, one whitespace-collapsing pass
    (what remove_extraneous_whitespace did afterwards) gives the intended
    result while leaving pipes intact.
    """
    return re.sub(r"\s+", " ", s)
def remove_extraneous_whitespace(s):
    """Collapse every whitespace run in *s* to a single space.

    Fix: the pattern is now a raw string (the old "(\\s+)" relied on Python
    passing unknown escapes through, which raises a DeprecationWarning on
    modern interpreters), and the unused capturing group is gone.
    """
    return re.sub(r"\s+", " ", s)
def cleanup(s):
    """Normalise *s*: flatten newlines and squeeze extra whitespace."""
    cleaned = remove_newlines(s)
    return cleaned
|
from mazel.runtimes import GoRuntime
from .utils import RuntimeTestCase
class JavascriptRuntimeTest(RuntimeTestCase):
    # NOTE(review): despite the class name, this exercises GoRuntime — it was
    # likely copied from a JavaScript runtime test; consider renaming to
    # GoRuntimeTest.
    runtime_cls = GoRuntime

    def test_runtime_label(self):
        """The runtime under test reports the label "go"."""
        runtime = self.make_runtime()
        self.assertEqual(runtime.runtime_label, "go")

    def test_workspace_dependencies(self):
        """Dependency discovery is not implemented yet; it must return nothing."""
        runtime = self.make_runtime()
        # TODO Actually implement
        self.assertEqual(list(runtime.workspace_dependencies()), [])
|
import os
import day18_part1, day18_part2
def test_part1_example():
    """Part 1 example from the puzzle statement: first recovered frequency is 4."""
    # Renamed from `input`, which shadowed the builtin.
    program = """set a 1
add a 2
mul a a
mod a 5
snd a
set a 0
rcv a
jgz a -1
set a 1
jgz a -2""".splitlines()
    assert day18_part1.solve(program) == 4
def test_part1():
    """Part 1 against the real puzzle input (known answer 9423)."""
    # chdir so the relative input path resolves regardless of pytest's cwd.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # `with` closes the handle (was leaked); local renamed from builtin `input`.
    with open("day18_input.txt") as f:
        program = f.read().splitlines()
    assert day18_part1.solve(program) == 9423
def test_part2_example():
    """Part 2 example: program 1 sends a value 3 times."""
    # Renamed from `input`, which shadowed the builtin.
    program = """snd 1
snd 2
snd p
rcv a
rcv b
rcv c
rcv d""".splitlines()
    assert day18_part2.solve(program) == 3
def test_part2():
    """Part 2 against the real puzzle input (known answer 7620)."""
    # chdir so the relative input path resolves regardless of pytest's cwd.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # `with` closes the handle (was leaked); local renamed from builtin `input`.
    with open("day18_input.txt") as f:
        program = f.read().splitlines()
    assert day18_part2.solve(program) == 7620
|
import pandas as pd
import math
import itertools
from bokeh.palettes import Dark2_5 as palette
def parse_excel_data(directory, sheet_name):
    """Load one sheet of the Excel workbook at *directory* into a DataFrame."""
    workbook = pd.ExcelFile(directory)
    return workbook.parse(sheet_name)
def extract_column_names(df):
    """Return the DataFrame's column labels as a plain list."""
    # Iterating a DataFrame yields its column labels; .tolist() is explicit.
    return df.columns.tolist()
def extract_row_names(df):
    """Return the DataFrame's index labels as a plain list."""
    return df.index.tolist()
# parameters: dataframe, countries list, years list, and dataset's name
def convert_df2json(df, countries, years, ds_name):
    """Turn one DataFrame row per country into a list of JSON-like dicts.

    Colours are cycled from the Dark2 palette; row i of *df* is assumed to
    correspond to countries[i].

    Fix: the manual index counter is replaced by enumerate().
    """
    colors = itertools.cycle(palette)
    data_json_list = []
    for row_idx, (country, color) in enumerate(zip(countries, colors)):
        data_json_list.append(extract_data(row_idx, country, color, df, years, ds_name))
    return data_json_list
# remove the datasets without values, and return the result in json format.
def extract_data(country_indice, country, color, df, years, ds_name):
    """Build the JSON-like record for one country, dropping NaN cells.

    Walks row *country_indice* of *df* and keeps a (year, value) pair only
    when the value is present.

    Fixes: `math.isnan(x) != True` replaced by the idiomatic `not
    math.isnan(x)`, and the manual index counter by enumerate().
    """
    kept_years = []
    kept_values = []
    for i, value in enumerate(df.iloc[country_indice, :]):
        if not math.isnan(value):
            kept_years.append(years[i])
            kept_values.append(value)
    return {"country": country, "years": kept_years, ds_name: kept_values, "color": color}
def extract_column_by_indice(df, indice):
    """Return column number *indice* of *df* as a plain Python list."""
    return df.iloc[:, indice].tolist()
#!/usr/bin/python
#\file classification1.py
#\brief Chainer for 2-D --> Multi-class classification test.
# Based on regression4a.py
# Search DIFF_REG for the difference.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date May.16, 2017
import random,math,copy
# Float version of range
def FRange1(x1, x2, num_div):
    """Divide [x1, x2] into num_div intervals and return the num_div+1 edges."""
    span = x2 - x1
    return [x1 + span * k / float(num_div) for k in range(num_div + 1)]
def Rand(xmin=-0.5, xmax=0.5):
    """Uniform random float in [xmin, xmax) from the global RNG."""
    width = xmax - xmin
    return random.random() * width + xmin
def Median(array):
    """Return the upper-median element of *array*, or None when it is empty.

    Fixes: `len(a_sorted)/2` is float division under Python 3 (TypeError as a
    list index) — integer division `//` works on both 2 and 3.  sorted()
    replaces the needless deepcopy-then-sort, since elements are never mutated.
    """
    if not array:
        return None
    ordered = sorted(array)
    return ordered[len(ordered) // 2]
def LoadData():
    """Read the iris dataset: x keeps two feature columns, y is the class label.

    Py2: map() returns a list here; under Py3 these rows would be map objects.
    """
    #NOTE: we use only [x0,x1] (delete [:2] to use all x)
    data_x= [map(float,d.split()[1:3]) for d in open('data/iris_x.dat','r').read().split('\n') if d!='']
    data_y= [float(d) for d in open('data/iris_y.dat','r').read().split('\n') if d!=''] #DIFF_REG
    return data_x, data_y
# Return min, max, median vectors of data.
def GetStat(data):
    """Per-dimension min, max and median vectors of a list of sample vectors."""
    dims = range(len(data[0]))
    mi = [min(x[d] for x in data) for d in dims]
    ma = [max(x[d] for x in data) for d in dims]
    me = [Median([x[d] for x in data]) for d in dims]
    return mi, ma, me
# Dump data with dimension reduction f_reduce.
# Each row of dumped data: reduced x, original x, original y
def DumpData(file_name, data_x, data_y, f_reduce, lb=0):
    """Write rows "reduced-x original-x original-y" to *file_name*.

    When lb > 0 a blank line is inserted every *lb* rows (a gnuplot block
    separator).

    Fix: the Py2-only builtin `file()` is replaced by `open()` inside a
    `with` block, so the handle is closed even if a write raises.
    """
    with open(file_name, 'w') as fp1:
        for x, y, i in zip(data_x, data_y, range(len(data_y))):
            if lb > 0 and i % lb == 0:
                fp1.write('\n')
            fp1.write('%s %s %s\n' % (' '.join(map(str, f_reduce(x))), ' '.join(map(str, x)), ' '.join(map(str, y))))
NEpoch= 500 #TEST
def Main():
    """Train an MLP classifier on 2-D iris data with the old Chainer
    FunctionSet API (Python 2 script) and dump predictions for plotting."""
    import argparse
    import numpy as np
    from chainer import cuda, Variable, FunctionSet, optimizers
    import chainer.functions as F
    parser = argparse.ArgumentParser(description='Chainer example: regression')
    parser.add_argument('--gpu', '-g', default=-1, type=int,
                        help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()
    batchsize = 10
    n_epoch = NEpoch
    n_units = 300 #TEST
    # Prepare dataset
    data_x, data_y = LoadData()
    batchsize= max(1,min(batchsize, len(data_y)/20)) #TEST: adjust batchsize
    #dx2,dy2=GenData(300, noise=0.0); data_x.extend(dx2); data_y.extend(dy2)
    data = np.array(data_x).astype(np.float32)
    target = np.array(data_y).astype(np.int32) #DIFF_REG
    N= len(data) #batchsize * 30
    x_train= data
    y_train= target
    # For test: a regular nt x nt grid spanning the data range (labels are
    # dummies; the grid exists only so predictions can be plotted).
    mi,ma,me= GetStat(data_x)
    f_reduce=lambda xa:[xa[0],xa[1]]
    f_repair=lambda xa:[xa[0],xa[1]]
    nt= 20+1
    N_test= nt*nt
    x_test= np.array(sum([[f_repair([x1,x2]) for x2 in FRange1(f_reduce(mi)[1],f_reduce(ma)[1],nt)] for x1 in FRange1(f_reduce(mi)[0],f_reduce(ma)[0],nt)],[])).astype(np.float32)
    y_test= np.array([0.0 for x in x_test]).astype(np.int32) #DIFF_REG
    #No true test data (just for plotting)
    print 'Num of samples for train:',len(y_train),'batchsize:',batchsize
    # Dump data for plot:
    DumpData('/tmp/nn/smpl_train.dat', x_train, [[y] for y in y_train], f_reduce) #DIFF_REG
    # Prepare multi-layer perceptron model: 2 -> n_units -> n_units -> 3 classes.
    model = FunctionSet(l1=F.Linear(2, n_units),
                        l2=F.Linear(n_units, n_units),
                        l3=F.Linear(n_units, 3))
    #TEST: Random bias initialization
    #, bias=Rand()
    #model.l1.b[:]= [Rand() for k in range(n_units)]
    #model.l2.b[:]= [Rand() for k in range(n_units)]
    #model.l3.b[:]= [Rand() for k in range(1)]
    #print model.l2.__dict__
    if args.gpu >= 0:
        cuda.init(args.gpu)
        model.to_gpu()
    # Neural net architecture: ReLU + dropout; softmax cross-entropy loss.
    def forward(x_data, y_data, train=True):
        #train= False #TEST: Turn off dropout
        dratio= 0.2 #0.5 #TEST: Dropout ratio
        x, t = Variable(x_data), Variable(y_data)
        h1 = F.dropout(F.relu(model.l1(x)), ratio=dratio, train=train)
        h2 = F.dropout(F.relu(model.l2(h1)), ratio=dratio, train=train)
        #h1 = F.dropout(F.leaky_relu(model.l1(x),slope=0.2), ratio=dratio, train=train)
        #h2 = F.dropout(F.leaky_relu(model.l2(h1),slope=0.2), ratio=dratio, train=train)
        #h1 = F.dropout(F.sigmoid(model.l1(x)), ratio=dratio, train=train)
        #h2 = F.dropout(F.sigmoid(model.l2(h1)), ratio=dratio, train=train)
        #h1 = F.dropout(F.tanh(model.l1(x)), ratio=dratio, train=train)
        #h2 = F.dropout(F.tanh(model.l2(h1)), ratio=dratio, train=train)
        #h1 = F.dropout(model.l1(x), ratio=dratio, train=train)
        #h2 = F.dropout(model.l2(h1), ratio=dratio, train=train)
        #h1 = F.relu(model.l1(x))
        #h2 = F.relu(model.l2(h1))
        #h1 = model.l1(x)
        #h2 = model.l2(h1)
        y = model.l3(h2)
        #return F.mean_squared_error(y, t), y
        return F.softmax_cross_entropy(y, t), F.softmax(y) #DIFF_REG
    # Setup optimizer
    optimizer = optimizers.AdaDelta(rho=0.9)
    #optimizer = optimizers.AdaGrad(lr=0.5)
    #optimizer = optimizers.RMSprop()
    #optimizer = optimizers.MomentumSGD()
    #optimizer = optimizers.SGD(lr=0.8)
    optimizer.setup(model.collect_parameters())
    # Learning loop
    for epoch in xrange(1, n_epoch+1):
        print 'epoch', epoch
        # training: shuffle, then minibatch SGD over the whole set.
        perm = np.random.permutation(N)
        sum_loss = 0
        for i in xrange(0, N, batchsize):
            x_batch = x_train[perm[i:i+batchsize]]
            y_batch = y_train[perm[i:i+batchsize]]
            if args.gpu >= 0:
                x_batch = cuda.to_gpu(x_batch)
                y_batch = cuda.to_gpu(y_batch)
            optimizer.zero_grads()
            loss, pred = forward(x_batch, y_batch)
            loss.backward() #Computing gradients
            optimizer.update()
            sum_loss += float(cuda.to_cpu(loss.data)) * batchsize
        print 'train mean loss={}'.format(
            sum_loss / N)
        if epoch%10==0:
            #'''
            # testing all data (the plotting grid; its labels are dummies)
            preds = []
            x_batch = x_test[:]
            y_batch = y_test[:]
            if args.gpu >= 0:
                x_batch = cuda.to_gpu(x_batch)
                y_batch = cuda.to_gpu(y_batch)
            loss, pred = forward(x_batch, y_batch, train=False)
            preds = cuda.to_cpu(pred.data)
            sum_loss = float(cuda.to_cpu(loss.data)) * len(y_test)
            #'''
            print 'test mean loss={}'.format(
                sum_loss / N_test)
            # Dump data for plot: prepend the argmax class to each probability row.
            y_pred= [[y.index(max(y))]+y for y in preds.tolist()] #DIFF_REG
            DumpData('/tmp/nn/nn_test%04i.dat'%epoch, x_test, y_pred, f_reduce, lb=nt+1)
def PlotGraphs():
    """Spawn qplot/gnuplot windows visualising the dumped sample and
    prediction files; blocks until the user presses enter, then kills them."""
    print 'Plotting graphs..'
    import os,sys
    opt= sys.argv[2:]
    # Each entry is one multi-line qplot command template; {opt} and {NEpoch}
    # are substituted below before the lines are joined into one shell command.
    commands=[
        '''qplot -x2 aaa {opt}
-s 'set xlabel "x1";set ylabel "x2";set title "";'
-s 'set encoding utf8;symbol(z)="+xo%#"[int(z):int(z)];'
/tmp/nn/nn_test{NEpoch:04d}.dat u 1:2:'(symbol($5+1))' w labels textcolor lt 3 t '"Final({NEpoch}) epoch"'
/tmp/nn/smpl_train.dat u 1:2:'(symbol($5+1))' w labels textcolor lt 1 t '"sample"' &''',
        #/tmp/nn/lwr/f1_3_est.dat w l lw 1 t '"LWR"'
        #/tmp/nn/nn_test0001.dat w l t '"1st epoch"'
        #/tmp/nn/nn_test0005.dat w l t '"5th epoch"'
        #/tmp/nn/nn_test0020.dat w l t '"20th epoch"'
        #/tmp/nn/nn_test0050.dat w l t '"50th epoch"'
        #/tmp/nn/nn_test0075.dat w l t '"75th epoch"'
        #/tmp/nn/nn_test0099.dat w l t '"99th epoch"'
        '''qplot -x2 aaa {opt} -3d
-s 'set xlabel "x1";set ylabel "x2";set title "Class 0";'
-s 'set pm3d;unset surface;set view map;'
-s 'set encoding utf8;symbol(z)="+xo%#"[int(z):int(z)];'
/tmp/nn/nn_test{NEpoch:04d}.dat u 1:2:6 t '""'
/tmp/nn/smpl_train.dat u 1:2:'(0.0)':'(symbol($5+1))' w labels textcolor lt 1 t '"sample"' &''',
        '''qplot -x2 aaa {opt} -3d
-s 'set xlabel "x1";set ylabel "x2";set title "Class 1";'
-s 'set encoding utf8;symbol(z)="+xo%#"[int(z):int(z)];'
-s 'set pm3d;unset surface;set view map;'
/tmp/nn/nn_test{NEpoch:04d}.dat u 1:2:7 t '""'
/tmp/nn/smpl_train.dat u 1:2:'(0.0)':'(symbol($5+1))' w labels textcolor lt 1 t '"sample"' &''',
        '''qplot -x2 aaa {opt} -3d
-s 'set xlabel "x1";set ylabel "x2";set title "Class 2";'
-s 'set pm3d;unset surface;set view map;'
-s 'set encoding utf8;symbol(z)="+xo%#"[int(z):int(z)];'
/tmp/nn/nn_test{NEpoch:04d}.dat u 1:2:8 t '""'
/tmp/nn/smpl_train.dat u 1:2:'(0.0)':'(symbol($5+1))' w labels textcolor lt 1 t '"sample"' &''',
        '''''',
        ]
    for cmd in commands:
        if cmd!='':
            # Fill the template and collapse it into a single shell line.
            cmd= ' '.join(cmd.format(opt=' '.join(opt),NEpoch=NEpoch).splitlines())
            print '###',cmd
            os.system(cmd)
    print '##########################'
    print '###Press enter to close###'
    print '##########################'
    raw_input()
    os.system('qplot -x2kill aaa')
if __name__=='__main__':
    import sys
    # A "p"/"plot" first argument switches to plotting previously dumped data.
    if len(sys.argv)>1 and sys.argv[1] in ('p','plot','Plot','PLOT'):
        PlotGraphs()
        sys.exit(0)
    Main()
|
'''
Created on Dec 18, 2013
@author: anbangx
'''
class C:
    """Wrap a list of strings: len() sums the piece lengths, and truthiness
    is False exactly when the concatenation spells 'False'."""

    def __init__(self, los):
        self.los = los

    def __len__(self):
        print('Calling len')
        total = 0
        for piece in self.los:
            total += len(piece)
        return total

    def __bool__(self):
        joined = ''.join(self.los)
        return joined != 'False'
if __name__ == '__main__':
    # Truthy case: join is 'abcd', so __bool__ returns True.
    demo = C(['ab', 'cd'])
    if demo:
        print('pass')
    # Falsy case: join spells 'False', so nothing is printed.
    demo = C(['Fa', 'l', 'se'])
    if demo:
        print('pass')
from random import randint

# Two players start at 1001 points and alternately roll five dice per round.
# The running sum of a player's rolls this round is subtracted while the
# score is positive and added back while it is negative, until a player
# lands exactly on zero.
maria_score = 1001
ivan_score = 1001
winner_found = False

while not winner_found:
    maria_round_sum = 0
    ivan_round_sum = 0

    for _ in range(5):
        maria_round_sum += randint(1, 6)
        if maria_score > 0:
            maria_score -= maria_round_sum
            print("Maria: current roll sum = {}, current score = {}".format(maria_round_sum, maria_score))
        elif maria_score < 0:
            maria_score += maria_round_sum
            print("Maria: current roll sum = {}, current score = {}".format(maria_round_sum, maria_score))
        if maria_score == 0:
            print("Maria wins!")
            winner_found = True
            break

    # NOTE(review): as in the original, Ivan still rolls his round even when
    # Maria has already won; the outer loop only stops afterwards.
    for _ in range(5):
        ivan_round_sum += randint(1, 6)
        if ivan_score > 0:
            ivan_score -= ivan_round_sum
            print("Ivan: current roll sum = {}, current score = {}".format(ivan_round_sum, ivan_score))
        elif ivan_score < 0:
            ivan_score += ivan_round_sum
            print("Ivan: current roll sum = {}, current score = {}".format(ivan_round_sum, ivan_score))
        if ivan_score == 0:
            print("Ivan wins!")
            winner_found = True
            break
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
    """A laser bullet fired from the player's ship; travels straight up."""

    def __init__(self, AI_game):
        super().__init__()
        self.screen = AI_game.screen
        self.setting = AI_game.setting
        # Loading an image (file is expected in the working directory).
        self.image=pygame.image.load("laserRed01.png").convert_alpha()
        self.image_rect=self.image.get_rect()
        # Bullet start line: spawn centred on the ship's current midtop.
        self.rect = self.image_rect
        self.rect.midbottom = AI_game.ship.rect.midtop
        # Track the vertical position as a float for sub-pixel speed steps.
        self.y = float(self.rect.y)

    def update(self):
        # Direction of the bullet: decreasing y moves it up the screen.
        self.y -= self.setting.bullet_speed
        self.rect.y = self.y

    def draw_bullet(self):
        """Blit the bullet image at its current rect."""
        self.screen.blit(self.image, self.rect)
|
#!/usr/bin/env python
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../")))
import asyncio
import conf
import contextlib
from decimal import Decimal
import logging
import os
import time
from typing import (
List,
Optional
)
import unittest
from hummingbot.core.clock import (
Clock,
ClockMode
)
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.data_type.order_book_tracker import OrderBookTrackerDataSourceType
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import (
MarketEvent,
WalletEvent,
BuyOrderCompletedEvent,
SellOrderCompletedEvent,
BuyOrderCreatedEvent,
SellOrderCreatedEvent,
WalletWrappedEthEvent,
WalletUnwrappedEthEvent,
OrderCancelledEvent,
OrderExpiredEvent,
OrderFilledEvent,
TradeType,
TradeFee,
)
from hummingbot.core.utils.async_utils import (
safe_ensure_future,
safe_gather,
)
from hummingbot.logger import NETWORK
from hummingbot.connector.exchange.bamboo_relay.bamboo_relay_market import BambooRelayExchange
from hummingbot.core.event.events import OrderType
from hummingbot.connector.markets_recorder import MarketsRecorder
from hummingbot.model.market_state import MarketState
from hummingbot.model.order import Order
from hummingbot.model.sql_connection_manager import (
SQLConnectionManager,
SQLConnectionType
)
from hummingbot.model.trade_fill import TradeFill
from hummingbot.wallet.ethereum.web3_wallet import Web3Wallet
from hummingbot.wallet.ethereum.web3_wallet_backend import EthereumChain
s_decimal_0 = Decimal(0)
class BambooRelayExchangeUncoordinatedUnitTest(unittest.TestCase):
market_events: List[MarketEvent] = [
MarketEvent.BuyOrderCompleted,
MarketEvent.SellOrderCompleted,
MarketEvent.BuyOrderCreated,
MarketEvent.SellOrderCreated,
MarketEvent.OrderCancelled,
MarketEvent.OrderExpired,
MarketEvent.OrderFilled,
]
wallet_events: List[WalletEvent] = [
WalletEvent.WrappedEth,
WalletEvent.UnwrappedEth
]
wallet: Web3Wallet
market: BambooRelayExchange
market_logger: EventLogger
wallet_logger: EventLogger
    @classmethod
    def setUpClass(cls):
        """Build the wallet, uncoordinated market connector and realtime clock
        shared by every test, then block until the market is ready."""
        # Map the configured chain id onto the EthereumChain enum (mainnet fallback).
        if conf.test_bamboo_relay_chain_id == 3:
            chain = EthereumChain.ROPSTEN
        elif conf.test_bamboo_relay_chain_id == 4:
            chain = EthereumChain.RINKEBY
        elif conf.test_bamboo_relay_chain_id == 42:
            chain = EthereumChain.KOVAN
        elif conf.test_bamboo_relay_chain_id == 1337:
            chain = EthereumChain.ZEROEX_TEST
        else:
            chain = EthereumChain.MAIN_NET
        cls.chain = chain
        cls.base_token_asset = conf.test_bamboo_relay_base_token_symbol
        cls.quote_token_asset = conf.test_bamboo_relay_quote_token_symbol
        cls.clock: Clock = Clock(ClockMode.REALTIME)
        cls.wallet = Web3Wallet(private_key=conf.web3_private_key_bamboo,
                                backend_urls=conf.test_web3_provider_list,
                                erc20_token_addresses=[conf.test_bamboo_relay_base_token_address,
                                                       conf.test_bamboo_relay_quote_token_address],
                                chain=chain)
        cls.market: BambooRelayExchange = BambooRelayExchange(
            wallet=cls.wallet,
            ethereum_rpc_url=conf.test_web3_provider_list[0],
            order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
            trading_pairs=[conf.test_bamboo_relay_base_token_symbol + "-" + conf.test_bamboo_relay_quote_token_symbol],
            use_coordinator=False,
            pre_emptive_soft_cancels=False
        )
        print("Initializing Bamboo Relay market... ")
        cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
        cls.clock.add_iterator(cls.wallet)
        cls.clock.add_iterator(cls.market)
        stack = contextlib.ExitStack()
        cls._clock = stack.enter_context(cls.clock)
        cls.ev_loop.run_until_complete(cls.wait_til_ready())
        print("Ready.")
    @classmethod
    async def wait_til_ready(cls):
        """Step the realtime clock in 1-second ticks until the market reports ready."""
        while True:
            now = time.time()
            next_iteration = now // 1.0 + 1
            if cls.market.ready:
                break
            else:
                await cls._clock.run_til(next_iteration)
            await asyncio.sleep(1.0)
    def setUp(self):
        """Remove any stale sqlite db and attach fresh event loggers."""
        self.db_path: str = realpath(join(__file__, "../bamboo_relay_uncordinated_test.sqlite"))
        try:
            os.unlink(self.db_path)
        except FileNotFoundError:
            pass
        self.market_logger = EventLogger()
        self.wallet_logger = EventLogger()
        for event_tag in self.market_events:
            self.market.add_listener(event_tag, self.market_logger)
        for event_tag in self.wallet_events:
            self.wallet.add_listener(event_tag, self.wallet_logger)
    def tearDown(self):
        """Detach and drop the event loggers installed by setUp."""
        for event_tag in self.market_events:
            self.market.remove_listener(event_tag, self.market_logger)
        self.market_logger = None
        for event_tag in self.wallet_events:
            self.wallet.remove_listener(event_tag, self.wallet_logger)
        self.wallet_logger = None
    async def run_parallel_async(self, *tasks):
        """Run *tasks* concurrently while ticking the clock until they all finish."""
        future: asyncio.Future = safe_ensure_future(safe_gather(*tasks))
        while not future.done():
            now = time.time()
            next_iteration = now // 1.0 + 1
            await self._clock.run_til(next_iteration)
            await asyncio.sleep(1.0)
        return future.result()
    def run_parallel(self, *tasks):
        """Synchronous wrapper: run run_parallel_async on the class event loop."""
        return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
    def test_get_fee(self):
        """Maker and taker quotes carry zero percent fee and a single flat ETH fee."""
        maker_buy_trade_fee: TradeFee = self.market.get_fee(conf.test_bamboo_relay_base_token_symbol,
                                                            conf.test_bamboo_relay_quote_token_symbol,
                                                            OrderType.LIMIT,
                                                            TradeType.BUY,
                                                            Decimal(20),
                                                            Decimal(0.01))
        self.assertEqual(maker_buy_trade_fee.percent, 0)
        self.assertEqual(len(maker_buy_trade_fee.flat_fees), 1)
        taker_buy_trade_fee: TradeFee = self.market.get_fee(conf.test_bamboo_relay_base_token_symbol,
                                                            conf.test_bamboo_relay_quote_token_symbol,
                                                            OrderType.MARKET,
                                                            TradeType.BUY,
                                                            Decimal(20))
        self.assertEqual(taker_buy_trade_fee.percent, 0)
        self.assertEqual(len(taker_buy_trade_fee.flat_fees), 1)
        self.assertEqual(taker_buy_trade_fee.flat_fees[0][0], "ETH")
    def test_get_wallet_balances(self):
        """ETH and the quote-token balance are present and non-negative."""
        balances = self.market.get_all_balances()
        self.assertGreaterEqual((balances["ETH"]), s_decimal_0)
        self.assertGreaterEqual((balances[self.quote_token_asset]), s_decimal_0)
    def test_single_limit_order_cancel(self):
        """Place a deep (20% below market) limit buy and cancel it by order id."""
        trading_pair: str = self.base_token_asset + "-" + self.quote_token_asset
        current_price: Decimal = self.market.get_price(trading_pair, True)
        amount = Decimal("0.001")
        expires = int(time.time() + 60 * 3)
        quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
        buy_order_id = self.market.buy(trading_pair=trading_pair,
                                       amount=amount,
                                       order_type=OrderType.LIMIT,
                                       price=current_price - Decimal("0.2") * current_price,
                                       expiration_ts=expires)
        [buy_order_opened_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCreatedEvent))
        self.assertEqual(self.base_token_asset + "-" + self.quote_token_asset, buy_order_opened_event.trading_pair)
        self.assertEqual(OrderType.LIMIT, buy_order_opened_event.type)
        self.assertEqual(float(quantized_amount), float(buy_order_opened_event.amount))
        [cancellation_results,
         buy_order_cancelled_event] = self.run_parallel(self.market.cancel_order(buy_order_id),
                                                        self.market_logger.wait_for(OrderCancelledEvent))
        self.assertEqual(buy_order_opened_event.order_id, buy_order_cancelled_event.order_id)
        # Reset the logs
        self.market_logger.clear()
    def test_limit_buy_and_sell_and_cancel_all(self):
        """Place one deep limit buy and one high limit sell, then cancel_all and
        verify both orders report a successful cancellation."""
        trading_pair: str = self.base_token_asset + "-" + self.quote_token_asset
        current_price: Decimal = self.market.get_price(trading_pair, True)
        amount = Decimal("0.001")
        expires = int(time.time() + 60 * 3)
        quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
        buy_order_id = self.market.buy(trading_pair=trading_pair,
                                       amount=amount,
                                       order_type=OrderType.LIMIT,
                                       price=current_price - Decimal("0.2") * current_price,
                                       expiration_ts=expires)
        [buy_order_opened_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCreatedEvent))
        self.assertEqual(buy_order_id, buy_order_opened_event.order_id)
        self.assertEqual(float(quantized_amount), float(buy_order_opened_event.amount))
        self.assertEqual(self.base_token_asset + "-" + self.quote_token_asset, buy_order_opened_event.trading_pair)
        self.assertEqual(OrderType.LIMIT, buy_order_opened_event.type)
        # Reset the logs
        self.market_logger.clear()
        current_price: Decimal = self.market.get_price(trading_pair, False)
        sell_order_id = self.market.sell(trading_pair=trading_pair,
                                         amount=amount,
                                         order_type=OrderType.LIMIT,
                                         price=current_price + Decimal("0.2") * current_price,
                                         expiration_ts=expires)
        [sell_order_opened_event] = self.run_parallel(self.market_logger.wait_for(SellOrderCreatedEvent))
        self.assertEqual(sell_order_id, sell_order_opened_event.order_id)
        self.assertEqual(float(quantized_amount), float(sell_order_opened_event.amount))
        self.assertEqual(self.base_token_asset + "-" + self.quote_token_asset, sell_order_opened_event.trading_pair)
        self.assertEqual(OrderType.LIMIT, sell_order_opened_event.type)
        [cancellation_results, order_cancelled_event] = self.run_parallel(self.market.cancel_all(60 * 3),
                                                                          self.market_logger.wait_for(OrderCancelledEvent))
        is_buy_cancelled = False
        is_sell_cancelled = False
        for cancellation_result in cancellation_results:
            if cancellation_result == CancellationResult(buy_order_id, True):
                is_buy_cancelled = True
            if cancellation_result == CancellationResult(sell_order_id, True):
                is_sell_cancelled = True
        self.assertEqual(is_buy_cancelled, True)
        self.assertEqual(is_sell_cancelled, True)
        # Wait for the order book source to also register the cancellation
        self.assertTrue((buy_order_opened_event.order_id == order_cancelled_event.order_id or
                         sell_order_opened_event.order_id == order_cancelled_event.order_id))
        # Reset the logs
        self.market_logger.clear()
    def test_order_expire(self):
        """A 1-minute limit order must raise OrderExpiredEvent after expiry."""
        trading_pair: str = self.base_token_asset + "-" + self.quote_token_asset
        current_price: Decimal = self.market.get_price(trading_pair, True)
        amount = Decimal("0.003")
        expires = int(time.time() + 60)  # expires in 1 min
        self.market.buy(trading_pair=trading_pair,
                        amount=amount,
                        order_type=OrderType.LIMIT,
                        price=current_price - Decimal("0.2") * current_price,
                        expiration_ts=expires)
        [buy_order_opened_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCreatedEvent))
        self.assertEqual(self.base_token_asset + "-" + self.quote_token_asset, buy_order_opened_event.trading_pair)
        self.assertEqual(OrderType.LIMIT, buy_order_opened_event.type)
        # 75s timeout leaves margin beyond the 60s expiry.
        [buy_order_expired_event] = self.run_parallel(self.market_logger.wait_for(OrderExpiredEvent, 75))
        self.assertEqual(buy_order_opened_event.order_id, buy_order_expired_event.order_id)
        # Reset the logs
        self.market_logger.clear()
    def test_market_buy(self):
        """A market buy completes and reports the quantized base amount and assets."""
        trading_pair: str = self.base_token_asset + "-" + self.quote_token_asset
        amount = Decimal("0.002")
        quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
        order_id = self.market.buy(self.base_token_asset + "-" + self.quote_token_asset, amount, OrderType.MARKET)
        [order_completed_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCompletedEvent))
        order_completed_event: BuyOrderCompletedEvent = order_completed_event
        order_filled_events: List[OrderFilledEvent] = [t for t in self.market_logger.event_log
                                                       if isinstance(t, OrderFilledEvent)]
        self.assertTrue([evt.order_type == OrderType.MARKET for evt in order_filled_events])
        self.assertEqual(order_id, order_completed_event.order_id)
        self.assertEqual(float(quantized_amount), float(order_completed_event.base_asset_amount))
        self.assertEqual(self.base_token_asset, order_completed_event.base_asset)
        self.assertEqual(self.quote_token_asset, order_completed_event.quote_asset)
        self.market_logger.clear()
def test_batch_market_buy(self):
trading_pair: str = self.base_token_asset + "-" + self.quote_token_asset
amount = Decimal("0.002")
current_buy_price: Decimal = self.market.get_price(trading_pair, True)
current_sell_price: Decimal = self.market.get_price(trading_pair, False)
current_price: Decimal = current_sell_price - (current_sell_price - current_buy_price) / 2
expires = int(time.time() + 60 * 3)
self.market.sell(trading_pair=trading_pair,
amount=amount,
order_type=OrderType.LIMIT,
price=current_price,
expiration_ts=expires)
self.run_parallel(self.market_logger.wait_for(SellOrderCreatedEvent))
amount = Decimal("0.004")
quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
order_id = self.market.buy(self.base_token_asset + "-" + self.quote_token_asset, amount, OrderType.MARKET)
[order_completed_event,
_] = self.run_parallel(self.market_logger.wait_for(BuyOrderCompletedEvent),
self.market_logger.wait_for(SellOrderCompletedEvent))
order_completed_event: BuyOrderCompletedEvent = order_completed_event
order_filled_events: List[OrderFilledEvent] = [t for t in self.market_logger.event_log
if isinstance(t, OrderFilledEvent)]
self.assertTrue([evt.order_type == OrderType.MARKET for evt in order_filled_events])
self.assertEqual(order_id, order_completed_event.order_id)
self.assertEqual(float(quantized_amount), float(order_completed_event.base_asset_amount))
self.assertEqual(self.base_token_asset, order_completed_event.base_asset)
self.assertEqual(self.quote_token_asset, order_completed_event.quote_asset)
self.market_logger.clear()
def test_market_sell(self):
trading_pair: str = self.base_token_asset + "-" + self.quote_token_asset
amount = Decimal("0.001")
quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
order_id = self.market.sell(trading_pair, amount, OrderType.MARKET)
[order_completed_event] = self.run_parallel(self.market_logger.wait_for(SellOrderCompletedEvent))
order_completed_event: SellOrderCompletedEvent = order_completed_event
order_filled_events: List[OrderFilledEvent] = [t for t in self.market_logger.event_log
if isinstance(t, OrderFilledEvent)]
self.assertTrue([evt.order_type == OrderType.MARKET for evt in order_filled_events])
self.assertEqual(order_id, order_completed_event.order_id)
self.assertEqual(float(quantized_amount), float(order_completed_event.base_asset_amount))
self.assertEqual(self.base_token_asset, order_completed_event.base_asset)
self.assertEqual(self.quote_token_asset, order_completed_event.quote_asset)
self.market_logger.clear()
def test_batch_market_sell(self):
trading_pair: str = self.base_token_asset + "-" + self.quote_token_asset
amount = Decimal("0.002")
current_buy_price: Decimal = self.market.get_price(trading_pair, True)
current_sell_price: Decimal = self.market.get_price(trading_pair, False)
current_price: Decimal = current_buy_price + (current_sell_price - current_buy_price) / 2
expires = int(time.time() + 60 * 3)
self.market.buy(trading_pair=trading_pair,
amount=amount,
order_type=OrderType.LIMIT,
price=current_price,
expiration_ts=expires)
self.run_parallel(self.market_logger.wait_for(BuyOrderCreatedEvent))
amount = Decimal("0.005")
quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
order_id = self.market.sell(self.base_token_asset + "-" + self.quote_token_asset, amount, OrderType.MARKET)
[order_completed_event, _] = self.run_parallel(self.market_logger.wait_for(SellOrderCompletedEvent),
self.market_logger.wait_for(BuyOrderCompletedEvent))
order_completed_event: BuyOrderCompletedEvent = order_completed_event
order_filled_events: List[OrderFilledEvent] = [t for t in self.market_logger.event_log
if isinstance(t, OrderFilledEvent)]
self.assertTrue([evt.order_type == OrderType.MARKET for evt in order_filled_events])
self.assertEqual(order_id, order_completed_event.order_id)
self.assertEqual(float(quantized_amount), float(order_completed_event.base_asset_amount))
self.assertEqual(self.base_token_asset, order_completed_event.base_asset)
self.assertEqual(self.quote_token_asset, order_completed_event.quote_asset)
self.market_logger.clear()
def test_wrap_eth(self):
amount_to_wrap = Decimal("0.01")
tx_hash = self.wallet.wrap_eth(amount_to_wrap)
[tx_completed_event] = self.run_parallel(self.wallet_logger.wait_for(WalletWrappedEthEvent))
tx_completed_event: WalletWrappedEthEvent = tx_completed_event
self.assertEqual(tx_hash, tx_completed_event.tx_hash)
self.assertEqual(float(amount_to_wrap), float(tx_completed_event.amount))
self.assertEqual(self.wallet.address, tx_completed_event.address)
def test_unwrap_eth(self):
amount_to_unwrap = Decimal("0.01")
tx_hash = self.wallet.unwrap_eth(amount_to_unwrap)
[tx_completed_event] = self.run_parallel(self.wallet_logger.wait_for(WalletUnwrappedEthEvent))
tx_completed_event: WalletUnwrappedEthEvent = tx_completed_event
self.assertEqual(tx_hash, tx_completed_event.tx_hash)
self.assertEqual(float(amount_to_unwrap), float(tx_completed_event.amount))
self.assertEqual(self.wallet.address, tx_completed_event.address)
    def test_z_orders_saving_and_restoration(self):
        """Verify limit orders survive a save/restore cycle through MarketsRecorder.

        Places a deep limit buy, checks it appears in the exchange tracking
        states and the recorder database, rebuilds the exchange from scratch,
        restores the saved state into it, then cancels the order and checks
        the cancellation is persisted too.  (The ``z`` in the name presumably
        keeps it last in unittest's alphabetical ordering — worth confirming.)
        """
        self.market.reset_state()
        config_path: str = "test_config"
        strategy_name: str = "test_strategy"
        trading_pair: str = self.base_token_asset + "-" + self.quote_token_asset
        sql: SQLConnectionManager = SQLConnectionManager(SQLConnectionType.TRADE_FILLS, db_path=self.db_path)
        order_id: Optional[str] = None
        recorder: MarketsRecorder = MarketsRecorder(sql, [self.market], config_path, strategy_name)
        recorder.start()

        try:
            self.assertEqual(0, len(self.market.tracking_states["limit_orders"]))

            # Try to put limit buy order for 0.05 Quote Token worth of Base Token, and watch for order creation event.
            current_bid_price: Decimal = self.market.get_price(trading_pair, True)
            bid_price: Decimal = current_bid_price * Decimal("0.8")
            quantize_bid_price: Decimal = self.market.quantize_order_price(trading_pair, bid_price)
            amount: Decimal = Decimal("0.005") / bid_price
            quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
            expires = int(time.time() + 60 * 3)
            order_id = self.market.buy(trading_pair, quantized_amount, OrderType.LIMIT, quantize_bid_price,
                                       expiration_ts=expires)
            [order_created_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCreatedEvent))
            order_created_event: BuyOrderCreatedEvent = order_created_event
            self.assertEqual(order_id, order_created_event.order_id)

            # Verify tracking states
            self.assertEqual(1, len(self.market.tracking_states["limit_orders"]))
            self.assertEqual(order_id, list(self.market.tracking_states["limit_orders"].keys())[0])

            # Verify orders from recorder
            recorded_orders: List[Order] = recorder.get_orders_for_config_and_market(config_path, self.market)
            self.assertEqual(1, len(recorded_orders))
            self.assertEqual(order_id, recorded_orders[0].id)

            # Verify saved market states
            saved_market_states: MarketState = recorder.get_market_states(config_path, self.market)
            self.assertIsNotNone(saved_market_states)
            self.assertIsInstance(saved_market_states.saved_state, dict)
            self.assertIsInstance(saved_market_states.saved_state["limit_orders"], dict)
            self.assertGreater(len(saved_market_states.saved_state["limit_orders"]), 0)

            # Close out the current market and start another market.
            self.clock.remove_iterator(self.market)
            for event_tag in self.market_events:
                self.market.remove_listener(event_tag, self.market_logger)
            self.market: BambooRelayExchange = BambooRelayExchange(
                wallet=self.wallet,
                ethereum_rpc_url=conf.test_web3_provider_list[0],
                trading_pairs=[self.base_token_asset + "-" + self.quote_token_asset],
                use_coordinator=False,
                pre_emptive_soft_cancels=False
            )
            for event_tag in self.market_events:
                self.market.add_listener(event_tag, self.market_logger)
            recorder.stop()
            recorder = MarketsRecorder(sql, [self.market], config_path, strategy_name)
            recorder.start()
            saved_market_states = recorder.get_market_states(config_path, self.market)
            self.clock.add_iterator(self.market)
            # The fresh exchange starts empty; restoring brings the order back.
            self.assertEqual(0, len(self.market.limit_orders))
            self.assertEqual(0, len(self.market.tracking_states["limit_orders"]))
            self.market.restore_tracking_states(saved_market_states.saved_state)
            self.assertEqual(1, len(self.market.limit_orders))
            self.assertEqual(1, len(self.market.tracking_states["limit_orders"]))

            # Cancel the order and verify that the change is saved.
            self.run_parallel(self.market.cancel(trading_pair, order_id),
                              self.market_logger.wait_for(OrderCancelledEvent))
            order_id = None
            self.assertEqual(0, len(self.market.limit_orders))
            self.assertEqual(1, len(self.market.tracking_states["limit_orders"]))
            saved_market_states = recorder.get_market_states(config_path, self.market)
            self.assertEqual(1, len(saved_market_states.saved_state["limit_orders"]))
        finally:
            if order_id is not None:
                self.run_parallel(self.market.cancel(trading_pair, order_id),
                                  self.market_logger.wait_for(OrderCancelledEvent))

            recorder.stop()
            os.unlink(self.db_path)
    def test_order_fill_record(self):
        """Round-trip a market buy and sell and verify MarketsRecorder
        persists exactly one BUY fill and one SELL fill."""
        config_path: str = "test_config"
        strategy_name: str = "test_strategy"
        trading_pair: str = self.base_token_asset + "-" + self.quote_token_asset
        sql: SQLConnectionManager = SQLConnectionManager(SQLConnectionType.TRADE_FILLS, db_path=self.db_path)
        order_id: Optional[str] = None
        recorder: MarketsRecorder = MarketsRecorder(sql, [self.market], config_path, strategy_name)
        recorder.start()

        try:
            # Try to buy 0.05 ETH worth of ZRX from the exchange, and watch for completion event.
            current_price: Decimal = self.market.get_price(trading_pair, True)
            amount: Decimal = Decimal("0.005") / current_price
            order_id = self.market.buy(trading_pair, amount)
            [buy_order_completed_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCompletedEvent))

            # Reset the logs
            self.market_logger.clear()

            # Try to sell back the same amount of ZRX to the exchange, and watch for completion event.
            amount = buy_order_completed_event.base_asset_amount
            order_id = self.market.sell(trading_pair, amount)
            [sell_order_completed_event] = self.run_parallel(self.market_logger.wait_for(SellOrderCompletedEvent))

            # Query the persisted trade logs
            trade_fills: List[TradeFill] = recorder.get_trades_for_config(config_path)
            self.assertEqual(2, len(trade_fills))
            buy_fills: List[TradeFill] = [t for t in trade_fills if t.trade_type == "BUY"]
            sell_fills: List[TradeFill] = [t for t in trade_fills if t.trade_type == "SELL"]
            self.assertEqual(1, len(buy_fills))
            self.assertEqual(1, len(sell_fills))

            # A successful run leaves nothing to cancel in the cleanup below.
            order_id = None
        finally:
            if order_id is not None:
                self.run_parallel(self.market.cancel(trading_pair, order_id),
                                  self.market_logger.wait_for(OrderCancelledEvent))

            recorder.stop()
            os.unlink(self.db_path)
def main():
    """Run the test suite with network-level logging enabled."""
    # NETWORK is a custom logging level expected to be defined at module scope.
    logging.basicConfig(level=NETWORK)
    unittest.main()


if __name__ == "__main__":
    main()
|
import json
from os.path import join
from pkg_resources import resource_listdir, resource_isdir, resource_stream
from public import public
from .coordinates import AffineCoordinateModel
from .curve import EllipticCurve
from .mod import Mod
from .model import (ShortWeierstrassModel, MontgomeryModel, TwistedEdwardsModel,
EdwardsModel, CurveModel)
from .params import DomainParameters
from .point import Point, InfinityPoint
@public
def get_params(category: str, name: str, coords: str) -> DomainParameters:
    """
    Retrieve a curve from a set of stored parameters. Uses the std-curves database at
    https://github.com/J08nY/std-curves.

    :param category: The category of the curve.
    :param name: The name of the curve.
    :param coords: The name of the coordinate system to use.
    :return: The curve.
    :raises ValueError: If the category, curve or coordinate system is unknown,
        the curve uses a binary field, or a coordinate-system assumption fails.
    """
    listing = resource_listdir(__name__, "std")
    categories = list(entry for entry in listing if resource_isdir(__name__, join("std", entry)))
    if category not in categories:
        raise ValueError("Category {} not found.".format(category))
    json_path = join("std", category, "curves.json")
    with resource_stream(__name__, json_path) as f:
        category_json = json.load(f)
    for curve in category_json["curves"]:
        if curve["name"] == name:
            break
    else:
        raise ValueError("Curve {} not found in category {}.".format(name, category))
    if curve["field"]["type"] == "Binary":
        raise ValueError("Binary field curves are currently not supported.")
    model: CurveModel
    field = int(curve["field"]["p"], 16)
    order = int(curve["order"], 16)
    cofactor = int(curve["cofactor"], 16)
    if curve["form"] == "Weierstrass":
        model = ShortWeierstrassModel()
        param_names = ["a", "b"]
    elif curve["form"] == "Montgomery":
        model = MontgomeryModel()
        param_names = ["a", "b"]
    elif curve["form"] == "Edwards":
        model = EdwardsModel()
        param_names = ["c", "d"]
    elif curve["form"] == "TwistedEdwards":
        model = TwistedEdwardsModel()
        param_names = ["a", "d"]
    else:
        raise ValueError("Unknown curve model.")
    if coords not in model.coordinates:
        raise ValueError("Coordinate model not supported for curve.")
    coord_model = model.coordinates[coords]
    # Use a distinct comprehension variable so the `name` argument is not shadowed.
    params = {param_name: Mod(int(curve["params"][param_name], 16), field)
              for param_name in param_names}
    for assumption in coord_model.assumptions:
        # Bug fix: this dict was called `locals`, shadowing the builtin.
        assumption_locals = {}
        compiled = compile(assumption, "", mode="exec")
        exec(compiled, None, assumption_locals)
        for param, value in assumption_locals.items():
            if params[param] != value:
                # Typo fix in the error message ("unsatisifed" -> "unsatisfied").
                raise ValueError(f"Coordinate model {coord_model} has an unsatisfied assumption on the {param} parameter (= {value}).")
    elliptic_curve = EllipticCurve(model, coord_model, field, params)
    affine = Point(AffineCoordinateModel(model), x=Mod(int(curve["generator"]["x"], 16), field),
                   y=Mod(int(curve["generator"]["y"], 16), field))
    generator = Point.from_affine(coord_model, affine)
    return DomainParameters(elliptic_curve, generator, InfinityPoint(coord_model), order, cofactor,
                            name, category)
|
import pygame
import os

pygame.init()

# NOTE(review): absolute Windows-only path — consider making this configurable.
bgDir = 'D:\\GitHub\\PyGame\\texture\\background'
bg = pygame.image.load(os.path.join(bgDir, 'bg.jpg'))

win = pygame.display.set_mode((500, 500))
win.blit(bg, (0, 0))
pygame.display.update()

# Event loop: the scene is static, so just poll for the window being closed.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # Bug fix: shut pygame down cleanly before leaving the process.
            pygame.quit()
            exit()
    pygame.time.delay(100)
|
class Solution(object):
    def levelOrder(self, root):
        """Return the node values of a binary tree grouped by depth (BFS).

        :type root: TreeNode
        :rtype: List[List[int]]

        Idiom fixes: ``is None`` instead of ``== None`` and truthiness
        instead of ``!= []``; the next frontier is built in one comprehension.
        """
        if root is None:
            return []
        levels = []
        frontier = [root]
        while frontier:
            levels.append([node.val for node in frontier])
            # Collect the next depth's nodes, skipping absent children.
            frontier = [child
                        for node in frontier
                        for child in (node.left, node.right)
                        if child is not None]
        return levels
|
import json
import httpretty
import pytest
from hangups import auth
# pylint: disable=redefined-outer-name
class FakeCredentialsPrompt(auth.CredentialsPrompt):
    """Credentials-prompt double that records whether it was consulted."""

    def __init__(self):
        self.was_prompted = False

    def _respond(self, canned_response):
        # Remember that a prompt happened, then hand back the fixed answer.
        self.was_prompted = True
        return canned_response

    def get_email(self):
        return self._respond('test@example.com')

    def get_password(self):
        return self._respond('password')

    def get_verification_code(self):
        return self._respond('123456')

    def get_authorization_code(self):
        return self._respond('auth_code')
@pytest.fixture
def credentials_prompt():
    """Fixture: a fresh FakeCredentialsPrompt for each test."""
    return FakeCredentialsPrompt()
class FakeRefreshTokenCache(auth.RefreshTokenCache):
    """In-memory refresh-token cache double; never touches the filesystem."""

    def __init__(self):
        super().__init__('fake_filename')
        self._stored_token = None

    def get(self):
        return self._stored_token

    def set(self, refresh_token):
        self._stored_token = refresh_token
@pytest.fixture
def refresh_token_cache():
    """Fixture: a fresh FakeRefreshTokenCache for each test."""
    return FakeRefreshTokenCache()
def get_form(form_id, action, input_id):
    """Return minimal HTML for a one-input form used by the mocked pages."""
    return f'<form id="{form_id}" action="{action}"><input id="{input_id}"></form>'
def mock_google(verification_input_id=None):
    """Set up httpretty to mock authentication requests.

    This simplifies the sequence of redirects and doesn't make any assertions
    about the requests.

    :param verification_input_id: when given, an extra verification page is
        inserted between the password form and the final page, with a form
        input of this id (TOTP or phone code selector).
    """
    httpretty.HTTPretty.allow_net_connect = False
    # Initial OAuth page: a form asking for the email address.
    httpretty.register_uri(
        httpretty.GET,
        'https://accounts.google.com/o/oauth2/programmatic_auth',
        body=get_form(
            auth.FORM_SELECTOR[1:], '/password_form', auth.EMAIL_SELECTOR[1:]
        ), content_type='text/html'
    )
    # Password form either finishes directly or routes via verification.
    next_action = (
        '/verification' if verification_input_id is not None else '/finished'
    )
    httpretty.register_uri(
        httpretty.GET, 'https://accounts.google.com/password_form',
        body=get_form(
            auth.FORM_SELECTOR[1:], next_action, auth.PASSWORD_SELECTOR[1:]
        ), content_type='text/html'
    )
    httpretty.register_uri(
        httpretty.GET, 'https://accounts.google.com/verification',
        body=get_form(
            auth.VERIFICATION_FORM_SELECTOR[1:], '/finished',
            verification_input_id
        ), content_type='text/html'
    )
    # Final page drops the oauth_code cookie the auth flow reads.
    httpretty.register_uri(
        httpretty.GET, 'https://accounts.google.com/finished',
        body='success', content_type='text/html', set_cookie='oauth_code=foo'
    )
    httpretty.register_uri(
        httpretty.POST, 'https://accounts.google.com/o/oauth2/token',
        body=json.dumps(dict(access_token='access', refresh_token='refresh')),
        content_type='application/json'
    )
    httpretty.register_uri(
        httpretty.GET, 'https://accounts.google.com/accounts/OAuthLogin',
        body='uberauth', content_type='text/html'
    )
    # MergeSession sets the session cookie the tests assert on.
    httpretty.register_uri(
        httpretty.GET, 'https://accounts.google.com/MergeSession',
        body='uberauth', content_type='text/html',
        set_cookie='session=foo; Domain=.google.com'
    )
@httpretty.activate
def test_login(credentials_prompt, refresh_token_cache):
    """A fresh login prompts for credentials, caches a refresh token, and
    yields the mocked session cookie."""
    mock_google()
    cookies = auth.get_auth(credentials_prompt, refresh_token_cache)
    assert credentials_prompt.was_prompted
    assert refresh_token_cache.get() is not None
    assert cookies['session'] == 'foo'
@httpretty.activate
def test_login_totp_verification(credentials_prompt, refresh_token_cache):
    """Login with an interposed TOTP verification page still succeeds."""
    mock_google(verification_input_id=auth.TOTP_CODE_SELECTOR[1:])
    cookies = auth.get_auth(credentials_prompt, refresh_token_cache)
    assert credentials_prompt.was_prompted
    assert refresh_token_cache.get() is not None
    assert cookies['session'] == 'foo'
@httpretty.activate
def test_login_phone_verification(credentials_prompt, refresh_token_cache):
    """Login with an interposed phone-code verification page still succeeds."""
    mock_google(verification_input_id=auth.PHONE_CODE_SELECTOR[1:])
    cookies = auth.get_auth(credentials_prompt, refresh_token_cache)
    assert credentials_prompt.was_prompted
    assert refresh_token_cache.get() is not None
    assert cookies['session'] == 'foo'
@httpretty.activate
def test_refresh_token(credentials_prompt, refresh_token_cache):
    """With a cached refresh token, login succeeds without prompting."""
    mock_google()
    refresh_token_cache.set('foo')
    cookies = auth.get_auth(credentials_prompt, refresh_token_cache)
    assert not credentials_prompt.was_prompted
    assert refresh_token_cache.get() is not None
    assert cookies['session'] == 'foo'
@httpretty.activate
def test_manual_login(credentials_prompt, refresh_token_cache):
    """Manual login (authorization-code flow) also prompts and yields cookies."""
    mock_google()
    cookies = auth.get_auth(
        credentials_prompt, refresh_token_cache, manual_login=True
    )
    assert credentials_prompt.was_prompted
    assert refresh_token_cache.get() is not None
    assert cookies['session'] == 'foo'
|
#!/usr/bin/env python
"""
RPG: Timer
"""
CLOCK = None
import pygame
def LockFrameRate(framerate=60):
    """Cap the game loop at ``framerate`` FPS, creating the shared clock lazily."""
    global CLOCK
    if CLOCK is None:
        CLOCK = pygame.time.Clock()
    CLOCK.tick(framerate)
|
from django.views.generic.base import View, TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView
from .models import Payment
from .forms import PaymentForm
class PublicBaseView(View):
    """A base class for all views.

    Adds the subclass's ``section_name`` to the template context so
    templates can highlight the active section.
    """

    class Meta:
        # NOTE(review): ``Meta.abstract`` is a Django *model* convention and
        # has no effect on plain class-based views; kept for compatibility.
        abstract = True

    def get_context_data(self, *args, **kwargs):
        context = super(PublicBaseView, self).get_context_data(*args, **kwargs)
        # Robustness fix: don't raise AttributeError when a subclass
        # forgets to declare section_name.
        context['section'] = getattr(self, 'section_name', '') or ''
        return context
class PublicBasicPageView(PublicBaseView, TemplateView):
    """Template page view carrying the public ``section`` context."""
    pass


class PublicDetailView(PublicBaseView, DetailView):
    """Object detail view carrying the public ``section`` context."""
    pass


class PublicListView(PublicBaseView, ListView):
    """Object list view carrying the public ``section`` context."""
    pass


class PublicCreateView(PublicBaseView, CreateView):
    """Object creation view carrying the public ``section`` context."""
    pass


class PublicUpdateView(PublicBaseView, UpdateView):
    """Object update view carrying the public ``section`` context."""
    pass
class Dashboard(PublicBasicPageView):
    """Dashboard page listing all payments, newest start date first."""
    template_name = 'dashboard.html'
    section_name = 'dashboard'

    def get_context_data(self, *args, **kwargs):
        # Compatibility fix: accept and forward *args/**kwargs to match the
        # base-class signature (the original silently dropped them).
        context = super(Dashboard, self).get_context_data(*args, **kwargs)
        context['payments'] = Payment.objects.all().order_by('-start_date')
        return context


dashboard = Dashboard.as_view()
class CreatePayment(PublicCreateView):
    """Create a new Payment; redirects to the dashboard on success."""
    form_class = PaymentForm
    template_name = 'create_payment.html'
    section_name = 'create_payment'
    success_url = '/'


create_payment = CreatePayment.as_view()
class EditPayment(PublicUpdateView):
    """Edit an existing Payment (re-uses the create template and section)."""
    model = Payment
    form_class = PaymentForm
    template_name = 'create_payment.html'
    section_name = 'create_payment'
    success_url = '/'


edit_payment = EditPayment.as_view()
|
import requests
import json
import pandas as pd
import numpy as np
import random
import time
from boto.s3.connection import S3Connection
from boto.s3.key import Key
tournament_name = 'World Golf Championships - Cadillac Championship'
year = 2015
timestamp = time.time()
def calculate_mean(hole_distribution):
    """Return the expected score of a hole: sum(score * probability).

    ``hole_distribution`` is a pandas Series indexed by score with the
    probability (or weight) of each score as values.

    Bug fix: ``Series.get_value`` was removed from pandas; iterating the
    (index, value) pairs replaces it and also drops the manual range scan
    (and handles an empty Series, returning 0).
    """
    running_total = 0
    for score, probability in hole_distribution.items():
        running_total += score * probability
    return running_total
def shift_distribution_mean(score_distribution,mean,target):
    """Re-weight a hole score distribution so its mean moves toward ``target``.

    Scales the probabilities of scores above ``target`` by a single factor and
    renormalizes to 1.  The input Series is mutated in place (acknowledged in
    the comment below as acceptable because the caller re-reads the pristine
    distribution from file).

    NOTE(review): the ``mean`` parameter is accepted but never used in this
    body — confirm whether it can be dropped from the call sites.
    """
    # remember: underlying score distribution is altered. Ok since pulling course distribution from file
    # find non-zero values above and below the score distribution
    temp_distribution = score_distribution[score_distribution > 0]
    #print temp_distribution
    upper = temp_distribution[temp_distribution.index > target]
    upper_avg = calculate_mean(upper)
    upper_sum = sum(upper)
    lower = temp_distribution[temp_distribution.index < target]
    lower_avg = calculate_mean(lower)
    #lower_sum = sum(lower)
    # Single scale factor applied to the upper tail.
    pct_change = (lower_avg - target + target * upper_sum) / (target * upper_sum - upper_avg) # >1 for increase, < 1 decrease
    # change score distribution. new distribution sum will be greater than zero for shift up, less than zero for shift down
    for score in range(upper.index[0], upper.index[0] + len(upper)):
        score_distribution[score] = score_distribution[score] * pct_change
    # normalize to 1
    score_distribution = score_distribution / sum(score_distribution)
    # print score_distribution
    # print sum(score_distribution)
    return score_distribution
# create connection to bucket
# SECURITY NOTE(review): AWS credentials are hard-coded below — rotate them and
# load them from the environment or an IAM role instead of source control.
c = S3Connection('AKIAIQQ36BOSTXH3YEBA','cXNBbLttQnB9NB3wiEzOWLF13Xw8jKujvoFxmv3L')
# create connection to bucket
b = c.get_bucket('public.tenthtee')

# get field from first round tee times
k = Key(b)
k.key = 'sportsData/' + str(year) + '/' + tournament_name + '/field.json'
field_string = k.get_contents_as_string()
field = json.loads(field_string)
#print field

# get hole distribution(s)
k2 = Key(b)
if tournament_name == 'World Golf Championships - Cadillac Championship':
    k2.key = 'sportsData/' + str(year-1) + '/WGC Cadillac Championship/scores.json'
else:
    k2.key = 'sportsData/' + str(year-1) + '/' + tournament_name + '/scores.json'
scores_string = k2.get_contents_as_string()
scores = json.loads(scores_string)
courses = scores['courses']

# if only one course, use that course for all rounds
if len(courses) == 1:
    print 'test'
    for player in field:
        # Build the per-player simulation input: hole-score probability
        # vectors (scores 1..8) for each of the four rounds.
        player_distribution = {}
        player_distribution['player'] = player
        player_distribution['status'] = 'Not started'
        player_distribution['timestamp'] = 0
        player_distribution['thru'] = 0 # create only the holes that will need to be simulated
        player_distribution['Rd1'] = {}
        player_distribution['Rd1']['teetime'] = 0
        player_distribution['Rd1']['starthole'] = 0
        player_distribution['Rd1']['course'] = courses[0]
        player_distribution['Rd1']['holes'] = {}
        for hole_num in xrange(1,18+1):
            player_distribution['Rd1']['holes'][hole_num] = []
            for score in xrange(1,8+1):
                player_distribution['Rd1']['holes'][hole_num].append(scores[courses[0]]['holes'][str(hole_num)]['percentages'][str(score)])
        player_distribution['Rd2'] = {}
        player_distribution['Rd2']['course'] = courses[0]
        player_distribution['Rd2']['teetime'] = 0
        player_distribution['Rd2']['starthole'] = 0
        player_distribution['Rd2']['holes'] = {}
        for hole_num in xrange(1,18+1):
            player_distribution['Rd2']['holes'][hole_num] = []
            for score in xrange(1,8+1):
                player_distribution['Rd2']['holes'][hole_num].append(scores[courses[0]]['holes'][str(hole_num)]['percentages'][str(score)])
        player_distribution['Rd3'] = {}
        player_distribution['Rd3']['course'] = courses[0]
        player_distribution['Rd3']['teetime'] = 0
        player_distribution['Rd3']['starthole'] = 0
        player_distribution['Rd3']['holes'] = {}
        for hole_num in xrange(1,18+1):
            player_distribution['Rd3']['holes'][hole_num] = []
            for score in xrange(1,8+1):
                player_distribution['Rd3']['holes'][hole_num].append(scores[courses[0]]['holes'][str(hole_num)]['percentages'][str(score)])
        player_distribution['Rd4'] = {}
        player_distribution['Rd4']['course'] = courses[0]
        player_distribution['Rd4']['teetime'] = 0
        player_distribution['Rd4']['starthole'] = 0
        player_distribution['Rd4']['holes'] = {}
        for hole_num in xrange(1,18+1):
            player_distribution['Rd4']['holes'][hole_num] = []
            for score in xrange(1,8+1):
                player_distribution['Rd4']['holes'][hole_num].append(scores[courses[0]]['holes'][str(hole_num)]['percentages'][str(score)])

        # get avg strokes gained for field
        k3 = Key(b)
        #for player in field:
        k3.key = 'AvgStrokesGained/' + str(year) + '/' + tournament_name + '/' + player
        avg_strokes_gained_string = k3.get_contents_as_string()
        avg_strokes_gained = json.loads(avg_strokes_gained_string)

        # adjust distribution(s) for avg strokes gained
        for rd in xrange(1,4+1):
            rd_string = 'Rd' + str(rd)
            round_score = 0
            for hole_num in xrange(1,18+1):
                #print player_distribution[rd_string]['holes'][hole_num], sum(player_distribution[rd_string]['holes'][hole_num]) / float(len(player_distribution[rd_string]['holes'][hole_num]))
                current_distribution = pd.Series(player_distribution[rd_string]['holes'][hole_num], index=[x for x in xrange(1,8+1)])
                current_mean = calculate_mean(current_distribution)
                target_mean = current_mean - avg_strokes_gained['average_strokes_gained']
                adjusted_distribution = shift_distribution_mean(current_distribution,current_mean,target_mean)
                # INSERT WEATHER, PIN PLACEMENT CHANGES HERE
                player_distribution[rd_string]['holes'][hole_num] = adjusted_distribution.tolist()
        player_distribution = json.dumps(player_distribution)

        # save player_distribution
        k4 = Key(b)
        k4.key = 'simulation/' + str(year) + '/' + tournament_name + '/' + player
        k4.set_contents_from_string(player_distribution)
        print player, tournament_name

# adjust distribution for avg variance
# adjust distribution for wind
# adjust distribution for pressure

# draw random numbers
#cumulative_distribution = np.cumsum(adjusted_distribution)
#print cumulative_distribution
#r = random.random()
#score = np.where(cumulative_distribution > r)[0][0] + 1 # +1 because np.where return zero-indexed array
#round_score += score
#print round_score

# return [player,hole,score,relation_to_par]
# save results in aws
# five servers with different codes
# another server that tabulates fantasy scores
|
import sys
import nltk
from nltk.tokenize import TweetTokenizer
nltk.download("punkt")
import generator.model as md
# import generator.ig_utils as igu
import generator.twt_utils as twt
def generate_tweet(creds, search_terms):
    """Build a Markov chain from recent tweets matching ``search_terms`` and
    post a generated tweet with the account described by ``creds``."""
    transition_matrix = {}
    tokenizer = TweetTokenizer()
    # Authenticate first so bad credentials fail fast.
    twitter = twt.login_oauth1(creds)
    # Pull the most recent matching posts as one large string and tokenize it.
    recent_tweets = twt.tweet_search(twitter, search_terms)
    corpus = tokenizer.tokenize(recent_tweets)
    # Populate the transition matrix, sample a tweet from it, then post it.
    md.build_matrix(corpus, transition_matrix)
    sample = md.sample_tweet(transition_matrix, search_terms)
    twt.text_post(twitter, sample)
# def generate_ig(src_path, creds, sentence_length, photo_path):
#
# tm = {}
# tknzr = TweetTokenizer()
#
# with open(src_path, "r+") as r:
# for line in r:
# corpus = tknzr.tokenize(line)
# md.build_matrix(corpus, tm)
#
#
# print("making some text for you...")
#
# # generate text
# sample = md.sample_sentence(tm, sentence_length)
#
#
# # or you could also just post it to instagram
#
# ig = igu.login(creds)
# # post photo with credentials
# igu.post(ig, photo_path, sample)
|
import subprocess
import re
import os
import sys
import time
import concurrent.futures
# time counter to measure program performance
start_time = time.time()

# set file location
## remember to use "/" instead of "\" and remember to put an extra "/" at the end
file_location = "C:/Users/Rob/Documents/Datalog 2018-9-10/BM1(Q MIXER)/PT/BM1 SCRUBBER SC-101-PT-101 DATA/"

# change the current working directory into the parent directory of the file location
os.chdir(file_location)
os.chdir("..")

# create a new directory for the csv file using os module
csv_folder_name = file_location.split("/")
csv_folder_name = csv_folder_name[len(csv_folder_name) - 2] + "_csv"

# set csv_file_location where new csv files will be stored
csv_file_location = os.path.abspath(os.curdir) + "/" + csv_folder_name
csv_file_location = csv_file_location + "/"

try:
    os.mkdir(csv_folder_name)
except FileExistsError:
    # Bug fix: the bare `except:` also hid unrelated failures (permissions,
    # bad path); only an already-existing folder should abort with this
    # message.  Also fixed the "cvs" typo.
    sys.exit("There's already a csv folder created")

# extract a list of target file names
dirlist = os.listdir(file_location)
target_file = []
for line in dirlist:
    ## regular expression to find a format "0000 00 00 0014 (Float)"
    regex = re.findall(r"[0-9]{4} [0-9]{2} [0-9]{2} [0-9]{2}[1][4] \(Float\)", line)
    if len(regex) == 1:
        target_file.append(regex[0])
# convert dat to csv using multi-thread processing
def convert_dat_to_csv(file):
    """Convert one FactoryTalk .dat log to .csv via FTViewFileViewer.

    Relies on the module-level ``file_location`` and ``csv_file_location``
    globals for the input and output paths.
    """
    file_name = file
    print("converting... ", file)
    ## using FTViewer's subprocess to run the conversion via command line switch
    viewer_location = file_location + "FTViewFileViewer.exe"
    dat_location = file_location + file_name + ".dat"
    csv_location = csv_file_location + file_name + ".csv"
    # NOTE(review): shell=True combined with an argument list is unusual;
    # shell=False would be safer here — confirm the viewer still launches.
    subprocess.run([viewer_location,"/sd", dat_location, csv_location], shell=True)
# Fan the conversions out across worker threads (subprocess/I/O bound work).
with concurrent.futures.ThreadPoolExecutor() as executor:
    executor.map(convert_dat_to_csv, target_file)

# time counter to measure program performance
print("Finished in %s seconds" % (round(time.time() - start_time, 3)))
#!/usr/bin/python
#
# Keeps your IAP up to date
# Version 0.1
import os
import sys
import urllib2
import re
import os.path
import urlparse
import posixpath
url001 = 'http://airnav.com/airport/'
url002 = 'http://www.ais-netherlands.nl/aim/2015-03-19-AIRAC/eAIP/html/eAIP/EH-AD-2.EHAM-en-GB.html#eham-ad-2.24/'
def error():
    """Print a tongue-in-cheek fatal-error banner."""
    print ''
    print ' Fatal error right in front of screen.'
    print ''
    return
def exit():
    """Print a goodbye banner.

    NOTE(review): shadows the builtin ``exit``; rename if the builtin is
    ever needed in this module.
    """
    print ''
    print ' You must have hit the wrong any key.'
    print ''
    return
def dst_dir():
    """Ask the user for the download destination directory, then continue
    to the airport prompt.  Sets the module-global ``dstdir``."""
    global dstdir
    print ''
    print 'Welcome to IAPscrape'
    print ''
    dstdir = raw_input('Enter destination directory: ')
    airport_icao()
    return
def iap_K():
response = urllib2.urlopen(''+url001+''+airport+'').read()
charts = re.findall('href=[\'"]?([^\'" >]+).PDF',response)
for pdf in charts:
pdfs = 'http://airnav.com'+pdf+'.PDF'
cleanurl = pdfs.strip('http://airnav.com/depart?')
fullurl = 'http://'+cleanurl+''
path = urlparse.urlsplit(fullurl).path
filename = posixpath.basename(path)
print 'Downloading:', filename
response = urllib2.urlopen(fullurl)
output = open(''+fullpath+'/'+filename+'','wb')
output.write(response.read())
output.close()
return
def airport_icao():
    """Prompt for a four-letter ICAO code, create the per-airport download
    directory under ``dstdir``, then kick off the PDF download.

    NOTE(review): the prompt string contains a user-visible typo ("aiport").
    """
    global airport
    global fullpath
    print ''
    airport = raw_input('Enter the four letter ICAO designator of the aiport: ')
    fullpath = ''+dstdir+'/'+airport+''
    if not os.path.exists(fullpath):
        os.makedirs(fullpath)
    print ''
    iap_K()
    return
# Entry point: start the interactive prompt chain.
dst_dir()
# EOF
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 2 12:07:59 2018
@author: lcristovao
"""
import numpy as np
class Neuron:
    """A minimal neuron: sums its inputs, adds a bias, applies ReLU.

    NOTE(review): ``weight`` is stored and adjusted by updateWeight() but is
    never applied in Output() — confirm whether inputs should be scaled by it.
    """

    @staticmethod
    def Add(array):
        # Idiom fix: the manual accumulation loop is just sum().
        return sum(array)

    @staticmethod
    def Relu(x):
        # Standard rectifier: clamp negatives to zero.
        if x < 0:
            return 0
        return x

    def __init__(self, weight, bias):
        self.weight = weight
        self.bias = bias

    # inputs is an array with input numbers
    def Output(self, inputs):
        """Return Relu(sum(inputs) + bias)."""
        return self.Relu(self.Add(inputs) + self.bias)

    def updateWeight(self, error):
        """Nudge the (currently unused) weight by ``error``."""
        self.weight += error
class DenseBrain:
    """A toy fully-connected network of Neuron objects.

    ``nlayers`` is a sequence whose length is the number of layers and whose
    entries are the neuron counts per layer.
    """

    def __init__(self, nlayers, bias):
        # Bug fix: ``layers`` was a *class* attribute, so every DenseBrain
        # instance shared (and kept appending to) the same list.
        self.layers = []
        for n_neurons in nlayers:
            # As before, one shared random initial weight per layer.
            layer_weight = np.random.random_sample()
            self.layers.append([Neuron(layer_weight, bias) for _ in range(n_neurons)])

    def Propagation(self, inputs):
        """Feed ``inputs`` forward through every layer and return the final
        neuron's activation.

        Bug fix: the original appended to an undefined name (``out`` instead
        of ``outs``) and never chained layer outputs; each layer now consumes
        the previous layer's outputs.
        """
        outs = list(inputs)
        for layer in self.layers:
            outs = [neuron.Output(outs) for neuron in layer]
        return outs[-1]

    def BackPropagation(self, error):
        """Spread ``error`` evenly over each layer's neurons, last layer first.

        Bug fix: the original indexed ``self.layers`` with the layer object
        itself (``self.layers[l]``), which raises TypeError.
        """
        for layer in reversed(self.layers):
            for neuron in layer:
                neuron.updateWeight(error / len(layer))

    @staticmethod
    def Error(true_value, output_value):
        """Signed difference between the target and the produced output."""
        return true_value - output_value
#____________________Main________________________________
# Build a 2-8-1 network and repeatedly train it towards target output 1
# for the constant input [1, 1], scaling each error step by 0.1.
# NOTE(review): this loop never terminates -- presumably a demo meant to
# be watched and interrupted by hand; confirm whether a stopping
# criterion (e.g. |error| below a threshold) was intended.
brain=DenseBrain([2,8,1],0.1)
while True:
    out=brain.Propagation([1,1])
    print("Propagation:",out)
    error=brain.Error(1,out)
    print("error:",error)
    brain.BackPropagation(error*0.1)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from firstApp.extensions import db
class BaseModel():
    """Mixin adding commit-with-rollback CRUD helpers for SQLAlchemy models."""

    def save(self):
        """Insert this instance and commit; return True on success.

        On any database error the session is rolled back and False is
        returned instead of raising.
        """
        try:
            # `self` is the model instance being persisted.
            db.session.add(self)
            db.session.commit()
            return True
        except Exception:
            # BUG FIX: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; catch only genuine errors.
            db.session.rollback()
            return False

    # Insert several instances in one transaction.
    @staticmethod
    def save_all(*args):
        """Add all given instances and commit; return True on success."""
        try:
            db.session.add_all(args)
            db.session.commit()
            return True
        except Exception:
            db.session.rollback()
            return False

    def delete(self):
        """Delete this instance and commit; return True on success."""
        try:
            db.session.delete(self)
            db.session.commit()
            return True
        except Exception:
            db.session.rollback()
            return False
|
'''
author: juzicode
address: www.juzicode.com
WeChat official account: 桔子code/juzicode
date: 2020.6.26
'''
# Print a quick survey of the host platform and the Python interpreter.
# FIX: the original imported `platform` three separate times; a single
# import at the top is enough (re-imports are no-ops anyway).
import platform

print('\n')
print('-----欢迎来到www.juzicode.com')
print('-----公众号: 桔子code/juzicode\n')

# Hardware and operating-system information.
print('architecture():',platform.architecture())
print('machine():',platform.machine())
print('processor():',platform.processor())
print('system():',platform.system())
print('platform():',platform.platform())
print('uname():',platform.uname())
print('version():',platform.version())

# Python interpreter information.
print('python_version():',platform.python_version())                 # Python version string
print('python_build():',platform.python_build())                     # build number and date
print('python_compiler():',platform.python_compiler())               # compiler used to build Python
print('python_implementation():',platform.python_implementation())   # interpreter type, e.g. CPython
print('python_version_tuple():',platform.python_version_tuple())     # version as a string tuple
|
from printer import Color, danger, success, info, format_table
from converter import *
def resolve(function, value, expected):
    # Run `function(value)` and report (result, colour_code):
    #   BLUE  -> converter not implemented yet (message returned),
    #   GREEN -> result matched `expected`,
    #   RED   -> result differed (a raised ValueError becomes the
    #            string 'ValueError' so it can match an expectation).
    try:
        outcome = function(value)
    except NotImplementedError as err:
        return err.message, Color.BLUE
    except ValueError:
        outcome = 'ValueError'
    if outcome == expected:
        return outcome, Color.GREEN
    return outcome, Color.RED
def execute(tests):
    # Run each [function, value, expected] row through resolve() and
    # rewrite the row in place as [name, value, expected, result, colour].
    # Mutates `tests`; returns None.
    for index, (func, value, expected) in enumerate(tests):
        outcome, colour = resolve(func, value, expected)
        tests[index] = [func.__name__, value, expected, outcome, colour]
def repport(tests, index):
    # Print summary counts of test outcomes, read from column `index` of
    # each row (the colour/state code written by execute()).
    # NOTE(review): rows are compared against the literal codes 0, 1 and 3
    # -- presumably Color.RED/GREEN/BLUE resolve to those values; rows with
    # any other code are silently ignored.  Verify against printer.Color.
    # ("Repport" spelling is runtime output and deliberately left alone.)
    errors = 0
    ok = 0
    missing = 0
    for line in tests:
        if line[index] == 0:
            errors += 1
        elif line[index] == 1:
            ok += 1
        elif line[index] == 3:
            missing += 1
    print "Repport:\n"
    print "- {:>12} Methods not yet implemented".format(info(missing))
    print "- {:>12} Methods failing".format(danger(errors))
    print "- {:>12} Methods doing ok".format(success(ok))
    print
# Sample identifiers: one well-formed example per naming convention plus
# deliberately malformed ones; every converter is exercised against each.
values = [
    'ThisWasPascal',
    'thisWasCamel',
    'this_was_snake',
    'THIS_WAS_COBOL',
    'This_Was_Weird',
    'this-was-kebab',
    'THIS-WAS-TRAIN',
    'This-Was-Weird',
    'this has spaces',
    'this-IsInvalid',
    'This_IsInvalid'
]
# Build the full cross-product test table programmatically instead of the
# original 330-line hand-written literal.  For every sample value and every
# <src>_to_<dst> converter the expected result is mechanical: if the value
# is well-formed in the source convention, the properly converted string;
# otherwise the string 'ValueError'.  The row order (values in declaration
# order, then source/target conventions alphabetically, skipping identity)
# reproduces the original table exactly.
import re

# Conventions in alphabetical order -- this fixes the row order.
_CASE_ORDER = ['camel', 'cobol', 'kebab', 'pascal', 'snake', 'train']

# A value is "well-formed" only as a multi-word identifier in exactly one
# of these shapes (single words never occur in the sample set).
_CASE_RES = {
    'camel':  re.compile(r'^[a-z]+(?:[A-Z][a-z]+)+$'),
    'cobol':  re.compile(r'^[A-Z]+(?:_[A-Z]+)+$'),
    'kebab':  re.compile(r'^[a-z]+(?:-[a-z]+)+$'),
    'pascal': re.compile(r'^(?:[A-Z][a-z]+){2,}$'),
    'snake':  re.compile(r'^[a-z]+(?:_[a-z]+)+$'),
    'train':  re.compile(r'^[A-Z]+(?:-[A-Z]+)+$'),
}

def _classify(value):
    # Return (case_name, lowercase word list), or (None, None) when the
    # value matches no convention (the "weird"/invalid samples).
    for case_name in _CASE_ORDER:
        if _CASE_RES[case_name].match(value):
            if case_name in ('camel', 'pascal'):
                parts = re.findall(r'[a-z]+|[A-Z][a-z]*', value)
            else:
                parts = value.replace('-', '_').split('_')
            return case_name, [p.lower() for p in parts]
    return None, None

def _render(case_name, words):
    # Join lowercase words back together in the target convention.
    if case_name == 'camel':
        return words[0] + ''.join(w.capitalize() for w in words[1:])
    if case_name == 'pascal':
        return ''.join(w.capitalize() for w in words)
    sep = '_' if case_name in ('cobol', 'snake') else '-'
    joined = sep.join(words)
    return joined.upper() if case_name in ('cobol', 'train') else joined

tests = []
for _value in values:
    _src_case, _words = _classify(_value)
    for _frm in _CASE_ORDER:
        for _to in _CASE_ORDER:
            if _frm == _to:
                continue
            # The converters were star-imported from `converter`, so they
            # are reachable by name in this module's globals.
            _func = globals()['%s_to_%s' % (_frm, _to)]
            if _frm == _src_case:
                _expected = _render(_to, _words)
            else:
                _expected = 'ValueError'
            tests.append([_func, _value, _expected])
# Run the suite and render the coloured report.
# BUG FIX: the original bound `executed_tests = execute(tests)`, but
# execute() mutates `tests` in place and returns None, so the assignment
# only created a misleading name.
execute(tests)
# Prepend the header row; column 4 carries the per-row colour code that
# format_table() and repport() read back.
tests.insert(0, ['Method', 'Input', 'Expected', 'Output', Color.BOLD])
color_index = 4
format_table(tests, color_index)
repport(tests, color_index)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.