text stringlengths 8 6.05M |
|---|
#!/usr/bin/python3
from flask import Flask, request, render_template
from flask_cors import CORS, cross_origin
import time
import RPi.GPIO as GPIO
import Adafruit_DHT
import json
import sys
import pandas as pd
import logging
from wwo_hist import retrieve_hist_data
import os
import datetime
# Make user-local packages visible (Python 3.7 on the Pi) before importing
# the project-local plant-recommendation module.
sys.path.append("/home/pi/.local/lib/python3.7")
import plantrec
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend
@app.route("/water", methods=['GET', 'OPTIONS'])  # fixed 'OPTIONs' casing typo
@cross_origin()
def water():
    """Open the irrigation valve (BCM pin 15) for 20 seconds, then close it.

    Returns a plain-text success message. The valve is now guaranteed to be
    switched off even if the sleep is interrupted (original code could leave
    the valve open on an exception, flooding the plant).
    """
    GPIO.setmode(GPIO.BCM)
    valve = 15  # BCM pin driving the water-valve relay
    GPIO.setup(valve, GPIO.OUT)
    GPIO.output(valve, GPIO.HIGH)
    try:
        time.sleep(20)  # watering duration in seconds
    finally:
        # Always shut the valve, even on error/interrupt.
        GPIO.output(valve, GPIO.LOW)
    return "Successfully Watered"
@app.route("/prp", methods=['GET', 'POST', 'OPTIONS'])
@cross_origin()
def prp():
    """Return plant recommendations as JSON for a date and zipcode.

    Query params:
        date_string -- date to produce recommendations for
        zipcode_str -- zipcode whose cached historical-weather CSV is used

    The recommendation requires a pre-fetched weather CSV named
    ``<zipcode>.csv``; when it is missing, an error marker is returned in
    the 'prp' field.
    """
    date = request.args.get('date_string')
    zipcode = request.args.get('zipcode_str')
    # plantrec reads its CSVs relative to the app directory.
    os.chdir("/var/www/FlaskApp/FlaskApp")
    if os.path.isfile(f"/var/www/FlaskApp/FlaskApp/{zipcode}.csv"):
        prplist = plantrec.prpmain(date)
    else:
        # BUG FIX: the original assigned an unused 'data3' here and left
        # 'prplist' unbound, raising NameError when the CSV was missing.
        prplist = "PRP API call Failure"
    temp = {
        "date": date,
        "zip": zipcode,
        "prp": prplist
    }
    return json.dumps(temp)
@app.route("/sensor", methods=['GET', 'OPTIONS'])
@cross_origin()
def sensor():
    """Read all attached sensors and return their values as JSON.

    Sensors:
      * LDR on BCM 23          -- light level as an RC charge-loop count
      * soil moisture on BCM 17 -- digital yes/no reading
      * DHT11 on BCM 4          -- humidity (%) and temperature (C and F)
    """
    GPIO.setmode(GPIO.BCM)
    sms = 17  # soil-moisture sensor pin
    ldr = 23  # light-dependent resistor pin

    def ldrfunc(pin):
        # RC-timing trick: drain the capacitor, then count loop iterations
        # until the pin reads HIGH; a larger count means a darker room.
        count = 0
        GPIO.setup(pin, GPIO.OUT)
        GPIO.output(pin, GPIO.LOW)
        time.sleep(0.1)
        GPIO.setup(pin, GPIO.IN)
        while GPIO.input(pin) == GPIO.LOW:
            count += 1
        return count

    ldrval = ldrfunc(ldr)
    # Soil moisture: the digital pin reads HIGH when the soil is dry.
    GPIO.setup(sms, GPIO.IN)
    resultsms = "No Moisture Detected" if GPIO.input(sms) else "Moisture Detected"
    # DHT11: read_retry returns (None, None) when every retry fails; guard so
    # we don't crash computing tempF (original bug: None * 9/5 -> TypeError).
    humidity, tempC = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4)
    tempF = tempC * 9 / 5 + 32 if tempC is not None else None
    sensor_dict = {
        "resultsms": resultsms,
        "ldr": ldrval,
        "humidity": humidity,
        "tempC": tempC,
        "tempF": tempF,
    }
    return json.dumps(sensor_dict)
# Run the Flask development server when executed directly
# (production deployments serve the app through WSGI instead).
if __name__ == "__main__":
    app.run()
|
#!/usr/bin/env python
# NOTE(review): Python 2 script -- it imports urllib2, which does not exist
# on Python 3 (use urllib.request there).
import urllib2, json
from sys import argv

# Usage: is-up.py <domain>  -- queries isitup.org and reports reachability.
script, url = argv
user_agent = [('User-agent', 'https://github.com/bryanheinz/scripts/blob/master/python/utilities/is-up.py')]
opener = urllib2.build_opener()
opener.addheaders = user_agent
# isitup.org replies with JSON containing a numeric 'status_code' field.
data = json.load(opener.open("http://isitup.org/%s.json" % url))
print('')
print(data)
print('')
up = data['status_code']
# Branches below imply: 1 = up, 2 = down, 3 = invalid domain.
if up == 1:
    print("%s is up." % data['domain'])
elif up == 2:
    print("%s appears down." % data['domain'])
elif up == 3:
    print("%s isn't valid." % data['domain'])
else:
    print("error")
print('')
# drugdealing.py
#
# A small terminal trading game: buy drugs low, sell high, travel between
# cities where prices are re-rolled.
import os
import sys
import random
import collections

# Tradeable drug names; order matters -- DrugName offsets index into price
# lists built with zip(drugsAvail, ...).
drugsAvail = ['acid', 'cocaine', 'heroin', 'meth', 'weed']
#drugBasePrices = [5, 100, 60, 25]
#drugPriceModifier = [1, 1, 1, 1]
#drugPrices = list()
citiesAvail = ['boston', 'chicago', 'dallas', 'los angeles', 'new york']
# Enter city
# Get current prices on the street
# Ask if buy or sell?
# Which drug and how many?
# Adjust backpack values (dollars + drugs)
# Menu labels shown to the player, and the one-letter command codes.
mainMenuOptions = ['[h]elp', '[i]nventory', '[p]rices', '[t]ravel', '[b]uy drugs', '[s]ell drugs', 'e[x]it']
mainMenuAbbr = ['h', 'i', 'p', 't', 'b', 's', 'x']
def percentage(percent, whole):
    """Return *percent* percent of *whole*, always as a float."""
    return percent * whole / 100.0
#subMenu_BuyOptions = ['[]']
# NOTE(review): the triple-quoted block below is commented-out draft code
# for a random-event system. It is a bare string literal with no runtime
# effect; consider deleting it or finishing the implementation.
'''class EventClass(self):
drug_events = [
('You found some hits of acid in the subway!', 3)
('You found some drugs laying on the street corner!', ),
('Weed prices hit rock bottom!'),
('Drug bust in Columbia! Cocaine is difficult to get and prices have SOARED to new heights!'),
('A weak strain of pot has hit the market and is selling CHEAP!'),
('You broke the rule- do not get high on your own supply - and consumed X units of Y.'),
('There is a heroin shortage in the city. Prices have skyrocketed.'),
('There is an excess of meth in this city and prices are lower than normal.'),
('A new type of acid has hit the market and is selling cheap!'),
('Word on these streets says heroin is selling CHEAP!'),
('The city cracked down on pot growers and there is a shortage. Weed is selling at a premium!'),
('Meth is now stronger than ever! Prices have skyrocketed!', ),
('A bad supply of cocaine has hit the market and is selling cheap!', -0.6),
('Cops chased you! You lost some dope while running!', -0.04),
('You get robbed in the subway! They took most of your stash!', -0.50)
]
def __init__(self):
super().__init__()
def GetRandomEvent(self):
rint = random.randint(0, len(EventClass.drug_events)-1)
print(f'rint={rint}')
event = collections.namedtuple('Event',['msg', 'mod'])
eventmsg = EventClass.drug_events[rint]
ev = Event(eventmsg, )
'''
class DrugBox:
    """Holds a quantity of each drug in ``drugsAvail``, all starting at 0."""

    def __init__(self):
        super().__init__()
        # One counter per known drug, e.g. {'acid': 0, 'cocaine': 0, ...}.
        self.drugs = dict([(drug, 0) for drug in drugsAvail])

    def __repr__(self):
        return ("Acid=%d, Cocaine=%d, Heroin=%d, Meth=%d, Weed=%d"
                % (self.drugs['acid'], self.drugs['cocaine'],
                   self.drugs['heroin'], self.drugs['meth'], self.drugs['weed']))

    # FIX: __str__ was a copy-pasted duplicate of __repr__; alias it instead
    # so the two can never drift apart.
    __str__ = __repr__
class Backpack:
    """The player's cash plus a DrugBox of drug inventory."""

    def __init__(self, startingmoney=200):
        super().__init__()
        #self.drugs = DrugBox()
        self.mydrugbox = DrugBox()    # drug inventory
        self.mymoney = startingmoney  # cash on hand, in dollars

    def __repr__(self):
        return ("Backpack contents: Money=%d, \nDrugs=%s"
                % (self.mymoney, self.mydrugbox))

    # FIX: __str__ duplicated __repr__ verbatim; alias instead.
    __str__ = __repr__
class DrugName:
    """(name, index) pair per drug; the index is the drug's position in
    ``drugsAvail`` and in any price list zipped from it."""
    acid, cocaine, heroin, meth, weed = (
        ('acid', 0),
        ('cocaine', 1),
        ('heroin', 2),
        ('meth', 3),
        ('weed', 4),
    )
class DrugDealing:
    """Game controller: the dealer, the current city, and street prices.

    Prices are stored as ``currentDrugPrices``: a list of (drug_name, price)
    pairs ordered like ``drugsAvail``, so DrugName offsets index into it.
    """

    def __init__(self, backpack, drugdealername):
        super().__init__()
        self.myname = drugdealername
        #self.dollars = startingMoney
        self.backpack = backpack
        self.currentCity = 'boston'
        # Initial random price roll; ranges differ per drug.
        self.currentDrugPrices = list(zip(drugsAvail, [random.randint(1, 8),
                                                       random.randint(100, 400),
                                                       random.randint(999, 4000),
                                                       random.randint(20, 200),
                                                       random.randint(10, 40)]))

    def GetCurrentPrice(self, drugname):
        """Return the current unit price for *drugname*.

        NOTE(review): if *drugname* is not a known drug, ``offset`` is never
        assigned and this raises UnboundLocalError; all current callers
        validate the name against drugsAvail first.
        """
        if drugname.lower() == 'acid':
            offset = DrugName.acid[1]
        elif drugname.lower() == 'cocaine':
            offset = DrugName.cocaine[1]
        elif drugname.lower() == 'heroin':
            offset = DrugName.heroin[1]
        elif drugname.lower() == 'meth':
            offset = DrugName.meth[1]
        elif drugname.lower() == 'weed':
            offset = DrugName.weed[1]
        print(f'{drugname} offset={offset}')
        return int(self.currentDrugPrices[offset][1])

    def GetAllCurrentPrices(self):
        """Re-roll every drug's price (called when entering a new city)."""
        self.currentDrugPrices = list(zip(drugsAvail, [random.randint(1, 5),
                                                       random.randint(40, 200),
                                                       random.randint(999, 4000),
                                                       random.randint(20, 100),
                                                       random.randint(10, 40)]))

    def PrintCurrentPrices(self):
        """Print the price board.

        NOTE(review): each line prints the whole (name, price) tuple after
        the '$' sign, e.g. "$('acid', 5)".
        """
        print(f'\n--------- Current Pricing -----------')
        print(f'${self.currentDrugPrices[DrugName.acid[1]]}')
        print(f'${self.currentDrugPrices[DrugName.cocaine[1]]}')
        print(f'${self.currentDrugPrices[DrugName.heroin[1]]}')
        print(f'${self.currentDrugPrices[DrugName.meth[1]]}')
        print(f'${self.currentDrugPrices[DrugName.weed[1]]}')
        print(f'-------------------------------------')

    def ReduceCurrentPrice(self, drugName):
        """Compute a randomly reduced price for *drugName*.

        NOTE(review): ``newVal`` is computed and printed but never written
        back to ``currentDrugPrices`` -- this method has no gameplay effect.
        """
        randPerc = random.randint(10, 60)
        currentPrice = self.GetCurrentPrice(drugName)  ##currentDrugPrices[drugName]
        print(f'(ReducingCurrentPrice: {drugName}')
        print(f'Current=${currentPrice}')
        print(f'Random Perc={randPerc}')
        newVal = float(currentPrice) * (randPerc / 100.0)
        print(f'NEWVAL={newVal}')

    def TravelToNewCity(self, newCity=None):
        """Move to *newCity* (random city when None) and re-roll prices.

        NOTE(review): an explicit *newCity* argument is never validated
        against citiesAvail, and travel may land on the current city.
        """
        if newCity == None:
            self.currentCity = citiesAvail[random.randint(0, 4)]
        print(f'\nTraveling to {self.currentCity}...')
        self.GetAllCurrentPrices()

    def BuyDrugs(self):
        """Interactive buy flow: prompt for drug and quantity, charge cash.

        Returns 0 when the interaction completed (even if declined or
        unaffordable), 1 only if no terminating branch ran.
        """
        rc = 1
        print(f'\n----------- Buy Drugs --------------')
        drugname = input('Which drug?')
        if drugname.lower() in drugsAvail:
            # Prompt shows the quantity currently owned.
            drugquant = int(input(f'({self.backpack.mydrugbox.drugs[drugname]}) How many?'))
            currprice = self.GetCurrentPrice(drugname.lower())
            mytotalprice = currprice * drugquant
            print(f' Buy {drugquant} of {drugname} at {currprice} dollars per unit (${mytotalprice})?')
            yesorno = str(input('[y]es or [n]o?')).lower()
            if yesorno == 'y':
                if self.backpack.mymoney >= mytotalprice:
                    self.backpack.mydrugbox.drugs[drugname] += drugquant
                    self.backpack.mymoney -= mytotalprice
                    print(f'Balance now= {self.backpack.mymoney}')
                else:
                    print(f'You do not have enough money!')
                # END IF
                print(f'-------------------------------------')
                rc = 0
            else:
                rc = 0
            # END IF
        else:
            print(f'Unknown drug!')
            rc = 0
        return rc

    def SellDrugs(self):
        """Interactive sell flow: prompt for drug and quantity, credit cash.

        Returns 0 on a completed/declined sale, 1 when the quantity was too
        large or the drug name unknown.
        """
        rc = 1
        print(f'\n----------- Sell Drugs --------------')
        drugname = input('Which drug?')
        if drugname.lower() in drugsAvail:
            drugquant = int(input(f'({self.backpack.mydrugbox.drugs[drugname]}) How many?'))
            currprice = self.GetCurrentPrice(drugname.lower())
            mytotalprice = currprice * drugquant
            # Can only sell what is actually in the backpack.
            if self.backpack.mydrugbox.drugs[drugname] >= drugquant:
                print(f' Sell {drugquant} of {drugname} at {currprice} dollars per unit (${mytotalprice})?')
                yesorno = str(input('[y]es or [n]o?')).lower()
                if yesorno == 'y':
                    self.backpack.mydrugbox.drugs[drugname] -= drugquant
                    self.backpack.mymoney += mytotalprice
                    rc = 0
                else:
                    rc = 0
                # END IF
            else:
                print(f'You do not have enough to sell {drugquant}!')
            # END IF
        else:
            print(f'Unknown drug!')
        # END IF
        return rc
if __name__ == "__main__":
    print('\n==================================')
    print(f' DrugDealing [v0.0.1] ')
    print('==================================')
    bp = Backpack(200)  # the player starts with $200
    ddMain = DrugDealing(bp, 'Lefty')
    ddMain.PrintCurrentPrices()
    process = True
    #### MAIN PROCESSING LOOP ####
    # Read one-letter commands until the player exits with 'x'.
    while process == True:
        print(f'\n(Wallet: ${bp.mymoney})')
        reply = str(input(mainMenuAbbr)).lower()
        if reply in mainMenuAbbr:
            if reply == "x":
                print(f'Bye!')
                break
            elif reply == "h":
                ## HELP ##
                print(mainMenuOptions)
            elif reply == "t":
                ## TRAVEL ##
                ddMain.TravelToNewCity()
                # NOTE(review): ReduceCurrentPrice never persists its result,
                # so this call only prints debug output.
                ddMain.ReduceCurrentPrice('cocaine')
                ddMain.PrintCurrentPrices()
            elif reply == "p":
                ## PRICES ##
                ddMain.PrintCurrentPrices()
            elif reply == "i":
                ## INVENTORY ##
                print(f'\n----------- Inventory ------------')
                print(f'Drug Dealer: {ddMain.myname}')
                print(f'Total Cash: ${ddMain.backpack.mymoney}')
                print(f'Drugs: {bp.mydrugbox}')
                print(f'\n----------------------------------')
            elif reply == "b":
                ## BUY ##
                rc = ddMain.BuyDrugs()
            elif reply == "s":
                ## SELL ##
                rc = ddMain.SellDrugs()
            else:
                ## UNKNOWN ##
                print(f'Try again!')
            # END IF
        else:
            print(f'Unknown option!')
        # END IF
    # END WHILE
#!/usr/bin/python3
class Complex:
    """Minimal complex-number record holding a real and an imaginary part."""

    def __init__(self, realpart, imagpart):
        self.realpart, self.imagpart = realpart, imagpart
# Demo of the *built-in* complex type (distinct from the Complex class).
x = complex(3.0, -4.5)
print(x.real, x.imag)
class Test:
    """Tiny demo: `self` is the instance, `self.__class__` its type object."""

    def prt(self):
        # Prints the instance repr, then the class object.
        print(self)
        print(self.__class__)
# Instantiate and exercise the demo class.
t = Test()
t.prt()
|
# 문제 설명
# 게임 캐릭터를 4가지 명령어를 통해 움직이려 합니다. 명령어는 다음과 같습니다.
# U: 위쪽으로 한 칸 가기
# D: 아래쪽으로 한 칸 가기
# R: 오른쪽으로 한 칸 가기
# L: 왼쪽으로 한 칸 가기
# 캐릭터는 좌표평면의 (0, 0) 위치에서 시작합니다. 좌표평면의 경계는 왼쪽 위(-5, 5), 왼쪽 아래(-5, -5), 오른쪽 위(5, 5), 오른쪽 아래(5, -5)로 이루어져 있습니다.
# 방문길이1_qpp9l3.png
# 예를 들어, "ULURRDLLU"로 명령했다면
# 방문길이2_lezmdo.png
# 1번 명령어부터 7번 명령어까지 다음과 같이 움직입니다.
# 방문길이3_sootjd.png
# 8번 명령어부터 9번 명령어까지 다음과 같이 움직입니다.
# 방문길이4_hlpiej.png
# 이때, 우리는 게임 캐릭터가 지나간 길 중 캐릭터가 처음 걸어본 길의 길이를 구하려고 합니다. 예를 들어 위의 예시에서 게임 캐릭터가 움직인 길이는 9이지만, 캐릭터가 처음 걸어본 길의 길이는 7이 됩니다. (8, 9번 명령어에서 움직인 길은 2, 3번 명령어에서 이미 거쳐 간 길입니다)
# 단, 좌표평면의 경계를 넘어가는 명령어는 무시합니다.
# 예를 들어, "LULLLLLLU"로 명령했다면
# 방문길이5_nitjwj.png
# 1번 명령어부터 6번 명령어대로 움직인 후, 7, 8번 명령어는 무시합니다. 다시 9번 명령어대로 움직입니다.
# 방문길이6_nzhumd.png
# 이때 캐릭터가 처음 걸어본 길의 길이는 7이 됩니다.
# 명령어가 매개변수 dirs로 주어질 때, 게임 캐릭터가 처음 걸어본 길의 길이를 구하여 return 하는 solution 함수를 완성해 주세요.
# 제한사항
# dirs는 string형으로 주어지며, 'U', 'D', 'R', 'L' 이외에 문자는 주어지지 않습니다.
# dirs의 길이는 500 이하의 자연수입니다.
# 입출력 예
# dirs answer
# "ULURRDLLU" 7
# "LULLLLLLU" 7
def solution(dirs):
    """Count distinct unit road segments walked on a bounded grid.

    The walker starts at (0, 0); each character of *dirs* ('U', 'D', 'L',
    'R') moves one step, but moves that would leave the [-5, 5] square are
    ignored. A segment counts once no matter how often or in which
    direction it is traversed.
    """
    step = {'U': (0, 1), 'D': (0, -1), 'L': (-1, 0), 'R': (1, 0)}
    visited = set()
    x, y = 0, 0
    for ch in dirs:
        dx, dy = step[ch]
        nx, ny = x + dx, y + dy
        if -5 <= nx <= 5 and -5 <= ny <= 5:
            # Record the segment in both orientations so direction is ignored.
            visited.add((x, y, nx, ny))
            visited.add((nx, ny, x, y))
            x, y = nx, ny
    # Each walked segment was stored twice, so halve the set size.
    return len(visited) // 2
import sqlite3

# Open the NYT books database; `cur` is shared by the helper below.
conn = sqlite3.connect("nyt.db")
cur= conn.cursor()
#SELECT e.rank,b.name FROM Books as b,entries as e WHERE b.id=e.id;
def update_score():
    """Assign every book a 1-based rank ordered by descending score.

    Reads Books ordered by score and writes each row's position into its
    `rank` column via the module-level cursor. Committing is left to the
    caller. (The unused local `scores = {}` from the original was removed.)
    """
    cur.execute("SELECT id FROM Books ORDER BY score desc")
    # Materialize all ids first so the UPDATEs don't disturb the SELECT.
    for count, row in enumerate(cur.fetchall(), start=1):
        cur.execute("UPDATE Books SET rank=? WHERE id=?", (count, row[0]))
# Recompute ranks, then persist the changes and close the connection.
update_score()
conn.commit()
conn.close()
|
#!/usr/bin/env python3
"""This is mydemo.py, a test for turtle.py"""
from turtle import *
import random
import math
import time
import platform
if platform.system() == 'Linux':
from evdev import list_devices, InputDevice, ecodes
# Gameplay tuning constants (pixels per frame unless noted otherwise).
PLANE_SPEED = 4
TURBO_SPEED = PLANE_SPEED * 2  # plane speed while turbo is active
BLT_SPEED = PLANE_SPEED * 4    # bullet speed
MSLE_SPEED = PLANE_SPEED * 2   # homing-missile speed
MSLE_LIFE = 8                  # missile lifetime, in seconds
MSLE_TURN = 2.5 # missile turn degree
HIT_RANGE = 15                 # hit-box half-width, in pixels
def reg_shape_plane(color, shape_name):
    """Register a compound plane shape (fuselage + wing) under *shape_name*."""
    shape = Shape("compound")
    # Fuselage polygon, then the wing triangle -- both filled and outlined
    # in the same color.
    for poly in (((0, 20), (-4, -8), (0, -6), (4, -8)),
                 ((0, 16), (-12, -2), (12, -2))):
        shape.addcomponent(poly, color, color)
    register_shape(shape_name, shape)
def reg_shape_missle(color, shape_name):
    """Register a thin triangular missile shape under *shape_name*."""
    outline = ((-1, 0), (0, 12), (1, 0))
    shape = Shape("compound")
    shape.addcomponent(outline, color)
    register_shape(shape_name, shape)
def new_bullet(blt_list, plane):
    """Spawn (or recycle) a bullet at *plane*'s position and heading.

    Each player owns at most 5 bullet turtles; once the cap is reached an
    off-screen (hidden) bullet is recycled. If all 5 are still visible the
    shot is silently dropped.
    """
    got_blt = False
    if len(blt_list) < 5:
        b = Turtle(visible=False)
        got_blt = True
        b.up()
        b.shapesize(0.15)
        b.fillcolor("black")
        b.shape("circle")
    else:  # re-use hidden bullet
        for b in blt_list:
            if not b.isvisible():
                got_blt = True
                # Remove here so the append below moves it to the list's end.
                blt_list.remove(b)
                break
    if not got_blt:
        return
    b.setpos(plane.xcor(), plane.ycor())
    b.setheading(plane.heading())
    b.showturtle()
    blt_list.append(b)
def new_missle(misl_list, plane):
    """Spawn (or recycle) a homing missile for *plane* (max 2 per player).

    mtime_list1/mtime_list2 run parallel to misl_list1/misl_list2 and record
    each missile's launch time so objects_move() can expire missiles after
    MSLE_LIFE seconds; this function keeps the two lists aligned.
    """
    if len(misl_list) < 2:
        m = Turtle(visible=False)
        # Missile color matches the owning plane's shape.
        if plane.shape() == "g_plane_shape":
            m.shape("g_missile")
        else:
            m.shape("b_missile")
        m.up()
    else:
        found = False
        for m in misl_list:
            if not m.isvisible():
                found = True
                break
        if found:
            index = misl_list.index(m)
            misl_list.remove(m)
            # Drop the matching launch time to keep the lists parallel.
            if misl_list == misl_list1:
                mtime_list1.pop(index)
            else:
                mtime_list2.pop(index)
        else:
            return
    m.setpos(plane.xcor(), plane.ycor())
    m.setheading(plane.heading())
    m.showturtle()
    t = time.time()
    misl_list.append(m)
    if misl_list == misl_list1:
        mtime_list1.append(t)
    else:
        mtime_list2.append(t)
def p1_shoot():
    # Fire a bullet from player 1's plane (only while alive/visible).
    if p1.isvisible():
        new_bullet(blt_list1, p1)
def p2_shoot():
    # Fire a bullet from player 2's plane (only while alive/visible).
    if p2.isvisible():
        new_bullet(blt_list2, p2)
def p1_fire():
    """Launch a missile from player 1, rate-limited to one per 0.5 s."""
    global last_fire_time1
    if not p1.isvisible():
        return
    t = time.time()
    if t - last_fire_time1 > 0.5:
        new_missle(misl_list1, p1)
        last_fire_time1 = t
def p2_fire():
    """Launch a missile from player 2, rate-limited to one per 0.5 s."""
    global last_fire_time2
    if not p2.isvisible():
        return
    t = time.time()
    if t - last_fire_time2 > 0.5:
        new_missle(misl_list2, p2)
        last_fire_time2 = t
# Keyboard/gamepad steering callbacks: rotate the plane 10 degrees per press.
def p1_turn_left():
    p1.left(10)
def p1_turn_right():
    p1.right(10)
def p2_turn_left():
    p2.left(10)
def p2_turn_right():
    p2.right(10)
def p1_turbo():
    """Player 1 turbo state machine.

    States: 0 = ready, 1 = turbo burst (1 s), 2 = cooldown (4 s).
    The turbo key is unbound during burst/cooldown so it can't re-trigger;
    the timer re-invokes this function to advance the state.
    """
    global p1_state
    if p1_state == 0:
        p1_state = 1
        ontimer(p1_turbo, 1000)
        onkey(None, "w")
    elif p1_state == 1:
        p1_state = 2
        ontimer(p1_turbo, 4000)
    elif p1_state == 2:
        p1_state = 0
        onkey(p1_turbo, "w")
def p2_turbo():
    """Player 2 turbo state machine (same states as p1_turbo)."""
    global p2_state
    if p2_state == 0:
        p2_state = 1
        ontimer(p2_turbo, 1000)
        onkey(None, "Up")
    elif p2_state == 1:
        p2_state = 2
        ontimer(p2_turbo, 4000)
    elif p2_state == 2:
        p2_state = 0
        onkey(p2_turbo, "Up")
def plane_explode(p):
    """Advance plane *p*'s explosion animation by one frame.

    States 11..20 map to explosion frames pics/expo-1.gif .. expo-10.gif.
    When the animation finishes the plane either respawns at a random
    position (consuming one life stamp) or stays hidden if a side is out
    of lives.
    """
    global life_list1, life_list2
    global p1_state, p2_state
    px_state = p1_state if p == p1 else p2_state
    #print("px_state:%d" % px_state)
    if px_state >= 11 and px_state <= 20:
        # Guard: make sure the plane shape is the plane (not a leftover
        # explosion frame) before switching to the gif below.
        if p == p1:
            if not p.shape() == "b_plane_shape":
                p.shape("b_plane_shape")
                return
        if p == p2:
            if not p.shape() == "g_plane_shape":
                p.shape("g_plane_shape")
                return
    pic = "pics/expo-%d.gif" % (px_state - 10)
    p.shape(pic)
    px_state = px_state + 1
    if p == p1: p1_state += 1
    else: p2_state += 1
    if px_state == 21:
        # Animation done: hide the wreck and reset the state machine.
        p.hideturtle()
        if p == p1: p1_state = 0
        else: p2_state = 0
        if len(life_list1) == 0 or len(life_list2) == 0:
            #getscreen().exitonclick()
            return
        # Consume one life (clear its HUD stamp) and respawn randomly.
        if p == p1:
            p.shape("b_plane_shape")
            id = life_list1.pop(0)
        else:
            p.shape("g_plane_shape")
            id = life_list2.pop(0)
        p.clearstamp(id)
        p.setx(random.uniform(-window_width() / 2, window_width() / 2))
        p.sety(random.uniform(-window_height() / 2, window_height() / 2))
        p.showturtle()
    return
"""
my pos (x, y), center (cx,cy), delta x and delta y (dx,dy)
"""
def in_range(x, cx, dx):
    """Return True iff *x* lies strictly inside the open interval
    (cx - dx, cx + dx)."""
    return cx - dx < x < cx + dx
"""Return True if should turn left"""
def left_or_right(m, p, log=False):
    """Homing decision: True when missile *m* should turn left toward plane *p*."""
    # Bearing from the missile to the plane, in degrees.
    bearing = math.degrees(math.atan2(p.ycor() - m.ycor(), p.xcor() - m.xcor()))
    # Counter-clockwise angle from the missile's heading to that bearing,
    # normalized into [0, 360).
    ccw = 360 - m.heading() + bearing
    if ccw > 360:
        ccw -= 360
    if ccw < 0:
        ccw += 360
    if log:
        print ("m(%d, %d) p(%d, %d) beta:%d mheading:%d TL_angle:%d" %
            (m.xcor(), m.ycor(), p.xcor(), p.ycor(), bearing, m.heading(), ccw))
    # Turning left is the shorter rotation when the CCW offset is < 180.
    return ccw < 180
def objects_move():
    """Per-frame game tick: move planes and weapons, resolve hits, reschedule.

    Driven by turtle's ontimer at ~33 fps. Planes wrap around screen edges;
    bullets despawn off-screen; missiles home toward the enemy plane and
    expire after MSLE_LIFE seconds; a weapon within HIT_RANGE of the enemy
    plane triggers its explosion state (11).
    """
    global p1_state, p2_state
    # --- player 1 movement (state 1 = turbo, 11..20 = exploding) ---
    if p1_state == 1:
        p1.fd(TURBO_SPEED)
    elif p1_state == 0 or p1_state == 2:
        p1.fd(PLANE_SPEED)
    elif p1_state >= 11 and p1_state <= 20:
        p1.fd(PLANE_SPEED)
        plane_explode(p1)
    # Wrap around: leaving one edge re-enters from the opposite one.
    if not in_range(p1.xcor(), 0, window_width() / 2):
        p1.setx(-p1.xcor())
    if not in_range(p1.ycor(), 0, window_height() / 2):
        p1.sety(-p1.ycor())
    # --- player 2 movement ---
    if p2_state == 1:
        p2.fd(TURBO_SPEED)
    elif p2_state == 0 or p2_state == 2:
        p2.fd(PLANE_SPEED)
    elif p2_state >= 11 and p2_state <= 20:
        p2.fd(PLANE_SPEED)
        plane_explode(p2)
    if p2.xcor() >= window_width() / 2 or p2.xcor() <= -window_width() / 2:
        p2.setx(-p2.xcor())
    if p2.ycor() >= window_height() / 2 or p2.ycor() <= -window_height() / 2:
        p2.sety(-p2.ycor())
    # --- player 1 weapons vs player 2 ---
    for wpn in blt_list1 + misl_list1:
        if not wpn.isvisible():
            continue
        # wpn is valid
        elif not in_range(wpn.xcor(), 0, window_width() / 2) or \
                not in_range(wpn.ycor(), 0, window_height() / 2):
            wpn.hideturtle()
            continue  # out of range
        elif wpn in blt_list1:
            wpn.fd(BLT_SPEED)
        else:  # wpn is missile
            # Steer toward p2, then advance; expire after MSLE_LIFE seconds.
            if left_or_right(wpn, p2):
                wpn.left(MSLE_TURN)
            else:
                wpn.right(MSLE_TURN)
            wpn.fd(MSLE_SPEED)
            index = misl_list1.index(wpn)
            if time.time() - mtime_list1[index] > MSLE_LIFE:
                wpn.hideturtle()
        # check weapon vs plane2
        if in_range(wpn.xcor(), p2.xcor(), HIT_RANGE) and \
                in_range(wpn.ycor(), p2.ycor(), HIT_RANGE):
            wpn.hideturtle()
            p2_state = 11  # start p2's explosion animation
            if len(life_list2) == 0:
                hideturtle()
                write("BLUE WON !!!", align="center", font=("Arial", 32, "normal"))
    # --- player 2 weapons vs player 1 (mirror of the loop above) ---
    for wpn in blt_list2 + misl_list2:
        if not wpn.isvisible():
            continue
        # wpn is valid
        if not in_range(wpn.xcor(), 0, window_width() / 2) or \
                not in_range(wpn.ycor(), 0, window_height() / 2):
            wpn.hideturtle()
            continue  # out of range
        elif wpn in blt_list2:
            wpn.fd(BLT_SPEED)
        else:  # wpn is missile
            if left_or_right(wpn, p1):
                wpn.left(MSLE_TURN)
            else:
                wpn.right(MSLE_TURN)
            wpn.fd(MSLE_SPEED)
            index = misl_list2.index(wpn)
            if time.time() - mtime_list2[index] > MSLE_LIFE:
                wpn.hideturtle()
        # check weapon vs plane1
        if in_range(wpn.xcor(), p1.xcor(), HIT_RANGE) and \
                in_range(wpn.ycor(), p1.ycor(), HIT_RANGE):
            wpn.hideturtle()
            p1_state = 11
            if len(life_list1) == 0:
                hideturtle()
                write("GREEN WON !!!", align="center", font=("Arial", 32, "normal"))
    update()
    ontimer(objects_move, 30)  # 33.3 frame per second
    return
# Gamepad support is Linux-only (evdev); elsewhere these are no-op stubs.
if platform.system() == 'Linux':
    def find_gamepad():
        """Collect all attached "USB Gamepad " evdev devices into `gamepads`."""
        global gamepads
        gamepads = []
        for path in list_devices():
            # for path in ['/dev/input/event7', '/dev/input/event8']:
            dev = InputDevice(path)
            # NOTE(review): the trailing space in the device name is
            # intentional -- it matches the hardware's reported name.
            if dev.name == "USB Gamepad ":
                print(dev.path, dev.name, dev.phys)
                gamepads.append(dev)
        return gamepads
    def do_gamepad():
        """Poll each gamepad every 100 ms and map buttons/D-pad to actions.

        Pad 0 controls player 1, any other pad player 2:
        A = shoot, B = missile, right trigger = turbo, D-pad X = steer.
        """
        global gamepads
        global p1_state, p2_state  # 0:Normal / 1:Turbo / 2:Restore
        for d in gamepads:
            keys = d.active_keys()
            for key in keys:
                if key == ecodes.BTN_THUMB:  # "A"
                    if d is gamepads[0]: p1_shoot()
                    else: p2_shoot()
                elif key == ecodes.BTN_THUMB2:  # "B"
                    if d is gamepads[0]: p1_fire()
                    else: p2_fire()
                elif key == ecodes.BTN_PINKIE:  # "R-TRIG"
                    if d is gamepads[0]:
                        if p1_state == 0: p1_turbo()
                    else:
                        if p2_state == 0: p2_turbo()
            # D-pad X axis: 0 = pressed left, 255 = pressed right.
            pad = d.absinfo(ecodes.ABS_X)
            if pad.value == 0:
                if d is gamepads[0]: p1_turn_left()
                else: p2_turn_left()
            elif pad.value == 255:
                if d is gamepads[0]: p1_turn_right()
                else: p2_turn_right()
        # schedule for next checking
        ontimer(do_gamepad, 100)
else:
    def find_gamepad():
        # No gamepad support on this platform.
        return []
    def do_gamepad():
        pass
def main():
    """Set up shapes, planes, key bindings, and start the game loops.

    Returns "EVENTLOOP" so the caller knows to enter turtle's mainloop().
    """
    # Register plane/missile vector shapes and the 10 explosion frames.
    reg_shape_plane("blue", "b_plane_shape")
    reg_shape_plane("green", "g_plane_shape")
    reg_shape_missle("blue", "b_missile")
    reg_shape_missle("green", "g_missile")
    for x in range(1, 11):
        pic = "pics/expo-%d.gif" % x
        register_shape(pic)
    # All game state lives in module-level globals shared by the callbacks.
    global p1, p2, p1_state, p2_state
    global blt_list1, blt_list2, misl_list1, misl_list2
    global life_list1, life_list2
    global gamepads
    global last_fire_time1, last_fire_time2
    global mtime_list1, mtime_list2
    game_over = False
    last_fire_time1 = last_fire_time2 = 0
    p1_state = 0
    p2_state = 0
    mtime_list1 = []
    mtime_list2 = []
    blt_list1 = []
    blt_list2 = []
    misl_list1 = []
    misl_list2 = []
    life_list1 = []
    life_list2 = []
    tracer(False)  # manual screen updates (update() in objects_move)
    # Player 1 (blue): top-left corner, stamps 4 life icons, flies down.
    p1 = Turtle(visible=False)
    p1.shape("b_plane_shape")
    p1.up()
    p1.goto(-window_width() / 2 + 30, window_height() / 2 - 30)
    for i in range(4):
        s_id = p1.stamp()
        life_list1.append(s_id)
        p1.fd(30)
    p1.setheading(270)
    p1.showturtle()
    # Player 2 (green): top-right corner, same life-stamp setup.
    p2 = Turtle(visible=False)
    p2.shape("g_plane_shape")
    p2.up()
    p2.goto(window_width() / 2 - 30, window_height() / 2 - 30)
    p2.setheading(180)
    for i in range(4):
        id = p2.stamp()
        life_list2.append(id)
        p2.fd(30)
    p2.setheading(270)
    p2.showturtle()
    # Keyboard bindings: WASD+space for p1, arrows+Return for p2.
    onkey(p1_turn_left, "a")
    onkey(p1_turn_right, "d")
    onkey(p1_shoot, "space")
    onkey(p1_fire, "s")
    onkey(p1_turbo, "w")
    onkey(p2_turn_left, "Left")
    onkey(p2_turn_right, "Right")
    onkey(p2_shoot, "Return")
    onkey(p2_fire, "Down")
    onkey(p2_turbo, "Up")
    listen()
    gamepads = find_gamepad()
    if len(gamepads):
        do_gamepad()
    objects_move()  # kicks off the self-rescheduling frame loop
    return "EVENTLOOP"
# Start the game; mainloop() keeps the turtle window alive afterwards.
if __name__ == '__main__':
    msg = main()
    mainloop()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from colorama import Fore
if __name__ == '__main__':
    # Print in green via colorama's ANSI codes. NOTE(review): there is no
    # Fore.RESET / autoreset, so subsequent terminal output may stay green.
    print(Fore.GREEN + 'Hello World')
|
import unittest
from selenium import webdriver
from bs4 import BeautifulSoup
class seleniumTest(unittest.TestCase):
    """Smoke test: load the Douyu directory page and print its title."""

    def setUp(self):
        # NOTE(review): PhantomJS support was removed from modern Selenium;
        # consider a headless Chrome/Firefox driver instead.
        self.driver = webdriver.PhantomJS()

    def testEle(self):
        driver = self.driver
        driver.get('http://www.douyu.com/directory/all')
        print(driver.title.encode('utf8'))

    def tearDown(self):
        # BUG FIX: original used the Python 2 print statement
        # (`print 'down'`), which is a SyntaxError on Python 3.
        print('down')
        # FIX: quit the browser so each test doesn't leak a PhantomJS process.
        self.driver.quit()
# Discover and run the TestCase above when executed directly.
if __name__ == "__main__":
    unittest.main()
|
from Board.Map.Tile import *
from Vector2 import Vector2
class Map:
    """Display-layer tile map mirroring the logic-layer map.

    Immutable-style: Update() returns a *new* Map rather than mutating.
    """

    def __init__(self, game, tiles=None, selectedTile=None):
        self.Resolution = game.Settings.Resolution
        # Build the tile grid from the game logic unless one is supplied.
        self.Tiles = tiles if tiles is not None else self.GenerateTiles(game)
        self.SelectedTile = selectedTile

    def GenerateTiles(self, game):
        """Create the 18x18 grid of display tiles from the logic map."""
        maxTiles = Vector2(18, 18)
        # Square tiles whose side is bounded by the smaller screen dimension.
        maxLength = min(self.Resolution.X // maxTiles.X, self.Resolution.Y // maxTiles.Y)
        maxTileSize = Vector2(maxLength, maxLength)
        tiles = []
        # NOTE(review): uses maxTiles.X for both dimensions; harmless while
        # the grid is square (18x18), but confirm if it ever becomes
        # rectangular.
        game.Settings.SetMapSize(Vector2(maxTiles.X * maxLength, maxTiles.X * maxLength))
        for X in range(0, maxTiles.X):
            row = []
            for Y in range(0, maxTiles.Y):
                logicTile = game.Logic.Map.GetTile(Vector2(X, Y))
                TileType = self.DetermineTileType(logicTile)
                row.append(TileType(Vector2(X, Y), maxTileSize, logicTile))
            tiles.append(row)
        return tiles

    @property
    def ActiveTile(self):
        # The currently selected tile, or None.
        return self.SelectedTile

    def SetActiveTile(self, position: Vector2):
        """Select the tile at *position* (deselect everything when None)."""
        for tile in self.TilesIterator:
            tile.Selected = False
        if position is None:
            self.SelectedTile = None
        else:
            self.SelectedTile = self.Tiles[position.X][position.Y]
            self.SelectedTile.Selected = True

    def DetermineTileType(self, logicTile):
        """Map a logic-layer tile instance to its display tile class."""
        # Imported lazily to avoid a circular import at module load time.
        import GameLogic.Map
        if type(logicTile) is GameLogic.Map.DesertTile:
            return DesertTile
        elif type(logicTile) is GameLogic.Map.ForestTile:
            return ForestTile
        elif type(logicTile) is GameLogic.Map.GoldTile:
            return GoldTile
        elif type(logicTile) is GameLogic.Map.IceTile:
            return IceTile
        elif type(logicTile) is GameLogic.Map.SeaTile:
            return SeaTile
        elif type(logicTile) is GameLogic.Map.SwampTile:
            return SwampTile
        else:
            # BUG FIX: error message previously read "sported".
            raise Exception("%s type is not supported" % str(type(logicTile)))

    @property
    def TilesIterator(self):
        """Yield every tile in row-major order."""
        for row in self.Tiles:
            for tile in row:
                yield tile

    def Update(self, game, onSelectedTileChanged):
        """Produce the next-frame Map, updating selection on mouse clicks.

        Calls *onSelectedTileChanged* with the newly selected logic tile.
        """
        # Was any tile clicked this frame?
        isClicked = next((True for tile in self.TilesIterator if tile.IsClickedByMouse(game)), False)
        nList = []
        for row in self.Tiles:
            nRow = []
            for tile in row:
                newTile = tile.Update(game)
                if isClicked:
                    if newTile.IsClickedByMouse(game):
                        newTile.Selected = True
                        self.SelectedTile = newTile
                        onSelectedTileChanged(newTile.LogicTile)
                    else:
                        newTile.Selected = False
                elif tile == self.SelectedTile:
                    # No click: carry the previous selection over.
                    self.SelectedTile = newTile
                nRow.append(newTile)
            nList.append(nRow)
        return Map(game, nList, self.SelectedTile)

    def Draw(self, game):
        for tile in self.TilesIterator:
            tile.Draw(game)
        # draw the units after al the tiles so you can move above the tiles
        for tile in self.TilesIterator:
            if tile.Unit is not None:
                tile.Unit.Draw(game)
|
#
# Copyright (C) 2020-2030 Thorium Corp FP <help@thoriumcorp.website>
#
from odoo import api, fields, models, modules
class ThoriumcorpPractitioner(models.Model):
    """Medical practitioner record, extending the abstract entity model."""
    _name = 'thoriumcorp.practitioner'
    _description = 'Thoriumcorp Practitioner'
    _inherit = 'thoriumcorp.abstract_entity'
    # Enforce uniqueness of the internal ID at the database level.
    _sql_constraints = [(
        'thoriumcorp_practitioner_unique_code',
        'UNIQUE (code)',
        'Internal ID must be unique',
    )]

    # Main medical center this practitioner is attached to.
    thoriumcorp_center_primary_id = fields.Many2one(
        string='Primary thoriumcorp center',
        comodel_name='medical.center',
    )
    # thoriumcorp_center_secondary_ids = fields.Many2many(
    #     string='Secondary thoriumcorp center',
    #     comodel_name='medical.center',
    # )
    # Unique internal identifier, auto-assigned from an ir.sequence.
    code = fields.Char(
        string='Internal ID',
        help='Unique ID for this professional',
        required=True,
        default=lambda s: s.env['ir.sequence'].next_by_code(s._name + '.code'),
    )
    role_ids = fields.Many2many(
        string='Roles',
        comodel_name='thoriumcorp.role',
    )
    practitioner_type = fields.Selection(
        [
            ('internal', 'Internal Entity'),
            ('external', 'External Entity')
        ],
        string='Entity Type',
    )
    specialty_id = fields.Many2one(
        string="Main specialty",
        comodel_name='thoriumcorp.specialty',
    )
    specialty_ids = fields.Many2many(
        string='Other specialties',
        comodel_name='thoriumcorp.specialty'
    )
    info = fields.Text(string='Extra info')

    @api.model
    def _get_default_image_path(self, vals):
        """Return the gender-specific default avatar path, unless the parent
        implementation already supplies one."""
        res = super(ThoriumcorpPractitioner, self)._get_default_image_path(vals)
        if res:
            return res
        # 'other' falls back to the male avatar (only two images are shipped).
        practitioner_gender = vals.get('gender', 'male')
        if practitioner_gender == 'other':
            practitioner_gender = 'male'
        image_path = modules.get_module_resource(
            'thoriumcorp_practitioner',
            'static/src/img',
            'practitioner-%s-avatar.png' % practitioner_gender,
        )
        return image_path
#class ThoriumcorpPatientDisease(models.Model):
# _name = 'thoriumcorp.patient_disease'
# _inherit = 'thoriumcorp.patient_disease'
#
# practitioner_id = fields.Many2one(
# comodel_name='thoriumcorp.practitioner',
# string='Physician', index=True
# )
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-26 23:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
# NOTE(review): auto-generated Django migration -- do not hand-edit; make
# model changes in models.py and generate a new migration instead.
class Migration(migrations.Migration):
    # Initial migration: creates Company, Contact, Project, and Task tables.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(blank=True, null=True)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('name', models.CharField(max_length=155)),
                ('phone', models.CharField(blank=True, max_length=25, null=True)),
                ('url', models.URLField(blank=True, null=True)),
                ('date_create', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=55)),
                ('first_name', models.CharField(max_length=55)),
                ('last_name', models.CharField(max_length=55)),
                ('date_create', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contacts', to='client.Company')),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=155)),
                ('description', models.TextField(blank=True, null=True)),
                ('date_create', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='client.Company')),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=155)),
                ('date_create', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
                # Self-referential FK enabling task/sub-task hierarchies.
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='child_tasks', to='client.Task')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='client.Project')),
            ],
        ),
    ]
|
import os
import pickle
from neural_model import NeuralModel
from sklearn.decomposition import PCA
from util.neuron_metadata import NeuronMetadataCollection
from util.analysis_util import *
# Shared connectome metadata used by every simulation in this module.
neuron_metadata_collection = NeuronMetadataCollection.load_from_chem_json('data/chem.json')
# Make all the param values be exactly formatted for caching.
def adjust_param(val):
    """Round *val* to 3 decimal places so cache-file names are stable."""
    return float(f"{val:.3f}")
def get_cache_file_path(C, Gc, ggap, gsyn):
    """Build the pickle-cache path for one (C, Gc, ggap, gsyn) simulation."""
    return (f"cached_notebook_results/"
            f"cached_simulation_C={C}_Gc={Gc}_ggap={ggap}_gsyn={gsyn}")
def simulate_until_stable(C, Gc, ggap, gsyn,
                          min_n_timesteps = 1000,
                          max_n_timesteps = 50000,
                          n_timesteps_convergence_check = 1000,
                          max_amplitude_raw_diff = 1,
                          max_amplitude_scaled_diff = 0.05,
                          debug = True):
    """
    See simulate().
    The difference is we can make multiple simulate() calls if the amplitude
    hasn't converged yet.
    See util/analysis_util.py > get_amplitude_convergence.
    Results are cached to disk keyed by the (rounded) parameter values.
    """
    # Normalize params so equal values always map to the same cache file.
    C = adjust_param(C)
    Gc = adjust_param(Gc)
    ggap = adjust_param(ggap)
    gsyn = adjust_param(gsyn)
    cache_file = get_cache_file_path(C, Gc, ggap, gsyn)
    if os.path.isfile(cache_file):
        print("Loading saved results from pickle file {}".format(cache_file))
        with open(cache_file, "rb") as f:
            return pickle.load(f)
    # If cached result doesn't exist, compute.
    n_timesteps = min_n_timesteps
    increment = n_timesteps_convergence_check
    all_dynamics = None
    while(True):
        all_dynamics = simulate(C, Gc, ggap, gsyn, n_timesteps)
        # Look only at the most recent convergence-check window.
        last_dynamics = all_dynamics[n_timesteps - n_timesteps_convergence_check:,:]
        pca = PCA(n_components = 1)
        projected_X = pca.fit_transform(last_dynamics)
        # Check amplitude convergence of top PC.
        amplitude_diff_raw, amplitude_diff_scaled = get_amplitude_differences(projected_X[:,0])
        amplitude = get_amplitude(projected_X[:,0])
        if debug:
            # BUG FIX: 'amplitude' was printed with format index {2} (the
            # scaled diff) twice; it now correctly uses {3}.
            print(("Simulation length {0:.2f}, raw amplitude diff {1:.2f}," +
                   " scaled amplitude diff {2:.2f}, amplitude {3:.2f}")
                  .format(n_timesteps, amplitude_diff_raw, amplitude_diff_scaled, amplitude))
        # Define convergence as when amplitude difference of two contiguous
        # time chunks is small enough.
        # Raw diff needed to catch stable focus, where amplitude keeps getting smaller.
        # Normalized diff needed to catch limit cycles with large amplitudes,
        # but model has roundoff errors.
        if (amplitude_diff_raw < max_amplitude_raw_diff
                or amplitude_diff_scaled < max_amplitude_scaled_diff):
            break
        else:
            n_timesteps += increment
            # TODO: A better way is to place this convergence logic in
            # neural_model.py, so we don't restart the simulation.
            increment *= 2
            if n_timesteps > max_n_timesteps:
                print("n_timesteps {} is too high! We give up on convergence :(".format(n_timesteps))
                break
    # Update cache
    with open(cache_file, "wb") as f:
        pickle.dump(all_dynamics, f)
    return all_dynamics
def simulate(C, Gc, ggap, gsyn, n_timesteps):
  """
  Run a standard simulation of NeuralModel with the given parameter values.

  No random seed is specified here, so the model chooses its own.

  Args:
    C - cell membrane capacitance pF / 100 = arb
    Gc - cell membrane conductance pS / 100 = arb
    ggap - global gap junction conductance pS / 100 = arb
    gsyn - global synaptic conductance pS / 100 = arb
    n_timesteps - how long to run the model for
  Returns:
    Matrix of normalized membrane potential time series for all neurons.
  """
  model = NeuralModel(neuron_metadata_collection, C, Gc, ggap, gsyn)
  # Constant current injections driving the simulated behaviour.
  injections = (("AVBL", 2.3), ("AVBR", 2.3), ("PLML", 1.4), ("PLMR", 1.4))
  for neuron_name, current in injections:
    model.set_current_injection(neuron_name, current)
  model.init()
  # run() returns (raw potentials, synaptic states, normalized potentials);
  # only the normalized potentials are needed by callers.
  _, _, normalized_potentials = model.run(n_timesteps)
  return normalized_potentials
|
#coding:utf-8
# Build a list of paginated search URLs from the given page count.
class urlku(object):
    """Builds paginated search-result URLs for the tmsf.com new-house listing."""

    def ulrs(self, numb):
        """Return a list of `numb` listing URLs, one per page (pages 1..numb)."""
        base = "http://www.tmsf.com/newhouse/property_searchall.htm?keytype=1&searchkeyword=&keyword=&sid=&districtid=&areaid=&dealprice=&propertystate=&propertytype=&ordertype=&priceorder=&openorder=&view720data=&page="
        return [base + str(page) + "&bbs=" for page in range(1, numb + 1)]
try:
    # Python 2: the standard exception hierarchy lived in the `exceptions` module.
    import exceptions
    _ExceptionBase = exceptions.Exception
except ImportError:
    # Python 3: the `exceptions` module no longer exists; the builtin
    # Exception is the same class it used to alias.
    _ExceptionBase = Exception


class UnitsError(_ExceptionBase):
    """Raised when an unknown unit constant is passed to Units helpers."""
    pass
class Units(object):
    """Registry of unit-type constants with helpers mapping them to display labels."""
    # Unit-type constants (values are arbitrary but stable identifiers).
    byte = 0
    g_byte = 1
    m_byte = 2
    k_byte = 3
    t_byte = 4
    second = 5
    percentage = 6
    kB = 7

    def __init__(self):
        # Unit constants grouped by category (size / time / other).
        self.units_types = {
            'byte': [Units.byte,
                     Units.g_byte,
                     Units.m_byte,
                     Units.k_byte,
                     Units.t_byte,
                     Units.kB],
            'time': [Units.second],
            'other': [Units.percentage]
        }
        # Human-readable label for each unit constant.
        self.string_units = {
            Units.byte: 'byte',
            Units.g_byte: 'GB',
            Units.m_byte: 'MB',
            Units.k_byte: 'KB',
            Units.t_byte: 'TB',
            Units.second: 'second',
            Units.percentage: '%',
            Units.kB: 'kB'
        }

    def is_unit(self, unit_type):
        """Return True if unit_type (an int or int-like string) is a known unit constant."""
        return int(unit_type) in self.string_units

    def get_units(self):
        """Return a flat list of all known unit constants."""
        units = []
        for x in self.units_types.values():
            units.extend(x)
        return units

    def get_string_units(self):
        """Return the full unit-constant -> label mapping."""
        return self.string_units

    def get_string_unit(self, unit_type):
        """Return the label for unit_type.

        Raises:
            UnitsError: if unit_type is not a known unit constant.

        Bug fix: look up int(unit_type) rather than the raw value, so that
        int-like strings (e.g. "1"), which is_unit() accepts, no longer raise
        a KeyError here.
        """
        if self.is_unit(unit_type):
            return self.string_units[int(unit_type)]
        else:
            msg = 'The specified unit %s is wrong' % str(unit_type)
            raise UnitsError(msg)
|
import json
import boto3
import uuid
def lambda_handler(event, context):
    """AWS Lambda entry point: forward the request body to SQS, echo it back.

    Returns a 200 response whose body contains the SQS MessageId and the
    original event body.
    """
    payload = event["body"]
    sqs_response = send_message(payload)
    result = {
        "message_id": sqs_response['MessageId'],
        "event": payload,
    }
    return {
        "statusCode": 200,
        "body": json.dumps(result),
    }
def get_queue():
    """Return a handle to the 'newton_sqs.fifo' queue via the boto3 SQS resource."""
    return boto3.resource('sqs').get_queue_by_name(QueueName='newton_sqs.fifo')
def send_message(body):
    """Send `body` to the FIFO queue.

    A fresh UUID is used as the MessageGroupId, so messages are not serialized
    into a single FIFO group.
    """
    target_queue = get_queue()
    group_id = str(uuid.uuid4())
    return target_queue.send_message(MessageBody=body, MessageGroupId=group_id)
|
# Generated by Django 3.1.6 on 2021-02-19 11:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: redefines `site_name` on both purchase-request
    # models as a nullable/blank ForeignKey to sites.Site. on_delete=PROTECT
    # blocks deletion of a Site that is still referenced by an order.
    dependencies = [
        ('sites', '0002_auto_20210207_1523'),
        ('purchase_request', '0004_sitespurchaserequest_month'),
    ]
    operations = [
        migrations.AlterField(
            model_name='mainpurchaserequest',
            name='site_name',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='mainorder_rev', to='sites.site'),
        ),
        migrations.AlterField(
            model_name='sitespurchaserequest',
            name='site_name',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='siteorder_rev', to='sites.site'),
        ),
    ]
|
import matplotlib.pyplot as plt
from matplotlib import path
import numpy as np
from scipy.optimize import least_squares
import matplotlib.patches as patches
import cv2
import hylite
from hylite.reference.features import HyFeature
from hylite import HyData
from hylite.project import pix_to_ray_pano, pix_to_ray_persp
class Panel( HyData ):
    """
    A class for identifying calibration reference in images and storing
    the observed pixel radiances and known target reflectance values. This is used
    by, e.g., empirical line calibration procedures.
    """
    def __init__(self, material, radiance, **kwds):
        """
        Generic constructor. Can be of the following forms:
        *Arguments*:
         - material = a hylite.reference.Target instance containing target reflectance data for this panel.
         - radiance = either a HyImage object (which contains some reference pixels) or a
                      NxM numpy array containing radiance values for N pixels across M bands.
        *Keywords*:
         - wavelengths = wavelengths corresponding to the radiance values (if radiance is an array rather than a HyImage
                         object).
         - method = 'manual' (default) to manually pick reference in image (using an interactive plot). Can also be
                    'sobel' or 'laplace' to use the corresponding edge detectors to automatically identify the
                    calibration target (using OpenCV contouring).
         - bands = list of band indices (integer) or wavelengths (float) to use when selecting the target.
         - edge_thresh = the edge threshold (as percentile of gradient values) used when automatically identifying reference.
                         Default is 95.
         - area_thresh = the minimum area (in pixels) of candidate panels. Used to discard small/bright areas. Default is 100.
         - shrink = a shrink factor to reduce the size of reference identified automatically (and so remove dodgy pixels
                    near the target edge/frame. Default is 0.4.
         - db = If True, visualisation of the edge detection layers will be plotted for debug purposes. Default is false.
        """
        super().__init__(None) # initialise header etc.
        self.source_image = None # init defaults
        self.outline = None
        self.normal = None
        if isinstance(radiance, np.ndarray): # radiance is a list of pixels
            # check and copy radiance data
            if len(radiance.shape) == 1: # we've been given mean radiances
                radiance = radiance[np.newaxis, :]
            assert len(radiance.shape) == 2, "Error, radiance must be a 2-D array (N pixels by M bands)."
            self.data = radiance.copy()
            # check and copy wavelength data
            assert 'wavelengths' in kwds, "Error - wavelengths must be provided for pixel array."
            self.set_wavelengths(np.array(kwds["wavelengths"]))
        elif radiance.is_image(): # radiance is a hyimage
            self.source_image = radiance # store reference to original image
            method = kwds.get("method", 'manual') # what method to use to pick target?
            bands = kwds.get("bands", 428.0)
            # select target region
            if 'manual' in method.lower(): # pick region using interactive plot
                verts = radiance.pickPolygons(region_names=['Target'], bands=bands)[0]
                verts = np.vstack([verts, verts[0][None, :]]) # add return to first point
                self.outline = path.Path(verts) # make matplotlib path from selected region
            else:
                db = kwds.get('db', False) # draw plots?
                # calculate greyscale image
                if isinstance(bands, tuple) or isinstance(bands, list):
                    bands = [radiance.get_band_index(b) for b in bands] # convert to indices
                    gray = np.sum(radiance.data[:, :, bands], axis=2) / np.nanmax(radiance.data[:, :, bands])
                else:
                    bands = radiance.get_band_index(bands)
                    gray = radiance.data[:, :, bands] / np.nanmax(radiance.data[:, :, bands])
                gray = cv2.GaussianBlur(gray, (3, 3), 0) # apply slight blur to improve edge detection
                if db:
                    plt.figure(figsize=(20, 10))
                    plt.imshow(gray.T, cmap='gray')
                    plt.title("Greyscale")
                    plt.show()
                # extract edges
                if 'sobel' in method.lower() or 'auto' in method.lower(): # pick edges using sobel filter
                    sobelx = cv2.Sobel(gray.astype(np.float32), cv2.CV_64F, 1, 0, ksize=5) # x
                    sobely = cv2.Sobel(gray.astype(np.float32), cv2.CV_64F, 0, 1, ksize=5) # y
                    sobel = np.sqrt(sobelx ** 2 + sobely ** 2)
                    thresh = np.nanpercentile(sobel, kwds.get('edge_thresh', 95))
                    edge = sobel > thresh
                elif 'laplace' in method.lower(): # pick edges using laplace filter
                    laplacian = cv2.Laplacian(gray.astype(np.float32), cv2.CV_32F)
                    thresh = np.nanpercentile(laplacian, kwds.get('edge_thresh', 95))
                    edge = laplacian > thresh
                else:
                    assert False, "Error - %s is not a recognised extraction method. Try 'sobel' or 'laplace'." % method
                if db:
                    plt.figure(figsize=(20, 10))
                    plt.imshow(edge.T)
                    plt.title("Edge")
                    plt.show()
                # contour and find object contours
                # NOTE(review): the 3-value unpack of cv2.findContours below is
                # the OpenCV 3.x API; OpenCV 4.x returns (contours, hierarchy)
                # only — confirm the pinned OpenCV version before upgrading.
                _, threshold = cv2.threshold(edge.astype(np.uint8) * 254, 240, 255, cv2.THRESH_BINARY)
                _, contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                # find brightest quadrilateral with area above threshold
                maxCtr = None
                maxBright = -1
                area_thresh = kwds.get("area_thresh", 100)
                for cnt in contours:
                    # simplify/approximate contour
                    approx = cv2.approxPolyDP(cnt, 0.1 * cv2.arcLength(cnt, True), True)
                    # is it a quadrilateral?
                    if approx.shape[0] == 4:
                        # calculate area
                        area = cv2.contourArea(approx)
                        if area > area_thresh: # large enough?
                            # calculate approx brightness (by summing bounding box)
                            verts = np.array([approx[:, 0, 1], approx[:, 0, 0]]).T
                            xmin, xmax = np.min(verts[..., 0]), np.max(verts[..., 0])
                            ymin, ymax = np.min(verts[..., 1]), np.max(verts[..., 1])
                            patch = gray[xmin:xmax, ymin:ymax]
                            bright = np.nanmedian(patch)
                            if db:
                                plt.imshow(gray.T, cmap='gray')
                                plt.title("Candidate panel")
                                plt.axvline(xmin, color='r')
                                plt.axvline(xmax, color='r')
                                plt.axhline(ymin, color='r')
                                plt.axhline(ymax, color='r')
                                plt.show()
                            # store?
                            if maxBright < bright:
                                maxCtr = approx
                                maxBright = bright
                # convert to maplotlib path
                # NOTE(review): if no quadrilateral passed the filters, maxCtr
                # is still None and the indexing below raises — confirm whether
                # a friendlier error is wanted here.
                verts = np.array([maxCtr[:, 0, 1], maxCtr[:, 0, 0]]).T
                verts = np.vstack([verts, verts[0][None, :]]) # add return to first point
                self.outline = path.Path(verts, closed=True) # make matplotlib path from selected region
                # shrink to 40% of original size (to remove frame and dodgy edge pixels)
                centroid = np.mean(self.outline.vertices[1::, :], axis=0)
                verts = kwds.get("shrink", 0.4) * (self.outline.vertices - centroid) + centroid
                self.outline = path.Path(verts, closed=True)
            # calculate pixels within selected region
            xx, yy = np.meshgrid(np.arange(radiance.xdim()), np.arange(radiance.ydim()))
            xx = xx.flatten()
            yy = yy.flatten()
            points = np.vstack([xx, yy]).T # coordinates of each pixel
            mask = self.outline.contains_points(points) # identify points within path
            mask = mask.reshape((radiance.ydim(), radiance.xdim())).T # reshape to pixel mask
            # extract pixel reflectance values based on this mask
            self.data = np.array([radiance.data[:, :, b][mask] for b in range(radiance.band_count())]).T
            # also copy across wavelength info
            assert radiance.has_wavelengths(), "Error - radiance image must have wavelength information."
            self.set_wavelengths(radiance.get_wavelengths()) # get wavelength data
        else:
            assert False, "Error: radiance argument must be a HyImage instance or a numpy array of pixels."
        # if we have lots of target pixels (we don't need that many), only keep top 50% [as darker ones likely result from
        # dodgy border effects
        if self.data.shape[0] > 30:
            brightness = np.nanmean(self.data, axis=1)
            mask = brightness > np.nanpercentile(brightness, 50)
            self.data = self.data[mask, :]
        # extract reflectance data from target
        target_bands = material.get_wavelengths()
        assert np.nanmin(target_bands) <= np.nanmin(
            self.get_wavelengths()), "Error - calibration range does not cover pixel range. " \
                                     "Radiance data starts at %.1f nm but calibration data starts %.1f nm." % (
            np.nanmin(self.get_wavelengths()), np.nanmin(target_bands))
        assert np.nanmax(target_bands) >= np.nanmax(
            self.get_wavelengths()), "Error - calibration range does not cover pixel range. " \
                                     "Radiance data ends at %.1f nm but calibration data ends %.1f nm." % (
            np.nanmax(self.get_wavelengths()), np.nanmax(target_bands))
        # map each pixel wavelength to the nearest calibration wavelength
        idx = [np.argmin(np.abs(target_bands - w)) for w in self.get_wavelengths()] # matching wavelengths
        self.reflectance = material.get_reflectance()[idx]
        self.material = material
    def copy(self):
        """
        Make a deep copy of this panel instance.
        *Returns*
         - a new Panel instance.
        """
        return Panel( self.material, self.data, wavelengths=self.get_wavelengths() )
    def get_mean_radiance(self):
        """
        Calculate and return the mean radiance for each band of all the pixels in this calibration region.
        """
        return np.nanmean(self.data, axis=0)
    def get_reflectance(self):
        """
        Get the known (reference) reflectance of this panel.
        """
        return self.reflectance
    def get_normal(self, cam=None, recalc=False):
        """
        Get the normal vector of this panel by assuming its outline is square (prior to projection onto the camera).
        *Arguments*:
         - cam = a Camera object describing the pose of the camera from which the panel is viewed. Default is None (to retrieve
                 previously stored normals)
         - recalc = True if a precomputed (or otherwise defined) normal vector should be recalculated. Default is False.
        *Returns*:
         - norm = the normal vector of the panel (in world coordinates). This is also stored as self.normal.
        """
        if recalc or self.normal is None:
            # check outline is available
            assert cam is not None, "Error - normal vector not previously defined. Please specify a camera object (cam) to estimate normal."
            assert self.outline is not None, "Error - self.outline must contain four points to estimate this panels normal vector..."
            # get corners of panel and convert to rays
            corners = np.array([self.outline.vertices[i, :] for i in range(4)])
            if cam.is_panoramic():
                ray1 = pix_to_ray_pano(corners[0, 0], corners[0, 1], cam.fov, cam.step, cam.dims)
                ray2 = pix_to_ray_pano(corners[1, 0], corners[1, 1], cam.fov, cam.step, cam.dims)
                ray3 = pix_to_ray_pano(corners[2, 0], corners[2, 1], cam.fov, cam.step, cam.dims)
                ray4 = pix_to_ray_pano(corners[3, 0], corners[3, 1], cam.fov, cam.step, cam.dims)
            else:
                ray1 = pix_to_ray_persp(corners[0, 0], corners[0, 1], cam.fov, cam.dims)
                ray2 = pix_to_ray_persp(corners[1, 0], corners[1, 1], cam.fov, cam.dims)
                ray3 = pix_to_ray_persp(corners[2, 0], corners[2, 1], cam.fov, cam.dims)
                ray4 = pix_to_ray_persp(corners[3, 0], corners[3, 1], cam.fov, cam.dims)
            a = 1.0 # length of each square (in arbitrary coordinates)
            h = np.sqrt(2) # length of hypot relative to sides
            # Objective for the least-squares solve: find depths along the four
            # corner rays such that the corners form a unit square in 3-D.
            def opt(x, sol=False):
                # get test depths
                z1, z2, z3, z4 = x
                # calculate edge coordinates
                A = ray1 * z1
                B = ray2 * z2
                C = ray3 * z3
                D = ray4 * z4
                # and errors with edge lengths
                AB = np.linalg.norm(B - A)
                BC = np.linalg.norm(C - B)
                CD = np.linalg.norm(D - C)
                DA = np.linalg.norm(A - D)
                AC = np.linalg.norm(C - A)
                BD = np.linalg.norm(D - B)
                if not sol:
                    return [AB - a, BC - a, CD - a, DA - a, AC - h, BD - h] # return for optimiser
                else: # return solution (normal vector)
                    AB = (B - A) / AB
                    BC = (C - B) / BC
                    return np.cross(AB, BC)
            # get normal vector in camera coords
            sol = least_squares(opt, (10.0, 10.0, 10.0, 10.0))
            norm = opt(sol.x, sol=True)
            # rotate to world coords
            norm = np.dot(cam.get_rotation_matrix(), norm)
            self.set_normal(norm)
        return self.normal
    def set_normal(self, n ):
        """
        Set panel normal vector to a known vector.
        *Arguments*:
         - n = a (3,) numpy array containing the normal vector in world coordinates.
        """
        if n is None:
            self.normal = None # remove normal
        else:
            assert len(n) == 3, "Error - n must be a (3,) normal vector."
            self.normal = np.array(n) / np.linalg.norm(n) # enforce n has length 1.0
            if self.normal[2] < 0:
                self.normal *= -1 # panel always points upwards
    def get_skyview(self, hori_elev=0.0, up=np.array([0, 0, 1])):
        """
        Get this panels skyview factor. Normal vector must be defined, otherwise an error will be thrown.
        *Arguments*:
         - hori_elev = the angle from the panel to the horizon (perpendicular to the panel's orientation) in degrees.
                       Used to reduce the sky view factor if the panel is below the horizon (e.g. in an open pit mine).
                       Default is 0.0 (i.e. assume a flat horizon). Can also be negative if sky is visible below the
                       (flat) horizon.
         - up = the vertical (up) vector. Default is [0,0,1].
        *Returns*:
         - this panels sky view factor (assuming the panel is relatively unoccluded and the horizon is flat).
        """
        # proportion of sky visible assuming horizontal horizon
        s = (np.pi - np.arccos(np.dot(up, self.normal))) / np.pi
        # adjust according to hori_elev [ and enforce range from 0 - 1.0
        return min(max(0, s - (np.deg2rad(hori_elev) / np.pi)), 1.0)
    def get_alpha(self, illudir):
        """
        Return the reflected light fraction of this panel based on the specified illumination direction using
        Lamberts' cosine law.
        """
        assert len(illudir) == 3, "Error - illudir must be a (3,) numpy array."
        if illudir[2] > 0: # check illudir is pointing downwards
            illudir = illudir * -1
        # clamp to zero so back-lit panels reflect nothing
        return max( 0, np.dot( -self.normal, illudir ) )
    def quick_plot(self, bands=hylite.RGB, **kwds):
        """
        Quickly plot the outline of this calibration target for quality checking etc.
        *Arguments*:
         - bands = the image bands to plot as a preview. Default is io.HyImage.RGB.
        *Keywords*:
         - keywords are passed to HyData.plot_spectra( ... ).
        """
        if self.source_image is not None: # plot including preview of panel
            fig, ax = plt.subplots(1, 2, figsize=(15, 5), gridspec_kw={'width_ratios': [1, 3]})
            # plot base image
            self.source_image.quick_plot(bands, ax=ax[0])
            ax[0].set_xticks([])
            ax[0].set_yticks([])
            # plot target on image and set extents
            patch = patches.PathPatch(self.outline, edgecolor='orange', fill=False, lw=2)
            bbox = self.outline.get_extents()
            padx = (bbox.max[0] - bbox.min[0]) * 1.5
            pady = (bbox.max[1] - bbox.min[1]) * 1.5
            ax[0].add_patch(patch)
            ax[0].set_xlim(bbox.min[0] - padx, bbox.max[0] + padx)
            ax[0].set_ylim(bbox.max[1] + pady, bbox.min[1] - pady)
            # plot spectra
            kwds['labels'] = kwds.get('labels', HyFeature.Themes.ATMOSPHERE)
            self.plot_spectra( ax=ax[1], **kwds )
        else: # no image data, just plot spectra
            kwds['labels'] = kwds.get('labels', HyFeature.Themes.ATMOSPHERE)
            fig, ax = self.plot_spectra(**kwds)
            ax.set_ylabel('Downwelling Radiance')
        return fig, ax
    def plot_ratio(self, ax = None):
        """
        Plots the ratio between known reflectance and observed radiance for each band in this target.
        *Arguments*:
         - ax = the axes to plot on. If None (default) then a new axes is created.
        *Returns*:
         -fig, ax = the figure and axes objects containing the plot.
        """
        # NOTE(review): if a non-None `ax` is passed, `fig` is never assigned
        # and the return statement raises NameError — callers currently must
        # pass ax=None. Confirm intended behaviour.
        if ax is None:
            fig, ax = plt.subplots(figsize=(15, 10))
        # calculate ratio
        ratio = self.get_mean_radiance() / self.get_reflectance()
        # plot
        ax.plot( self.get_wavelengths(), ratio )
        ax.set_ylabel("radiance / reflectance" )
        ax.set_xlabel("Wavelength (nm)")
        return fig, ax
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from spyro.utils import progress
from spyro.value_estimation import STATION_NAMES
try:
    from fdsim.helpers import lonlat_to_xy
except ImportError:
    # fdsim is an optional dependency; only the functions that need it break.
    # Catch ImportError specifically so real errors inside fdsim still surface.
    progress("fdsim not installed, some functions might not work.")
try:
    import geopandas as gpd
except ImportError:
    # geopandas is also optional; geo-plotting helpers will be unavailable.
    progress('geopandas not installed, some functions might not work.')
def set_sns_params(font_scale=1.2, **kwargs):
    # Thin wrapper around sns.set() that applies a larger default font scale;
    # extra keyword arguments are forwarded unchanged.
    sns.set(font_scale=font_scale, **kwargs)
def quantile_range(num_quantiles=50):
    """Return `num_quantiles` evenly spaced quantile-positions in (0, 1).

    The positions are the midpoints of `num_quantiles` equal-width bins,
    e.g. num_quantiles=4 -> [0.125, 0.375, 0.625, 0.875].

    Parameters
    ----------
    num_quantiles: int, default=50
        The number of quantile-positions to generate.
    """
    step = 1. / num_quantiles
    return np.arange(step / 2., 1, step)
def get_table_quantiles(table, num_quantiles=51, inner_key="responses"):
    """Calculate quantiles over the simulated responses.

    Parameters
    ----------
    table: dict
        The table containing all possible states as keys and a dictionary of
        results as values.
    num_quantiles: int, default=51
        The number of quantiles to compute, spread evenly over (0, 1).
    inner_key: str, default="responses"
        The key of the inner results dictionary that points to the array to
        compute quantiles over.

    Returns
    -------
    dict mapping each state to an array of its quantile values.
    """
    taus = quantile_range(num_quantiles)
    return {state: np.quantile(results[inner_key], taus)
            for state, results in table.items()}
def get_reachable_states(table, state):
    """Filter a table to keep only the states reachable from the current state.

    A state is reachable when it has the same total vehicle count (relocations
    move vehicles but never add or remove them).

    Parameters
    ----------
    table: dict
        The table containing all possible states. Must have states as keys.
    state: tuple, array
        The current state.
    """
    total = sum(state)
    reachable = {}
    for key, value in table.items():
        if sum(key) == total:
            reachable[key] = value
    return reachable
def get_num_relocations(state1, state2):
    """Get the total number of relocations needed to go from one state to the other.

    Counts only the vehicles that must leave a station (positive surplus);
    arrivals are the mirror image and are not double-counted.

    Parameters
    ----------
    state1, state2: tuple, array
        The two states to transition between.

    Returns
    -------
    num_relocations: int
        The number of relocations.
    """
    surplus = np.array(state1) - np.array(state2)
    return surplus.clip(min=0).sum()
def group_states_by_num_relocations(table, state, max_relocs=None):
    """Organize a table of reachable states by the number of relocations needed
    to reach each state.

    Parameters
    ----------
    table: dict
        A table with states as keys. Results are only valid if the table
        contains only states that are reachable from the current state; i.e.,
        sums of the states must be the same.
    state: tuple, array
        The current state.
    max_relocs: int, default=None
        The maximum number of relocations to consider. States requiring more
        relocations (or zero) are dropped from the result.

    Returns
    -------
    tables_dict: dict
        Maps the required number of relocations (1..max_relocs) to the part of
        the original table with exactly that relocation distance.
    """
    distances = [get_num_relocations(key, state) for key in table.keys()]
    if max_relocs is None:
        max_relocs = max(distances)
    grouped = {n: {} for n in range(1, max_relocs + 1)}
    for dist, (key, value) in zip(distances, table.items()):
        # distance 0 (the current state) and distances above max_relocs are skipped
        if dist in grouped:
            grouped[dist][key] = value
    return grouped
def get_state_expectations(table, inner_key=None, std=False):
    """Compute the expected value (and optionally std) of every state's results.

    Parameters
    ----------
    table: dict
        A table with states as keys.
    inner_key: str, default=None
        The key of the inner results dictionary that points to the array with
        values; if None, the table values are used directly.
    std: bool, default=False
        If True, each entry becomes a (mean, std) tuple instead of the mean.
    """
    if std:
        summarize = lambda arr: (np.mean(arr), np.std(arr))
    else:
        summarize = np.mean
    if inner_key is None:
        return {state: summarize(values) for state, values in table.items()}
    return {state: summarize(values[inner_key]) for state, values in table.items()}
def get_state_with_best_expectation(table, inner_key=None, minimum=True):
    """Find the state with the highest or lowest expectation.

    Parameters
    ----------
    table: dict
        A table with states as keys.
    inner_key: str, default=None
        The key of the inner results dictionary that points to the array with values.
    minimum: bool, default=True
        If true, finds the state with the minimal expectation, rather than the
        maximum.

    Returns
    -------
    state: tuple
        The best state in expectation.
    values: any
        The results corresponding to the best state. Exact contents depend on
        inputs; usually a dictionary with multiple arrays, or an array.
    """
    expectations = get_state_expectations(table, inner_key=inner_key)
    # dicts preserve insertion order, so positions in `expectations` line up
    # with positions in `table`.
    pick = np.argmin if minimum else np.argmax
    position = pick(list(expectations.values()))
    best_key = list(table.keys())[position]
    return best_key, table[best_key]
def get_best_states_by_num_relocations(table, state, to_quantiles=True, inner_key="responses",
                                       minimum=True, num_quantiles=50, max_relocs=None):
    """Find the best reachable state for each possible number of relocations.

    Parameters
    ----------
    table: dict
        A table with states as keys.
    state: tuple, array
        The current state.
    to_quantiles: bool, default=True
        If True, first reduce each state's results to quantiles.
    inner_key: str, default="responses"
        The key of the inner results dictionary that points to the array with values.
    minimum: bool, default=True
        If true, finds the state with the minimal expectation, rather than the
        maximum.
    num_quantiles: int, default=50
        Number of quantiles used when to_quantiles is True.
    max_relocs: int, default=None
        The maximum number of relocations to consider.

    Returns
    -------
    dict mapping each relocation count to {"state", "values", "num_relocs"}.
    """
    reachable = get_reachable_states(table, state)
    if to_quantiles:
        reachable = get_table_quantiles(reachable, num_quantiles=num_quantiles, inner_key=inner_key)
        inner_key = None  # quantile tables hold plain arrays, not dicts
    grouped = group_states_by_num_relocations(reachable, state, max_relocs=max_relocs)
    results = {}
    for num_relocs, sub_table in grouped.items():
        best_state, best_values = get_state_with_best_expectation(
            sub_table, inner_key=inner_key, minimum=minimum)
        results[num_relocs] = {"state": best_state, "values": best_values,
                               "num_relocs": num_relocs}
    return results
def augment_quantile_data(quantiles, values, num_points=1000):
    """Generate dense data points by interpolating between quantile values.

    Useful for plotting a smooth distribution from a small set of quantiles.

    Parameters
    ----------
    quantiles: array-like
        The quantile-positions.
    values: array-like
        The values of the quantile-positions in `quantiles`. Must be of the
        same length.
    num_points: int
        The number of data points to generate per unit of quantile range. More
        points give a denser distribution at more computational cost downstream.

    Returns
    -------
    data: array-like
        Values interpolated between the provided quantiles.
    """
    sample_positions = np.arange(0, np.max(quantiles), 1 / num_points)
    return np.interp(sample_positions, quantiles, values)
def quantile_kde_plot(y, *args, **kwargs):
    """Plot a kernel density estimate from quantile values.

    Parameters
    ----------
    y: array-like
        The values of the quantiles. Entries are assumed to refer to equally
        spaced quantile-positions, e.g. [0.1, 0.2, 0.3, ...] rather than
        [0.1, 0.2, 0.35, 0.40, ...]; the positions are derived from len(y).
    """
    positions = quantile_range(len(y))
    dense_sample = augment_quantile_data(positions, y)
    return sns.kdeplot(dense_sample, *args, **kwargs)
def ridge_plot_response_distributions(data, row_col, value_col, quantile_input=False, clip=None,
                title="Response time improvement by number of relocations"):
    """Make a Ridge Plot of response time distributions for different situations.

    Parameters
    ----------
    data: pd.DataFrame
        The data to plot.
    row_col, value_col: str
        The columns to split by and to plot the distributions of respectively.
    quantile_input: bool, default=False
        If True, values in `value_col` are quantile values and are densified
        via `quantile_kde_plot` before plotting.
    clip: tuple, default=None
        Passed through to the KDE plot to clip the estimated density.
    title: str
        Figure title.

    Returns
    -------
    The matplotlib Figure containing the ridge plot.
    """
    # Transparent axes let the overlapping rows show through each other.
    sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
    # Initialize the FacetGrid object
    pal = sns.cubehelix_palette(10, light=.7)
    g = sns.FacetGrid(data, row=row_col, hue=row_col, aspect=6, height=1.5, palette=pal)
    # Draw the densities in a few steps: a filled KDE, then a white outline on top.
    if quantile_input:
        g.map(quantile_kde_plot, value_col, clip_on=False, shade=True, alpha=1., lw=1.5, bw=.2, gridsize=50, clip=clip)
        g.map(quantile_kde_plot, value_col, clip_on=False, color="w", lw=1.5, bw=.2, gridsize=50, clip=clip)
    else:
        g.map(sns.kdeplot, value_col, clip_on=False, shade=True, alpha=1., lw=1.5, bw=.2, gridsize=50, clip=clip)
        g.map(sns.kdeplot, value_col, clip_on=False, color="w", lw=1.5, bw=.2, gridsize=50, clip=clip)
    g.map(plt.axhline, y=0, lw=2, clip_on=False)
    # Define and use a simple function to label the plot in axes coordinates
    def label(x, color, label):
        ax = plt.gca()
        ax.text(0, .2, label, fontweight="bold", color=color,
                ha="left", va="center", size=16, transform=ax.transAxes)
    g.map(label, value_col)
    # Set the subplots to overlap (negative hspace creates the ridge effect)
    g.fig.tight_layout()
    g.fig.subplots_adjust(top=1.02, hspace=-.45)
    # Remove axes details that don't play well with overlap
    g.set_titles("")
    g.set(yticks=[])
    g.despine(bottom=True, left=True)
    g.fig.suptitle(title, weight="bold", size=18)
    return g.fig
def plot_best_reachable_states(table, state, quantile_input=False, to_quantiles=False,
                               inner_key="responses", minimum=True, num_quantiles=50,
                               max_relocs=None, value_name="response times", clip=None):
    """Plot response distributions of the best reachable state per relocation count.

    Parameters
    ----------
    table: dict
        A table with states as keys.
    state: tuple, array
        The current state.
    quantile_input: bool, default=False
        If True, the table values are already quantiles.
    to_quantiles: bool, default=False
        If True, reduce raw responses to quantiles before selecting states.
    minimum: bool, default=True
        If true, finds the state with the minimal expectation, rather than the
        maximum.
    inner_key: str, default="responses"
        The key of the inner results dictionary that points to the array with values.
    num_quantiles: int, default=50
        Number of quantiles when converting to quantiles.
    max_relocs: int, default=None
        The maximum number of relocations to consider.
    value_name: str
        Column name (and axis label) for the plotted values.
    clip: tuple, default=None
        Passed through to the KDE plot.
    """
    if quantile_input:
        # Values are quantiles already: skip conversion and inner-dict indexing.
        to_quantiles = False
        inner_key = None
    best_per_reloc = get_best_states_by_num_relocations(
        table, state, to_quantiles=to_quantiles, inner_key=inner_key,
        minimum=minimum, num_quantiles=num_quantiles, max_relocs=max_relocs
    )
    values_are_quantiles = to_quantiles or quantile_input
    frames = []
    for num_relocs, info in best_per_reloc.items():
        values = info["values"] if values_are_quantiles else info["values"][inner_key]
        frames.append(pd.DataFrame({"relocations": num_relocs, value_name: values}))
    df = pd.concat(frames, axis=0)
    return ridge_plot_response_distributions(df, row_col="relocations", value_col=value_name,
                                             quantile_input=values_are_quantiles,
                                             title="Response time improvement by number of relocations")
def group_states_by_vehicle_count(table):
    """Organize all states by their total vehicle count.

    Parameters
    ----------
    table: dict
        A table with states as keys.

    Returns
    -------
    dict mapping every count 0..max to the sub-table of states with that total
    (possibly empty).
    """
    counts = [sum(key) for key in table.keys()]
    grouped = {n: {} for n in range(max(counts) + 1)}
    for count, (key, value) in zip(counts, table.items()):
        grouped[count][key] = value
    return grouped
def get_best_state_by_vehicle_count(table, inner_key="responses", minimum=True):
    """Find the best state per available number of vehicles.

    Parameters
    ----------
    table: dict
        A table with states as keys.
    inner_key: str, default="responses"
        The key of the inner results dictionary that points to the array with values.
    minimum: bool, default=True
        If true, finds the state with the minimal expectation, rather than the
        maximum.
        NOTE(review): `minimum` is accepted but not forwarded to
        get_state_with_best_expectation (which then uses its own default) —
        confirm whether that is intended.
    """
    grouped = group_states_by_vehicle_count(table)
    best_states = {}
    for count, sub_table in grouped.items():
        if not sub_table:
            continue  # empty group (usually the all-zero vehicle count)
        best_state, data = get_state_with_best_expectation(sub_table, inner_key=inner_key)
        best_states[count] = {"state": best_state, "data": data}
    return best_states
def get_top_n_states(table, n=10, inner_key=None, minimum=True):
    """Retrieve the data for the top n best states (by mean value).

    Parameters
    ----------
    table: dict
        A table with states as keys.
    n: int, default=10
        How many states to return.
    inner_key: str, default=None
        The key of the inner results dictionary that points to the array with values.
    minimum: bool, default=True
        If True the lowest means rank first; otherwise the highest.
    """
    means = get_state_expectations(table, inner_key=inner_key, std=False)
    ranking = pd.DataFrame({"state": list(means.keys()), "mean": list(means.values())})
    ranking = ranking.sort_values("mean", ascending=bool(minimum))
    return {key: table[key] for key in ranking["state"].iloc[0:n]}
def get_top_n_states_by_vehicle_count(table, n=10, inner_key="responses", minimum=True):
    """Find the best n states per available number of vehicles.

    Parameters
    ----------
    table: dict
        A table with states as keys.
    n: int, default=10
        How many states to keep per vehicle count.
    inner_key: str, default="responses"
        The key of the inner results dictionary that points to the array with values.
    minimum: bool, default=True
        If true, finds the state with the minimal expectation, rather than the
        maximum.

    Returns
    -------
    dict mapping each (non-empty) vehicle count to its top-n sub-table.
    """
    tables_by_count = group_states_by_vehicle_count(table)
    best_states = {}
    for k, ktable in tables_by_count.items():
        if ktable:  # checks if dict is empty (usually the case for all-zero state)
            # Bug fix: forward n — it was previously dropped, so callers always
            # got the default of 10 regardless of the n they passed.
            top_n_dict = get_top_n_states(ktable, n=n, inner_key=inner_key, minimum=minimum)
            best_states[k] = top_n_dict
    return best_states
def table_to_df(table, inner_key=None, add_rank=False, state_as_index=True):
"""Convert a dictionary-structured table into a Pandas DataFrame.
Parameters
----------
table: dict
The table to convert.
inner_key: str, default=None
The key of the inner results dictionary that points to the array with values.
Returns
-------
df: pd.DataFrame
The resulting data frame.
"""
ix = pd.Index([tuple(key) for key in table.keys()], dtype=tuple)
if inner_key is None:
df = pd.DataFrame([v for v in table.values()], index=ix)
else:
df = pd.DataFrame([v[inner_key] for v in table.values()], index=ix)
if add_rank:
df["rank"] = np.arange(1, len(df) + 1)
df.index.name = "state"
if not state_as_index:
df = df.reset_index(drop=False)
return df
def nested_table_to_df(table_dict, inner_key=None, outer_name="number of vehicles", add_rank=True):
    """Convert a nested table-dictionary to a DataFrame.

    Parameters
    ----------
    table_dict: dict
        The nested dictionary (outer keys -> inner tables with states as keys).
    inner_key: str, default=None
        The key of the inner results dictionary that points to the array with values.
    outer_name: str, default='number of vehicles'
        The new name of the column corresponding to the outer-most keys in the
        dictionary.
    add_rank: bool, default=True
        Passed to table_to_df for each inner table.

    Returns
    -------
    df: pd.DataFrame
        The results with the states (2nd level keys) as index, a column for
        the first level keys, and columns corresponding to the values.
    """
    frames = []
    for outer_key, sub_table in table_dict.items():
        frame = table_to_df(sub_table, inner_key=inner_key, add_rank=add_rank)
        frame[outer_name] = outer_key
        frames.append(frame)
    return pd.concat(frames, axis=0)
def get_station_occurences_from_grouped_table(table_dict, inner_key=None,
                                              outer_name="total vehicles",
                                              station_names=None, to_long=True):
    """Extract the number of times a station is occupied in the states in a grouped table,
    e.g., in a table of top 10 states grouped by the total number of vehicles.

    Parameters
    ----------
    table_dict: dict
        The nested dictionary of tables.
    inner_key: any, default=None
        Key to the values if the tables are nested as well.
    outer_name: str, default="total vehicles"
        How to call the keys of the outer-most dictionary.
    station_names: iterable, default=None
        Names of the stations in the order of the states. If None, dummy names are provided.
    to_long: bool, default=True
        Whether to create a column 'station' (True) or to keep each station as a column
        (False).

    Returns
    -------
    df: pd.DataFrame
        The resulting DataFrame.
    """
    # Bug fix: `inner_key` was accepted but never forwarded, so nested inner
    # dicts were not unpacked; pass it through to `nested_table_to_df`.
    df_table = nested_table_to_df(table_dict, inner_key=inner_key,
                                  outer_name=outer_name, add_rank=True)
    # Expand each state tuple (the index) into one column per station.
    states = np.array([list(v) for v in df_table.index.values])
    if station_names is None:
        station_names = ["station_{}".format(i) for i in range(len(states[0]))]
    df = pd.DataFrame(states, columns=station_names)
    df[outer_name] = df_table[outer_name].values
    df["rank"] = df_table["rank"].values
    if to_long:
        # Melt to long format: one row per (facet, rank, station).
        df = pd.melt(df, [outer_name, 'rank']).rename(columns={'variable': 'station', 'value': 'vehicles'})
    return df
def plot_heatmap_of_vehicle_positions(data, x="station", y="total vehicles", values="vehicles",
                                      ax=None, title="Station occupancy among best 10 states",
                                      max_y=16, *args, **kwargs):
    """Plot a heatmap of how often stations are occupied in the states in the data.

    Parameters
    ----------
    data: pd.DataFrame
        The data to plot. Designed for the output of
        `get_station_occurences_from_grouped_table`.
    x, y, values: str
        Columns to use at the x and y axes and as values in the heatmap.
    ax: matplotlib.Axis, default=None
        Axis to plot on. If None, creates new.
    title: str, default='Station occupancy among best 10 states'
        The title of the plot.
    max_y: int, default=16
        The maximum number of vehicles to plot.
    *args, **kwargs: any
        Parameters of `sns.heatmap`.

    Returns
    -------
    fig: matplotlib.pyplot.figure
        The heatmap.
    """
    sns.set(font_scale=1.6)
    # Bug fix: a caller-provided `ax` was documented but ignored (a new figure
    # was always created). Honor it and resolve its parent figure instead.
    if ax is None:
        fig, ax = plt.subplots(figsize=(15, 10))
    else:
        fig = ax.get_figure()
    pivoted = pd.pivot_table(data, index=x, columns=y, values=values)
    if max_y is not None:
        # Keep only vehicle-count columns up to max_y.
        pivoted = pivoted.loc[:, pivoted.columns <= max_y]
    ax = sns.heatmap(pivoted, ax=ax, cmap="YlGnBu", *args, **kwargs)
    ax.set_title(title, weight="bold", size=20, pad=20)
    fig.tight_layout()
    return fig
def heatmap(x, y, values, data=None, *args, **kwargs):
    """Pivot `data` on (y, x) and draw a seaborn heatmap of `values`.

    Used as the mapped plotting function in
    `plot_faceted_heatmap_of_vehicle_positions`.

    Parameters
    ----------
    x, y, values: str
        Columns to use at the x and y axes and as values in the heatmap.
    data: pd.DataFrame
        The data to plot.
    *args, **kwargs: any
        Forwarded to `sns.heatmap`.
    """
    grid = pd.pivot_table(data, index=y, columns=x, values=values)
    return sns.heatmap(grid, *args, **kwargs)
def plot_faceted_heatmap_of_vehicle_positions(data, x="rank", y="station", values="vehicles",
                                              multiples="total vehicles",
                                              title="Station occupancy in best states by total number of vehicles",
                                              *args, **kwargs):
    """Plot small multiple heatmaps of station occupancy, one per vehicle total.

    Parameters
    ----------
    data: pd.DataFrame
        The data to plot (long format, as produced by
        `get_station_occurences_from_grouped_table`).
    x, y, values, multiples: str
        Columns to use at the x and y axes and as values in the heatmap and to group on for the
        small plots.
    title: str
        Figure-level title placed above all facets.
    *args, **kwargs: any
        Parameters passed to `sns.heatmap` (via `heatmap`).

    Returns
    -------
    fig: matplotlib.pyplot.figure
        The resulting figure.
    """
    set_sns_params()
    g = sns.FacetGrid(data, col=multiples, height=4, aspect=1, col_wrap=4, sharex=False)
    # NOTE(review): `data=data` is passed here, but FacetGrid.map_dataframe
    # supplies each facet's own subset as `data` — presumably redundant; confirm.
    g.map_dataframe(heatmap, x=x, y=y, values=values, data=data, yticklabels=True, xticklabels=True, *args, **kwargs)
    for ax in g.axes:
        ax.set_xlabel("state rank (1=best)")
        ax.set_ylabel(y)
    g.fig.tight_layout()
    g.fig.suptitle(title, weight="bold", size=20)
    # Leave headroom for the suptitle.
    g.fig.subplots_adjust(top=0.95)
    return g.fig
def plot_faceted_barplot_of_vehicle_positions(data, y="station", x="vehicles",
                                              multiples="total vehicles", *args, **kwargs):
    """Plot small-multiple horizontal bar plots of vehicle counts per station.

    Parameters
    ----------
    data: pd.DataFrame
        The data to plot (long format).
    x: str, default='vehicles'
        Column with the bar lengths (summed per station within each facet).
    y: str, default='station'
        Column with the bar categories.
    multiples: str, default='total vehicles'
        Column to group on for the small plots.
    *args, **kwargs: any
        Parameters passed to `sns.barplot`.

    Returns
    -------
    fig: matplotlib.pyplot.figure
        The resulting figure.
    """
    # Fixed docstring: the old one said "heatmaps" and documented a
    # non-existent `values` parameter.
    sns.set(style='white')
    # Aggregate so each (station, facet) pair contributes a single bar value.
    data = data.groupby([y, multiples])[x].sum().reset_index()
    # plot
    g = sns.FacetGrid(data, col=multiples, col_wrap=5, sharex=True)
    g.map_dataframe(sns.barplot, x=x, y=y, orient="h", *args, **kwargs)
    for ax in g.axes:
        ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
    g.fig.tight_layout()
    return g.fig
def load_tables(*paths):
    """Read multiple pickled tables from disk and return them in a list.

    Parameters
    ----------
    paths: str
        The paths to table files to load.

    Returns
    -------
    list
        The unpickled objects, in the order of `paths`.
    """
    tables = []
    for p in paths:
        # Context manager closes each file deterministically; the original
        # `pickle.load(open(p, "rb"))` leaked the handle until GC.
        with open(p, "rb") as f:
            tables.append(pickle.load(f))
    return tables
def append_arrays_in_dicts(*dikts, keys=("responses", "targets")):
    """Create a dictionary with arrays appended from multiple dictionaries.

    Parameters
    ----------
    dikts: dict
        The dictionaries to merge.
    keys: iterable of str, default=('responses', 'targets')
        The keys in the dictionaries that point to arrays that should be
        appended. (Tuple default avoids the shared-mutable-default pitfall
        of the original list default.)

    Returns
    -------
    dict
        Maps each key in `keys` to the concatenation of that key's arrays
        over all of `dikts`, in argument order.
    """
    return {k: np.concatenate([d[k] for d in dikts]) for k in keys}
def merge_tables(*tables, to_quantiles=True, key="responses", num_quantiles=51, save_path=None):
    """Merge multiple tables into one big one.

    Parameters
    ----------
    tables: dict
        The tables to merge. Should all have the same set of keys / states.
    to_quantiles: bool, default=True
        Whether to calculate quantiles over the obtained values rather than keep the raw ones.
    key: str, default="responses"
        If to_quantiles=True, the key is the key in the inner dictionary that points to the array
        over which to compute quantiles. If to_quantiles=False, key is the (list of) keys for which
        arrays of different tables should be appended.
    num_quantiles: int, default=51
        The number of quantiles to compute when to_quantiles=True.
    save_path: str, default=None
        The path to save the resulting table. If None, does not save.

    Returns
    -------
    merged_table: dict
        The merged table. The input tables are left unmodified.
    """
    assert len(tables) > 1, "Must provide more than one table"
    # Bug fix: the original compared tables[0]'s keys against themselves, so a
    # key-set mismatch was never detected. Compare every table to the first.
    reference_keys = set(tables[0].keys())
    assert all(set(t.keys()) == reference_keys for t in tables[1:]), \
        "Keys are not the same for all tables"
    # Shallow copy so the caller's first table is not mutated in place.
    merged = dict(tables[0])
    for i, state in enumerate(tables[0].keys()):
        progress("Merging results for state {} / {}.".format(i + 1, len(merged)), same_line=True, newline_end=(i + 1 == len(merged)))
        merged[state] = append_arrays_in_dicts(*[t[state] for t in tables], keys=key if isinstance(key, list) else [key])
    if to_quantiles:
        progress("Obtaining quantiles for '{}'".format(key))
        merged = get_table_quantiles(merged, num_quantiles=num_quantiles, inner_key=key)
    if save_path is not None:
        # Context manager ensures the dump file is flushed and closed.
        with open(save_path, "wb") as f:
            pickle.dump(merged, f)
        progress("Merged table saved at {}".format(save_path))
    return merged
def calc_mean_quantiles(*tables):
    """Calculate the means of multiple quantile estimates.

    Assumes the quantiles have been estimated from samples of the same size,
    and that all tables share the same states in the same order.

    Parameters
    ----------
    *tables: dicts
        The tables to merge. Must have states as keys and
        numpy arrays of quantiles as values.

    Returns
    -------
    quantile_table: dict
        States as keys and numpy array of merged quantile estimates as values.
    """
    count = len(tables)
    # Stack each table's quantile rows into a matrix (one row per state).
    stacked = [np.array(list(tbl.values())) for tbl in tables]
    averaged = np.zeros_like(stacked[0])
    for matrix in stacked:
        averaged += matrix / count
    states = list(tables[0].keys())
    return {state: averaged[row, :] for row, state in enumerate(states)}
def merge_tables_in_chunks(*paths):
    """Load tables in pairs (chunks), compute quantiles per pair, and average
    the quantile estimates of the different chunks.

    For correct estimation of the quantiles, it is assumed that each
    file has the same sample size.

    Parameters
    ----------
    *paths: str
        The paths to the tables that should be combined. Consecutive paths
        are paired up, so the count must be even.

    Returns
    -------
    mean_table: dict
        The quantile estimates for each state.
    """
    assert len(paths) % 2 == 0, "Must be an even number of paths"
    # Pair consecutive paths via a stepped range; the original also computed
    # an unused (float) `num_chunks`, which has been dropped.
    qtables = []
    for i in range(0, len(paths), 2):
        chunk_tables = load_tables(paths[i], paths[i + 1])
        qtables.append(merge_tables(*chunk_tables, to_quantiles=True, save_path=None))
    return calc_mean_quantiles(*qtables)
def get_station_coords(path, station_col="kazerne"):
    """Obtain x, y coordinates for each station.

    Parameters
    ----------
    path: str
        The path to the station location Excel file.
    station_col: str, default='kazerne'
        The column in the station data that gives the station names or IDs.

    Returns
    -------
    coord_dict: dict
        The coordinates like {'STATION_NAME' -> (x, y)}.
    """
    # Bug fix: `sep` and `decimal` are read_csv arguments; pandas.read_excel
    # does not accept them and recent pandas versions raise on unknown kwargs.
    station_locations = pd.read_excel(path)
    # Project lon/lat to planar x/y coordinates.
    station_locations[["x", "y"]] = station_locations[["lon", "lat"]].apply(
        lambda row: lonlat_to_xy(row[0], row[1]), axis=1).apply(pd.Series)
    coord_dict = {}
    for i, station in enumerate(station_locations[station_col]):
        # Upper-case keys to match the STATION_NAMES convention elsewhere.
        coord_dict[station.upper()] = tuple(station_locations[['x', 'y']].iloc[i])
    return coord_dict
def plot_state_on_map(state, geo_df, coords, prev_state=None, station_names=STATION_NAMES,
                      annotate=True, figsize=None, ax=None, yshift=450,
                      shift_stations=["HENDRIK", "ANTON"]):
    """Plot a state on the map, showing which stations are occupied or empty.

    Parameters
    ----------
    state: tuple
        The station occupancy (one count per station, aligned with `station_names`).
    geo_df: geopandas.DataFrame
        The polygons of the underlying map.
    coords: dict
        Coordinates of the stations in a dictionary like {STATION -> (x, y)}.
    prev_state: tuple, default=None
        Previous occupancy used to color changed stations; defaults to `state`
        (i.e., nothing is marked as changed).
    station_names: list-like, default=spyro.value_estimation.STATION_NAMES
        The names of the stations corresponding to `state`.
    annotate: bool or 'changed', default=True
        Whether to print station names on the map. 'changed' annotates only
        stations whose occupancy differs from `prev_state`.
    figsize: tuple, default=None
        The figure size.
    ax: matplotlib.pyplot.Axes, default=None
        The Axis to plot on. If None, creates new one.
    yshift: int, default=450
        Extra vertical label offset for the stations in `shift_stations`
        (avoids overlapping labels).
    shift_stations: list, default=["HENDRIK", "ANTON"]
        Stations whose labels receive the extra `yshift`.
        NOTE(review): mutable default argument — never mutated here, but a
        tuple would be safer.

    Returns
    -------
    ax: Axis
        The plotted map.
    """
    def map_color(old, new, palette):
        # Unchanged -> palette[0]; became empty -> palette[1]; otherwise -> palette[2].
        if old == new:
            return palette[0]
        elif new == 0:
            return palette[1]
        else:
            return palette[2]

    def annotate_station(j, txt, condition=True, size=9, grey=False):
        # NOTE(review): the `size` parameter is ignored — size=9 is hard-coded
        # in both annotate calls below; confirm intent.
        txt_color = 'grey' if grey else 'black'
        if condition:
            if station_names[j] in shift_stations:
                ax.annotate(txt, (x[j], y[j]), xytext=(x[j]+100, y[j]+100 + yshift), size=9, color=txt_color)
            else:
                ax.annotate(txt, (x[j], y[j]), xytext=(x[j]+100, y[j]+100), size=9, color=txt_color)

    if prev_state is None:
        prev_state = state
    # Draw the base map, translucent so markers stand out.
    ax = geo_df.plot(figsize=figsize, alpha=0.3, ax=ax)
    x = [coords[s][0] for s in station_names]
    y = [coords[s][1] for s in station_names]
    # Occupied stations get large markers, empty ones small.
    sizes = [100 if v > 0 else 15 for v in state]
    colors = sns.color_palette('tab10', 3)
    c = [map_color(prev_state[v], state[v], colors) for v in range(len(state))]
    ax.scatter(x, y, c=c, s=sizes)
    # annotate stations with names
    for i, txt in enumerate(station_names):
        if annotate == 'changed':
            annotate_station(i, txt, condition=(state[i] != prev_state[i]), grey=False)
        elif annotate:
            annotate_station(i, txt, grey=state[i] == 0)
    # Strip the axes chrome; only the map content matters.
    ax.set_xticks([])
    ax.set_yticks([])
    sns.despine(ax=ax, left=True, bottom=True)
    return ax
def map_plot_facet_wrapper(states, geo_df=None, coords=None, ax=None, annotate=False, *args, **kwargs):
    """Wrap `plot_state_on_map` with a signature suitable for sns.FacetGrid.

    Bug fix: the `ax` argument used to be accepted and then unconditionally
    overwritten with `plt.gca()`; an explicitly passed axis is now honored
    (FacetGrid itself passes none, so faceted behavior is unchanged).
    """
    if ax is None:
        ax = plt.gca()
    _ = plot_state_on_map(states.values[0], geo_df, coords, ax=ax, annotate=annotate)
def plot_best_states_on_map(table, geopath="../../Data/geoData/vakken_dag_ts.geojson",
                            stationpath="../../Data/kazernepositie en voertuigen.xlsx",
                            station_names=STATION_NAMES, min_count=0, max_count=16,
                            inner_key=None, minimum=True, top=0.92):
    """Plot the best configuration of vehicles on a map for each possible vehicle count.

    Parameters
    ----------
    table: dict
        Table of state-values like {'state' -> [value1, value2, ..., ...]}. Can be quantiles.
    geopath: str, default='../../Data/geoData/vakken_dag_ts.geojson'
        The path to the underlying map polygons.
    stationpath: str, default="../../Data/kazernepositie en voertuigen.xlsx"
        Path to the station coordinate data.
    station_names: list-like, default=spyro.value_estimation.STATION_NAMES
        The names of the stations corresponding to `state`.
    min_count, max_count: int, default=0, 16
        The min and max number of total vehicles to plot the best state for.
    inner_key, minimum: any
        Passed to `get_best_state_by_vehicle_count`.
    top: float, default=0.92
        Fraction of the figure height below the suptitle (subplots_adjust).

    Returns
    -------
    fig: matplotlib.Figure
        The Faceted plot of configurations.
    """
    # load data
    stationcoords = get_station_coords(stationpath)
    geodf = gpd.read_file(geopath)
    # get best states and filter them to the requested vehicle-count range
    best_by_count = get_best_state_by_vehicle_count(table, inner_key=inner_key, minimum=minimum)
    if min_count is not None:
        best_by_count = {k: v for k, v in best_by_count.items() if k >= min_count}
    if max_count is not None:
        best_by_count = {k: v for k, v in best_by_count.items() if k <= max_count}
    # one row per vehicle count, holding its best state
    df = pd.DataFrame.from_dict(best_by_count, orient='index')
    df.index.name = "vehicles"
    df = df.reset_index(drop=False)
    # plot one small map per vehicle count
    sns.set(style="white")
    g = sns.FacetGrid(data=df, col="vehicles", col_wrap=4)
    g.map(map_plot_facet_wrapper, "state", geo_df=geodf, coords=stationcoords)
    g.fig.suptitle("Best vehicle configuration per number of vehicles", weight="bold", size=18)
    for ax in g.axes:
        ax.set_xlabel('')
    g.fig.tight_layout()
    # Leave headroom for the suptitle and remove gaps between maps.
    g.fig.subplots_adjust(top=top, wspace=0.01, hspace=0.01)
    return g.fig
|
"""Packaging setup for the embeddable-objects plugin."""
from setuptools import setup

setup(
    name='BloodhoundEmbeddingPlugin',
    version='0.1',
    description="Embeddable objects plugin support for Apache(TM) Bloodhound.",
    author="Apache Bloodhound",
    license="Apache License v2",
    url="http://bloodhound.apache.org/",
    packages=['bhembedding'],
    # Ship the plugin's templates and stylesheets alongside the code.
    package_data={'bhembedding': ['templates/*.html', 'htdocs/*.css']},
    # Register the plugin with Trac's entry-point mechanism.
    entry_points={
        'trac.plugins': [
            'bhembedding.api = bhembedding.api',
        ],
    },
    test_suite='bhembedding.tests.test_suite',
)
|
#!/usr/bin/env python
from BeautifulSoup import BeautifulSoup
import re
import sys
def pulldata(fn):
    """Print each XML link found in the HTML file `fn`.

    Scans the document for <a> tags whose href ends in 'xml' and prints
    each href followed by the literal tag "facebook".

    Parameters
    ----------
    fn: str
        Path to the HTML file to scan.
    """
    # Context manager closes the handle deterministically; the original
    # `open(fn).read()` leaked it until garbage collection.
    with open(fn) as handle:
        soup = BeautifulSoup(handle.read())
    for l in soup.findAll('a', href=re.compile('xml$')):
        print('%s "facebook"' % l['href'])


if __name__ == '__main__':
    # Usage: script.py <html-file>
    pulldata(sys.argv[1])
|
from __future__ import print_function
import re
class Token(object):
    """A lexeme produced by `Tokenizer`: a token name plus its matched text."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        # {1!r} shows quotes/escapes in the value, matching repr(value).
        return "Token(name={0}, value={1!r})".format(self.name, self.value)
class Tokenizer(object):
    """Regex-based scanner built from (name, pattern) token pairs."""

    def __init__(self, tokens):
        # Combine every pattern into a single alternation of named groups;
        # the matching group's name identifies the token type.
        self.regexp = '|'.join('(?P<%s>%s)' % token for token in tokens)

    def tokenize(self, stream):
        """Yield `Token`s for `stream`; raise RuntimeError on unmatched input."""
        matcher = re.compile(self.regexp).match
        position = 0
        match = matcher(stream)
        while match is not None:
            group_name = match.lastgroup
            yield Token(group_name, match.group(group_name))
            position = match.end()
            match = matcher(stream, position)
        if position != len(stream):
            # Matching stopped before the end: the next character fits no token.
            raise RuntimeError("Unexpected character '{}' at pos {}".format(stream[position], position))
if __name__ == '__main__':
    import sys
    # Demo: tokenize the first command-line argument and print each token.
    # Raw strings for the regex escapes: plain '\d', '\+', etc. are invalid
    # escape sequences (DeprecationWarning since Python 3.6); the runtime
    # values are unchanged. '\n' and '\t' are real escapes and stay as-is.
    tokens = [
        ('NUMBER', r'\d+(\.\d*)?'),
        ('SET', 'set'),
        ('WRITE', 'write'),
        ('PLUS', r'\+'),
        ('COMMA', r'\,'),
        ('NEWLINE', '\n'),
        ('SKIP', '[ \t]'),
    ]
    tokenizer = Tokenizer(tokens)
    for token in tokenizer.tokenize(sys.argv[1]):
        print(token)
|
from django.shortcuts import render
from salvados.models import Salvado
from django.views import generic
from salvados.forms import SalvadoForm
from django.urls import reverse_lazy
class ListarSalvados(generic.ListView):
    """List view for `Salvado` records.

    Renders ``salvados/listar_salvados.html`` with the queryset exposed to
    the template as ``obj``.
    """
    model=Salvado
    template_name="salvados/listar_salvados.html"
    context_object_name="obj"
class InsertarSalvado(generic.CreateView):
    """Create view for `Salvado` using `SalvadoForm`.

    On success, redirects to the ``salvados:salvados_list`` URL.
    """
    model=Salvado
    template_name="salvados/insertar_salvado.html"
    context_object_name="obj"
    form_class=SalvadoForm
    success_url=reverse_lazy("salvados:salvados_list")
class EditarSalvado(generic.UpdateView):
    """Update view for `Salvado`; reuses the insert template and form.

    On success, redirects to the ``salvados:salvados_list`` URL.
    """
    model=Salvado
    template_name="salvados/insertar_salvado.html"
    context_object_name="obj"
    form_class=SalvadoForm
    success_url=reverse_lazy("salvados:salvados_list")
class BorrarSalvado(generic.DeleteView):
    """Delete (confirmation) view for `Salvado`.

    On success, redirects to the ``salvados:salvados_list`` URL.
    """
    model=Salvado
    template_name="salvados/borrar_salvado.html"
    context_object_name="obj"
    # NOTE(review): classic DeleteView does not use form_class — presumably
    # copied from the create/update views; confirm it is not needed here.
    form_class=SalvadoForm
    success_url=reverse_lazy("salvados:salvados_list")
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Data-cleaning pipeline for the UCI Automobile data set (twenty-six columns):
# impute missing values, fix dtypes, convert units, normalize, bin, and
# one-hot encode categorical columns.
headers = ["symboling", "normalized-losses", "make", "fuel-type", "aspiration",
           "num-of-doors", "body-style", "drive-wheels", "engine-location",
           "wheel-base", "length", "width", "height", "curb-weight",
           "engine-type", "num-of-cylinders", "engine-size", "fuel-system",
           "bore", "stroke", "compression-ratio", "horsepower", "peak-rpm",
           "city-mpg", "highway-mpg", "price"]

# The raw file sits next to this script and has no header row.
df = pd.read_csv("data.csv", names=headers)

# The raw data marks missing values with "?".
print(df.sample(8), "\n")

# Replace the "?" placeholders with NaN so pandas treats them as missing.
df.replace("?", np.nan, inplace=True)

# Show how many values are missing per column.
missing_data = df.isnull()
print(missing_data.sample(8), "\n")
for column in missing_data.columns.values.tolist():
    print(column)
    print(missing_data[column].value_counts(), "\n")

# Impute numeric columns with their column mean.
forChanging = ["normalized-losses", "bore", "stroke", "horsepower", "peak-rpm"]
for atribute in forChanging:
    avg = df[atribute].astype("float").mean(axis=0)
    df[atribute].replace(np.nan, avg, inplace=True)
    print("Average {}: {}".format(atribute, avg))

# Impute the categorical door count with its most frequent value.
mode = df["num-of-doors"].value_counts().idxmax()
df["num-of-doors"].replace(np.nan, mode, inplace=True)

# Drop every row without a price and rebuild the index.
df.dropna(subset=["price"], axis=0, inplace=True)
df.reset_index(drop=True, inplace=True)

# Now the data has no missing values.
print("\n", df.sample(8), "\n")

# Make sure the data has the correct data format.
print(df.dtypes, "\n")

# Cast numeric columns stored as strings to float.
forChanging = ["normalized-losses", "bore", "stroke", "price", "peak-rpm"]
for atribute in forChanging:
    df[atribute] = df[atribute].astype("float")

# Data standardization to L/100km (International System of Units).
df["city-L/100km"] = 235 / df["city-mpg"]
df["highway-L/100km"] = 235 / df["highway-mpg"]
# Bug fix: DataFrame.drop is not in-place by default — the original discarded
# the result, so the mpg columns were never actually removed.
df = df.drop(columns=["city-mpg", "highway-mpg"])
print(df.sample(8), "\n")

# Data normalization: scale dimensions to [0, 1] by the column maximum.
forChanging = ["length", "width", "height"]
for atribute in forChanging:
    df[atribute] = df[atribute] / df[atribute].max()
print(df[forChanging].sample(8), "\n")

# Binning horsepower into three equal-width ranges (low / mid / high meter).
df["horsepower"] = df["horsepower"].astype(int, copy=True)
plt.hist(df["horsepower"], bins=3)
plt.xlabel("Horsepower")
plt.ylabel("Count")
plt.title("Horsepower bins")
plt.show()

horsepower_bins = np.linspace(df["horsepower"].min(), df["horsepower"].max(), 4)
# Bug fix: the labels were a set literal ({"Low", "Medium", "Hight"}), whose
# iteration order is arbitrary, so pd.cut could assign the labels to the bins
# in any order (and "Hight" was a typo). An ordered list is deterministic.
group_names = ["Low", "Medium", "High"]
df["horsepower-binned"] = pd.cut(df["horsepower"], horsepower_bins, labels=group_names, include_lowest=True)
print(df[["horsepower", "horsepower-binned"]].sample(8), "\n")
print(df["horsepower-binned"].value_counts(), "\n")

# Dummy variables: 0/1 indicator columns for regression models.
dummy_variable_1 = pd.get_dummies(df["fuel-type"])
dummy_variable_1.rename(columns={'gas': 'fuel-type-gas',
                                 'diesel': 'fuel-type-diesel'},
                        inplace=True)
df = pd.concat([df, dummy_variable_1], axis=1)
df.drop("fuel-type", axis=1, inplace=True)

dummy_variable_2 = pd.get_dummies(df["aspiration"])
dummy_variable_2.rename(columns={'std': 'aspiration-std',
                                 'turbo': 'aspiration-turbo'},
                        inplace=True)
df = pd.concat([df, dummy_variable_2], axis=1)
df.drop("aspiration", axis=1, inplace=True)
print(df.sample(8), "\n")

# df.to_csv('clean_data.csv')  # uncomment to persist the cleaned data
|
class Library:
    """Toy example class: stores a greeting prefix and prints it with text."""

    def __init__(self):
        # Greeting prefix used by `method`.
        self.x = 'sup'

    def method(self, text):
        """Print the stored greeting followed by a space and `text`."""
        print(' '.join([self.x, text]))
|
import socketio
from multiprocessing import Process
import signal
import sys
import logging
## LOGGING INFO
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(level=logging.INFO)
## Websocket client
class WebsocketClient():
    """Socket.IO client whose wait loop runs in a child process.

    Parameters
    ----------
    ws_address: str
        The websocket server URL, e.g. 'http://localhost:6000'.
    on_msg_callback: callable or None, default=None
        Optional callable invoked with every 'my_message' payload.
    """

    def __init__(self, ws_address, on_msg_callback=None):
        # Connection target plus not-yet-created client/process handles.
        self.ws_address = ws_address
        self.sio = None         # set by start_it()
        self.ws_process = None  # set by start_it()
        # Bug fix: the default used to be "" while on_message checked
        # `is not None`, so with the default the first received message
        # crashed with "'str' object is not callable". None is the sentinel.
        self.on_msg_callback = on_msg_callback
        # Join the worker process cleanly on Ctrl-C.
        signal.signal(signal.SIGINT, self.keyboard_interupt)

    def keyboard_interupt(self, signal, frame):
        # Guard: start_it() may not have been called yet.
        if self.ws_process is not None:
            self.ws_process.join()

    def on_connect(self):
        LOGGER.info("ws connected.")

    def on_message(self, msg):
        LOGGER.info("ws got msg: %s", msg)
        if self.on_msg_callback is not None:
            self.on_msg_callback(msg)

    def on_disconnect(self):
        LOGGER.info("ws diconnected.")

    def get_ws_sio(self):
        """Return the socketio.Client, or None before start_it()."""
        return self.sio

    def init_websocketio(self):
        """Create a socketio.Client, wire the handlers, connect, and return it."""
        sio = socketio.Client()
        # self.sio.reconnection = False
        sio.on("connect", self.on_connect)
        sio.on("disconnect", self.on_disconnect)
        sio.on('my_message', self.on_message)
        sio.connect(self.ws_address)
        return sio

    def start_it(self):
        """Connect and run the socket.io wait loop in a separate process."""
        self.sio = self.init_websocketio()
        self.ws_process = Process(target=self.sio.wait)
        self.ws_process.start()
if __name__ == '__main__':
    # Demo entry point: connect to a local server and start the listener process.
    ws_address = 'http://localhost:6000'
    app = WebsocketClient(ws_address)
    app.start_it()
|
# Generated by Django 3.1.7 on 2021-07-19 23:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the `biblioapp` app.

    Creates the Author, Category, Editor and Book tables; Book carries
    many-to-many links to Author and Category.
    NOTE: auto-generated by Django 3.1.7 — avoid hand-editing the operations.
    """

    initial = True

    # First migration of the app: no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
            ],
        ),
        migrations.CreateModel(
            name='Editor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        # Book is created last because it references Author and Category.
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('subtitle', models.CharField(max_length=50)),
                ('release_date', models.DateField()),
                ('imagen', models.ImageField(null=True, upload_to='portadas')),
                ('Description', models.TextField()),
                ('authors', models.ManyToManyField(to='biblioapp.Author')),
                ('categories', models.ManyToManyField(to='biblioapp.Category')),
            ],
        ),
    ]
|
from flask.ext.wtf import Form
from wtforms import TextField, SelectField, validators
class AddSubscriptionForm(Form):
    """WTForms form for subscribing a user by phone/email at a map location.

    NOTE(review): the base `Form` is imported from `flask.ext.wtf`, a
    namespace removed in Flask 1.0 — presumably this targets an old Flask;
    confirm before upgrading.
    """
    # Exactly 8 digits (digits-only regex plus length 8..8).
    handphone = TextField('handphone',
                          validators=[validators.Regexp(r'^\d+$'), validators.Length(min=8, max=8)])
    # Loose e-mail check: allowed characters and length only.
    email = TextField('email', validators=[validators.Regexp(r'^[\w.@+-]+$'), validators.Length(min=4, max=50)])
    # area = SelectField('Area', choices=[
    #     ('', 'Select Region'), ('east', 'East'), ('west', 'West'), ('south', "South"), ('north', "North"),
    #     ('central', "Central"), ('all', "All")])
    # Map coordinates as free text (no validators attached).
    lat = TextField('lat')
    lng = TextField('lng')
#-----------------------------------------------------------------------------------------------------------------------
# Project: resnet-finetune-demo
# Filename: resnet_demo.py
# Date: 16.06.2017
# Author: Adam Brzeski - CTA.ai
#-----------------------------------------------------------------------------------------------------------------------
"""
Simple Resnet-152 demo script, allowing you test the model on images by simply copying them into clipboard.
"""
import os
import sys
import numpy as np
import skimage.io
import skimage.transform
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QWidget
from matplotlib import pyplot as plt
import helper
from resnet import resnet152
# Paths to the pretrained Resnet-152 weights and the ImageNet
# class-id -> human-readable-label mapping file.
WEIGHTS = os.path.expanduser("ml/models/keras/resnet152/resnet152_weights_tf.h5")
CLSID_TO_HUMAN = os.path.expanduser('ml/models/keras/resnet152/imagenet1000_clsid_to_human.txt')

# Build the model once at import time.
model = resnet152.resnet152_model(WEIGHTS)
with open(CLSID_TO_HUMAN, 'r') as f:
    # SECURITY NOTE: eval() executes arbitrary code from the mapping file.
    # Tolerable only because the file ships with the project; switch to
    # ast.literal_eval if this path could ever be user-supplied.
    id2label = eval(f.read())
@pyqtSlot()
def clipboard_changed():
    """Qt slot: classify the image currently on the clipboard, if any.

    Triggered by QClipboard.dataChanged. Converts the clipboard pixmap to a
    numpy array, runs `process` on it, and displays the image.
    """
    clipboard = QApplication.clipboard()
    try:
        if clipboard.mimeData().hasImage():
            image = clipboard.pixmap()
            image = helper.qimage_to_array(image)
            # Clear so the same copy event is not processed again.
            clipboard.clear()
            if image.shape:
                print("----------------------------------------------------------------------------------------")
                print("Processing: image from clipboard")
                process(image)
                # NOTE(review): channels are reversed for display — presumably
                # BGR -> RGB; confirm against helper.qimage_to_array's order.
                skimage.io.imshow(image[:,:,::-1])
                plt.show()
                print("\nWaiting for an image...")
    except Exception as e:
        # Broad catch is deliberate: a bad clipboard payload must not kill
        # the GUI event loop; report and keep waiting.
        print("ERROR:", e)
def process(im):
    """Run the Resnet classifier on a BGR image and print the top-5 labels.

    Parameters
    ----------
    im: np.ndarray
        Image in BGR channel order (as delivered by the clipboard helper).
    """
    # Flip channels to RGB, which helper.preprocess() expects.
    rgb = im[:, :, ::-1]
    batch = helper.preprocess(rgb)
    # Flatten the single-image prediction to a 1-D score vector.
    scores = model.predict(batch).flatten()
    # Indices of the five highest scores, best first.
    best = np.argsort(scores)[::-1][:5]
    for rank, idx in enumerate(best):
        print("{}. {:.2f} {}".format(rank+1, scores[idx], id2label[idx]))
# Application bootstrap: watch the system clipboard and classify any image
# copied into it. A blank QWidget is shown so the Qt event loop has a window.
app = QApplication(sys.argv)
clipboard = app.clipboard()
clipboard.dataChanged.connect(clipboard_changed)
Form = QWidget()
Form.show()
print("Model ready. You can now copy your test image to clipboard.")
print("Waiting for an image...")
# Run the Qt event loop until the window closes; propagate its exit code.
sys.exit(app.exec_())
|
# Split a video into PNG frames with OpenCV, then reassemble them into a GIF.
import cv2
print(cv2.__version__)
vidcap = cv2.VideoCapture('passing_sample.mkv')
success, image = vidcap.read()
count = 0
# NOTE(review): assumes the 'frames/' directory already exists — imwrite
# returns False silently otherwise; confirm.
while success:
    # Zero-padded names keep lexicographic order equal to temporal order.
    cv2.imwrite("frames/frame%04d.png" % count, image)
    success, image = vidcap.read()
    count += 1

# Reassemble all files under frames/ into an animated GIF.
import imageio
import os
image_folder = "frames/"
# NOTE(review): every file in the folder is included — presumably it holds
# only the frames written above; confirm before reuse.
image_list = sorted([os.path.join(image_folder, image_file) for image_file in os.listdir(image_folder)])
with imageio.get_writer('passing_sample.gif', mode='I') as writer:
    for filename in image_list:
        image = imageio.imread(filename)
        writer.append_data(image)
|
import logging
from datetime import datetime
from functools import reduce
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
from common.gen import LinearCongruentialGenerator, UniformGenerator, NormalGenerator
from common.log import init_logging
from l6_filters.filter import ExponentiallyCorrelatedFilter
def main():
    """Lab 6: model discrete white noise, shape it with an exponentially
    correlated filter, and verify the statistics of the filter output.

    Steps: (1) generate uniform and normal white noise with matched variance,
    (2) pass it through `ExponentiallyCorrelatedFilter`, (3) estimate the
    autocorrelation, mean (M), variance (D) and spectrum over repeated
    experiments, and (4) chi-square test the output against N(0, sigma^2).
    """
    init_logging(file='logs/l6-output-%s.log' % datetime.now(), debug=True)

    # Configuration
    initial_value = 9645730
    lcg = LinearCongruentialGenerator(initial_value)
    initial_gen = NormalGenerator(lcg, mu=1, sigma=2)
    max_step = 10000
    plot_steps = 100
    correlation_steps = 50
    experiments_number = 100
    show_white_noise_distributions = False
    show_corellated_noise_distributions = False
    hypothesis_a = 0.1  # chi-square significance level
    hypothesis_k = lambda count: 1.73 * count ** (1 / 3)  # interval-count rule

    # Input data
    sigma = np.sqrt(4)
    delta = 3.0
    t = 50.0

    # 1. Discrete white-noise generators
    # Half-width chosen so the uniform noise has variance sigma^2
    # (Var(U[-a, a]) = a^2 / 3).
    uniform_interval = np.sqrt(12 * (sigma ** 2)) / 2
    uniform_gen = UniformGenerator(lcg, min=-uniform_interval, max=uniform_interval)
    normal_gen = NormalGenerator(lcg, mu=0, sigma=sigma)
    if show_white_noise_distributions:
        # Sample both generators and show trace + empirical density side by side.
        uniform_noise = [uniform_gen() for _ in range(0, 10000)]
        normal_noise = [normal_gen() for _ in range(0, 10000)]
        uniform_noise_histogram = np.histogram(uniform_noise, bins=30, range=(-3, 3), density=True)
        normal_noise_histogram = np.histogram(normal_noise, bins=30, range=(-12, 12), density=True)
        _, (left_ax, right_ax) = plt.subplots(1, 2)
        left_ax.bar(range(plot_steps), uniform_noise[:plot_steps])
        right_ax.plot(uniform_noise_histogram[1][:-1], uniform_noise_histogram[0])
        right_ax.set_ylim(0, 0.4)
        plt.show()
        _, (left_ax, right_ax) = plt.subplots(1, 2)
        left_ax.bar(range(plot_steps), normal_noise[:plot_steps])
        right_ax.plot(normal_noise_histogram[1][:-1], normal_noise_histogram[0])
        right_ax.set_ylim(0, 0.4)
        plt.show()

    # 2. Shaping (forming) filter
    if show_corellated_noise_distributions:
        uniform_filter = ExponentiallyCorrelatedFilter(delta=delta, t=t, x_gen=initial_gen,
                                                       v_gen=uniform_gen)
        uniform_filter.model(max_step=max_step)
        uniform_filter_histogram = np.histogram(uniform_filter.y, bins=30, range=(-5, 5),
                                                density=True)
        normal_filter = ExponentiallyCorrelatedFilter(delta=delta, t=t, x_gen=initial_gen,
                                                      v_gen=normal_gen)
        normal_filter.model(max_step=max_step)
        normal_filter_histogram = np.histogram(normal_filter.y, bins=30, range=(-5, 5),
                                               density=True)
        _, (left_ax, right_ax) = plt.subplots(1, 2)
        left_ax.bar(range(plot_steps), uniform_filter.y[:plot_steps])
        right_ax.plot(uniform_filter_histogram[1][:-1], uniform_filter_histogram[0])
        plt.show()
        _, (left_ax, right_ax) = plt.subplots(1, 2)
        left_ax.bar(range(plot_steps), normal_filter.y[:plot_steps])
        right_ax.plot(normal_filter_histogram[1][:-1], normal_filter_histogram[0])
        plt.show()

    # One filter instance per experiment, for each noise source.
    uniform_filters = [ExponentiallyCorrelatedFilter(delta=delta, t=t, x_gen=initial_gen,
                                                     v_gen=uniform_gen)
                       for _ in range(experiments_number)]
    normal_filters = [ExponentiallyCorrelatedFilter(delta=delta, t=t, x_gen=initial_gen,
                                                    v_gen=normal_gen)
                      for _ in range(experiments_number)]

    def auto_correlation(x, length):
        # Normalized autocorrelation for lags 0..length-1 (lag 0 is 1 by definition).
        return np.array([1] + [np.corrcoef(x[:-i], x[i:])[0, 1] for i in range(1, length)])

    logging.info('Performing %s experiments for uniform filter' % experiments_number)
    for index, filter in enumerate(uniform_filters):
        if index and index % 10 == 0:
            logging.info('Experiments has been performed %s/%s' % (index, experiments_number))
        filter.model(max_step=max_step)
    logging.info('All experiments has been performed for uniform filter')
    logging.info('Collecting autocorrelation, M and D for uniform filter')
    # Average the per-experiment autocorrelation curves.
    uniform_correlation = reduce(np.add, [auto_correlation(filter.y, correlation_steps)
                                          for filter in uniform_filters]) \
        / experiments_number
    uniform_outputs = reduce(np.append, [filter.y for filter in uniform_filters])
    uniform_M = np.mean(uniform_outputs)
    uniform_M2 = np.mean(uniform_outputs ** 2)
    uniform_D = uniform_M2 - uniform_M ** 2  # D = E[x^2] - (E[x])^2
    logging.info('M = %s' % uniform_M)
    logging.info('D = %s' % uniform_D)

    logging.info('Performing %s experiments for normal filter' % experiments_number)
    for index, filter in enumerate(normal_filters):
        if index and index % 10 == 0:
            logging.info('Experiments has been performed %s/%s' % (index, experiments_number))
        filter.model(max_step=max_step)
    logging.info('All experiments has been performed for normal filter')
    logging.info('Collecting autocorrelation, M and D for normal filter')
    normal_correlation = reduce(np.add, [auto_correlation(filter.y, correlation_steps)
                                         for filter in normal_filters]) \
        / experiments_number
    normal_outputs = reduce(np.append, [filter.y for filter in normal_filters])
    normal_M = np.mean(normal_outputs)
    normal_M2 = np.mean(normal_outputs ** 2)
    normal_D = normal_M2 - normal_M ** 2
    logging.info('M = %s' % normal_M)
    logging.info('D = %s' % normal_D)

    logging.info('Plotting autocorrelation functions')
    plt.plot(range(correlation_steps), uniform_correlation)
    plt.plot(range(correlation_steps), normal_correlation)
    plt.show()

    def spectrum(w, correlation):
        # Cosine-transform estimate of the power spectrum at frequency w.
        return 2 * np.sum([np.cos(w * k) * correlation[k] for k in range(len(correlation))])

    spectrum_dots = np.linspace(0, 1, correlation_steps)
    uniform_spectrum = [spectrum(w, uniform_correlation) for w in spectrum_dots]
    normal_spectrum = [spectrum(w, normal_correlation) for w in spectrum_dots]
    logging.info('Plotting spectrum functions')
    plt.plot(spectrum_dots, uniform_spectrum)
    plt.plot(spectrum_dots, normal_spectrum)
    plt.show()

    logging.info('Testing distributions hypothesis')

    def normal_f(x, mu, sigma):
        # N(mu, sigma^2) probability density.
        return np.exp(-((x - mu) ** 2) / (2 * sigma ** 2)) \
            / np.sqrt(2 * np.pi * sigma ** 2)

    def interval_Z(interval_prob, interval_count, total_count):
        # One interval's contribution to the chi-square statistic.
        return (interval_count - total_count * interval_prob) ** 2 / (total_count * interval_prob)

    hypothesis_intervals = np.linspace(-6, 6, plot_steps)
    uniform_Z = 0
    normal_Z = 0
    for interval_start, interval_end in zip(hypothesis_intervals[:-1], hypothesis_intervals[1:]):
        # Theoretical probability mass of N(0, sigma^2) on this interval.
        interval_hypothesis_prob = integrate.quad(func=lambda x: normal_f(x, mu=0, sigma=sigma),
                                                  a=interval_start, b=interval_end)[0]
        interval_uniform_count = np.logical_and(uniform_outputs >= interval_start,
                                                uniform_outputs < interval_end).sum()
        interval_normal_count = np.logical_and(normal_outputs >= interval_start,
                                               normal_outputs < interval_end).sum()
        uniform_Z += interval_Z(interval_hypothesis_prob, interval_uniform_count,
                                uniform_outputs.size)
        normal_Z += interval_Z(interval_hypothesis_prob, interval_normal_count,
                               normal_outputs.size)

    from scipy.stats.distributions import chi2

    def X2(a, k):
        # Chi-square critical value for significance a and k intervals.
        return chi2.ppf(q=1 - a, df=k - 1)

    uniform_X2 = X2(hypothesis_a, hypothesis_k(uniform_outputs.size))
    normal_X2 = X2(hypothesis_a, hypothesis_k(normal_outputs.size))
    logging.info('Uniform noise Z = %s' % uniform_Z)
    logging.info('Uniform noise X2 = %s' % uniform_X2)
    logging.info('Normal noise Z = %s' % normal_Z)
    logging.info('Normal noise X2 = %s' % normal_X2)
if __name__ == '__main__':
    # Run the noise/correlation experiment only when executed as a script.
    main()
|
import numpy as np
#import bpy
#def draw_cube(verts):
# edges = [(0, 1), (0, 2), (1, 3), (2, 3), (4, 5), (4, 6), (5, 7), (6, 7), (0, 4), (1, 5), (2, 6), (3, 7)]
# mesh = bpy.data.meshes.new('Pyramid_Mesh')
# mesh.from_pydata(verts, edges, [])
#mesh.update()
#pyramid = bpy.data.objects.new('Pyramid', mesh)
#scene = bpy.context.scene
#scene.objects.link(pyramid)
#def draw_line(point1,point2):
# line=[(0,1)]
# verts=[point1,point2]
# mesh = bpy.data.meshes.new('Pyramid_Mesh')
#mesh.from_pydata(verts, line, [])
#mesh.update()
# pyramid = bpy.data.objects.new('Pyramid', mesh)
# scene = bpy.context.scene
# scene.objects.link(pyramid)
def bresenham(point1, point2):
    """Rasterize the 3D segment point1 -> point2 and return unit bounding boxes.

    Runs 3D Bresenham along the dominant axis, collecting every visited voxel
    in point_list, then converts each consecutive voxel pair into one or two
    axis-aligned unit boxes stored as (xmin, ymin, zmin, xmax, ymax, zmax).

    Args:
        point1: (x, y, z) integer start point.
        point2: (x, y, z) integer end point.
    Returns:
        list of 6-tuples (xmin, ymin, zmin, xmax, ymax, zmax).
    """
    x1 = point1[0]
    y1 = point1[1]
    z1 = point1[2]
    x2 = point2[0]
    # Bug fix: the original read `y2 = -point2[1]`, mirroring the end point
    # across the XZ plane, so the line was traced toward the wrong target.
    y2 = point2[1]
    z2 = point2[2]
    # Per-axis spans and unit step directions.
    dx = abs(x2 - x1)
    dy = abs(y2 - y1)
    dz = abs(z2 - z1)
    sx = np.sign(x2 - x1)
    sy = np.sign(y2 - y1)
    sz = np.sign(z2 - z1)
    print("起点坐标:")
    print(x1, y1, z1)
    point_list = [(x1, y1, z1)]  # start point
    point1 = (x1, y1, z1)
    point2 = (x2, y2, z2)  # end point
    print("终点坐标:")
    print(point2)
    #draw_line(point1,point2)
    # NOTE(review): when two axes tie for the strict maximum (e.g. dx == dy),
    # none of the branches below runs and only the start voxel is returned --
    # preserved from the original; confirm whether ties should be handled.
    if dx > dy and dx > dz:
        # x is the driving axis; d1/d2 are the y/z error accumulators.
        x = x1
        y = y1
        z = z1
        d1 = 2 * dy - dx
        d2 = 2 * dz - dx
        for i in range(dx):
            x = x + sx
            if d1 < 0:
                d1 = d1 + 2 * dy
                if d2 < 0:
                    d2 = d2 + 2 * dz
                else:
                    z = z + sz
                    d2 = d2 + 2 * (dz - dx)
            else:
                y = y + sy
                d1 = d1 + 2 * (dy - dx)
                if d2 < 0:
                    d2 = d2 + 2 * dz
                else:
                    z = z + sz
                    d2 = d2 + 2 * (dz - dx)
            point_list.append((x, y, z))
    elif dy > dx and dy > dz:
        # y is the driving axis.
        x = x1
        y = y1
        z = z1
        d1 = 2 * dx - dy
        d2 = 2 * dz - dy
        for i in range(dy):
            y = y + sy
            if d1 < 0:
                d1 = d1 + 2 * dx
                if d2 < 0:
                    d2 = d2 + 2 * dz
                else:
                    z = z + sz
                    d2 = d2 + 2 * (dz - dy)
            else:
                x = x + sx
                d1 = d1 + 2 * (dx - dy)
                if d2 < 0:
                    d2 = d2 + 2 * dz
                else:
                    z = z + sz
                    d2 = d2 + 2 * (dz - dy)
            point_list.append((x, y, z))
    elif dz > dx and dz > dy:
        # z is the driving axis.
        x = x1
        y = y1
        z = z1
        d1 = 2 * dx - dz
        d2 = 2 * dy - dz
        for i in range(dz):
            z = z + sz
            if d1 < 0:
                d1 = d1 + 2 * dx
                if d2 < 0:
                    d2 = d2 + 2 * dy
                else:
                    y = y + sy
                    d2 = d2 + 2 * (dy - dz)
            else:
                x = x + sx
                d1 = d1 + 2 * (dx - dz)
                if d2 < 0:
                    d2 = d2 + 2 * dy
                else:
                    y = y + sy
                    d2 = d2 + 2 * (dy - dz)
            point_list.append((x, y, z))
    print("point_list:")
    print(point_list)
    # Convert each consecutive voxel pair into axis-aligned unit boxes.
    cube_list = []
    for i in range(len(point_list) - 1):
        n = np.array(point_list[i])
        m = np.array(point_list[i + 1])
        a = m - n
        if abs(a[0] * a[1] * a[2]) == 1:
            # All three coordinates change: a single box spans the pair.
            xmin = min(n[0], m[0])
            xmax = max(n[0], m[0])
            ymin = min(n[1], m[1])
            ymax = max(n[1], m[1])
            zmin = min(n[2], m[2])
            zmax = max(n[2], m[2])
            cube_list.append((xmin, ymin, zmin, xmax, ymax, zmax))
        elif a[0] == 0 and abs(a[1]) == 1 and abs(a[2]) == 1:
            # x unchanged: widen by one step in x; after the first pair the
            # side is ambiguous, so add boxes on both sides of the x plane.
            xmin = min(n[0], n[0] + sx)
            xmax = max(n[0], n[0] + sx)
            ymin = min(n[1], m[1])
            ymax = max(n[1], m[1])
            zmin = min(n[2], m[2])
            zmax = max(n[2], m[2])
            if i == 0:
                cube_list.append((xmin, ymin, zmin, xmax, ymax, zmax))
            else:
                cube_list.append((n[0], ymin, zmin, n[0] + 1, ymax, zmax))
                cube_list.append((n[0] - 1, ymin, zmin, n[0], ymax, zmax))
        elif a[1] == 0 and abs(a[0]) == 1 and abs(a[2]) == 1:
            # y unchanged: widen by one step in y.
            xmin = min(n[0], m[0])
            xmax = max(n[0], m[0])
            ymin = min(n[1], n[1] + sy)
            ymax = max(n[1], n[1] + sy)
            zmin = min(n[2], m[2])
            zmax = max(n[2], m[2])
            if i == 0:
                cube_list.append((xmin, ymin, zmin, xmax, ymax, zmax))
            else:
                cube_list.append((xmin, n[1] - 1, zmin, xmax, n[1], zmax))
                cube_list.append((xmin, n[1], zmin, xmax, n[1] + 1, zmax))
        elif a[2] == 0 and abs(a[1]) == 1 and abs(a[0]) == 1:
            # z unchanged: widen by one step in z.
            xmin = min(n[0], m[0])
            xmax = max(n[0], m[0])
            ymin = min(n[1], m[1])
            ymax = max(n[1], m[1])
            zmin = min(n[2], n[2] + sz)
            zmax = max(n[2], n[2] + sz)
            if i == 0:
                cube_list.append((xmin, ymin, zmin, xmax, ymax, zmax))
            else:
                cube_list.append((xmin, ymin, n[2] - 1, xmax, ymax, n[2]))
                cube_list.append((xmin, ymin, n[2], xmax, ymax, n[2] + 1))
        elif a[0] == 0 and a[1] == 0 and a[2] != 0:
            # Pure z move: infer the x/y cell from a neighbouring point
            # (next-next point for the first pair, else the previous one).
            ref = np.array(point_list[i + 2]) if i == 0 else np.array(point_list[i - 1])
            xmin = min(abs(n[0]), abs(ref[0])) * sx
            xmax = xmin + sx
            ymin = min(abs(n[1]), abs(ref[1])) * sy
            ymax = ymin + sy
            zmin = min(n[2], m[2])
            zmax = max(n[2], m[2])
            cube_list.append((xmin, ymin, zmin, xmax, ymax, zmax))
        elif a[1] == 0 and a[2] == 0 and a[0] != 0:
            # Pure x move: infer the y/z cell from a neighbouring point.
            if i == 0:
                ref = np.array(point_list[i + 2])  # next-next point
                print("z=")
                print(ref)
            else:
                ref = np.array(point_list[i - 1])  # previous point
            xmin = min(n[0], m[0])
            xmax = max(n[0], m[0])
            ymin = min(abs(n[1]), abs(ref[1])) * sy
            ymax = ymin + sy
            zmin = min(abs(n[2]), abs(ref[2])) * sz
            zmax = zmin + sz
            cube2 = [(xmin, ymin, zmin), (xmax, ymin, zmin), (xmin, ymax, zmin), (xmax, ymax, zmin),
                     (xmin, ymin, zmax), (xmax, ymin, zmax), (xmin, ymax, zmax), (xmax, ymax, zmax)]
            cube_list.append((xmin, ymin, zmin, xmax, ymax, zmax))
            print(cube2)
    print("cube_list:")
    print(cube_list)
    return cube_list
|
import sys
import pyspark
# Spark bootstrap: one context plus a SQL context for reading JSON inputs.
conf = pyspark.SparkConf()
sc = pyspark.SparkContext(conf=conf)
sqlContext = pyspark.SQLContext(sc)
# CLI arguments: review JSON path, product metadata JSON path, output file.
review_file_path = sys.argv[1]
metadata_file_path = sys.argv[2]
out_file_path = sys.argv[3]
# Step 1 ======================================================================
# Find the number of unique reviewer IDs for each product from the review file.
# (Use pair RDD to accomplish this step, the key is the product ID/asin)
# One pair: e.g. ("5555991584", "A2EFCYXHNK06IS")
# =============================================================================
review_rdd = sqlContext.read.json(review_file_path).rdd
# distinct() removes duplicate (asin, reviewerID) pairs before counting.
asin_id_rdd = review_rdd.map(lambda x: (x['asin'], x['reviewerID'])).distinct()
grouped_asin_id_rdd = asin_id_rdd.groupByKey()
asin_unique_id_rdd = grouped_asin_id_rdd.map(lambda pair: (pair[0], len(pair[1])))
# Step 2 ======================================================================
# Create an RDD, based on the metadata, consisting of key/value-array pairs,
# key is the product ID/asin and value should contain the price of the product.
# =============================================================================
metadata_rdd = sqlContext.read.json(metadata_file_path).rdd
asin_price_rdd = metadata_rdd.map(lambda x: (x['asin'], x['price'])) # Assume all asin already unique
# Step 3 ======================================================================
# Join the pair RDD in Step 2 with the set of product-ID and
# unique reviewer ID count pairs calculated in Step 1.
# Result: e.g. ("5555991584", (1000, 9.49))
# =============================================================================
asin_unique_id_price_rdd = asin_unique_id_rdd.join(asin_price_rdd)
# Step 4 ======================================================================
# Display the product ID, unique reviewer ID count, and the product price
# for the top 10 products based on the unique reviewer ID count.
# =============================================================================
# sortBy(key, False) sorts descending on the reviewer count (x[1][0]).
sorted_asin_unique_id_price_rdd = asin_unique_id_price_rdd.sortBy(lambda x: x[1][0], False)
with open(out_file_path, 'w+') as f:
    for (asin, (count, price)) in sorted_asin_unique_id_price_rdd.take(10):
        f.write(f"{asin} {count} {price}\n")
sc.stop() |
from django.contrib.auth.models import User
from order.models import Order
from django import forms
class OrderForm(forms.ModelForm):
    """Model form for Order; name and phone fields are optional."""

    first_name = forms.CharField(label='First name')
    last_name = forms.CharField(label='Last name')

    class Meta:
        model = Order
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(OrderForm, self).__init__(*args, **kwargs)
        # These fields may be left blank on submission.
        for optional_field in ('first_name', 'last_name', 'phone'):
            self.fields[optional_field].required = False
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
# Create your views here.
import string
import random
from captcha.image import ImageCaptcha
from log_regapp.models import User
def login(request):
    """Serve the login page."""
    template_name = 'log_regapp/login.html'
    return render(request, template_name)
def check_user(request):
    """AJAX login check: respond '0' when credentials match a user, else '1'."""
    name = request.GET.get("username")
    pwd = request.GET.get("userpwd")
    print("*******=======*************")
    matches = User.objects.filter(email=name, password=pwd)
    if not matches:
        return HttpResponse("1")
    # Remember who logged in for the rest of the session.
    request.session['who'] = matches[0].nickname
    request.session['status'] = '1'
    return HttpResponse('0')
def checkcap(request):
    """AJAX captcha check: '1' if the submitted code matches the session code.

    Comparison is case-insensitive. Responds '0' on mismatch.
    """
    captcha = request.GET.get("captcha")
    cod0 = request.session.get("code")
    print("cod0==", cod0)
    # Bug fix: a missing/expired session code (or absent parameter) made
    # .lower() raise AttributeError on None -> HTTP 500; now it just fails.
    if captcha and cod0 and captcha.lower() == cod0.lower():
        return HttpResponse("1")
    return HttpResponse("0")
def regist(request):
    """Render the registration page.

    Fix: the parameter was misspelled `requet`; Django passes the request
    positionally, so renaming it is safe for all callers.
    """
    return render(request, 'log_regapp/regist.html')
def registlogic(request):
    """Handle the registration POST: create the user and log them in."""
    tel=request.POST.get('phone')
    request.session['tel']=tel
    nickname=request.POST.get('nickname')
    password=request.POST.get('txt_password')
    # NOTE(review): password is stored as plain text -- consider hashing.
    User.objects.create(email=tel,nickname=nickname,password=password,status=1)
    request.session['who']=nickname
    request.session['status']='1'
    return redirect('log_regapp:regist_ok')
def regist_ok(request):
    """Show the post-registration confirmation page."""
    t=request.session.get('tel')
    n=request.session.get('who')
    # NOTE(review): raises KeyError if 'tel' is absent (e.g. page refresh) --
    # confirm whether session.pop('tel', None) was intended.
    del request.session['tel']
    print(t)
    return render(request,'log_regapp/regist_ok.html',{'T':t,'N':n})
# AJAX check: does the registration number already exist?
def check(request):
    """AJAX availability check for a registration phone/email.

    Responds '0' when the address already exists (or is empty), '1' when
    it is available.
    """
    name = request.GET.get("username")
    print(name, "====================")  # received from the client
    existing = User.objects.filter(email=name)  # compare against the database
    print(existing, "result")
    if existing:
        return HttpResponse("0")  # taken -> 0
    return HttpResponse("1") if name else HttpResponse("0")
# Generate the captcha image
def getcaptcha(request):
    """Generate a 5-character captcha image; the code is kept in the session."""
    image = ImageCaptcha()  # captcha image renderer
    # NOTE(review): random.sample picks 5 *distinct* characters and is not
    # cryptographically secure; `secrets` would be stronger.
    code = random.sample(string.ascii_letters + string.digits, 5)  # 5 random characters
    code = "".join(code)
    print(code)
    request.session["code"] = code  # stored so checkcap() can verify it later
    data = image.generate(code)
    print(data)
    return HttpResponse(data, "image/png")  # second arg is the content type
def indent(request):
    """Serve the order (indent) page."""
    template_name = 'log_regapp/indent.html'
    return render(request, template_name)
def order_info(request):
    """Collect shipping info from the POST and hand off to the payment flow."""
    name=request.POST.get('receive_people')
    print(name)
    address=request.POST.get('position')
    post_code=request.POST.get('post_code')
    tel=request.POST.get('tel')
    cart=request.session.get('cart')
    print(name,address,post_code,tel)
    # NOTE(review): assumes a 'cart' object with .total_price is in the
    # session; AttributeError if missing -- confirm upstream guarantees.
    return redirect('payapp:page1', cart.total_price)
|
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import thinkplot
from matplotlib import rc
rc('animation', html='jshtml')
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
from thinkstats2 import RandomSeed
RandomSeed(17)
from Cell2D import Cell2D, Cell2DViewer
from scipy.signal import correlate2d
def add_island(a, height=0.1):
    """Raise a square "island" centered in array `a` by `height`.

    The island's half-width is 1/20 of the smaller array dimension.
    height: height of the island
    """
    rows, cols = a.shape
    half = min(rows, cols) // 20
    r, c = rows // 2, cols // 2
    a[r - half:r + half, c - half:c + half] += height
class ReactionDiffusion(Cell2D):
    """Reaction-Diffusion Cellular Automaton (Gray-Scott style model)."""
    # Diffusion kernel: discrete Laplacian including diagonal neighbors.
    kernel = np.array([[.05, .2, .05], [.2, -1, .2], [.05, .2, .05]])
    options = dict(mode='same', boundary = 'wrap')
    def __init__(self, n, params, noise = 0.1):
        """Initializes the attributes.
        n: number of rows
        params: tuple of (ra, rb, f, k) -- diffusion rates, feed rate, kill rate
        noise: amplitude of the random initial B concentration
        """
        self.params = params
        # array: concentration of chemical A, starts saturated at 1.
        self.array = np.ones((n, n), dtype = float)
        # array2: concentration of chemical B, random noise plus a central island.
        self.array2 = noise*np.random.random((n, n))
        add_island(self.array2)
    def step(self):
        """Executes one time step."""
        A = self.array
        B = self.array2
        ra, rb, f, k = self.params
        # Diffusion terms with wrap-around boundary.
        cA = correlate2d(A, self.kernel, **self.options)
        cB = correlate2d(B, self.kernel, **self.options)
        # Reaction is computed once, before either array is updated in place,
        # so both updates use the same pre-step concentrations.
        reaction = A*B**2
        self.array += ra * cA - reaction + f*(1-A)
        self.array2 += rb *cB + reaction - (f+k)*B
class RDViewer(Cell2DViewer):
    """Generates images and animations of a ReactionDiffusion automaton."""
    # A is drawn in reds, B in blues, alpha-blended on top of each other.
    cmapu = plt.get_cmap('Reds')
    cmapv = plt.get_cmap('Blues')
    options = dict(alpha = 0.7, interpolation = 'bicubic')
    def __init__(self, viewee):
        """Initializes the attributes.
        viewee: the ReactionDiffusion object to be represented
        """
        self.viewee = viewee
        # Image handles, created lazily by draw().
        self.imu = None
        self.imv = None
    def draw(self, grid = False):
        """Draws the cells as two overlaid images (A over B)."""
        au = self.viewee.array.copy()
        av = self.viewee.array2.copy()
        n, m = av.shape
        plt.axis([0, m, 0, n])
        plt.xticks([])
        plt.yticks([])
        self.options['extent'] = [0, m, 0, n]
        self.imu = plt.imshow(au, cmap = self.cmapu, **self.options)
        self.imv = plt.imshow(av, cmap = self.cmapv, **self.options)
    def animate_func(self, i):
        """Draws one frame of the animation"""
        if i > 0:
            # NOTE(review): step() presumably comes from Cell2DViewer and
            # advances the viewee 100 iterations per frame -- confirm.
            self.step(iters = 100)
        self.imu.set_array(self.viewee.array)
        self.imv.set_array(self.viewee.array2)
        return(self.imu, self.imv)
def make_viewer(f, k, n=100, ra=0.5, rb=0.25):
    """Makes a ReactionDiffusion viewer with given parameters.

    f, k: feed and kill rates
    n: grid size (n x n)
    ra, rb: diffusion rates for A and B; defaults preserve the previously
            hard-coded 0.5 / 0.25 values, so existing callers are unaffected.
    """
    params = ra, rb, f, k
    rd = ReactionDiffusion(n, params)
    viewer = RDViewer(rd)
    return viewer
# Demo parameter sets: (ra, rb, f, k)
params1 = 0.5, 0.25, 0.035, 0.057 #white spots
params2 = 0.5, 0.25, 0.055, 0.062 #coral
params3 = 0.5, 0.25, 0.039, 0.065 #blue spots
rd = ReactionDiffusion(n=100, params=params3)
viewer = RDViewer(rd)
# Keep a reference to the animation so it is not garbage-collected.
anim = viewer.animate(frames=20)
# Bug fix: plt.show() accepts no positional animation argument; passing
# `anim` raises a TypeError on current matplotlib.
plt.show()
# using the make_viewer function
# viewer = make_viewer(0.035, 0.057)
# thinkplot.preplot(cols = 3)
# viewer.step(1000)
# viewer.draw()
# thinkplot.subplot(2)
# viewer.step(2000)
# viewer.draw()
# thinkplot.subplot(3)
# viewer.step(4000)
# viewer.draw()
# thinkplot.tight_layout()
# thinkplot.save('chapter7-2')
# plt.show() |
import os
import pandas as pd
from functools import partial, reduce
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import random
import gc
#=================== Environment variables ===================
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Dense, Lambda, Conv3D
from tensorflow.keras.layers import Activation, BatchNormalization
from tensorflow.keras.layers import Input, concatenate, Add, Flatten, Dropout
from tensorflow.keras.layers import GlobalAveragePooling3D, GlobalMaxPooling3D, MaxPooling3D
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Model
import horovod.tensorflow.keras as hvd
#======================= Set up Horovod ======================
# comment out this chunk of code if you train with 1 gpu
hvd.init()
# Pin each Horovod process to its own GPU and let GPU memory grow on demand.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
#========================= Build Model =======================
def __default_conv3D(input, filters=8, kernel_size=3, strides=(1,1,1), weight_decay = 1e-4, **kwargs):
    '''
    Description: apply a Conv3D layer with the project-wide default settings
                 (SAME padding, he_normal init, l2 weight decay) to `input`.
    '''
    conv = keras.layers.Conv3D(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding="SAME",
        use_bias=True,
        kernel_regularizer=keras.regularizers.l2(weight_decay),
        kernel_initializer="he_normal",
        **kwargs
    )
    return conv(input)
def __init_conv(input, filters=64, strides=(1,1,1), weight_decay=5e-4):
    '''
    Description: initial convolutional layers before ResNeXt block
                 (conv -> batch norm -> relu -> 2x2x2 max pool)
    Args:   input: input tensor
            filters: number of filters
            strides: strides, must be a tuple
            weight_decay: parameter for l2 regularization
    Return: output tensor
    '''
    x = __default_conv3D(input, filters=filters, strides=strides, weight_decay=weight_decay)
    x = BatchNormalization(axis = -1)(x)
    x = Activation('relu')(x)
    x = MaxPooling3D(pool_size = (2,2,2))(x)
    return x
def __init_grouped_conv(input, filters = 128, strides = (1,1,1), weight_decay = 5e-4):
    """Initial grouped convolution: one conv over all input channels plus a
    2-filter conv per individual channel, concatenated to `filters` channels
    total, followed by batch norm and relu.
    """
    # Bulk conv sized so that, with 2 filters per input channel below,
    # the concatenated result has exactly `filters` channels.
    init = __default_conv3D(input, filters = filters - input.shape[-1] * 2, strides=strides, weight_decay=weight_decay)
    group_channel = [init]
    for i in range(input.shape[-1]):
        # The Lambda is applied to `input` immediately, so `i` is captured
        # at its current value (no late-binding issue here).
        x = Lambda(lambda z:z[:, :, :, :, i])(input)
        x = tf.keras.backend.expand_dims(x, -1)
        x = __default_conv3D(x, filters = 2, strides = strides, weight_decay=weight_decay)
        group_channel.append(x)
    group_merge = concatenate(group_channel, axis = -1)
    x = BatchNormalization()(group_merge)
    x = Activation('relu')(x)
    return x
def __bottleneck_layer(input, filters = 64, kernel_size = 3, strides = (1,1,1), cardinality = 16, weight_decay = 5e-4):
    '''
    Description: bottleneck layer for a single path(cardinality = 1):
                 1x1 reduce -> kxk conv -> 1x1 expand back to `filters`.
    Args:   input: input tensor
            filters : number of filters for the last layer in a single path, suppose to be total number
                     of filters // cardinality of ResNeXt block.
            strides : strides, must be tuple of 3 elements; only the first
                      conv applies them (spatial downsampling happens once).
    '''
    x = input
    x = __default_conv3D(x, filters = filters // 2 // cardinality, kernel_size = 1, strides = strides, weight_decay=weight_decay)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = __default_conv3D(x, filters = filters // 2 // cardinality, kernel_size = kernel_size, strides = (1,1,1), weight_decay=weight_decay)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # No activation after the expansion conv: relu is applied after the
    # residual addition in the enclosing block.
    x = __default_conv3D(x, filters = filters, kernel_size = 1, strides = (1,1,1), weight_decay=weight_decay)
    return x
def __ResNeXt_block(input, filters = 64, kernel_size = 3, strides = (1,1,1), cardinality = 16, weight_decay = 5e-4):
    '''
    Description: refer to the ResNeXt architechture. One ResNeXt_block contains several paths (cardinality) of bottleneck layers joint by a skip connection.
    Args:   input: input tensor
            filters: output channels of the block
            strides: tuple of 3; strides[0] > 1 triggers a projection shortcut
            cardinality: number of parallel bottleneck paths
            weight_decay: l2 regularization parameter
    Return: output tensor
    '''
    if strides[0] == 1:
        # Identity shortcut: shapes already match.
        init = input
    else:
        # Projection shortcut to match the downsampled shape.
        # Bug fix: this was `elif strides[0] > 1`, which left `init`
        # undefined (NameError) for any other stride value.
        init = __default_conv3D(input, filters = filters, kernel_size=kernel_size, strides=strides, weight_decay = weight_decay)
        init = BatchNormalization()(init)
    x = [init]
    for i in range(cardinality):
        x_sub = __bottleneck_layer(input, filters = filters, kernel_size=kernel_size, strides=strides, cardinality=cardinality, weight_decay=weight_decay)
        x_sub = BatchNormalization()(x_sub)
        x.append(x_sub)
    x = Add()(x)
    x = Activation('relu')(x)
    return x
def create_model(input, filters = 64, depth = (2,2,2), cardinality = 16, weight_decay = 5e-4):
    '''
    Description: multimodal regression head: a 3D ResNeXt trunk over the fMRI
                 volume (input_1), merged with the tabular inputs, producing
                 five scalar outputs.
    Args:   input: tuple of three tf tensors (volume, loading, fnc)
            filters: filter numbers of initial convolutional layer and first chunk ResNeXt blocks. Filter number doubles there after
            depth: a tuple of number of ResNeXt blocks for each step of feature map resolution.
            cardinality: number of bottleneck layer paths
            weight_decay: l2 regularization parameter
    Return: output: tuple of five scalar output tensors
    '''
    input_1, input_2, input_3 = input
    N = len(depth)
    # Filter count doubles at each resolution step: filters, 2*filters, ...
    filter_list = []
    for i in range(N):
        filter_list.append(filters * (2**i))
    x = __init_grouped_conv(input_1, filters=filters, strides=(2,2,2), weight_decay=weight_decay)
    # First block of each step downsamples (stride 2); the rest keep size.
    # Note: the loop variable deliberately rebinds `filters` per step.
    for dep, filters in zip(depth, filter_list):
        for i in range(dep):
            strides = (2,2,2) if i == 0 else (1,1,1)
            x = __ResNeXt_block(x, filters = filters, strides=strides, cardinality = cardinality, weight_decay = weight_decay)
    x = GlobalAveragePooling3D()(x)
    x = Flatten()(x)
    x = Dropout(0.3)(x)
    #x = Dense(128, activation = 'relu', kernel_regularizer = keras.regularizers.l2(weight_decay))(x)
    #x = Dropout(0.3)(x)
    #x = Dense(64, activation = 'relu', kernel_regularizer = keras.regularizers.l2(weight_decay))(x)
    #x = Dropout(0.3)(x)
    #x = Dense(32, activation = 'relu', kernel_regularizer = keras.regularizers.l2(weight_decay))(x)
    #x = Dropout(0.3)(x)
    #x = Dense(5, activation = 'linear', kernel_regularizer = keras.regularizers.l2(weight_decay))(x)
    # Fuse the volume embedding with the fnc embedding and raw loading input.
    x = Dense(64, activation = 'relu', kernel_regularizer = keras.regularizers.l2(weight_decay))(x)
    y = Dense(64, activation = 'relu', kernel_regularizer = keras.regularizers.l2(weight_decay))(input_3)
    x = keras.layers.concatenate([x,y,input_2], axis = -1)
    # One linear regression head per target score.
    output_1 = Dense(1, activation = 'linear', kernel_regularizer = keras.regularizers.l2(weight_decay), name = 'output_1')(x)
    output_2 = Dense(1, activation = 'linear', kernel_regularizer = keras.regularizers.l2(weight_decay), name = 'output_2')(x)
    output_3 = Dense(1, activation = 'linear', kernel_regularizer = keras.regularizers.l2(weight_decay), name = 'output_3')(x)
    output_4 = Dense(1, activation = 'linear', kernel_regularizer = keras.regularizers.l2(weight_decay), name = 'output_4')(x)
    output_5 = Dense(1, activation = 'linear', kernel_regularizer = keras.regularizers.l2(weight_decay), name = 'output_5')(x)
    return output_1, output_2, output_3, output_4, output_5
#================= Build Data pipeline =================
def normalize_channel(img):
shape = img.shape
for i in range(shape[0]):
map = img[i,:,:,:]
mean = np.mean(map)
std = np.std(map)
if std == 0.0:
pass
else:
img[i,:,:,:] = (map - mean) / std
img = img.transpose()
return img
def normalize(img):
    """Standardize the whole array (zero mean, unit variance) and return it
    transposed.
    """
    centered = img - np.mean(img)
    return (centered / np.std(img)).transpose()
def FeatureGenerator(file_list,loading_np, fnc_np, augmentation = False):
    """Build a generator factory yielding (image, loading, fnc) triples.

    file_list: paths to pickled fMRI volumes
    loading_np / fnc_np: per-sample tabular features aligned with file_list
    augmentation: when True, randomly flip the volume along axis 1 or 2
                  (aug == 3, about 1/3 of the time, keeps it unflipped)
    """
    def generator():
        for file, load, fnc in zip(file_list, loading_np, fnc_np):
            gc.collect(2)  # keep memory bounded while streaming large volumes
            #ith h5py.File(file, "r") as f:
            #img = f["SM_feature"][()]
            # NOTE(review): pickle.load on data files -- safe only for
            # trusted inputs.
            img = pickle.load(open(file, "rb"))
            img = normalize(img)
            #y = (i.reshape((1,)) for i in y)
            if augmentation:
                aug = random.randint(1,3)
                if aug == 3:
                    #img = tf.convert_to_tensor(img, dtype = tf.float32)
                    yield img, load, fnc
                else:
                    img = np.flip(img, axis = aug)
                    yield img, load, fnc
            else:
                yield img, load, fnc
    return generator
def LabelGenerator(y_list):
    """Wrap `y_list` rows (5 targets each) in a generator factory that yields
    five shape-(1,) arrays per row, as expected by Dataset.from_generator.
    """
    def generator():
        for row in y_list:
            yield tuple(row[j].reshape((1,)) for j in range(5))
    return generator
def DatasetReader(file_list, loading_np, fnc_np, y_list, shuffle_size, batch_size, augmentation = False):
    """Build the tf.data pipeline: zip (image, loading, fnc) features with the
    five regression targets, then batch, shuffle, repeat and shard per
    Horovod worker.
    """
    generator1 = FeatureGenerator(file_list, loading_np, fnc_np, augmentation=augmentation)
    dataset1 = tf.data.Dataset.from_generator(generator1,
                                              output_types = (tf.float32, )*3,
                                              output_shapes = (tf.TensorShape((53, 63, 52, 53)),
                                                               tf.TensorShape((26,)),
                                                               tf.TensorShape((1383,))
                                                              )
                                              )
    generator2 = LabelGenerator(y_list)
    dataset2 = tf.data.Dataset.from_generator(generator2,
                                              output_types = (tf.float32, )*5,
                                              output_shapes = (tf.TensorShape((1,)),)*5
                                              )
    dataset = tf.data.Dataset.zip((dataset1, dataset2))
    # NOTE(review): .batch() before .shuffle() shuffles whole batches rather
    # than individual samples -- confirm this ordering is intended.
    dataset = dataset.batch(batch_size).shuffle(shuffle_size).repeat().shard(hvd.size(), hvd.rank())
    #return dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset.prefetch(1)
DATA_PATH = "../fMRI_train_pk"
# Load training targets; rows with any missing score are dropped.
df = pd.read_csv("train_scores.csv")
df = df.dropna()
file_ls = []
y_ls = []
for _, row in df.iterrows():
    file_ls.append(os.path.join(DATA_PATH, str(int(row["Id"]))+".pk"))
    # First column is the Id; the remaining five are the regression targets.
    ys = [item for _, item in row.iteritems()]
    y_ls.append(ys[1:])
y_ls = np.array(y_ls, dtype = np.float32)
# Pre-pickled tabular features aligned row-for-row with file_ls.
loading_np = pickle.load(open('loading.pk', 'rb'))
fnc_np = pickle.load(open('fnc.pk', 'rb'))
# 70/15/15 split into train / validation / final evaluation sets.
train_f, test_f, train_load, test_load, train_fnc, test_fnc, train_label, test_label = train_test_split(
    file_ls, loading_np, fnc_np, y_ls, test_size = 0.3, random_state = 42
)
val_f, evl_f, val_load, evl_load, val_fnc, evl_fnc, val_label, evl_label = train_test_split(
    test_f, test_load, test_fnc, test_label, test_size = 0.5, random_state = 42
)
'''
load_sc = StandardScaler()
fnc_sc = StandardScaler()
train_load = load_sc.fit_transform(train_load)
train_fnc = fnc_sc.fit_transform(train_fnc)
val_load = load_sc.transform(val_load)
val_fnc = fnc_sc.transform(val_fnc)
evl_load = load_sc.transform(evl_load)
evl_fnc = fnc_sc.transform(evl_fnc)
'''
BATCH_SIZE = 8
train_set = DatasetReader(train_f, train_load, train_fnc, train_label, 16, BATCH_SIZE, augmentation = True)
val_set = DatasetReader(val_f, val_load, val_fnc, val_label, 8, BATCH_SIZE // 2, augmentation = False)
evl_set = DatasetReader(evl_f, evl_load, evl_fnc, evl_label, 8, BATCH_SIZE // 2, augmentation = False)
#================== Configure Callbacks ==================
# Save the model each time validation loss improves.
checkpoint_cb = keras.callbacks.ModelCheckpoint("./my_logs/multimodal/ResNeXt_ft128_dep22_w5-4_car16_{epoch}.h5",
                                                monitor = 'val_loss', mode = 'min',
                                                save_best_only=True
)
class PrintValTrainRatioCallback(keras.callbacks.Callback):
    # Logs the overfitting indicator val_loss / train_loss after each epoch.
    def on_epoch_end(self, epoch, logs):
        print("\nval/train: {:.2f} \n".format(logs["val_loss"] / logs["loss"]))
root_logdir = os.path.join(os.curdir, "./my_logs/multimodal")
def get_run_logdir(comment="_ResNeXt_ft128_dep22_w5-4_car16_NAdam0.0001_drop0.3_flip_continue"):
    """Return a unique, timestamped TensorBoard run directory."""
    import time
    run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S{}".format(comment))
    return os.path.join(root_logdir, run_id)
run_logdir = get_run_logdir()
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
# Horovod: rank 0 broadcasts initial weights; only rank 0 logs and saves.
callbacks = [
    hvd.callbacks.BroadcastGlobalVariablesCallback(0)
]
if hvd.rank() == 0:
    callbacks.append(tensorboard_cb)
    callbacks.append(checkpoint_cb)
#================== Training ==================
'''
input_1 = Input(shape = (53, 63, 52, 53), dtype = tf.float32, name = 'input_1')
input_2 = Input(shape = (26,), dtype = tf.float32, name = 'input_2')
input_3 = Input(shape = (1383), dtype = tf.float32, name = 'input_3')
input = (input_1, input_2, input_3)
output = create_model(input, filters = 128,depth=(2,2), cardinality=16, weight_decay = 5e-4)
model = Model(input, output)
optimizer = keras.optimizers.Nadam(
    learning_rate=0.0001 * hvd.size(), beta_1=0.9, beta_2=0.999, epsilon=1e-08
)
# set up Horovod
optimizer = hvd.DistributedOptimizer(optimizer)
model.compile(loss="mape",
              optimizer=optimizer,
              metrics=["mape"],
              loss_weights = [0.3, 0.175, 0.175, 0.175, 0.175],
              experimental_run_tf_function=False)
'''
# Resume training from a saved checkpoint; the commented block above shows
# how the model was originally constructed and compiled.
model = keras.models.load_model('./my_logs/multimodal/ResNeXt_ft128_dep22_w5-4_car16.h5')
# Only rank 0 prints progress; other workers train silently.
history = model.fit(train_set, steps_per_epoch= 256 // BATCH_SIZE, epochs=100,
                    validation_data=val_set,
                    validation_steps=256 // 4,
                    callbacks=callbacks,
                    verbose = 1 if hvd.rank() == 0 else 0
) |
#!/usr/bin/python3
"""LockedClass Module"""
class LockedClass():
    """LockedClass: instances accept only the `first_name` attribute."""

    # __slots__ removes the per-instance __dict__, so assigning any other
    # attribute raises AttributeError.
    __slots__ = ['first_name']

    def __init__(self, first_name=''):
        """Store the (optional) first name."""
        self.first_name = first_name
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Plot comparison counts of std::sort vs std::nth_element from a benchmark CSV.
df = pd.read_csv("task_21.csv")
# print(df["std::sort"])
fig, ax = plt.subplots(1, 1)
# X axis: element counts in steps of 10, one CSV row per step.
ax.plot([i*10 for i in range(len(df["std::sort"]))], df["std::sort"], label='std::sort')
ax.plot([i*10 for i in range(len(df["std::nth_element"]))], df["std::nth_element"], label="std::nth_element")
ax.set_xlabel("num_elements")
ax.set_ylabel("num_comparisons")
plt.legend()
plt.savefig("plot.pdf")
|
from django.contrib import admin
from Avaliacao.models import TemplateAvaliacao
from Avaliacao.Questao.models import FiltroQuestao
class FiltroQuestaoInline(admin.TabularInline):
    """Inline editor for FiltroQuestao rows attached to a TemplateAvaliacao."""
    model = FiltroQuestao
    extra = 4  # number of blank inline forms shown
class TemplateAvaliacaoAdmin(admin.ModelAdmin):
    """Admin configuration for TemplateAvaliacao."""
    fieldsets = [
        ('Template de Avaliacao', {'fields': ['titulo','turma','ativa','data_inicio','data_termino','permite_simulado']}),
    ]
    readonly_fields = ('ativa',)  # shown but not hand-editable
    inlines = [FiltroQuestaoInline]
    list_display = ('titulo',)
admin.site.register(TemplateAvaliacao, TemplateAvaliacaoAdmin)
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright: Fabien Rosso
# Version 0.1.1 - 19 Avril 2016
# Version 0.1.2 - 29 Avril 2016
import pickle
from datetime import date, time, datetime
def convertInt (listeStr):
    """Convert a list of numeric strings into a list of ints."""
    return [int(token) for token in listeStr]
# Prompt for the capture schedule and server settings, then pickle them for
# the time-lapse controller (Python 2 script: raw_input returns str).
dateStart = convertInt(raw_input("Entrez la date de début (jj.mm.aaaa): ").split('.'))
dateStop = convertInt(raw_input("Entrez la date de fin (jj.mm.aaaa): ").split('.'))
timleSlotMini = convertInt(raw_input("Entrez une heure de début (hh:mm): ").split(':'))
timleSlotMaxi = convertInt(raw_input("Entrez une heure de fin (hh:mm): ").split(':'))
interShotDelaySec = float(raw_input("Entrez l'intervalle entre les photos (minute): "))*60
host = raw_input("Entrez l'ip du serveur (77.147.64.38): ")
if host == "":
    host = "77.147.64.38"
port = 50100
portSsh = raw_input("Entrez le port de connection ssh: ")
# Bug fix: the original tested `user` here -- a variable only defined two
# lines below -- so an empty answer raised NameError. The intent was to
# default the ssh port.
if portSsh == "":
    portSsh = 525
user = raw_input("Entrez le nom de l'utilisateur sur le serveur (spm) : ")
if user == "":
    user = "spm"
# Build datetime/time objects from the parsed jj.mm.aaaa and hh:mm lists.
dateStart = datetime(dateStart[2],dateStart[1],dateStart[0],timleSlotMini[0],timleSlotMini[1])
dateStop = datetime(dateStop[2],dateStop[1],dateStop[0],timleSlotMaxi[0],timleSlotMaxi[1])
timleSlotMini = time(timleSlotMini[0],timleSlotMini[1])
timleSlotMaxi = time(timleSlotMaxi[0],timleSlotMaxi[1])
# Server connection settings.
with open("configServer", "wb") as fichierConfig:
    configWrite = pickle.Pickler(fichierConfig)
    configWrite.dump(host)
    configWrite.dump(port)
    configWrite.dump(user)
    configWrite.dump(portSsh)
# Capture schedule settings, saved to the config file.
with open("config", "wb") as fichierConfig:
    configWrite = pickle.Pickler(fichierConfig)
    configWrite.dump(dateStart)
    configWrite.dump(dateStop)
    configWrite.dump(timleSlotMini)
    configWrite.dump(timleSlotMaxi)
    configWrite.dump(interShotDelaySec)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-06-23 12:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the pre-computed heart-rate columns on ``aacalculations`` and
    replace them with a single free-form ``data`` text field."""

    dependencies = [
        ('hrr', '0011_remove_timeheartzones_activity_summary_id'),
    ]

    # The eleven old per-metric columns are removed in declaration order,
    # then the raw payload column is added.
    operations = [
        migrations.RemoveField(
            model_name='aacalculations',
            name=removed_column,
        )
        for removed_column in (
            'avg_heart_rate',
            'duration_below_aerobic_range',
            'duration_hrr_not_recorded',
            'duration_in_aerobic_range',
            'duration_in_anaerobic_range',
            'max_heart_rate',
            'percent_aerobic',
            'percent_anaerobic',
            'percent_below_aerobic',
            'percent_hrr_not_recorded',
            'total_duration',
        )
    ] + [
        migrations.AddField(
            model_name='aacalculations',
            name='data',
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
# Minimal blocking TCP chat server; all transmitted data is bytes.
import socket
# Create the socket (IPv4, TCP).
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to an address and port.
s.bind(('localhost', 8080))
# Listen with a backlog of 5.
s.listen(5)
print('我正在等待数据')
# Accept a single client connection.
conn, address = s.accept()
# Request/response loop.
while True:
    res = conn.recv(1024)  # recv blocks until data arrives
    print('--他:', res.decode())  # show the peer's message
    data = input('我:')  # read our reply from stdin
    conn.send(data.encode())  # send the reply back
# NOTE(review): unreachable - the loop above never terminates normally,
# so the socket is only released when the process dies.
s.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 22 16:38:56 2019
@author: nico
"""
# %% import the required packages
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statistics as stats
import sys
sys.path.append('/home/nico/Documentos/facultad/6to_nivel/pds/git/pdstestbench')
from pdsmodulos.signals import signals as sg
from pdsmodulos.signals import FFT
# %% clean up the environment
os.system("clear")  # clear the python terminal
plt.close("all")  # close all open figures
# %% experiment parameters
N = 1000  # samples
fs = 1000  # Hz
df = fs / N  # spectral resolution, Hz
a0 = 1  # Volts
p0 = 0  # radians
f0 = df  # Hz
# %% pre-allocate the working variables / energy accumulators
signal = np.zeros(N)
ruido = np.zeros(N)
energia_frecuencia4 = 0
energia_frecuencia8 = 0
energia_frecuencia16 = 0
energia_q4 = 0
energia_q8 = 0
energia_q16 = 0
energia_e4 = 0
energia_e8 = 0
energia_e16 = 0
# %% generate and sample the sinusoid and the noise
# (sg.seno / sg.noise come from the project module pdsmodulos)
tt, signal = sg.seno(fs, f0, N, a0, p0)
tt, ruido = sg.noise(fs, f0, N, a0, SNR=10.0)
SR = signal + ruido  # noisy signal
# %% quantize the noisy signal at 4, 8 and 16 bits
SQ4 = sg.quantizer(SR, 4)
SQ8 = sg.quantizer(SR, 8)
SQ16 = sg.quantizer(SR, 16)
# %% quantization-error signals
e4 = SR - SQ4
e8 = SR - SQ8
e16 = SR - SQ16
# %% FFT of the noisy signal
fftSR = np.fft.fft(SR)
mod_fftSR = abs(fftSR)
# FFT of the quantized signals
fftSQ4 = np.fft.fft(SQ4)
mod_fftSQ4 = abs(fftSQ4)
fftSQ8 = np.fft.fft(SQ8)
mod_fftSQ8 = abs(fftSQ8)
fftSQ16 = np.fft.fft(SQ16)
mod_fftSQ16 = abs(fftSQ16)
# FFT of the error signals
ffte4 = np.fft.fft(e4)
mod_ffte4 = abs(ffte4)
ffte8 = np.fft.fft(e8)
mod_ffte8 = abs(ffte8)
ffte16 = np.fft.fft(e16)
mod_ffte16 = abs(ffte16)
# %% energy computations (spectral sums normalized by N**2, cf. Parseval)
# Spectral energies of the noisy signal.
# NOTE(review): the three loops below all sum mod_fftSR, so the three
# energia_frecuencia* values are identical by construction - presumably
# they were meant to use per-resolution spectra; confirm with the author.
for ii in range(0, N):
    energia_frecuencia4 += mod_fftSR[ii]**2
energia_frecuencia4 /= N**2
for ii in range(0, N):
    energia_frecuencia8 += mod_fftSR[ii]**2
energia_frecuencia8 /= N**2
for ii in range(0, N):
    energia_frecuencia16 += mod_fftSR[ii]**2
energia_frecuencia16 /= N**2
# Energies of the quantized signals
for ii in range(0, N):
    energia_q4 += mod_fftSQ4[ii]**2
energia_q4 /= N**2
for ii in range(0, N):
    energia_q8 += mod_fftSQ8[ii]**2
energia_q8 /= N**2
for ii in range(0, N):
    energia_q16 += mod_fftSQ16[ii]**2
energia_q16 /= N**2
# Energies of the error signals
for ii in range(0, N):
    energia_e4 += mod_ffte4[ii]**2
energia_e4 /= N**2
for ii in range(0, N):
    energia_e8 += mod_ffte8[ii]**2
energia_e8 /= N**2
for ii in range(0, N):
    energia_e16 += mod_ffte16[ii]**2
energia_e16 /= N**2
# NOTE(review): 'resutladosSq' is a typo for 'resultadosSq', kept because
# the table code below references this exact name.
resultadosSR = [energia_frecuencia4, energia_frecuencia8, energia_frecuencia16]
resutladosSq = [energia_q4, energia_q8, energia_q16]
resultadose = [energia_e4, energia_e8, energia_e16]
# %% mean value of each error signal e*
valor_medio_e4 = 0
for ii in range(0, N):
    valor_medio_e4 += e4[ii]
valor_medio_e4 /= N
valor_medio_e8 = 0
for ii in range(0, N):
    valor_medio_e8 += e8[ii]
valor_medio_e8 /= N
valor_medio_e16 = 0
for ii in range(0, N):
    valor_medio_e16 += e16[ii]
valor_medio_e16 /= N
# %% RMS value of each error signal
valor_RMS_e4 = 0
for ii in range(0, N):
    valor_RMS_e4 += e4[ii]**2
valor_RMS_e4 /= N
valor_RMS_e4 = np.sqrt(valor_RMS_e4)
valor_RMS_e8 = 0
for ii in range(0, N):
    valor_RMS_e8 += e8[ii]**2
valor_RMS_e8 /= N
valor_RMS_e8 = np.sqrt(valor_RMS_e8)
valor_RMS_e16 = 0
for ii in range(0, N):
    valor_RMS_e16 += e16[ii]**2
valor_RMS_e16 /= N
valor_RMS_e16 = np.sqrt(valor_RMS_e16)
# %% statistics via the statistics module (pure Python, slow on arrays)
# mean value
vm_e4 = stats.mean(e4)
vm_e8 = stats.mean(e8)
vm_e16 = stats.mean(e16)
# variance
var_e4 = stats.variance(e4)
var_e8 = stats.variance(e8)
var_e16 = stats.variance(e16)
# standard deviation
dstd_e4 = stats.stdev(e4)
dstd_e8 = stats.stdev(e8)
dstd_e16 = stats.stdev(e16)
resultado_vm = [vm_e4, vm_e8, vm_e16]
resultado_var = [var_e4, var_e8, var_e16]
resultado_dstd = [dstd_e4, dstd_e8, dstd_e16]
# %% time-domain plots
# sine signal
plt.figure("Gráfico de la señal temporal")
plt.subplot(3, 1, 1)
plt.plot(tt, signal, 'b', label='f0 = 1Hz ')
plt.xlabel('tiempo [S]')
plt.ylabel('Amplitud [UA]')
plt.grid()
plt.title('Gráfico de la señal temporal sin ruido')
plt.legend(loc='upper right')
# noise
plt.subplot(3, 1, 2)
plt.plot(tt, ruido, 'b', label='f0 = 1Hz y SNR=10')
plt.xlabel('tiempo [S]')
plt.ylabel('Amplitud [UA]')
plt.grid()
plt.title('Gráfico el ruido con SNR = 10')
plt.legend(loc='upper right')
# noisy sine
plt.subplot(3, 1, 3)
plt.plot(tt, SR, 'b', label='f0 = 1Hz y SNR =10')
plt.xlabel('tiempo [S]')
plt.ylabel('Amplitud [UA]')
plt.grid()
plt.title('Gráfico de la señal temporal con ruido con SNR=10')
plt.legend(loc='upper right')
plt.tight_layout()
# %% quantized-signal plots
plt.figure("Gráfico de la señal cuantizada")
plt.subplot(3, 1, 1)
plt.plot(tt, SQ4, 'b', label='SQ4')
plt.xlabel('tiempo [S]')
plt.ylabel('Amplitud [UA]')
plt.grid()
plt.title('Gráfico de la señal cuantizada con SQ4')
plt.legend(loc='upper right')
# 8-bit quantization
plt.subplot(3, 1, 2)
plt.plot(tt, SQ8, 'b', label='SQ8')
plt.xlabel('tiempo [S]')
plt.ylabel('Amplitud [UA]')
plt.grid()
plt.title('Gráfico de la señal cuantizada con SQ8')
plt.legend(loc='upper right')
# 16-bit quantization
plt.subplot(3, 1, 3)
plt.plot(tt, SQ16, 'b', label='SQ16')
plt.xlabel('tiempo [S]')
plt.ylabel('Amplitud [UA]')
plt.grid()
plt.title('Gráfico de la señal cuantizada con SQ16')
plt.legend(loc='upper right')
plt.tight_layout()
# %% error-signal plots
plt.figure("Gráfico de la señal de error")
plt.plot(tt, e4, 'b', label=' SQ = 4')
plt.plot(tt, e8, 'r', label=' SQ = 8')
plt.plot(tt, e16, 'g', label=' SQ = 16')
plt.xlabel('tiempo [S]')
plt.ylabel('Amplitud [UA]')
plt.grid()
plt.title('Gráfico de las señales de error')
plt.legend(loc='upper right')
# %% histograms of the quantization errors
plt.figure("Histograma de errores de cuantificación ")
plt.hist(e4, bins=50, alpha=1, edgecolor='black', linewidth=1, label="e4")
plt.hist(e8, bins=50, alpha=1, edgecolor='black', linewidth=1, label="e8")
# NOTE(review): label "416" looks like a typo for "e16" - confirm and fix.
plt.hist(e16, bins=50, alpha=1, edgecolor='black', linewidth=1, label="416")
plt.legend(loc='upper right')
plt.ylabel('frecuencia')
plt.xlabel('valores')
plt.title('histograma de errores de cuantificación')
plt.show()
# %% pandas summary table
tus_resultados = [ ['$\sum_{f=0}^{f_S/2} \lvert S_R(f) \rvert ^2$', '$\sum_{f=0}^{f_S/2} \lvert S_Q(f) \rvert ^2$', '$\sum_{f=0}^{f_S/2} \lvert e(f) \rvert ^2$' ],
                   ['', '', '' ],
                   [resultadosSR[0], resutladosSq[0], resultadose[0]],  # <-- fill in here
                   [resultadosSR[1], resutladosSq[1], resultadose[1]],  # <-- fill in here
                   [resultadosSR[2], resutladosSq[2], resultadose[2]],  # <-- fill in here
                 ]
# NOTE(review): this rebinds ``df``, which previously held the spectral
# resolution fs/N.
df = pd.DataFrame(tus_resultados, columns=['Energía total', 'Energía total Q', 'Energía total $e$'],
                  index=['$f_0$ \ expr. matemática',
                         '',
                         '4 bits',
                         '8 bits',
                         '16 bits'
                        ])
print(df)
#!/bin/python3
import os
import sys
#
# Complete the twoStacks function below.
#
def twoStacks(x, a, b, n, m):
    """Game of Two Stacks: maximum number of values removable from the tops
    of stacks ``a`` and ``b`` without the running sum exceeding ``x``.

    ``a`` and ``b`` are given bottom-to-top (the top is the last element)
    with ``n = len(a)`` and ``m = len(b)``; both lists are consumed
    destructively.  Returns the best count.

    BUG FIX: removed the stray debug ``print`` calls - they pollute stdout,
    which is the judged output of this program (main() prints only the
    result), and slow the hot loop down.
    """
    total = 0
    taken_from_a = []
    # Greedily take from stack a while the running sum stays within x.
    for _ in range(n):
        val = a.pop()
        if total + val > x:
            break
        total += val
        taken_from_a.append(val)
    max_count = len(taken_from_a)
    cur_count = max_count
    # Trade phase: take from b whenever it fits, otherwise give back the
    # most recently taken element of a; track the best count seen.
    # (m mirrors len(b), so b[-1] is safe while m > 0.)
    while m:
        if total + b[-1] <= x:
            total += b.pop()
            m -= 1
            cur_count += 1
            if cur_count > max_count:
                max_count = cur_count
            continue
        if not taken_from_a:
            break
        total -= taken_from_a.pop()
        cur_count -= 1
    return max_count
# def twoStacks(x, a, b):
# #
# # Write your code here.
# #
# subtotal = 0
# cnt = 0
# while subtotal < x:
# if not a or not b:
# if not a:
# subtotal += b.pop()
# else:
# subtotal += a.pop()
# else:
# if a[0] < b[0]:
# t = a.pop(0)
# subtotal += t
# print("a pop : {}".format(t))
# else:
# t = b.pop(0)
# subtotal += t
# print("b pop : {}".format(t))
# cnt += 1
#
# if subtotal > x:
# cnt -= 1
# # print("a : {}".format(a))
# # print("b : {}".format(b))
# print("subtotal : {}".format(subtotal))
#
# return cnt
if __name__ == '__main__':
    # fptr = open(os.environ['OUTPUT_PATH'], 'w')
    g = int(input())  # number of games
    for g_itr in range(g):
        nmx = input().split()
        n = int(nmx[0])  # size of stack a
        m = int(nmx[1])  # size of stack b
        x = int(nmx[2])  # maximum allowed running sum
        # The stacks are stored reversed so that .pop() removes the top.
        a = list(map(int, reversed(input().strip().split(' '))))
        b = list(map(int, reversed(input().strip().split(' '))))
        result = twoStacks(x, a, b, n, m)
        print(result)
        # fptr.write(str(result) + '\n')
#
# fptr.close()
|
#bubble sort, most basic sorting algorithm,
#generate sorted list for search functions to perform better.
#the function require a list input
#and will return the sorted list
#and how many times have the algorithm go through the list.
import random
def bubble_sort(my_list):
    """Sort ``my_list`` in place (ascending) using bubble sort.

    Returns a tuple ``(my_list, counter)`` where ``counter`` is the number
    of passes made over the list (always at least 1, even for empty input).

    Clean-ups vs. the original: the dead ``position = 0`` initialization is
    removed (``position`` is only ever bound by the for loop) and the vague
    ``status`` flag is renamed ``swapped``.
    """
    swapped = True
    counter = 0
    while swapped:
        swapped = False
        # One pass: bubble the largest remaining element to the end.
        for position in range(len(my_list) - 1):
            if my_list[position] > my_list[position + 1]:
                swapped = True
                my_list[position], my_list[position + 1] = my_list[position + 1], my_list[position]
        counter += 1
    return my_list, counter  # tuple created automatically
# Below is the testing case: sort 20 random ints and report the pass count.
if __name__ == "__main__":
    number_list = list()
    i = 0  # NOTE(review): redundant - the for loop rebinds i
    random.seed()
    for i in range(20):
        number_list.append(random.randint(1, 20))
    print("This is the original list:\n", number_list)
    sorted_number_list, counter = bubble_sort(number_list)  # automatic tuple unpacking
    print("After {} times go through the list, This is the sorted list:\n".format(counter), sorted_number_list)
|
import array
import docker
import fcntl
import os
import pty
import re
import sys
import select
import tempfile
import termios
import tty
from pathlib import Path
from ronto import \
dryrun, \
is_command_available_or_exit, \
is_in_docker, \
run_cmd, \
verbose
from . import get_value, get_value_with_default, exists
# Host-side locations used when wiring up container volume mounts.
PROJECT_DIR_HOST = os.getcwd()
SSH_HOST = os.path.join(str(Path.home()), ".ssh")


class DockerConfig:
    """Docker-related settings read from the project configuration.

    Class attributes hold the defaults; when a ``docker`` section exists in
    the configuration, ``__init__`` overrides them with instance attributes.

    Fix vs. the original: these are ordinary instance methods, but their
    first parameter was misleadingly named ``cls``; it is now ``self``.
    """

    _use_docker = False
    image = "almedso/yocto-bitbaker:latest"
    privatized_image = "my-yocto-bitbaker"
    project_dir_container = "/yocto/root"
    cache_dir_container = "/yocto/cache"
    cache_dir_host = os.path.abspath(os.path.join(PROJECT_DIR_HOST, "..", "cache"))
    publish_dir_container = "/yocto/publish"
    publish_dir_host = ""

    def __init__(self):
        # Only override the defaults when a "docker" section is configured.
        if exists(["docker"]):
            verbose("Docker configuration found")
            self._use_docker = True
            self.image = get_value_with_default(
                ["docker", "image"], "almedso/yocto-bitbaker:latest")
            self.privatized_image = get_value_with_default(
                ["docker", "privatized_image"], "my-yocto-bitbaker")
            self.project_dir_container = get_value_with_default(
                ["docker", "project_dir"], "/yocto/root")
            self.cache_dir_host = get_value_with_default(
                ["docker", "cache_dir", "host"],
                os.path.abspath(os.path.join(PROJECT_DIR_HOST, '..', 'cache')))
            self.cache_dir_container = get_value_with_default(
                ["docker", "cache_dir", "container"], "/yocto/cache")
            self.publish_dir_host = get_value_with_default(
                ["docker", "publish_dir", "host"], "")
            self.publish_dir_container = get_value_with_default(
                ["docker", "publish_dir", "container"], "/yocto/publish")

    def get_image(self):
        """Return the (possibly configured) base yocto-bitbaker image."""
        return self.image

    def get_privatized_image(self):
        """Return the name of the user-privatized image."""
        return self.privatized_image

    def use_docker(self):
        """True when a ``docker`` section was found in the configuration."""
        return self._use_docker
def create_dir_and_dockerfile(
    yocto_bitbaker_image="almedso/yocto-bitbaker:latest", yocto_user_home="/home/yocto"
):
    """
    create a temporary directory and add a Dockerfile
    to create a privatized container

    The Dockerfile derives from the given base image and creates a 'yocto'
    user mirroring the calling user's uid/gid, so files written to mounted
    volumes stay owned by the host user.  Returns the temp directory path;
    the caller is responsible for removing the file and the directory.
    """
    uid = os.getuid()
    gid = os.getgid()
    verbose(f"Inject uid {uid} and gid {gid}")
    dockerfile = f"""
FROM {yocto_bitbaker_image}
RUN pip3 install --upgrade ronto
RUN groupadd --gid {gid} yocto || true && \
useradd --uid {uid} --gid {gid} --home {yocto_user_home} \
--create-home --shell /bin/bash yocto
USER yocto
"""
    dir = tempfile.mkdtemp()
    filename = os.path.join(dir, "Dockerfile")
    with open(filename, "w") as f:
        f.write(dockerfile)
    return dir
def abs_path(path):
    """Return *path* as an absolute, normalized path.

    Relative paths are resolved against the current working directory.

    Fix vs. the original: ``os.path.abspath`` already joins relative paths
    with ``os.getcwd()`` and normalizes, so the manual ``path[0] != '/'``
    check and join were redundant - and raised IndexError on the empty
    string.
    """
    return os.path.abspath(path)
class DockerHost:
    """
    Docker access layer - only used when this runs on the docker host
    (not inside a container).

    As a constraint, the container name is derived from the privatized
    image name plus the basename of the project directory.
    """

    def __init__(self, config):
        """``config`` is a DockerConfig instance."""
        # skip totally if repo is not set.
        is_command_available_or_exit(["docker", "--version"])
        self.config = config
        self.docker = docker.from_env()  # create a docker client
        self.yocto_user_home = "/home/yocto"  # needed for consistency reasons
        # container name must be suffixed by something that identifies which
        # project root is mounted in - we use the "short" project directory
        self.container_name = \
            config.privatized_image + '-' + os.path.basename(os.getcwd())

    def build_privatized_docker_image(self):
        """Build the privatized image unless it already exists locally."""
        if dryrun():
            print(f"dry: Build or get privatized docker image: " \
                  f"{self.config.privatized_image}")
        else:
            try:
                image_label = self.config.privatized_image + ":latest"
                image = self.docker.images.get(image_label)
                verbose(f"Privatized image {image_label} exists" \
                        " - no need to build")
            except docker.errors.ImageNotFound as _err:
                self._build_privatized_docker_image()

    def _build_privatized_docker_image(self):
        """Write a temporary Dockerfile and ``docker build`` the image."""
        verbose("Build privatized docker image")
        privatized_docker_image = self.config.get_privatized_image()
        yocto_docker_image = self.config.get_image()
        dir = create_dir_and_dockerfile(yocto_docker_image, self.yocto_user_home)
        if dryrun():
            with open(os.path.join(dir, "Dockerfile"), "r") as f:
                print("dry: privatizing Dockerfile" + f.read())
        run_cmd(["docker", "build", "-t", privatized_docker_image, dir])
        os.remove(os.path.join(dir, "Dockerfile"))  # cleanup Dockerfile
        os.rmdir(dir)  # cleanup temporary directory

    def create_container(self):
        """Create (or reuse) the project container with its volume mounts."""
        containers = self.docker.containers.list(
            all=True, filters={"name": self.container_name}
        )
        if len(containers) == 1:
            verbose(f"Container already exists, reusing ...")
            self.container = containers[0]
            return
        verbose("Create docker container")
        if dryrun():
            print(f"dry: Build container: {self.container_name}")
        else:
            # project root and cache dir are always mounted read/write
            volumes = {
                os.getcwd(): {
                    "mode": "rw",
                    "bind": self.config.project_dir_container,
                },
                abs_path(self.config.cache_dir_host): {
                    "mode": "rw",
                    "bind": self.config.cache_dir_container,
                },
            }
            local_ssh_dir = os.path.join(Path.home(), ".ssh")
            if os.path.isdir(local_ssh_dir):
                # only if host (local) user has ssh configured inject
                volumes[local_ssh_dir] = {
                    "mode": "ro",
                    "bind": os.path.join(self.yocto_user_home, ".ssh"),
                }
            if self.config.publish_dir_host != "" and os.path.isdir(
                self.config.publish_dir_host
            ):
                # only if host (local) is configured and exists configure
                volumes[abs_path(self.config.publish_dir_host)] = {
                    "mode": "rw",
                    "bind": self.config.publish_dir_container,
                }
            self.container = self.docker.containers.create(
                detach=True,
                # Inject an infinite loop command so the container stays up
                # and commands can be exec'd into it later.
                command="bash -c 'while true; do sleep 1; done'",
                user=os.getuid(),
                read_only=False,  # otherwise /tmp is not writeable
                name=self.container_name,
                image=self.config.privatized_image,
                volumes=volumes,
            )
            verbose(f"Docker container created: {self.container}")

    def start_container(self):
        """Start the container when it is not already running."""
        if dryrun():
            print(f"dry: Start container: {self.container_name}")
        else:
            verbose(f"Docker container status: {self.container.status}")
            if self.container.status != "running":
                verbose(f"Start docker container")
                self.container.start()
                # wait until container is running

    def run_command(self, command, interactive_flag=False):
        """Run ``command`` (str or list) inside the container, dispatching
        to interactive or batch execution."""
        # cleanup the ronto command path
        if isinstance(command, list):
            if 'ronto' in command[0]:
                command[0] = 'ronto'  # get rid of host local path
        if isinstance(command, str):
            p = re.compile('^([\w/]*ronto)')
            command = p.sub('ronto', command)
        if interactive_flag \
                or '-i' in command \
                or '--interactive' in command:  # `in` works on lists and on strings
            self.run_interactive_build_command(command)
        else:
            self.run_batch_command(command)
        if isinstance(command, list):
            command = ' '.join(command)
        verbose(f"Docker command '{command}' finished - returned to host")

    def run_batch_command(self, command):
        """Run ``command`` non-interactively, streaming its stdout/stderr to
        the host prefixed with '... ' / '+++ ' respectively."""
        if isinstance(command, list):
            cmd_fmt = ' '.join(command)
        else:
            cmd_fmt = command
        verbose(f"Docker host - run batch command '{cmd_fmt}'")
        if dryrun():
            print(f"dry: (batch-in-container) {cmd_fmt}")
        else:
            socket = self.container.exec_run(cmd=command, stream=True,
                demux=True, workdir=self.config.project_dir_container)
            for (stdout, stderr) in socket.output:
                if stdout:
                    sys.stdout.buffer.write(b'... ')
                    sys.stdout.buffer.write(stdout)
                if stderr:
                    sys.stderr.buffer.write(b'+++ ')
                    sys.stderr.buffer.write(stderr)

    def run_interactive_build_command(self, command):
        """Run ``command`` with a TTY, proxying raw bytes between the local
        stdio and the exec socket via select().

        NOTE(review): relies on docker-py's private ``output._sock``
        attribute - may break across docker-py upgrades; confirm on the
        pinned version.
        """
        verbose(f"Docker host - run interactive command '{command}'")
        if dryrun():
            print(f"dry: (interactive-in-container) {command}")
        else:
            try:
                socket = self.container.exec_run(cmd=command, tty=True,
                    stdin=True, socket=True, demux=False,
                    workdir=self.config.project_dir_container)
                socket.output._sock.send(b'export PSADD="(c*r)"\n')
                while True:
                    r, w, e = select.select([sys.stdin, socket.output._sock],
                        [], [sys.stdin, socket.output._sock])
                    if sys.stdin in r:
                        d = os.read(sys.stdin.fileno(), 10240)
                        socket.output._sock.send(d)
                    elif socket.output._sock in r:
                        data = socket.output._sock.recv(16384)
                        os.write(sys.stdout.fileno(), data)
                    if sys.stdin in e or socket.output._sock in e:
                        break  # leave the loop
            except Exception as err:
                verbose(f"Exception: {err}")

    def stop_container(self):
        """Stop the project container."""
        if dryrun():
            print(f"dry: Stop container: {self.container_name}")
        else:
            verbose(f"Docker stop container")
            self.container.stop()

    def remove_container(self):
        """Remove the project container (forced)."""
        if dryrun():
            print(f"dry: Remove container: {self.container_name}")
        else:
            self.container.remove(force=True)  # force for just to be sure

    def remove_privatized_image(self):
        """Remove the container plus the privatized image."""
        self.remove_container()
        run_cmd(["docker", "rmi", self.config.get_privatized_image()])

    def remove_all(self):
        """Remove the container plus the base image."""
        # do not use this often since it requires download of more than 1 GByte
        self.remove_container()
        run_cmd(["docker", "rmi", self.config.get_image()])
def docker_factory():
    """Return a DockerHost when docker is configured and we are on the
    host; return None when docker is unconfigured or we already run
    inside a container."""
    configuration = DockerConfig()
    if not configuration.use_docker():
        verbose(f"Docker context not configured")
        return None
    verbose(f"Docker context configured")
    if is_in_docker():
        verbose(f"Run inside the container")
        return None
    return DockerHost(configuration)
# a decorator that helps to run docker
def docker_context():
    """Decorator factory: re-run the current command line inside the
    docker container when one is configured, otherwise call the wrapped
    function directly."""
    def decorator(function):
        def wrapper(*args, **kwargs):
            verbose(f"Docker decorator - started")
            host = docker_factory()
            if host is None:
                verbose("Do not run on docker")
                result = function(*args, **kwargs)
            else:
                verbose("Invoke on docker context ...")
                host.build_privatized_docker_image()
                host.create_container()
                host.start_container()
                # replay the full command line inside the container
                result = host.run_command(sys.argv)
                host.stop_container()
            verbose(f"Docker decorator - done")
            return result
        return wrapper
    return decorator
|
from django.urls import path, include, re_path
from . import views

# URL routes for the "ann" application.
# NOTE(review): ``include`` and ``re_path`` are imported but unused here.
app_name = 'ann'
urlpatterns = [
    # network list / detail / testing / training-record views
    path('', views.AnnView.as_view(), name='index'),
    path('<int:id>', views.AnnDetailView.as_view(), name='detail'),
    path('<int:id>/test/<int:training>', views.TestView.as_view(), name='test'),
    path('<int:id>/record', views.RecordTrainingList.as_view(), name='record'),
    # dataset browsing and review endpoints
    path('dataset/', views.DatasetView.as_view(), name='dataset'),
    path('dataset/<int:id>', views.DatasetDetailView.as_view(), name='datasetdetail'),
    path('dataset/<int:id>/review', views.DatasetReviewView.as_view(), name='datasetreview'),
    path('dataset/<int:id>/data', views.DatasetReviewDataView.as_view(), name='datasetreviewdata'),
]
# 1. Strings: sequence operations, other string methods, pattern matching
# Strip whitespace and special characters:
# lstrip: remove leading whitespace
# rstrip: remove trailing whitespace
# strip: remove whitespace at both ends
s = " changjie l "
print(s.lstrip())
print(s.rstrip())
print(s.strip())
# Copy a string (rebinding str1 afterwards does not affect str2)
str1 = 'occupation'
str2 = str1
str1 = 'occupation2'
print(str1, str2)
# Concatenate strings
str1 = 'my'
str2 = 'job'
str1 += str2
print(str1)
# Find a character (NOTE(review): unlike the original comment suggests,
# index() raises ValueError when absent rather than returning < 0)
str1 = 'meaning'
str2 = 'n'
n = str1.index(str2)  # if the character occurs several times, the first occurrence's index is returned
print(n)
# Compare strings
# Python 3 removed the cmp() function; use the operator module instead,
# which works for arbitrary objects:
# operator.lt(a, b)  equivalent to a < b
# operator.le(a, b)  equivalent to a <= b
# operator.eq(a, b)  equivalent to a == b
# operator.ne(a, b)  equivalent to a != b
# operator.ge(a, b)  equivalent to a >= b
# operator.gt(a, b)  equivalent to a > b
import operator
str1 = 'player'
str2 = 'play'
print(operator.gt(str1, str2))
# String length
str1 = 'chang'
print(len(str1))
# "Scan" whether a string contains the given characters
myStr1 = 'professional'
myStr2 = 'fess'
print(len(myStr1 and myStr2))  # `myStr1 and myStr2` yields myStr2 when myStr1 is truthy, so this is len(myStr2)
# Convert between upper and lower case
sStr1 = 'upStAir'
# sStr1 = sStr1.upper()  # to upper case
# sStr1 = sStr1.lower()  # to lower case
# sStr1 = sStr1.swapcase()  # swap case
sStr1 = sStr1.capitalize()  # capitalize the first letter
print(sStr1)
# Append a fixed-length prefix of another string
sStr1 = 'drink'
sStr2 = 'water234'
sStr1 += sStr2[0:5]
print(sStr1)
# Compare fixed-length prefixes of two strings
sStr1 = 'drink'
sStr2 = 'drink234'
n = 5
print(operator.eq(sStr1[0:n], sStr2[0:n]))
# Copy the first n characters
sStr1 = ''
sStr2 = 'drink234'
n = 4
sStr1 = sStr2[0:n]
print(sStr1)
# Replace the first n characters of a string with a given character
sStr1 = 'grape'
ch = 'c'
n = 3
sStr1 = n*ch + sStr1[n:]
print(sStr1)
# Scan: index in sStr1 of the first character of sStr2 that occurs there
# (-1 when none does)
sStr1 = 'abcdrgjh'
sStr2 = 'caj'
n = -1
for c in sStr2:
    if c in sStr1:
        n = sStr1.index(c)
        break
print(n)
# Reverse a string
sStr1 = 'chang'
sStr1 = sStr1[::-1]
print(sStr1)
# Find a substring
sStr1 = 'changjie'
sStr2 = 'jie'
print(sStr1.find(sStr2))
# Split a string (drop everything up to and including the first comma)
sStr1 = 'a,cd,erf,w,qe'
sStr2 = ','
sStr1 = sStr1[sStr1.find(sStr2)+1:]
print(sStr1)
# or
s = 'ab,cde,fgh,ijk'
print(s.split(' '))  # str -> list, split on spaces (NOTE(review): this string has no spaces, so nothing splits)
# Join strings
sStr1 = ' '
sStr2 = ['banana', 'apple', 'pear', 'grape']
print(sStr1.join(sStr2))  # list -> str, joined with spaces
# S.find(substring, [start [,end]])   # ranged search for a substring; returns the index or -1
# S.rfind(substring, [start [,end]])  # index of the first letter of the LAST occurrence of substring in S, or -1
# S.index(substring, [start [,end]])  # like find, but raises ValueError when absent
# S.rindex(substring, [start [,end]]) # as above, searching from the right
# S.count(substring, [start [,end]])  # number of occurrences found
sStr1 = 'ab,cde,fgh,ijk'
sStr2 = 'qw'
print(sStr1.find(sStr2, 0, 10))
print(sStr1.rfind(sStr2, 10, 0))  # NOTE(review): start > end always yields -1
# print(sStr1.index(sStr2,0,10))
# print(sStr1.rindex(sStr2))
# print(sStr1.count(sStr2))
# S.replace(oldstr, newstr, [count])
# Replace oldstr with newstr in S, at most count times; this is the general
# form - dedicated methods exist for special-character replacements.
sStr1 = 'abcdefg'
sStr2 = 'w'
print(sStr1.replace('d', sStr2, 1))
# Convert a string to int / float
sStr1 = '11'
print(int(sStr1))
print(float(sStr1))
# Import the regular-expression package
import re
# findall(rule, target [,flag]) finds all matching substrings in the target
sStr1 = '123abcd789jkabhg'
print(re.findall(r'ab', sStr1))
# Basic regular-expression rules (reference notes, kept verbatim)
'''
‘[‘ ‘]’ 字符集合设定符
[a-zA-Z]来指定所以英文字母的大小写
[^a-zA-Z]表明不匹配所有英文字母
如果 ‘^’不在开头,则它就不再是表示取非,而表示其本身,如[a-z^A-Z]表明匹配所有的英文字母和字符’^’。
‘|’ 或规则: 只要满足其中之一就可以匹配。比如:[a-zA-Z]|[0-9] 表示满足数字或字母就可以匹配,这个规则等价于 [a-zA-Z0-9]
‘‘/d’ 匹配数字
/D’ 匹配非数字 ‘
/w’ 匹配字母和数字
‘/W’ 匹配非英文字母和数字
'''
'''
re包中常用方法
search: 搜索整个字符串,直到发现符合的子字符串。
match:从头开始检查字符串是否符合正则表达式。必须从字符串的第一个字符开始就相符。
sub: 在string中利用正则变换pattern进行搜索,对于搜索到的字符串,用另一字符串replacement替换。返回替换后的字符串。
split: 根据正则表达式分割字符串, 将分割后的所有子字符串放在一个表(list)中返回
findall: 根据正则表达式搜索字符串,将所有符合的子字符串放在一给表(list)中返回
compile: 可以把正则表达式编译成一个正则表达式对象。
'''
'''
匹配中文字符的正则表达式: [/u4e00-/u9fa5]
匹配双字节字符(包括汉字在内):[^/x00-/xff]
匹配空行的正则表达式:/n[/s| ]*/r
匹配HTML标记的正则表达式:/<(.*)>.*<///1>|<(.*) //>/
匹配首尾空格的正则表达式:(^/s*)|(/s*$)
匹配Email地址的正则表达式:/w+([-+.]/w+)*@/w+([-.]/w+)*/./w+([-.]/w+)*
匹配网址URL的正则表达式:^[a-zA-z]+://(//w+(-//w+)*)(//.(//w+(-//w+)*))*(//?//S*)?$
匹配帐号是否合法(字母开头,允许5-16字节,允许字母数字下划线):^[a-zA-Z][a-zA-Z0-9_]{4,15}$
匹配国内电话号码:(/d{3}-|/d{4}-)?(/d{8}|/d{7})?
'''
sStr1 = 'I have a dog,I have a cat'
print(re.findall('I have a (?:dog|cat)', sStr1))
print(re.findall( r'I have a dog|cat', sStr1))  # NOTE(review): '|' alternates whole patterns, so this matches 'I have a dog' or just 'cat'
# '.' matches any character except the newline '\n'
s = '123 /n456 /n789'
print(re.findall('([0-9]*\d+)', s))
print(re.findall('.+', s))
# '\d' matches digits
sStr1 = '123adsdfk'
ret = re.findall('\d', sStr1)
print(str(ret))
# re.search scans the whole string and returns the first successful match
sStr1 = 'changjiehelo'
sStr2 = 'he'
print(re.search(sStr2, sStr1))
phone = "2004-959-559 # 这是一个国外电话号码"
# Strip the Python-style comment from the string
num = re.sub(r'#.*', "", phone)
print("电话号码是: ", num)
# Remove every non-digit (non '-') character
num = re.sub(r'\D', "", phone)
print("电话号码是 : ", num)
# Match all the integers
digit = '31,-3,gh,-2.5,7,asdf'
# ret = re.findall(r"'(-*\d+)'",str(re.split(",", digit)))
# or
ret = re.findall(r"'(-?\d+)'", str(re.split(",", digit)))
print(ret)
# Match every e-mail address in a piece of text
y = '123@qq.comaaa@163.combbb@126.comasdfasfs33333@adfcom'
ret = re.findall(r'\w+@(?:qq|163|126).com', y)  # NOTE(review): the '.' before 'com' is unescaped and matches any character
print(ret)
# Match date strings such as '1990-07-12' in a piece of text
time = 'asfasf1999-07-22asdfAAAbbbb434241'
# ret = re.search(r'(?P<year>19[09]\d)',time)
ret = re.search(r'(?P<year>19[09]\d)-(?P<month>\d+)-(?P<day>\d+)', time)  # NOTE(review): [09] only matches '0' or '9' (190x/199x years)
# findall returns tuples
# ret = re.findall(r'(?P<year>19[09]\d)-(?P<month>\d+)-(?P<day>\d+)',time)
# print(ret)
print(ret.group('year'))
print(ret.group('month'))
print(ret.group('day'))
# Match runs of digits in a piece of text
card = 'sfafsf,3423423,1231313132'
ret = re.findall("\d{7}", card)
print(ret)
# # 2. Lists (basic operations)
# list <-> tuple conversion: tuple(ls), list(ls)
list1 = [1, 2, 3, 4, 5]
print(list1)
print(tuple(list1))
t1 = (1, 2, 3, 4, 5)
print(t1)
print(list(t1))
# Create a list
list1 = ['apple', 'orange', 12, 34, 'watermelon']
list2 = [1, 2, 3, 4, 5]
# Access list values
print("list1[0]: ", list1[0])
print("list2[1:5]: ", list2[1:5])
# Update a value in list1
print(list1[2])
list1[2] = 'pear'
print(list1[2])
# Delete a list element
del list1[2]
print("删除后的列表元素有 : ")
print(list1)
# Create consecutive "lists"
list1 = range(1, 5)  # i.e. 1,2,3,4 - the end value is excluded (NOTE(review): in Python 3 this is a range object, not a list)
print(list1)
list2 = range(1, 10, 2)  # i.e. 1, 3, 5, 7, 9
print(list2)
# Insert an object into a list
list1 = ['a', 'cf', 'gf', 2, 5, 7]
list1.insert(0, 'hello')
print(list1)
# Reverse the elements of a list in place
list1.reverse()
print(list1)
# Iterate over a list
L = ['a', 'cf', 'gf', 2, 5, 7]
# Sort lists in place
list1 = ['3', '7', '23', '1', '21', '36', '18']
list2 = [3, 7, 23, 1, 21, 36, 18]
list1.sort()  # NOTE(review): strings sort lexicographically, so '23' sorts before '3'
list2.sort()
print("排序后的列表为:", list1)
print("排序后的列表为:", list2)
for a in L:
    # print each value in the list
    print(a)
# Iterate over a list with while
count = 0
while count < len(L):
    print(L[count])
    count += 1
# 3. Print the 9x9 multiplication table, then the 26 letters with ASCII codes
for i in range(1, 10):
    for j in range(1, i+1):
        # With %-formatting the values after % fill the placeholders;
        # end= appends the given string instead of a newline.
        # print("%d*%d=%d"%(j,i,j*i),end="\t")
        print(j, "*", i, "=", j*i, end="\t")
    print(" ")
# The same table built with while loops
i = 0
j = 0
while i < 9:
    i = i+1
    while j < 9:
        j = j+1
        print("%d*%d=%d" % (j, i, j * i), end="\t")
        if i == j:
            j = 0
            print("")
            break
# Print the 26 letters together with their ASCII codes
import string
# chr(a) converts the code a to the corresponding letter
for a in range(97, 123):
    print(chr(a) + "\t" + str(a))
# Lowercase letters and their ASCII codes
for word in string.ascii_lowercase:
    # print here concatenates strings, so the number must go through str()
    print(word + "\t" + str(ord(word)))
# Uppercase letters and their ASCII codes
for word in string.ascii_uppercase:
    # print here concatenates strings, so the number must go through str()
    print(word + "\t" + str(ord(word)))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the CSV output files (found / not-found records) to Document."""

    dependencies = [
        ('scrub_csv', '0006_auto_20150416_2317'),
    ]

    operations = [
        # default='' with preserve_default=False backfills existing rows
        # once without keeping '' as the model default.
        migrations.AddField(
            model_name='document',
            name='found_file',
            field=models.FileField(default='', upload_to=b'uploads/%Y/%m/%d', verbose_name=b'found records'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='document',
            name='not_found_file',
            field=models.FileField(default='', upload_to=b'uploads/%Y/%m/%d', verbose_name=b'not found records'),
            preserve_default=False,
        ),
    ]
|
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from TestUtils import Workspace
from DynamicSchedulerGeneric import Utils as DynSchedUtils
from DynamicSchedulerGeneric import Analyzer
class AnalyzerTestCase(unittest.TestCase):
    """Tests for Analyzer.analyze, driven by a generated LRMS stub script.

    NOTE(review): this module uses Python 2 syntax (``except X, e``) and
    cannot run under Python 3 unchanged.
    """

    def setUp(self):
        # map local account names onto their VO
        self.vomap = {"atlasprod": "atlas",
                      "atlassgm": "atlas",
                      "dteamgold": "dteam",
                      "dteamsilver": "dteam",
                      "dteambronze": "dteam",
                      "infngridlow": "infngrid",
                      "infngridmedium": "infngrid",
                      "infngridhigh": "infngrid"}
        # Shell-script template used as a fake LRMS command: a printf of
        # the global counters followed by one JSON dict per job.
        self.headerfmt = "#!/bin/bash\n\n"
        self.headerfmt += "printf 'nactive %d\n"
        self.headerfmt += "nfree %d\n"
        self.headerfmt += "now %d\n"
        self.headerfmt += "schedCycle %d\n\n"
        self.footerfmt = "'\n\nexit 0"
        self.dictfmt = '{"group": "%s", "queue": "%s", "state": "%s", "qtime": %d, "name": "%s"}\n'

    def tearDown(self):
        pass

    def test_analyze_ok(self):
        """analyze() must aggregate running/queued job counts per VO."""
        # (group, queue, state, qtime, job name)
        jTable = [
            ("atlasprod", "creamtest1", 'running', 1327564866, "creXX_23081970"),
            ("atlasprod", 'creamtest2', 'queued', 1327565866, "creXX_23081971"),
            ("dteamgold", 'creamtest2', 'running', 1327566866, "creXX_23081972"),
            ("dteamgold", "creamtest1", 'running', 1327567866, "creXX_23081973"),
            ("dteamgold", 'creamtest2', 'queued', 1327568866, "creXX_23081974"),
            ("infngridlow", 'creamtest1', 'running', 1327569866, "creXX_23081975"),
            ("infngridlow", 'creamtest2', 'running', 1327570866, "creXX_23081976"),
            ("infngridhigh", 'creamtest1', 'running', 1327571866, "creXX_23081977"),
            ("infngridhigh", 'creamtest2', 'running', 1327572866, "creXX_23081978"),
            ("infngridhigh", 'creamtest1', 'queued', 1327573866, "creXX_23081979")
        ]
        workspace = Workspace(vomap = self.vomap)
        script = self.headerfmt % (5, 0, 1327574866, 26)
        for jItem in jTable:
            script += self.dictfmt % jItem
        script += self.footerfmt
        workspace.setLRMSCmd(script)
        cfgfile = workspace.getConfigurationFile()
        config = DynSchedUtils.readConfigurationFromFile(cfgfile)
        collector = Analyzer.analyze(config, {})
        result = collector.runningJobsForVO('atlas') == 1
        result = result and collector.queuedJobsForVO('atlas') == 1
        result = result and collector.runningJobsForVO('dteam') == 2
        result = result and collector.queuedJobsForVO('dteam') == 1
        result = result and collector.runningJobsForVO('infngrid') == 4
        result = result and collector.queuedJobsForVO('infngrid') == 1
        self.assertTrue(result)

    def test_analyze_err_from_script(self):
        """A failing LRMS script must surface its stderr as AnalyzeException."""
        try:
            workspace = Workspace(vomap = self.vomap)
            script = """#!/usr/bin/python
import sys
sys.stderr.write("Dummy error message")
sys.exit(1)
"""
            workspace.setLRMSCmd(script)
            cfgfile = workspace.getConfigurationFile()
            config = DynSchedUtils.readConfigurationFromFile(cfgfile)
            collector = Analyzer.analyze(config, {})
            self.fail("Exception not handled")
        except Analyzer.AnalyzeException, test_error:
            msg = str(test_error)
            self.assertTrue(msg.startswith("Dummy error message"))

    def test_analyze_with_maxjobforvo(self):
        """freeSlots must honor the per-VO max-job table."""
        jTable = [
            ("atlasprod", "creamtest1", 'running', 1327564866, "creXX_23081970"),
            ("dteamgold", 'creamtest2', 'running', 1327566866, "creXX_23081972"),
            ("dteamgold", "creamtest1", 'running', 1327567866, "creXX_23081973"),
            ("infngridlow", 'creamtest1', 'running', 1327569866, "creXX_23081975"),
            ("infngridlow", 'creamtest2', 'running', 1327570866, "creXX_23081976"),
            ("infngridhigh", 'creamtest2', 'running', 1327572866, "creXX_23081978")
        ]
        workspace = Workspace(vomap = self.vomap)
        script = self.headerfmt % (10, 4, 1327574866, 26)
        for jItem in jTable:
            script += self.dictfmt % jItem
        script += self.footerfmt
        mJobTable = {'dteam': 5, 'atlas': 5, 'infngrid':5}
        workspace.setLRMSCmd(script)
        workspace.setMaxJobCmd(mJobTable)
        cfgfile = workspace.getConfigurationFile()
        config = DynSchedUtils.readConfigurationFromFile(cfgfile)
        collector = Analyzer.analyze(config, mJobTable)
        # dteam runs 2 of its allowed 5 jobs -> 3 free slots
        self.assertTrue(collector.freeSlots(None, 'dteam') == 3)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
from end_to_end import end_to_end
def main():
    """Run one end_to_end simulation and sanity-check the death series."""
    # Scenario parameters fed to the simulation.
    num_released = 500
    infected_before_release = 200
    halt_inflow = True
    print('Running simulation ...')
    t, S, I, R, D = end_to_end(num_released, infected_before_release, halt_inflow)
    print('Simulation complete.')
    print('Commencing tests ...')
    test_death_rate_is_strictly_increasing(D)
    print('Testing completed.')
    print('Program ending.')
def test_death_rate_is_strictly_increasing(D):
print('test_death_rate_is_strictly_increasing:', end=' ')
for i in range(len(D) - 1):
if D[i] > D[i + 1]:
print('Failed')
return
print('Passed')
# Script entry point.
if __name__ == "__main__":
    main()
|
# Package initializer for the flare.polgrad subpackage.
name = "polgrad"
# Public API exported by `from flare.polgrad import *`.
__all__ = ["BasePolicyGradient", "a2c", "ppo", "reinforce"]
from flare.polgrad.base import BasePolicyGradient
from flare.polgrad import a2c, ppo, reinforce
|
from django.apps import AppConfig
class GooglemapsSocketsAppConfig(AppConfig):
    """Django application configuration for the googlemaps_sockets_app package."""
    name = 'googlemaps_sockets_app'
|
# Tiny string-method demo: greet `name`, then print its case variants.
name = " moona"
print("hi", name)
for variant in (name.lower(), name.upper(), name):
    print(variant)
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""The step definitions for workflow."""
from __future__ import absolute_import
from typing import List
import attr
from sagemaker.workflow.entities import Expression
@attr.s
class Join(Expression):
    """Join together properties.

    Attributes:
        on (str): The string to join the values on (Defaults to "").
        values (List[Union[PrimitiveType, Parameter]]): The primitive types
            and parameters to join.
    """
    # Attribute order matches attr.ib declaration order below; the old
    # docstring referred to a non-existent "on_str" attribute.
    on: str = attr.ib(factory=str)
    values: List = attr.ib(factory=list)
    @property
    def expr(self):
        """The expression dict for a `Join` function."""
        return {
            "Std:Join": {
                "On": self.on,
                "Values": [
                    value.expr if hasattr(value, "expr") else value for value in self.values
                ],
            },
        }
|
import json
from datetime import datetime
from django.db import connection
from django.db.models import Sum, Count
from operator import itemgetter
from djofx import models
def get_training_data(owner):
    """Map each verified payee to its transaction category for `owner`."""
    from djofx import models
    verified = models.Transaction.objects.filter(
        account__owner=owner,
        category_verified=True
    )
    # Avoiding .distinct, because SQLite doesn't support it; the dict keeps
    # one entry per payee (last occurrence wins, same as the original loop).
    return {t.payee: t.transaction_category for t in verified}
def qs_to_monthly_report(qs, type):
    """Aggregate a Transaction queryset into [((year, month), amount), ...].

    `type` is a TransactionCategory category type; OUTGOINGS totals are
    sign-flipped so spending reads as positive.
    NOTE(review): the parameter shadows the builtin `type`; kept for
    interface stability.  `connection.ops.date_trunc_sql` / `.extra` are
    deprecated in newer Django releases — confirm the project's version.
    """
    from djofx import models
    truncate_date = connection.ops.date_trunc_sql('month', 'date')
    qs = qs.extra({'month': truncate_date})
    # One row per month with the summed amount and a transaction count.
    report = qs.values('month').annotate(
        Sum('amount'),
        Count('pk')
    ).order_by('month')
    def adjust_value(value, type):
        # Outgoing amounts are stored negative; flip for reporting.
        if type == models.TransactionCategory.OUTGOINGS:
            return value * -1
        return value
    report = [
        (
            datetime.strptime(entry['month'], '%Y-%m-%d'),
            adjust_value(float(entry['amount__sum']), type)
        )
        for entry in report
    ]
    report = [((thedate.year, thedate.month), value)
              for thedate, value in report]
    return report
def get_spending_by_category(early_date_limit, late_date_limit):
    """Return outgoing spend per category in the date window, largest first.

    Each entry is (absolute total, category pk, category name).
    Transactions with no category are lumped into a pseudo-category
    'Uncategorised' with pk 0.
    NOTE(review): unlike get_training_data, this does not scope by account
    owner — confirm callers restrict the data appropriately.
    """
    uncategorised_breakdown = models.Transaction.objects.filter(
        amount__lt=0,
        transaction_category__isnull=True,
        date__gte=early_date_limit,
        date__lte=late_date_limit
    ).aggregate(
        total=Sum('amount')
    )
    out = models.TransactionCategory.OUTGOINGS
    breakdown = models.Transaction.objects.filter(
        amount__lt=0,
        transaction_category__category_type=out,
        date__gte=early_date_limit,
        date__lte=late_date_limit
    ).values(
        'transaction_category__pk',
        'transaction_category__name'
    ).annotate(
        total=Sum('amount')
    ).order_by('transaction_category')
    # Outgoing amounts are negative in the DB; take abs() for display.
    breakdown = [
        (
            abs(item['total']),
            item['transaction_category__pk'],
            item['transaction_category__name']
        )
        for item in breakdown
    ]
    if uncategorised_breakdown['total']:
        breakdown.append(
            (
                uncategorised_breakdown['total'] * -1,
                0,
                'Uncategorised'
            )
        )
    # Largest spend first.
    breakdown = sorted(
        breakdown,
        key=itemgetter(0),
        reverse=True
    )
    return breakdown
def spending_by_category_to_flot(breakdown):
    """Serialise (total, pk, name) rows to the JSON shape flot charts expect."""
    flot_points = []
    for item in breakdown:
        # item[2] = category name, item[0] = absolute total.
        flot_points.append({
            'label': item[2],
            'data': float(item[0])
        })
    return json.dumps(flot_points)
|
# 자막이 빠른 경우
import re
def print_int(i):
    """Return `i` as a string, zero-padded on the left to two digits."""
    return str(i) if i > 9 else "0" + str(i)
# Shift every SRT-style timestamp in timing.txt forward by `sec` seconds
# (used when the subtitles appear earlier than the audio).
sec = 10

# Compile once instead of per line, and escape the end-time separator:
# the original pattern used a bare '.' there, which matches ANY character.
timestamp_re = re.compile(
    r"(\d{2}):(\d{2}):(\d{2})[.,](\d{3})\s+-->\s+(\d{2}):(\d{2}):(\d{2})[.,](\d{3})"
)

def _shift_time(h, m, s, add):
    """Add `add` seconds (< 60) to h:m:s, carrying into minutes/hours."""
    s += add
    if s >= 60:
        s -= 60
        m += 1
        if m >= 60:
            m -= 60
            h += 1
    return h, m, s

# Context managers guarantee both files are closed and flushed; the
# original never closed either handle.
with open('timing.txt', 'rt', encoding='UTF-8') as content, \
        open('timing_edited.txt', 'w', encoding='UTF-8') as newContent:
    newLine = ""
    for c in content.readlines():
        if c == "\n":
            newLine += "\n"
        elif len(c) <= 3:
            # Cue-number lines (and other very short lines) pass through.
            newLine += c
            print(c)
        else:
            fa = timestamp_re.search(c)
            if fa is None:
                # Not a timing line: keep unchanged.
                newLine += c
                continue
            a1, a2, a3, a4, a5, a6, a7, a8 = fa.groups()
            h1, m1, s1 = _shift_time(int(a1), int(a2), int(a3), sec)
            h2, m2, s2 = _shift_time(int(a5), int(a6), int(a7), sec)
            # Milliseconds (a4/a8) are untouched; output always uses ','.
            newLine += print_int(h1) + ":" + print_int(m1) + ":" + print_int(s1) + "," + a4
            newLine += " --> "
            newLine += print_int(h2) + ":" + print_int(m2) + ":" + print_int(s2) + "," + a8 + "\n"
    # Write the whole shifted document in one go, as before.
    newContent.write(newLine)
|
def invert(lst):
    """Return a new list with every element of `lst` negated.

    The input list is not modified.
    """
    # Comprehension replaces the manual append loop (same order, same result).
    return [-value for value in lst]
#!/usr/bin/env python3
import sys
def takepizza(pizzas, key):
    """Pop one pizza id for `key`; remove the key once its list is empty."""
    remaining = pizzas[key]
    pizza_id = remaining.pop()
    if not remaining:
        # That was the last pizza with this topping set — drop the entry.
        del pizzas[key]
    return pizza_id
# Read the problem input: first line gives pizza count and team counts,
# then one line per pizza listing its topping count and topping names.
with open(sys.argv[1]) as f:
    line1 = f.readline()
    m, t2, t3, t4 = [int(x) for x in line1.split()]
    pizzas = {} # dict of {ingredients}: [pizza IDs]
    for i in range(m):
        line = f.readline()
        # NOTE(review): sorted() before frozenset() is redundant — a
        # frozenset is unordered regardless.
        pizza = frozenset(sorted(line.split()[1:]))
        pizzas.setdefault(pizza, []).append(i)
# TODO sort pizzas by descending number of toppings
orders = []
# Fill the largest teams first (4, then 3, then 2 members).
order_sizes = [(4, t4), (3, t3), (2, t2)]
for team_size, num_orders in order_sizes:
    # print('Filling {} orders for teams of {} from inventory {}'.format(num_orders, team_size, pizzas))
    for team in range(num_orders):
        if not pizzas:
            # print('No more pizzas :(')
            break
        # Greedily pick topping sets, skipping immediate duplicates only.
        pizza_keys = []
        all_toppings = set()
        last_toppings = set()
        for toppings in pizzas:
            # TODO decide stop or continue
            # num_new_toppings = len(toppings - all_toppings)
            if toppings != last_toppings:
                pizza_keys.append(toppings)
                all_toppings.update(toppings)
                last_toppings = toppings
                if len(pizza_keys) >= team_size:
                    # print(' completed order: {}'.format(pizza_keys))
                    break
        # Only emit complete orders; partial picks are silently dropped.
        if len(pizza_keys) == team_size:
            pizza_ids = [takepizza(pizzas, k) for k in pizza_keys]
            orders.append(' '.join([str(i) for i in [team_size] + pizza_ids]))
# TODO use up any remaining pizzas
print(len(orders))
# NOTE(review): list comprehension used purely for its print side effects.
[print(order) for order in orders]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by
# David Amos <somacdivad@gmail.com>
# Randy Davila <davilar@uhd.edu>
# BSD license.
#
# Authors: David Amos <somacdivad@gmail.com>
# Randy Davila <davilar@uhd.edu>
"""Functions for computing vertex covers and related invariants in a graph."""
from pulp import LpBinary, LpMinimize, LpProblem, LpVariable, lpSum
def min_vertex_cover_ilp(G):
    """Return a smallest vertex cover in the graph G.
    This method uses an ILP to solve for a smallest vertex cover.
    Specifically, the ILP minimizes
    .. math::
        \\sum_{v \\in V} x_v
    subject to
    .. math::
        x_v + x_u \\geq 1 \\mathrm{for all } \\{u, v\\} \\in E
        x_v \\in \\{0, 1\\} \\mathrm{for all } v \\in V
    where *V* and *E* are the vertex and edge sets of *G*.
    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.
    Returns
    -------
    set
        A set of nodes in a smallest vertex cover.
    """
    prob = LpProblem('min_vertex_cover', LpMinimize)
    # One binary indicator per node: 1 iff the node is in the cover.
    variables = {
        node: LpVariable('x{}'.format(i+1), 0, 1, LpBinary)
        for i, node in enumerate(G.nodes())
    }
    # Objective: minimise the number of selected vertices.
    # (The old comment said "total domination number" — a copy/paste slip.)
    prob += lpSum([variables[n] for n in variables])
    # Constraints: every edge needs at least one chosen endpoint.
    for edge in G.edges():
        prob += variables[edge[0]] + variables[edge[1]] >= 1
    prob.solve()
    solution_set = {node for node in variables if variables[node].value() == 1}
    return solution_set
def min_vertex_cover(G, method='ilp'):
    """Return a smallest vertex cover of G.
    A *vertex cover* of a graph *G* is a set of vertices with the
    property that every edge in the graph is incident to at least one
    vertex in the set.
    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.
    method: string
        The method to use for finding a smallest vertex cover.
        Currently, the only option is 'ilp'. Defaults to 'ilp'.
    Returns
    -------
    set
        A set of nodes in a smallest vertex cover.
    """
    # Guard-clause dispatch instead of a lookup table; identical behaviour.
    if method == 'ilp':
        return min_vertex_cover_ilp(G)
    raise ValueError('Invalid `method` argument "{}"'.format(method))
def vertex_cover_number(G):
    """Return the size of a smallest vertex cover in the graph G.
    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.
    Returns
    -------
    number
        The size of a smallest vertex cover of G.
    """
    # NOTE(review): calls the ILP solver directly rather than going through
    # the min_vertex_cover() method dispatcher.
    return len(min_vertex_cover_ilp(G))
# Sample 9x9 sudoku puzzle; 0 marks an empty cell (see find_empty below).
board = [
    [7,8,0,4,0,0,1,2,0],
    [6,0,0,0,7,5,0,0,9],
    [0,0,0,6,0,1,0,7,8],
    [0,0,7,0,4,0,2,6,0],
    [0,0,1,0,5,0,9,3,0],
    [9,0,4,0,6,0,0,0,5],
    [0,7,0,3,0,0,0,1,2],
    [1,2,0,0,0,7,4,0,0],
    [0,4,9,2,0,6,0,0,7]
]
def solver(bo, box_size):
    """
    Solves a sudoku board with backtracking and pretty prints the solved board
    Parameter bo: 2D list of ints
    Parameter box_size: is the smaller grid size of the board.
    EX: traditional boards have nine 3 by 3 boxes so box_size would be the int 3
    """
    # solve() mutates `bo` in place; the result is then pretty-printed.
    solve(bo, box_size)
    print_board(bo, box_size)
def solve(bo, box_size):
    """Solve the sudoku board `bo` in place via backtracking.

    Parameter bo: 2D list of ints (0 = empty cell); mutated in place.
    Parameter box_size: side length of the sub-boxes (3 for a classic 9x9).
    Returns True once the board is fully solved, False if unsolvable.
    """
    slot = find_empty(bo)
    if slot is None:
        # No empty cells left: the board is solved.
        return True
    row, col = slot
    # Try each candidate 1..len(bo); recurse, and undo on dead ends.
    for candidate in range(1, len(bo) + 1):
        if not valid(bo, candidate, (row, col), box_size):
            continue
        bo[row][col] = candidate
        if solve(bo, box_size):
            return True
        bo[row][col] = 0  # backtrack: reset the cell
    return False
def valid(bo, num, pos, box_size):
    """Return True if placing `num` at `pos` breaks no sudoku rule.

    Parameter bo: 2D list of ints
    Parameter num: candidate value just placed at pos
    Parameter pos: (row, col) tuple
    Parameter box_size: side length of the sub-boxes (3 for a classic 9x9).
    The cell at `pos` itself is ignored, since `num` may already sit there.
    """
    row, col = pos
    # Row check: any other column in this row holding num?
    for j, cell in enumerate(bo[row]):
        if cell == num and j != col:
            return False
    # Column check: any other row in this column holding num?
    for i in range(len(bo)):
        if bo[i][col] == num and i != row:
            return False
    # Box check: integer division locates the sub-box, multiplication
    # recovers its top-left corner.
    box_top = (row // box_size) * box_size
    box_left = (col // box_size) * box_size
    for i in range(box_top, box_top + box_size):
        for j in range(box_left, box_left + box_size):
            if bo[i][j] == num and (i, j) != pos:
                return False
    return True
def print_board(bo, box_size):
    """Pretty-print the board with box separators.

    Parameter bo: 2D list of ints
    Parameter box_size: side length of the sub-boxes (3 for a classic 9x9).
    """
    width = len(bo[0])
    for i in range(len(bo)):
        # Horizontal rule between bands of boxes (not above the first row).
        if i != 0 and i % box_size == 0:
            print("- - - - " * box_size)
        pieces = []
        for j in range(width):
            # Vertical separator between box columns (not before the first).
            if j != 0 and j % box_size == 0:
                pieces.append(" | ")
            if j == width - 1:
                pieces.append(str(bo[i][j]))
            else:
                pieces.append(str(bo[i][j]) + " ")
        print("".join(pieces))
def find_empty(bo):
    """Return (row, col) of the first empty (zero) cell, or None if full.

    Parameter bo: 2D list of ints, usually a partially completed board.
    Scans row-major; assumes a square board (uses len(bo) for both axes).
    """
    side = len(bo)
    for r in range(side):
        for c in range(side):
            if bo[r][c] == 0:
                return (r, c)
    return None
## testing board
# print_board(board, 3)
# solve(board,3)
# print("_____________________________")
# print_board(board,3) |
# -*- coding: utf-8 -*-
from util_settings import *
# Local development database: SQLite file resolved via util_settings.LOCAL().
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': LOCAL('db.sqlite'),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SERVE_MEDIA = True
# User-uploaded and static assets, all rooted in the local project dir.
MEDIA_ROOT = LOCAL('media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_URL = '/static/'
STATICFILES_DIRS = ( LOCAL('static'), )
STATIC_ROOT = LOCAL('static_root')
# Setting this for debug tools (presumably django-debug-toolbar's
# INTERNAL_IPS gate — confirm which tool relies on it).
INTERNAL_IPS = ('127.0.0.1',)
|
from datetime import datetime
from feature_tools import get_statistical_results_of_list
def get_user_id(tweets):
    """Return the author's id string, taken from the first tweet."""
    first_tweet = tweets[0]
    return first_tweet['user']['id_str']
def get_user_name(tweets):
    """Return the author's display name, taken from the first tweet."""
    first_tweet = tweets[0]
    return first_tweet['user']['name']
def get_user_screen_name(tweets):
    """Return the author's screen name (handle), taken from the first tweet."""
    first_tweet = tweets[0]
    return first_tweet['user']['screen_name']
def get_all_friend_features(rts):
    """Extract one profile-feature dict per retweet from `rts`.

    Each element of `rts` must carry a Twitter-style
    `retweeted_status.user` object.  Returned dicts contain the original
    author's language, account age in days, counts (friends, followers,
    lists, statuses, favourites), description info, url and the two
    default-profile flags.  Entries are NOT de-duplicated by user here;
    downstream helpers handle that.
    """
    friends_features = []
    # `now` is loop-invariant; computing it once also makes all ages
    # consistent within a single call.
    now = datetime.today()
    for r in rts:
        # Hoist the repeated deep lookup used ~15 times in the original.
        user = r['retweeted_status']['user']
        created_at = datetime.strptime(user['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
        age = (now - created_at).days
        description = user['description']
        # `is not None`: an empty-string description still has length 0.
        description_length = len(description) if description is not None else 0
        friends_features.append({
            "user_id": user['id_str'],
            "lang": r['retweeted_status']['lang'],
            "age": age,
            "friends": user['friends_count'],
            "followers": user['followers_count'],
            "lists": user['listed_count'],
            "statuses": user['statuses_count'],
            "favourites": user['favourites_count'],
            "description": description,
            "description_length": description_length,
            "url": user['url'],
            "default_profile": user['default_profile'],
            "default_image": user['default_profile_image'],
        })
    return friends_features
def get_unique_retweets_rate(friendsFeatures):
    """Ratio of distinct retweeted users to total retweet entries."""
    unique_ids = {feat['user_id'] for feat in friendsFeatures}
    return len(unique_ids) / len(friendsFeatures)
def get_num_of_distinct_languages(friendFeatures):
    """Count distinct languages, one sample per distinct user.

    Only the language of each user's FIRST entry is counted; later entries
    by the same user are skipped (preserved from the original).
    """
    langs = set()
    # Set membership is O(1); the original scanned a list per entry, O(n^2).
    seen_users = set()
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen_users:
            seen_users.add(uid)
            langs.add(feat['lang'])
    return len(langs)
def get_account_age_distribution(friendFeatures):
    """Summary statistics of account ages, one sample per distinct user."""
    ages = []
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            ages.append(feat['age'])
    return get_statistical_results_of_list(ages)
def get_number_of_friends_distribution(friendFeatures):
    """Summary statistics of friend counts, one sample per distinct user."""
    friends = []
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            friends.append(feat['friends'])
    return get_statistical_results_of_list(friends)
def get_number_of_followers_distribution(friendFeatures):
    """Summary statistics of follower counts, one sample per distinct user."""
    followers = []
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            followers.append(feat['followers'])
    return get_statistical_results_of_list(followers)
def get_number_of_lists_distribution(friendFeatures):
    """Summary statistics of list memberships, one sample per distinct user."""
    lists = []
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            lists.append(feat['lists'])
    return get_statistical_results_of_list(lists)
def get_number_of_statuses_distribution(friendFeatures):
    """Summary statistics of status counts, one sample per distinct user."""
    statuses = []
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            statuses.append(feat['statuses'])
    return get_statistical_results_of_list(statuses)
def get_number_of_favourites_distribution(friendFeatures):
    """Summary statistics of favourite counts, one sample per distinct user."""
    favourites = []
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            favourites.append(feat['favourites'])
    return get_statistical_results_of_list(favourites)
def get_description_length_distribution(friendFeatures):
    """Summary statistics of description lengths, one sample per distinct user."""
    description_lengths = []
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            description_lengths.append(feat['description_length'])
    return get_statistical_results_of_list(description_lengths)
def get_fraction_of_users_with_urls(friendFeatures):
    """Fraction of entries whose (distinct) author set a profile URL.

    Note: the numerator counts distinct users, while the denominator is the
    raw entry count (duplicates included) — preserved from the original.
    """
    with_url = 0
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            if feat['url'] is not None:
                with_url += 1
    return with_url / len(friendFeatures)
def get_fraction_of_users_with_default_profile(friendFeatures):
    """Fraction of entries whose (distinct) author kept the default profile.

    Numerator counts distinct users; denominator is the raw entry count
    (duplicates included) — preserved from the original.
    """
    with_default = 0
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            if feat['default_profile'] == True:  # noqa: E712 — keep loose compare
                with_default += 1
    return with_default / len(friendFeatures)
def get_fraction_of_users_with_default_image(friendFeatures):
    """Fraction of entries whose (distinct) author kept the default avatar.

    Numerator counts distinct users; denominator is the raw entry count
    (duplicates included) — preserved from the original.
    """
    with_default = 0
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            if feat['default_image'] == True:  # noqa: E712 — keep loose compare
                with_default += 1
    return with_default / len(friendFeatures)
def get_fraction_of_unique_profile_descriptions(friendFeatures):
    """Ratio of distinct descriptions to distinct users having one.

    Returns 1.0 when no distinct user has a description at all.
    """
    described = 0
    seen = set()  # O(1) membership; the original list scan was O(n) per item
    descriptions = set()
    for feat in friendFeatures:
        uid = feat['user_id']
        if uid not in seen:
            seen.add(uid)
            desc = feat['description']
            if desc is not None:
                described += 1
                descriptions.add(desc)
    if described == 0:
        return 1.0
    return len(descriptions) / described
|
from collections import defaultdict
def recoverSecret(triplets):
    """Reconstruct the secret string from ordered letter triplets.

    Each triplet lists three letters in the order they occur in the
    secret.  For every letter we record the set of letters known to come
    after it, take a transitive closure, and sort letters by successor
    count (most successors = earliest position).

    Fixes vs. the original:
    * Python 3 compatible: `items()` instead of `iteritems()`, and no
      tuple-unpacking lambda parameter (both SyntaxError/AttributeError
      on Python 3).
    * Iterates over a snapshot of the dict: reading `letters[x]` for a
      letter that only ever appeared as a THIRD triplet element inserts
      a new defaultdict key mid-iteration, which raised
      "dictionary changed size during iteration".
    """
    letters = defaultdict(set)
    for a, b, c in triplets:
        letters[a].add(b)
        letters[a].add(c)
        letters[b].add(c)
    # Transitive closure over successor sets (snapshot: see docstring).
    for key, value in list(letters.items()):
        for after_key in value:
            letters[key] = letters[key].union(letters[after_key])
    return ''.join(k for k, _ in sorted(
        letters.items(), key=lambda kv: len(kv[1]), reverse=True
    ))
|
import dsn.util.systems as dsnsys
import re, inspect
# system_strs = ['linear_2D', 'R1RNN_input', 'V1_circuit'];
system_strs = ["linear_2D", "V1_circuit"]
num_system_strs = len(system_strs)
def doc2md(docstring, keywords):
    """Convert an indented class/method docstring into markdown lines.

    `keywords` are section headers (e.g. ["Args", "Returns"]) expected to
    appear in that order; each found header is bolded and suffixed with a
    markdown line break.  After the first header is seen, lines containing
    "):" are treated as parameter entries and reformatted as
    **name** (type): *description*.
    """
    docstrings = docstring.split("\n")
    num_strings = len(docstrings)
    i = 0
    key_ind = 0
    num_keys = len(keywords)
    key_len = len(keywords[0])
    found_key = False
    more_keys = True
    markdown = []
    while i < num_strings:
        # Collapse runs of whitespace.  NOTE(review): "\s+" is a non-raw
        # string; it works, but r"\s+" would be the conventional spelling.
        docstring_i = re.sub("\s+", " ", docstrings[i])
        if i > 0:
            # Drop the single leading space left over from indentation.
            docstring_i = docstring_i[1:]
        if more_keys and (docstring_i[:key_len] == keywords[key_ind]):
            docstring_i = (
                "**" + docstring_i[:key_len] + "**" + docstring_i[key_len:] + "\\\\"
            )
            key_ind += 1
            more_keys = not (key_ind == num_keys)
            if not found_key:
                found_key = True
            if more_keys:
                key_len = len(keywords[key_ind])
        if found_key:
            if "):" in docstring_i:
                # Looks like "name (type): description" — emphasise the parts.
                words = docstring_i.split(" ")
                line = "**" + words[0] + "** " + words[1] + " *"
                for j in range(2, len(words) - 1):
                    line += words[j] + " "
                line += words[-1] + "*"
                docstring_i = line
        markdown.append(docstring_i)
        i += 1
    return markdown
def parse_members(members):
    """Render markdown sections for a class's public members.

    `members` is a list of (name, method) pairs as produced by
    inspect.getmembers().  Dunder/private names (prefix "__") are skipped;
    each remaining member gets a '### name ###' heading followed by its
    doc2md-converted docstring and a blank separator line.
    """
    markdown = []
    keywords = ["Args", "Returns"]
    # Unpack pairs directly instead of indexing by position (same order).
    for name, method in members:
        if not name.startswith("__"):
            markdown.append("### " + name + " ###")
            markdown += doc2md(method.__doc__, keywords)
            markdown.append("\n")
    return markdown
# Emit markdown docs: first for the base `system` class, then for each
# concrete system listed in system_strs.
print("# system #")
docstring = dsnsys.system.__doc__
keywords = ["Attributes"]
markdown = doc2md(docstring, keywords)
for i in range(len(markdown)):
    print(markdown[i])
print("*****\n")
for i in range(num_system_strs):
    print("# " + system_strs[i] + " #")
    system_class = dsnsys.system_from_str(system_strs[i])
    docstring = system_class.__doc__
    keywords = ["Attributes"]
    markdown = doc2md(docstring, keywords)
    markdown += parse_members(inspect.getmembers(system_class))
    # NOTE(review): this inner loop reuses `i`, shadowing the outer loop
    # variable; harmless (the outer `for` rebinds `i` each pass) but
    # worth renaming.
    for i in range(len(markdown)):
        print(markdown[i])
    print("*****\n")
|
# Type stub (".pyi"-style) for a Stoer–Wagner minimum-cut routine; the
# `...` default and body are stub placeholders, not runnable code.
def stoer_wagner(G, weight: str = "weight", heap=...): ...
|
# Demonstrates the `in` membership operator on strings: checking whether a
# word occurs in a string yields a boolean — True if present, else False.
# (Comments translated from Uzbek; operator spacing normalised — the
# original `'Jamshid'in a` is legal but hard to read.)
a = "Khamzayev Jamshid is wonderfull Python programmer!!!"
print('Jamshid' in a)
print('Akmal' in a)
|
#! /usr/bin/python3.4
# Minimal hello-world style script.
greeting = "Hello Poland"
print(greeting)
|
import numpy as np
import os
import csv
import pickle
import sys
import matplotlib
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import scipy.interpolate
from pyevtk.hl import pointsToVTK
import pyvtk
# NOTE(review): this script is Python 2 (bare `print` statements below).
DATA_FORMAT_VERSION = 2
var_params = []
#path = "output/restart_002"
# record_times = [0, 50, 100] # in Myrs
# record_depths = [31e3, 95e3, 45e3]
# Which figure to produce (see the numbered comments below) and which
# time/depth indices the plotting code reads from the recorded arrays.
PLOT = 21
PLOT_TIME = 0
PLOT_DEPTH_BOTTOM = 5
PLOT_DEPTH = 0
PLOT_TIME1 = 0
PLOT_TIME2 = 1
PLOT_DEPTH_BELOW_THRUST = 0
ADD_INITIAL_DEPTH = True
# 1 = non-dim near-bottom temperature, at time PLOT_TIME
# 2 = non-dim change in temperature at PLOT_DEPTH_BELOW_THRUST, from PLOT_TIME1 to PLOT_TIME2
#path = "output/steadystate_003"
#record_times = [1800, 1900]
#record_depths = [35e3, 75e3, 125e3]
path = "output/001-ini"
record_times = np.array([10,180,200])
record_depths = np.array([10e3,99e3,20e3])
depth_adjust = record_depths * 0.0
# Accumulators filled by the os.walk() loop below: one entry per model run.
ndatapoints = 0
rec_values = []
rec_times = []
rec_depths = []
rec_params = []
rec_ranks = []
# Walk every model-run directory under `path`, reading its "params" and
# "meta" files and sampling temperatures at the requested times/depths.
for d in os.walk(path):
    meta_read = False
    params_read = False
    start_time = None
    irec = 0
    if ("params" in d[2]):
        params = []
        rparamfile = open(d[0] + "/" + "params")
        for row in rparamfile:
            # NOTE(review): eval() on file contents — only safe because the
            # params files are produced by the same pipeline.
            params.append(eval(row))
        rparamfile.close()
        if PLOT == 20 or PLOT == 21:
            if ADD_INITIAL_DEPTH:
                # Shift the recording depths by the run's first parameter.
                depth_adjust = record_depths * 0.0
                depth_adjust = depth_adjust + params[0]
            else:
                depth_adjust = record_depths * 0.0
        else:
            depth_adjust = record_depths * 0.0
    else:
        # skipped
        continue
    if ("meta" in d[2]):
        rmetafile = open(d[0] + "/" + "meta")
        csvmetar = csv.reader(rmetafile, delimiter=",", quotechar='"')
        irow = 0
        alltimesread = False
        for row in csvmetar:
            irow = irow + 1
            if irow == 1:
                # Row 1: data format version tag.
                if int(row[0]) != DATA_FORMAT_VERSION:
                    raise Exception("Incompatible data format")
                else:
                    continue
            elif irow == 2:
                # Row 2: MPI rank that produced this output.
                mpi_rank = int(row[0])
                meta_read = True
                continue
            elif irow == 3:
                # just the header
                pass
            if irow == 4:
                # we've got data!
                start_time = float(row[1])
                rec_values.append([])
                rec_times.append([])
                rec_depths.append([])
                rec_params.append([])
                for p in params:
                    rec_params[-1].append(p)
                rec_ranks.append(mpi_rank)
                ndatapoints = ndatapoints + 1
            if irow > 3:
                # Record the first snapshot at/after each requested time.
                if (float(row[1])-start_time) >= record_times[irec]:
                    irec = irec + 1
                    rec_values[-1].append([])
                    rec_depths[-1].append([])
                    time = float(row[1]) - start_time
                    rec_times[-1].append(time)
                    # Per-snapshot node file: depth/temperature columns.
                    rnodefile = open(d[0] + "/" + "nodes." + str(row[0]), "rb")
                    csvnoder = csv.reader(rnodefile, delimiter=",", quotechar='"')
                    inoderow = 0
                    idepthrec = 0
                    skip_header = True
                    DEPTHCOL = 2
                    TEMPCOL = 3
                    alldepthsread = False
                    lastdepth = -np.inf
                    for noderow in csvnoder:
                        if skip_header:
                            skip_header = False
                            continue
                        if inoderow == 0:
                            for i in range(len(record_depths)):
                                if float(noderow[DEPTHCOL]) > record_depths[i] + depth_adjust[i]:
                                    # first depth recorder is larger than a requested recording depth
                                    idepthrec = i
                                    rec_values[-1][-1].append(np.nan)
                                    rec_depths[-1][-1].append(np.nan)
                            lastdepth = float(noderow[DEPTHCOL])
                            inoderow = inoderow + 1
                            continue
                        #if idepthrec >= len(record_depths):
                        #    # we have already recorded more than requested amount of depths
                        #    rec_depths[-1][-1].append(-999)
                        #    rec_values[-1][-1].append(-999)
                        #    idepthrec = idepthrec + 1
                        #    lastdepth = float(noderow[DEPTHCOL])
                        elif float(noderow[DEPTHCOL]) > record_depths[idepthrec] + depth_adjust[idepthrec]:
                            # First node at/below the requested depth: take it.
                            idepthrec = idepthrec + 1
                            rec_values[-1][-1].append(float(noderow[TEMPCOL]))
                            rec_depths[-1][-1].append(float(noderow[DEPTHCOL]))
                            lastdepth = float(noderow[DEPTHCOL])
                            if idepthrec >= len(record_depths):
                                alldepthsread = True
                                break
                        inoderow = inoderow + 1
                    rnodefile.close()
                    while len(rec_depths[-1][-1]) < len(record_depths):
                        # fill unfound depths with NaNs
                        rec_depths[-1][-1].append(np.nan)
                        rec_values[-1][-1].append(np.nan)
                    if irec >= len(record_times):
                        alltimesread = True
                        break
        # all times checked
        # fill unfound times with NaNs
        while len(rec_values[-1]) < len(record_times):
            rec_values[-1].append([])
            rec_depths[-1].append([])
            rec_times[-1].append(np.nan)
            while len(rec_depths[-1][-1]) < len(record_depths):
                rec_depths[-1][-1].append(np.nan)
                rec_values[-1][-1].append(np.nan)
        rmetafile.close()
######
## SECONDARY HANDLING
######
# Convert the per-run accumulators to arrays, then pick out the values to
# plot according to PLOT.  (Python 2 print statements throughout.)
print ndatapoints
np_rec_values = np.array(rec_values)
print "---"
print np_rec_values.shape
np_rec_ranks = np.array(rec_ranks)
np_rec_times = np.array(rec_times)
np_rec_depths = np.array(rec_depths)
# NOTE(review): threshold=np.nan is rejected by modern NumPy; use
# threshold=sys.maxsize when porting.
np.set_printoptions(threshold=np.nan)
#Tscale = np_rec_values[:,0,1]
#dT = (np_rec_values[:,1,1] - np_rec_values[:,0,1])/Tscale
if PLOT == 1:
    plottitle = "bottom temp change from time zero to t=" + str(record_times[PLOT_TIME])
    Tscale = np_rec_values[:,0,PLOT_DEPTH_BOTTOM]
    dT = np_rec_values[:,PLOT_TIME,PLOT_DEPTH_BOTTOM]/Tscale
    p1 = np.array(rec_params)[:,0]
    p2 = np.array(rec_params)[:,1]
elif PLOT == 2:
    plottitle = "temp change from t=" + str(record_times[PLOT_TIME1]) + " to t=" + str(record_times[PLOT_TIME2]) + " at z = " + str(record_depths[PLOT_DEPTH_BELOW_THRUST])
    Tscale = np_rec_values[:,0,PLOT_DEPTH_BOTTOM]
    dT = np_rec_values[:,PLOT_TIME2,PLOT_DEPTH_BELOW_THRUST] - np_rec_values[:,PLOT_TIME1,PLOT_DEPTH_BELOW_THRUST]
    dT = dT / Tscale
    p1 = np.array(rec_params)[:,0]
    p2 = np.array(rec_params)[:,1]
elif PLOT == 20:
    plottitle = "what are we plotting here? " + str(record_times[PLOT_TIME])
    Tscale = 1350.0
    print np_rec_values.shape
    print np_rec_times.shape
    pickedT = np_rec_values[:,PLOT_TIME,PLOT_DEPTH]
    # Collect parameter axes/values for runs with a valid third parameter.
    ip0 = []
    ip1 = []
    ip2 = []
    iy0 = []
    for i in range(0,len(rec_params)):
        if rec_params[i][2] > -1:
            ip0.append(rec_params[i][0]) # thickness of the overthrust sheet
            ip1.append(rec_params[i][1][1]-rec_params[i][1][0]) # original extent of the lithosphere
            ip2.append(rec_params[i][2]) # erosion speed
            iy0.append(pickedT[i]) # value itself
    ip0 = np.array(ip0)
    ip1 = np.array(ip1)
    ip2 = np.array(ip2)
    iy0 = np.array(iy0)
    p0 = ip0
    p0lab = "overthrust sheet thickness"
    p1 = ip1
    p1lab = "orig. lithosphere thickness"
    p2 = ip2
    p2lab = "erosion speed"
    y0 = iy0
    y0lab = "temperature"
    print ip0
    print ip1
    print ip2
    print iy0
elif PLOT == 21:
    plottitle = "what are we plotting here? " + str(record_times[PLOT_TIME])
    Tscale = 1350.0
    print np_rec_values.shape
    print np_rec_times.shape
    pickedT = np_rec_values[:,PLOT_TIME,PLOT_DEPTH]
    # Same as PLOT == 20 but the second axis is the raw second parameter.
    ip0 = []
    ip1 = []
    ip2 = []
    iy0 = []
    for i in range(0,len(rec_params)):
        if rec_params[i][2] > -1:
            ip0.append(rec_params[i][0]) # thickness of the overthrust sheet
            ip1.append(rec_params[i][1]) # bottom temp
            ip2.append(rec_params[i][2]) # erosion speed
            iy0.append(pickedT[i]) # value itself
    ip0 = np.array(ip0)
    ip1 = np.array(ip1)
    ip2 = np.array(ip2)
    iy0 = np.array(iy0)
    p0 = ip0
    p0lab = "overthrust sheet thickness"
    p1 = ip1
    p1lab = "lithosphere bottom temp"
    p2 = ip2
    p2lab = "erosion speed"
    y0 = iy0
    y0lab = "temperature"
    print ip0
    print ip1
    print ip2
    print iy0
# Interpolate the scattered (p0, p1, p2) -> y0 samples onto a regular grid,
# export to VTK, and show per-slice scatter plots.
x0i = np.linspace(min(p0), max(p0))
x1i = np.linspace(min(p1), max(p1))
x2i = np.linspace(min(p2), max(p2))
x0, x1, x2 = np.meshgrid(x0i, x1i, x2i)
# Mask out runs whose sampled value is NaN before interpolation.
p1 = np.ma.array(p1, mask=np.isnan(y0))
p2 = np.ma.array(p2, mask=np.isnan(y0))
p0 = np.ma.array(p0, mask=np.isnan(y0))
y0 = np.ma.array(y0, mask=np.isnan(y0))
print "----"
print len(p0)
print len(p1)
print len(p2)
print len(y0)
print np.ma.vstack((p0,p1,p2)).T.shape
orig_x = np.ma.vstack((p0,p1,p2)).T
print "---"
##print len(x0), len(x0[0]), len(x0[0][0])
queryx = np.vstack((x0.flatten(), x1.flatten(), x2.flatten())).T
##print queryx.shape
ipolate = scipy.interpolate.LinearNDInterpolator(points=orig_x, values=y0)
zi = ipolate(queryx)
##print len(queryx)
##print len(zi)
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(queryx[:,0], queryx[:,1], queryx[:,2])
#plt.show()
#zi = scipy.interpolate.griddata((p1,p2), y0, (xi[None,:],yi[:,None]), method='cubic')
NN = len(queryx[:,0])
picks = np.arange(0,NN,1)
pointsToVTK("./points", queryx[picks,0], queryx[picks,1], queryx[picks,2], data = {"temp" : zi[picks]})
print queryx.shape
print zi.shape
#sys.exit()
do3d = False
# One figure per distinct p2 grid value (a 2-D slice of the 3-D grid).
for x in np.unique(queryx[picks,2]):
    idx = np.where(queryx[picks,2] == x)
    fig = plt.figure()
    if do3d:
        ax = fig.add_subplot(111,projection='3d')
        ax.scatter((queryx[picks,0])[idx], (queryx[picks,1])[idx], (queryx[picks,2])[idx], c=(zi[picks])[idx], cmap=plt.hot(), s=5, edgecolors='none')
    else:
        ax = fig.add_subplot(111)
        sc = ax.scatter((queryx[picks,0])[idx], (queryx[picks,1])[idx], c=(zi[picks])[idx], cmap=plt.hot(), edgecolors='none')
    # NOTE(review): `sc` is only assigned in the 2-D branch; with
    # do3d=True this line would raise NameError.
    plt.colorbar(sc)
    #cs = ax.contour((queryx[picks,0])[idx], (queryx[picks,1])[idx], (zi[picks])[idx])
    #ax.clabel(cs)
    ax.set_xlabel(p0lab)
    ax.set_ylabel(p1lab)
    plt.show()
#contour(xi, yi, zi)
#plt.figure()
#CS = plt.contour(X, Y, Z)
#plt.show()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'c:\Users\ric_j\OneDrive\Projects\FINISHZZ\arenavision_ui\ui\arenavision_ui.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_main_window(object):
    """PyQt5-generated UI layout for the ArenaVision main window.

    Auto-generated from arenavision_ui.ui (see header warning): do not edit
    by hand; regenerate with pyuic5 instead. Mix this class into a
    QMainWindow and call setupUi(window).
    """

    def setupUi(self, main_window):
        """Build and lay out all widgets on ``main_window``."""
        main_window.setObjectName("ArenaVision")
        main_window.setEnabled(True)
        main_window.resize(1387, 883)
        self.centralwidget = QtWidgets.QWidget(main_window)
        self.centralwidget.setObjectName("centralwidget")
        # "All games" button (top-left).
        self.todos_jogos = QtWidgets.QPushButton(self.centralwidget)
        self.todos_jogos.setGeometry(QtCore.QRect(10, 10, 161, 41))
        self.todos_jogos.setObjectName("todos_jogos")
        # Search text field.
        self.procurar_jogo_texto = QtWidgets.QLineEdit(self.centralwidget)
        self.procurar_jogo_texto.setGeometry(QtCore.QRect(220, 10, 421, 41))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(25)
        font.setBold(True)
        font.setWeight(75)
        self.procurar_jogo_texto.setFont(font)
        self.procurar_jogo_texto.setObjectName("procurar_jogo_texto")
        # "Search game" button.
        self.procurar_jogo = QtWidgets.QPushButton(self.centralwidget)
        self.procurar_jogo.setGeometry(QtCore.QRect(640, 10, 161, 41))
        self.procurar_jogo.setObjectName("procurar_jogo")
        # Main results table: one row per game, seven columns (see
        # retranslateUi for the header labels).
        self.table_jogos = QtWidgets.QTableWidget(self.centralwidget)
        self.table_jogos.setGeometry(QtCore.QRect(0, 50, 1381, 791))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(10)
        self.table_jogos.setFont(font)
        self.table_jogos.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.table_jogos.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.table_jogos.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.table_jogos.setLineWidth(2)
        self.table_jogos.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContentsOnFirstShow)
        self.table_jogos.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked)
        self.table_jogos.setTabKeyNavigation(False)
        self.table_jogos.setProperty("showDropIndicator", False)
        self.table_jogos.setDragDropOverwriteMode(False)
        self.table_jogos.setAlternatingRowColors(True)
        self.table_jogos.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.table_jogos.setGridStyle(QtCore.Qt.DashLine)
        self.table_jogos.setWordWrap(False)
        self.table_jogos.setCornerButtonEnabled(True)
        self.table_jogos.setRowCount(0)
        self.table_jogos.setObjectName("table_jogos")
        self.table_jogos.setColumnCount(7)
        item = QtWidgets.QTableWidgetItem()
        self.table_jogos.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.table_jogos.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.table_jogos.setHorizontalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.table_jogos.setHorizontalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        self.table_jogos.setHorizontalHeaderItem(4, item)
        item = QtWidgets.QTableWidgetItem()
        self.table_jogos.setHorizontalHeaderItem(5, item)
        item = QtWidgets.QTableWidgetItem()
        self.table_jogos.setHorizontalHeaderItem(6, item)
        # "DIA:" label next to the day selector.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(950, 10, 81, 31))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName("label")
        # Day selector combo box (populated at runtime elsewhere).
        self.select_days = QtWidgets.QComboBox(self.centralwidget)
        self.select_days.setGeometry(QtCore.QRect(1010, 10, 151, 31))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.select_days.setFont(font)
        self.select_days.setObjectName("select_days")
        main_window.setCentralWidget(self.centralwidget)
        # Menu bar: "Opções" menu with restart/quit actions.
        self.menubar = QtWidgets.QMenuBar(main_window)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1387, 31))
        self.menubar.setObjectName("menubar")
        self.menuOp_es = QtWidgets.QMenu(self.menubar)
        self.menuOp_es.setObjectName("menuOp_es")
        main_window.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(main_window)
        self.statusbar.setObjectName("statusbar")
        main_window.setStatusBar(self.statusbar)
        self.actionrecome_ar = QtWidgets.QAction(main_window)
        self.actionrecome_ar.setObjectName("actionrecome_ar")
        self.actionSair = QtWidgets.QAction(main_window)
        self.actionSair.setObjectName("actionSair")
        self.menuOp_es.addAction(self.actionrecome_ar)
        self.menuOp_es.addAction(self.actionSair)
        self.menubar.addAction(self.menuOp_es.menuAction())
        self.retranslateUi(main_window)
        QtCore.QMetaObject.connectSlotsByName(main_window)

    def retranslateUi(self, main_window):
        """Set all user-visible (Portuguese) strings via Qt's translate hook."""
        _translate = QtCore.QCoreApplication.translate
        main_window.setWindowTitle(_translate("main_window", "MainWindow"))
        self.todos_jogos.setText(_translate("main_window", "TODOS OS JOGOS"))
        self.procurar_jogo.setText(_translate("main_window", "PROCURAR JOGO"))
        self.table_jogos.setSortingEnabled(True)
        item = self.table_jogos.horizontalHeaderItem(0)
        item.setText(_translate("main_window", "Horas"))
        item = self.table_jogos.horizontalHeaderItem(1)
        item.setText(_translate("main_window", "Tipo de Jogo"))
        item = self.table_jogos.horizontalHeaderItem(2)
        item.setText(_translate("main_window", "Liga"))
        item = self.table_jogos.horizontalHeaderItem(3)
        item.setText(_translate("main_window", "Equipas"))
        item = self.table_jogos.horizontalHeaderItem(4)
        item.setText(_translate("main_window", "Língua"))
        item = self.table_jogos.horizontalHeaderItem(5)
        item.setText(_translate("main_window", "Canal 1"))
        item = self.table_jogos.horizontalHeaderItem(6)
        item.setText(_translate("main_window", "Canal 2"))
        self.label.setText(_translate("main_window", "DIA:"))
        self.menuOp_es.setTitle(_translate("main_window", "Opções"))
        self.actionrecome_ar.setText(_translate("main_window", "Recomeçar"))
        self.actionSair.setText(_translate("main_window", "Sair"))
|
# Demonstrates ``finally`` semantics while an exception is in flight: the
# inner finally prints 'SPAM', the outer finally prints 'spam', and only
# then does the IndexError keep propagating and terminate the script with
# a traceback. Neither finally block swallows the exception.
try:
    try:
        raise IndexError
    finally:
        print('SPAM')
finally:
    print('spam')
|
def my_factors(n):
    """Return the sorted list of all positive factors of n (n >= 1).

    Bug fix: the original seeded the result with ``[1, n]`` without
    de-duplicating, so ``my_factors(1)`` returned ``[1, 1]`` (and the
    K-th-factor script below would wrongly print 1 for N=1, K=2).
    Using a set removes the duplicate while keeping the interface intact.
    """
    factors = {1, n}
    # Trial-divide up to n//2; any factor other than n itself is <= n//2.
    for i in range(2, n // 2 + 1):
        if n % i == 0:
            factors.add(i)
    return sorted(factors)
# Read "N K" from stdin and print the K-th smallest factor of N, or 0 when
# N has fewer than K factors.
ls = list(map(int, input().split()))
N, K = ls[0], ls[1]
myFactors = my_factors(N)
if len(myFactors) < K:
    print(0)
else:
    # K is 1-based; the list is sorted ascending.
    print(myFactors[K-1])
|
from account.models import MyUser, RateReader, MyUserProfile
from rate.models import Rate
from countrycity.models import Location, Liner
from rest_framework import viewsets, views
from api.serializers import UserSerializer, UserCreateSerializer, UserUpdateSerializer, ChangePasswordSerializer, ChangeProfileImageSerializer, RateReaderSerializer, RateUserSerializer, RateSerializer, LocationSerializer, LinerSerializer, RateInputpersonSerializer, RateAccountSerializer, RateLinerSerializer, RatePolSerializer, RatePodSerializer
from django_filters.rest_framework import DjangoFilterBackend
from .permissions import UserPermission, UserCreatePermission, UserUpdatePermission, RatePermission, AjaxPermission
from dateutil.relativedelta import relativedelta
from django.utils import timezone
import dateutil.parser
from rest_framework import status
from rest_framework.response import Response
from django.db.models import Avg
from rest_framework import permissions
from PIL import Image
from io import BytesIO
from django.core.files.base import ContentFile
from resizeimage import resizeimage
from rest_framework.parsers import MultiPartParser
class RateReaderViewSet(viewsets.ModelViewSet):
    """Shower/reader share links where the logged-in user is on either side."""
    serializer_class = RateReaderSerializer
    pagination_class = None
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('id', 'shower', 'reader',)

    def get_queryset(self):
        # Union of the links the user created (as shower) and the links
        # pointing at the user (as reader).
        showing = RateReader.objects.filter(shower=self.request.user)
        reading = RateReader.objects.filter(reader=self.request.user)
        return showing | reading

    def perform_create(self, serializer):
        # A new link is always recorded with the requester as the shower.
        serializer.save(shower=self.request.user)
class RateReaderUserView(views.APIView):
    """List the users who read the logged-in user's rates."""

    def get(self, request, *args, **kwargs):
        reader_links = RateReader.objects.filter(shower=self.request.user)
        reader_users = MyUser.objects.filter(who_reads__in=reader_links)
        return Response(RateUserSerializer(reader_users, many=True).data)
class RateShowerUserView(views.APIView):
    """List the users who show their rates to the logged-in user."""

    def get(self, request, *args, **kwargs):
        shower_links = RateReader.objects.filter(reader=self.request.user)
        shower_users = MyUser.objects.filter(who_shows__in=shower_links)
        return Response(UserSerializer(shower_users, many=True).data)
class UserSearchView(views.APIView):
    """Exact-match user search by email, nickname and/or company.

    With no criteria at all the search returns an empty list rather than
    every user.
    """

    def get(self, request, *args, **kwargs):
        params = self.request.query_params
        candidates = {
            'email__exact': params.get('email', None),
            'nickname__exact': params.get('nickname', None),
            'profile__company__exact': params.get('company', None),
        }
        # Keep only the criteria the caller actually supplied.
        lookups = {field: value for field, value in candidates.items() if value}
        if lookups:
            users = MyUser.objects.filter(**lookups)
        else:
            users = MyUser.objects.none()
        return Response(UserSerializer(users, many=True).data)
class UserViewSet(viewsets.ModelViewSet):
    """Read/admin endpoint over all users, filterable by id/email/nickname."""
    queryset = MyUser.objects.all()
    serializer_class = UserSerializer
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('id', 'email', 'nickname', )
    permission_classes = (UserPermission,)
class UserCreateViewSet(viewsets.ModelViewSet):
    """Sign-up endpoint: same queryset as UserViewSet but with the
    creation serializer and its own permission class."""
    queryset = MyUser.objects.all()
    serializer_class = UserCreateSerializer
    permission_classes = (UserCreatePermission,)
class UserUpdateViewSet(viewsets.ModelViewSet):
    """Profile-update endpoint with its own serializer and permissions."""
    queryset = MyUser.objects.all()
    serializer_class = UserUpdateSerializer
    permission_classes = (UserUpdatePermission,)
class UpdateProfileImage(views.APIView):
    """Replace the authenticated user's profile image with a 100x100 cover crop."""
    permission_classes = (permissions.IsAuthenticated,)
    parser_classes = (MultiPartParser,)

    def get_object(self, queryset=None):
        # The profile being updated is always the requester's own.
        return MyUserProfile.objects.get(owner=self.request.user)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        serializer = ChangeProfileImageSerializer(data=request.data)
        if serializer.is_valid():
            img = serializer.validated_data["new_profile_image"]
            pil_image_obj = Image.open(img)
            # validate=False lets images smaller than 100x100 through
            # (resizeimage would otherwise reject them).
            new_image = resizeimage.resize_cover(pil_image_obj, [100, 100], validate=False)
            new_image_io = BytesIO()
            # NOTE(review): ``format`` on a derived PIL image can be None,
            # which would make save() raise -- confirm resize_cover preserves
            # it, or consider passing ``pil_image_obj.format`` explicitly.
            new_image.save(new_image_io, new_image.format)
            temp_name = img.name
            # Store under the uploaded file's original name.
            self.object.image.save(
                temp_name,
                content=ContentFile(new_image_io.getvalue())
            )
            return Response(status=status.HTTP_204_NO_CONTENT)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UpdatePassword(views.APIView):
    """Change the logged-in user's password after checking the old one."""
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self, queryset=None):
        # The password being changed is always the requester's own.
        return self.request.user

    def patch(self, request, *args, **kwargs):
        self.object = self.get_object()
        serializer = ChangePasswordSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        old_password = serializer.data.get("old_password")
        if not self.object.check_password(old_password):
            return Response({"old_password": ["Wrong Password."]},
                            status=status.HTTP_400_BAD_REQUEST)
        self.object.set_password(serializer.data.get("new_password"))
        self.object.save()
        return Response(status=status.HTTP_204_NO_CONTENT)
def jwt_response_payload_handler(token, user=None, request=None):
    """JWT login response body: the token plus the serialized user."""
    serialized_user = UserSerializer(user, context={'request': request}).data
    payload = {'token': token, 'user': serialized_user}
    return payload
class RateChartView(views.APIView):
    """Monthly average buying-rate chart data for the logged-in user.

    Query params:
        type          -- container type: '20' (buying20), '40' (buying40)
                         or '4H' (buying4H); anything else yields month-only rows
        liner/pol/pod -- '|'-separated filter lists
        search_from / search_to -- 'YYYY-MM' period bounds; both optional
                         (default: previous month through the current month)

    Returns one row per month containing the overall 'Market' average and a
    per-liner average for every liner present in the filtered rates.

    Bug fix: the original crashed with AttributeError when ``search_from``
    or ``search_to`` was omitted (it called ``.split('-')`` on None even
    though the date filter itself had a default).
    """

    # Maps the ``type`` query param onto the Rate field being averaged.
    _RATE_FIELDS = {'20': 'buying20', '40': 'buying40', '4H': 'buying4H'}

    def _visible_rates(self):
        """Rates entered by the user or by anyone who shows rates to them."""
        showers = RateReader.objects.filter(reader=self.request.user).distinct()
        rates = Rate.objects.none()
        for shower in showers:
            userinstance = MyUser.objects.get(email=shower.shower.email)
            rates = rates | Rate.objects.filter(inputperson=userinstance)
        return rates | Rate.objects.filter(inputperson=self.request.user)

    @staticmethod
    def _avg(queryset, field, extra_filter):
        """Average of ``field`` over the filtered queryset (zeros excluded),
        truncated to int; None/0 averages pass through unchanged."""
        value = queryset.filter(**extra_filter).exclude(**{field: 0}) \
            .aggregate(avg_rate=Avg(field))['avg_rate']
        return int(value) if value else value

    def get(self, request, *args, **kwargs):
        rates = self._visible_rates()

        liner_list = []
        liner_query = self.request.query_params.get('liner', None)
        if liner_query:
            for liner in liner_query.split('|'):
                # NOTE(review): filters Liner by ``name`` here while
                # RateViewSet filters by ``label`` -- confirm which is meant.
                for name in Liner.objects.filter(name=liner).values_list('name', flat=True):
                    liner_list.append(name)

        pol_query = self.request.query_params.get('pol', None)
        pol_list = pol_query.split('|') if pol_query else []
        pod_query = self.request.query_params.get('pod', None)
        pod_list = pod_query.split('|') if pod_query else []

        sf = self.request.query_params.get('search_from', None)
        st = self.request.query_params.get('search_to', None)

        filter_args = {}
        if liner_list:
            filter_args['liner__in'] = liner_list
        if pol_list:
            filter_args['pol__in'] = pol_list
        if pod_list:
            filter_args['pod__in'] = pod_list

        # Period start: explicit 'YYYY-MM', else the 1st of last month.
        if sf:
            searchvalue_sf = dateutil.parser.parse(sf + '-01').date()
        else:
            searchvalue_sf = timezone.now().replace(day=1) + relativedelta(months=-1)
        filter_args['effectiveDate__gte'] = searchvalue_sf
        # Period end: explicit 'YYYY-MM' (last day of that month), else now;
        # as before, the upper-bound filter is only applied when given.
        if st:
            searchvalue_st = dateutil.parser.parse(st + '-01').date() \
                + relativedelta(months=1) - relativedelta(days=1)
            filter_args['effectiveDate__lte'] = searchvalue_st
            period_end = searchvalue_st
        else:
            period_end = timezone.now()

        filtered_rates = rates.filter(**filter_args).exclude(deleted=1)
        query_type = self.request.query_params.get('type', None)
        rate_field = self._RATE_FIELDS.get(query_type)
        liners = filtered_rates.values('liner').distinct()

        start_year, start_month = searchvalue_sf.year, searchvalue_sf.month
        months_total = (period_end.year - start_year) * 12 \
            + (period_end.month - start_month) + 1

        liner_avg_rate = []
        for i in range(months_total):
            # i-th month of the period, carrying over year boundaries.
            month_index = start_month - 1 + i
            year = start_year + month_index // 12
            month = month_index % 12 + 1
            query_sf = dateutil.parser.parse('%d%02d01' % (year, month)).date()
            query_st = query_sf + relativedelta(months=1) - relativedelta(days=1)
            temp_args = {
                'effectiveDate__gte': query_sf,
                'effectiveDate__lte': query_st,
            }
            item = {'month': '%d%02d' % (year, month)}
            if rate_field:
                # Overall market average first, then one entry per liner.
                item['Market'] = self._avg(filtered_rates, rate_field, temp_args)
                for liner in liners:
                    temp_args['liner'] = liner['liner']
                    item[liner['liner']] = self._avg(filtered_rates, rate_field, temp_args)
            liner_avg_rate.append(item)

        context = {
            'type': query_type,
            'pol': self.request.query_params.get('pol', None),
            'pod': self.request.query_params.get('pod', None),
            'liners': ['Market'] + [liner['liner'] for liner in liners],
            'result': liner_avg_rate,
        }
        return Response(context)
class RateViewSet(viewsets.ModelViewSet):
    """CRUD over the Rate rows visible to the logged-in user, with
    '|'-separated multi-value filters and an optional date range."""
    serializer_class = RateSerializer
    permission_classes = (RatePermission,)

    def get_queryset(self):
        """Own rates plus rates shared by "showers", filtered by query params."""
        # Shower query: users who show their rates to the logged-in user.
        showers = RateReader.objects.filter(reader=self.request.user).distinct()
        # Union the rates entered by each shower into one queryset.
        rates = Rate.objects.none()
        for shower in showers:
            userinstance = MyUser.objects.get(email=shower.shower.email)
            rates = rates | Rate.objects.filter(inputperson=userinstance)
        rates = rates | Rate.objects.filter(inputperson=self.request.user)
        inputperson_list = []
        inputpersons = self.request.query_params.get('inputperson', None)
        if inputpersons:
            inputpersons_list = inputpersons.split('|')
            for inputperson in inputpersons_list:
                # Resolve profile display names back to user instances.
                x = MyUser.objects.get(profile__profile_name=inputperson)
                inputperson_list.append(x)
        else:
            inputperson_list = []
        account_list = []
        accounts = self.request.query_params.get('account', None)
        if accounts:
            account_list = accounts.split('|')
        liner_list = []
        liner_query = self.request.query_params.get('liner', None)
        if liner_query:
            liners_list = liner_query.split('|')
            for liner in liners_list:
                # NOTE(review): filters Liner by ``label`` here while
                # RateChartView filters by ``name`` -- confirm which is meant.
                x = Liner.objects.filter(label=liner).values_list('name', flat=True)
                for y in x:
                    liner_list.append(y)
        else:
            liner_list = []
        pol_list = []
        pol_query = self.request.query_params.get('pol', None)
        if pol_query:
            pol_list = pol_query.split('|')
        pod_list = []
        pod_query = self.request.query_params.get('pod', None)
        if pod_query:
            pod_list = pod_query.split('|')
        sf = self.request.query_params.get('search_from', None)
        st = self.request.query_params.get('search_to', None)
        filter_args = {}
        if inputperson_list != []:
            filter_args['inputperson__in'] = inputperson_list
        if account_list != []:
            filter_args['account__in'] = account_list
        if liner_list != []:
            filter_args['liner__in'] = liner_list
        if pol_list != []:
            filter_args['pol__in'] = pol_list
        if pod_list != []:
            filter_args['pod__in'] = pod_list
        if sf:
            searchvalue_sf = dateutil.parser.parse(sf).date()
            filter_args['effectiveDate__gte'] = searchvalue_sf
        if st:
            searchvalue_st = dateutil.parser.parse(st).date()
            filter_args['effectiveDate__lte'] = searchvalue_st
        # Soft-deleted rows (deleted=1) are never exposed.
        queryset = rates.filter(**filter_args).order_by('-id').exclude(deleted=1)
        return queryset

    def create(self, request, *args, **kwargs):
        """Create one rate, or many when the payload wraps a list in 'items'."""
        data = request.data.get("items") if 'items' in request.data else request.data
        many = isinstance(data, list)
        serializer = self.get_serializer(data=data, many=many)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

    def perform_create(self, serializer):
        # Every created rate is attributed to the requesting user.
        serializer.save(inputperson=self.request.user)
class RateInputpersonViewSet(viewsets.ModelViewSet):
    """Distinct input-persons among the rates visible to the logged-in user."""
    serializer_class = RateInputpersonSerializer
    pagination_class = None
    permission_classes = (AjaxPermission,)

    def get_queryset(self):
        # Own (non-deleted) rates, unioned with those of every shower.
        visible = Rate.objects.filter(inputperson=self.request.user).exclude(deleted=1)
        for link in RateReader.objects.filter(reader=self.request.user).distinct():
            shower_user = MyUser.objects.get(email=link.shower.email)
            visible = visible | Rate.objects.filter(inputperson=shower_user).exclude(deleted=1)
        # NOTE: distinct('inputperson') with a field argument is
        # PostgreSQL-only in Django.
        return visible.distinct('inputperson')
class RateAccountViewSet(viewsets.ModelViewSet):
    """Distinct account values among the rates visible to the logged-in user."""
    serializer_class = RateAccountSerializer
    pagination_class = None
    permission_classes = (AjaxPermission,)

    def get_queryset(self):
        # Own (non-deleted) rates, unioned with those of every shower.
        visible = Rate.objects.filter(inputperson=self.request.user).exclude(deleted=1)
        for link in RateReader.objects.filter(reader=self.request.user).distinct():
            shower_user = MyUser.objects.get(email=link.shower.email)
            visible = visible | Rate.objects.filter(inputperson=shower_user).exclude(deleted=1)
        return visible.values('account').distinct().order_by('account')
class RateLinerViewSet(viewsets.ModelViewSet):
    """Labels of the liners that appear in the rates visible to the user."""
    serializer_class = RateLinerSerializer
    pagination_class = None
    permission_classes = (AjaxPermission,)

    def get_queryset(self):
        # Own (non-deleted) rates, unioned with those of every shower.
        visible = Rate.objects.filter(inputperson=self.request.user).exclude(deleted=1)
        for link in RateReader.objects.filter(reader=self.request.user).distinct():
            shower_user = MyUser.objects.get(email=link.shower.email)
            visible = visible | Rate.objects.filter(inputperson=shower_user).exclude(deleted=1)
        # Rates store liner *names*; map them to Liner rows to expose labels.
        liner_names = [row['liner'] for row in visible.values('liner').distinct()]
        return Liner.objects.filter(name__in=liner_names).values('label').order_by('label')
class RatePolViewSet(viewsets.ModelViewSet):
    """Distinct ports of loading among the rates visible to the user."""
    serializer_class = RatePolSerializer
    pagination_class = None
    permission_classes = (AjaxPermission,)

    def get_queryset(self):
        # Own (non-deleted) rates, unioned with those of every shower.
        visible = Rate.objects.filter(inputperson=self.request.user).exclude(deleted=1)
        for link in RateReader.objects.filter(reader=self.request.user).distinct():
            shower_user = MyUser.objects.get(email=link.shower.email)
            visible = visible | Rate.objects.filter(inputperson=shower_user).exclude(deleted=1)
        return visible.values('pol').distinct().order_by('pol')
class RatePodViewSet(viewsets.ModelViewSet):
    """Distinct ports of discharge among the rates visible to the user."""
    serializer_class = RatePodSerializer
    pagination_class = None
    permission_classes = (AjaxPermission,)

    def get_queryset(self):
        # Own (non-deleted) rates, unioned with those of every shower.
        visible = Rate.objects.filter(inputperson=self.request.user).exclude(deleted=1)
        for link in RateReader.objects.filter(reader=self.request.user).distinct():
            shower_user = MyUser.objects.get(email=link.shower.email)
            visible = visible | Rate.objects.filter(inputperson=shower_user).exclude(deleted=1)
        return visible.values('pod').distinct().order_by('pod')
class RateHeaderView(views.APIView):
    """Autocomplete choice lists for the rate-entry form headers.

    ``handler`` selects which list to return (inputperson, account, liner,
    linerinput, pol, pod, locationinput); ``searchkw`` narrows the
    locationinput list by prefix. Each entry is a {'label': .., 'value': ..}
    dict, which the original built with seven copies of the same loop --
    folded into one helper here.

    NOTE(review): unlike the other rate views, soft-deleted rates are NOT
    excluded here (matches the original behavior) -- confirm intended.
    """

    @staticmethod
    def _choices(pairs):
        """Build [{'label': .., 'value': ..}, ...] from (label, value) pairs."""
        return [{'label': label, 'value': value} for label, value in pairs]

    def _visible_rates(self):
        """Rates entered by the user or by anyone who shows rates to them."""
        showers = RateReader.objects.filter(reader=self.request.user).distinct()
        rates = Rate.objects.none()
        for shower in showers:
            userinstance = MyUser.objects.get(email=shower.shower.email)
            rates = rates | Rate.objects.filter(inputperson=userinstance)
        return rates | Rate.objects.filter(inputperson=self.request.user)

    def get(self, request, *args, **kwargs):
        rates = self._visible_rates()
        handler = self.request.query_params.get('handler', None)
        searchkw = self.request.query_params.get('searchkw', None)
        results = []
        if handler == "inputperson":
            ips = rates.values('inputperson__profile__profile_name').distinct() \
                .order_by('inputperson__profile__profile_name')
            results += self._choices(
                (ip['inputperson__profile__profile_name'],
                 ip['inputperson__profile__profile_name']) for ip in ips)
        if handler == "account":
            acs = rates.values('account').distinct().order_by('account')
            results += self._choices((ac['account'], ac['account']) for ac in acs)
        if handler == "liner":
            # Rates store liner names; surface the matching Liner labels.
            liner_names = [row['liner'] for row in rates.values('liner').distinct()]
            lns = Liner.objects.filter(name__in=liner_names).values('label').order_by('label')
            results += self._choices((ln['label'], ln['label']) for ln in lns)
        if handler == "linerinput":
            # Asymmetric on purpose: display the label, submit the name.
            results += self._choices(
                (ln.label, ln.name) for ln in Liner.objects.all().order_by('label'))
        if handler == "pol":
            pols = rates.values('pol').distinct().order_by('pol')
            results += self._choices((pol['pol'], pol['pol']) for pol in pols)
        if handler == "pod":
            pods = rates.values('pod').distinct().order_by('pod')
            results += self._choices((pod['pod'], pod['pod']) for pod in pods)
        if handler == "locationinput":
            if searchkw:
                queryset = Location.objects.filter(name__istartswith=searchkw).order_by('label')
            else:
                queryset = Location.objects.none()
            results += self._choices((lc.name, lc.name) for lc in queryset)
        return Response(results)
class LinerViewSet(viewsets.ModelViewSet):
    """Plain CRUD over all liners, filterable by id/name/label."""
    queryset = Liner.objects.all()
    serializer_class = LinerSerializer
    pagination_class = None
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('id', 'name', 'label', )
    permission_classes = (AjaxPermission,)
class LocationViewSet(viewsets.ModelViewSet):
    """Prefix autocomplete over locations via the ``location`` query param."""
    serializer_class = LocationSerializer
    pagination_class = None
    permission_classes = (AjaxPermission,)

    def get_queryset(self):
        prefix = self.request.query_params.get('location', None)
        # No (or empty) prefix means no suggestions at all.
        if not prefix:
            return Location.objects.none()
        return Location.objects.filter(name__istartswith=prefix)
from __future__ import absolute_import
import re
import pattern.en as english
from compiler import get_nate_logic
from peggy.peggy import flatten
from peggy_test.keyvalue import parse_keyvalue, KeyValueListParser
from nate.util import *
from vm import NateVm
# Regex/replacement pairs applied to raw input before parsing, loaded from
# the package's initial_replace.txt data file.
DEFAULT_REPLACEMENTS = parse_keyvalue(read_data("initial_replace.txt"),
                                      parser=KeyValueListParser)
class RegexpReplacer(object):
    """Apply an ordered list of (regex, replacement) rewrites to text."""

    def __init__(self, patterns=DEFAULT_REPLACEMENTS):
        # Compile once up front; replace() may be called many times.
        self.patterns = [(re.compile(regex), repl) for (regex, repl) in
                         patterns]

    def replace(self, text):
        """Return ``text`` with every pattern substituted, in list order."""
        result = text
        for (compiled, repl) in self.patterns:
            result = compiled.sub(repl, result)
        return result
class Rebuilder(object):
    """Join processed atoms back into a text string with spacing rules."""
    # TODO Make this awesome, make this rule based

    def __init__(self):
        self._sp_after = [",", "."]                               # space after only
        self._sp_none = ["'s", "'", "@", "(", ")", "[", "]"]      # no surrounding space
        self._sp_both = ["?", "<", ">"]                           # space on both sides
        self._text = ""

    def rebuild(self, processed_):
        """Flatten ``processed_`` and assemble the result into self.text."""
        self._text = ""
        # Normalize every atom to a plain string (parse nodes expose .string).
        atoms = [atom if is_str(atom) else atom.string
                 for atom in flatten(processed_)]
        pieces = []
        for atom in atoms:
            if atom in self._sp_after:
                pieces.append(atom + " ")
            elif atom in self._sp_none:
                pieces.append(atom)
            elif atom in self._sp_both:
                pieces.append(" " + atom + " ")
            else:
                pieces.append(" " + atom)
        # Trim the edges and collapse the double spaces the rules can create.
        self._text = "".join(pieces).strip().replace("  ", " ")

    @property
    def text(self):
        return self._text
class Nate(object):
    """Text-rewriting pipeline: regex pre-replacement -> POS parse ->
    pattern/VM driven rewriting -> text rebuild."""

    def __init__(self, text):
        self._text = text
        self._regex = RegexpReplacer()
        self._rebuilder = Rebuilder()
        # List of (pattern, code) pairs; order matters -- first match wins.
        self._logic = get_nate_logic()

    def process(self):
        """Run the full pipeline over the current text, in place."""
        text = self._regex.replace(self._text)
        pt = english.parsetree(text, lemmata=True)
        processed = []
        vm = NateVm()
        english.pprint(pt)
        for sentence in pt:
            words = sentence
            pos = 0
            last = len(words)
            while pos < last:
                # Try each rule at the current position; on a match, run its
                # VM code and jump past the matched span.
                for pattern, code in self._logic:
                    matched = pattern.match(words, start=pos)
                    if matched:
                        vm.run(matched, code)
                        pos = matched.stop
                        processed += vm.get()
                        break
                else:
                    # No rule matched here: keep the word and advance by one.
                    processed.append(words[pos])
                    pos += 1
        self.rebuild_text(processed)

    def rebuild_text(self, processed):
        # Reassemble the atom stream and store it as the new text.
        self._rebuilder.rebuild(processed)
        self._text = self._rebuilder.text

    @property
    def text(self):
        return self._text
|
"""
Python Wechaty - https://github.com/wechaty/python-wechaty
Authors: Huan LI (李卓桓) <https://github.com/huan>
Jingjing WU (吴京京) <https://github.com/wj-Mcat>
2020-now @ Copyright Wechaty
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
from collections import defaultdict
# from threading import Event, Thread
from typing import (
Dict,
List,
Optional,
Union,
TYPE_CHECKING
)
import json
from pyee import AsyncIOEventEmitter # type: ignore
# from wechaty_puppet import RoomMemberPayload
from wechaty_puppet import ( # type: ignore
FileBox,
RoomQueryFilter,
RoomPayload,
get_logger
)
# from wechaty.utils import type_check
from ..accessory import Accessory
if TYPE_CHECKING:
from .contact import Contact
from .url_link import UrlLink
from .mini_program import MiniProgram
from .message import Message
# Module-level logger shared by all Room methods.
log = get_logger('Room')
class Room(Accessory):
    """
    All wechat rooms(groups) will be encapsulated as a Room.
    """
    # Instance cache keyed by room_id; populated and read by load().
    # NOTE(review): defaultdict() with no factory behaves like a plain dict.
    _pool: Dict[str, 'Room'] = defaultdict()
    def __init__(self, room_id: str) -> None:
        """Create a Room wrapper; the payload is loaded lazily by ready()."""
        self.room_id = room_id
        # Filled by ready(); None until the puppet delivers the payload.
        self.payload: Optional[RoomPayload] = None
        # if self.__class__ is Room:
        #     raise Exception('Room class can not be instanciated directly!')
        if self.puppet is None:
            raise Exception(
                'Room class can not be instanciated without a puppet!')
_event_stream: AsyncIOEventEmitter = AsyncIOEventEmitter()
    def on(self, event_name: str, func):
        """
        Register ``func`` as a listener for ``event_name`` on the shared
        room event stream.
        """
        self._event_stream.on(event_name, func)
    def emit(self, event_name: str, *args, **kwargs):
        """
        Emit ``event_name`` with the given arguments to all registered
        listeners on the shared room event stream.
        """
        self._event_stream.emit(event_name, *args, **kwargs)
@classmethod
async def create(cls, contacts: List[Contact], topic: str) -> Room:
"""
create room instance
"""
if not hasattr(contacts, '__len__'):
raise Exception('contacts should be list type')
if len(contacts) < 2:
raise Exception(
'contactList need at least 2 contact to create a new room'
)
log.info(
'Room create <%s, %s>',
','.join([contact.contact_id for contact in contacts]),
topic
)
try:
contact_ids = list(map(lambda x: x.contact_id, contacts))
room_id = await cls.get_puppet().\
room_create(contact_ids=contact_ids, topic=topic)
return cls.load(room_id=room_id)
except Exception as exception:
log.error(
'Room create error <%s>',
str(exception.args)
)
raise Exception('Room create error')
@classmethod
async def find_all(cls, room_id: Optional[str] = None,
topic: Optional[str] = None) -> List[Room]:
"""
find room by query filter
"""
log.info('Room find_all <%s, %s>', room_id, topic)
query_filter = RoomQueryFilter(id=room_id, topic=topic)
room_ids = await cls.get_puppet().room_search(query_filter)
rooms = [cls.load(room_id) for room_id in room_ids]
room_result = []
# TODO -> chang to more efficient way
# jointly run async ready method
for room in rooms:
try:
await room.ready()
room_result.append(room)
# pylint:disable=W0703
except Exception as exception:
log.warning(
'Room findAll() room.ready() rejection: %s',
exception.args
)
return room_result
@classmethod
async def find(cls, room_id: Optional[str] = None,
topic: Optional[str] = None) -> Union[None, Room]:
"""
Try to find a room by filter: {topic: string | RegExp}. If get many,
return the first one.
"""
log.info('Room find <%s, %s>', room_id, topic)
rooms = await cls.find_all(room_id, topic)
if rooms is None or len(rooms) < 1:
return None
if len(rooms) > 1:
log.warning('Room find() got more than one(%d) result', len(rooms))
for index, room in enumerate(rooms):
# TODO -> room_valid function is not implemented in puppet
# this code need to be changed later
valid = cls.get_puppet() is None
if valid:
log.warning(
'Room find() confirm room[#%d] with id=%d '
'is valid result, return it.',
index,
room.room_id
)
return room
log.info(
'Room find() confirm room[#%d] with id=%d '
'is INVALID result, try next',
index,
room.room_id)
log.info('Room find() got %d rooms but no one is valid.', len(rooms))
return None
@classmethod
def load(cls, room_id: str) -> Room:
"""
dynamic load room instance
"""
if room_id in cls._pool:
room = cls._pool.get(room_id)
if room is None:
raise Exception('room not found')
return room
new_room = cls(room_id)
cls._pool[room_id] = new_room
return new_room
def __str__(self):
"""
string format for room instance
"""
if self.payload is None:
return self.__class__.__name__
if self.payload.topic is None:
return 'loading ...'
return 'Room <%s>' % self.payload.topic
def is_ready(self) -> bool:
"""
check if room's payload is ready
"""
return self.payload is not None
    async def ready(self, force_sync=False):
        """
        Load the room payload from the puppet (no-op when already
        loaded) and pre-load member contacts. Not intended to be
        called from user land.

        Args:
            force_sync: intended to invalidate cached payloads first;
                currently has no effect (the *_dirty puppet calls are
                still TODO).

        Raises:
            Exception: when the puppet returns no payload for this room.
        """
        if self.is_ready():
            return
        if force_sync:
            pass
            # TODO -> *_dirty method is not implemented in puppet
            # await self.puppet.room_payload_dirty(self.room_id)
            # await self.puppet.room_member_payload_dirty(self.room_id)
        self.payload = await self.puppet.room_payload(self.room_id)
        if self.payload is None:
            raise Exception('Room Payload can"t be ready')
            # NOTE(review): this `return` is unreachable after the raise;
            # if it was originally indented one level out instead, the
            # member pre-loading below would never run -- confirm the
            # intended indentation against upstream.
            return
        member_ids = await self.puppet.room_members(self.room_id)
        contacts = [
            self.wechaty.Contact.load(member_id) for member_id in member_ids]
        # best-effort warm-up of each member; failures are logged, not raised
        for contact in contacts:
            try:
                await contact.ready()
            # pylint:disable=W0703
            except Exception as exception:
                log.error(
                    'Room ready() member.ready() rejection: %s', exception
                )
    async def say(self,
                  some_thing: Union[str, Contact,
                                    FileBox, MiniProgram, UrlLink],
                  mention_ids: Optional[List[str]] = None
                  ) -> Union[None, Message]:
        """
        Send *some_thing* to this room, dispatching on its type:
        str -> text, FileBox -> file, Contact -> contact card,
        UrlLink -> link card, MiniProgram -> mini program.

        Args:
            some_thing: the payload to send.
            mention_ids: contact ids to @-mention (text messages only;
                ignored by the other branches).

        Returns:
            The sent Message (loaded and readied) when the puppet
            returns a message id, otherwise None.

        Raises:
            Exception: when some_thing is of an unsupported type.
        """
        log.info('Room say <%s, %s>', some_thing, mention_ids)
        if isinstance(some_thing, str):
            msg_id = await self.puppet.message_send_text(
                conversation_id=self.room_id, message=some_thing,
                mention_ids=mention_ids
            )
        elif isinstance(some_thing, FileBox):
            msg_id = await self.puppet.message_send_file(
                conversation_id=self.room_id,
                file=some_thing
            )
        elif isinstance(some_thing, Contact):
            msg_id = await self.puppet.message_send_contact(
                conversation_id=self.room_id,
                contact_id=some_thing.contact_id
            )
        elif isinstance(some_thing, UrlLink):
            msg_id = await self.puppet.message_send_url(
                conversation_id=self.room_id,
                url=some_thing.url
            )
        elif isinstance(some_thing, MiniProgram):
            # TODO -> mini_program key is not clear
            assert some_thing.payload is not None
            msg_id = await self.puppet.message_send_mini_program(
                conversation_id=self.room_id,
                mini_program=some_thing.payload
            )
        else:
            raise Exception('arg unsupported: ', some_thing)
        # wrap the id in a Message object and pre-load its payload
        if msg_id is not None:
            msg = self.wechaty.Message.load(msg_id)
            await msg.ready()
            return msg
        return None
# '''
# TODO -> sayTemplateStringsArray
# '''
# '''
# TODO -> Event emit : on
# '''
# async def on(self, event: str, listener: Callable):
    async def add(self, contact: Contact):
        """
        Add *contact* into this room (delegates to the puppet).
        """
        log.info('Room add <%s>', contact)
        await self.puppet.room_add(self.room_id, contact.contact_id)
    async def delete(self, contact: Contact):
        """
        Remove *contact* from this room. (The original docstring said
        "delete room", but the puppet call removes a member.)

        Raises:
            Exception: when contact or its contact_id is missing.
        """
        log.info('Room delete<%s>', contact)
        if contact is None or contact.contact_id is None:
            raise Exception('Contact is none or contact_id not found')
        await self.puppet.room_delete(self.room_id, contact.contact_id)
    async def quit(self):
        """
        Make the bot leave this room. (Original docstring said "Add
        contact in a room" -- a copy/paste leftover.)
        """
        log.info('Room quit <%s>', self)
        await self.puppet.room_quit(self.room_id)
    async def topic(self, new_topic: str = None) -> Optional[str]:
        """
        Get (when new_topic is None) or set the room topic.

        Args:
            new_topic: the topic to set; None to read instead.

        Returns:
            The current topic when getting (a synthesized member-name
            list when no topic is stored), the new topic on a
            successful set, or None when the set fails.

        Raises:
            Exception: when the room payload is not ready yet.
        """
        log.info('Room topic (%s)', new_topic)
        if not self.is_ready():
            log.warning('Room topic() room not ready')
            raise Exception('Room not ready')
        if new_topic is None:
            # getter path: use the stored topic when available
            if self.payload is not None and self.payload.topic is not None:
                return self.payload.topic
            # no stored topic: join the member names together
            # (excluding the bot itself) as a fallback display name
            room_member_ids = await \
                self.puppet.room_members(self.room_id)
            # filter member_ids
            member_ids = [member_id for member_id in
                          room_member_ids
                          if member_id != self.wechaty.contact_id]
            members: List[Contact] = [
                self.wechaty.Contact.load(member_id)
                for member_id in member_ids]
            for member in members:
                await member.ready()
            # members: List[Contact] = list(
            #     map(lambda x: self.wechaty.Contact.load(x), member_ids)
            # )
            names = [member.name for member in members]
            return ','.join(names)
        # setter path: best-effort, log and return None on failure
        try:
            await self.puppet.room_topic(self.room_id, new_topic)
            return new_topic
        # pylint:disable=W0703
        except Exception as exception:
            log.warning(
                'Room topic(newTopic=%s) exception: %s',
                new_topic,
                exception
            )
        return None
    async def announce(self, announce_text: str = None) -> Optional[str]:
        """
        SET/GET the announcement of the room.
        It only works when bot is the owner of the room.

        Args:
            announce_text: new announcement text; when None, the
                current announcement is fetched instead.

        Returns:
            The current announcement when getting, None when setting.
        """
        log.info('Room announce (%s)', announce_text)
        if announce_text is None:
            announce = await self.puppet.room_announce(self.room_id)
            return announce
        await self.puppet.room_announce(self.room_id, announce_text)
        return None
    async def qr_code(self) -> str:
        """
        TODO -> need to rewrite this function later

        Get the QR-code value of the room from the puppet; scanning it
        joins the room.
        """
        log.info('qr_code()')
        qr_code_str = await self.puppet.room_qr_code(self.room_id)
        return qr_code_str
    async def alias(self, member: Contact) -> Optional[str]:
        """
        Return *member*'s alias inside this room, or None when the
        member payload is missing or has no room alias set.

        Raises:
            Exception: when member is None.
        """
        if member is None:
            raise Exception('member can"t be none')
        room_member_payload = await self.puppet.room_member_payload(
            room_id=self.room_id, contact_id=member.contact_id)
        if room_member_payload is not None \
                and room_member_payload.room_alias is not None:
            return room_member_payload.room_alias
        return None
async def has(self, contact: Contact) -> bool:
"""
Check if the room has member `contact`, the return is a Promise and
must be `await`-ed
"""
member_ids = await self.puppet.room_members(self.room_id)
return contact.contact_id in member_ids
    async def member_all(
            self,
            query: Union[str, RoomQueryFilter] = None) -> List[Contact]:
        """
        Find all contacts in a room.

        Args:
            query: optional member filter.
                NOTE(review): the query is currently NOT applied --
                when a query is given, all members are returned anyway
                (see the TODO below).

        # TODO -> need to refactoring this function
        """
        # NOTE(review): json.dumps will raise for a RoomQueryFilter
        # argument; only str/None query values log safely here -- confirm.
        log.info('room member all (%s)', json.dumps(query))
        if query is None:
            members = await self.member_list()
            return members
        # "filtered" path: fetch every member id and load its Contact
        contact_ids = await self.puppet.room_members(self.room_id)
        contacts = [
            self.wechaty.Contact.load(contact_id)
            for contact_id in contact_ids
        ]
        return contacts
async def member_list(self) -> List[Contact]:
"""
Get all room member from the room
"""
log.info('Get room <%s> all members', self)
member_ids = await self.puppet.room_members(self.room_id)
contacts = [
self.wechaty.Contact.load(member_id)
for member_id in member_ids
]
return contacts
async def member(
self,
query: Union[str, RoomQueryFilter] = None) -> Optional[Contact]:
"""
Find all contacts in a room, if get many, return the first one.
# TODO -> need to refactoring this function
"""
log.info('Room member search <%s>', query)
members = await self.member_all(query)
if members is None or len(members) < 1:
return None
return members[0]
async def owner(self) -> Optional[Contact]:
"""
get room owner
"""
log.info('Room <%s> owner', self)
if self.payload is None or self.payload.owner_id is None:
# raise Exception('Room <%s> payload or payload.owner_id not found')
return None
contact = self.wechaty.Contact.load(self.payload.owner_id)
return contact
    async def avatar(self) -> FileBox:
        """
        Fetch the avatar image of the room from the puppet.

        Returns:
            FileBox: the room avatar.
        """
        log.info('avatar() <%s>', self)
        avatar = await self.puppet.room_avatar(self.room_id)
        return avatar
|
import pygame
from pygame.locals import *
import copy
pygame.init()
pygame.font.init()
# shared clock used to cap the visualisation frame rate
clock = pygame.time.Clock()
# square window: WIDTH x WIDTH pixels
WIDTH = 600
# neighbour offsets, applied to both axes during the greedy search
dx = [-1, 0, 1]
class Coord:
    """An integer 2-D grid coordinate."""

    def __init__(self, x, y):
        # accept anything int() accepts (e.g. the strings read from stdin)
        self.x = int(x)
        self.y = int(y)

    def __repr__(self):
        # added for debuggability; does not affect existing behavior
        return f'Coord({self.x}, {self.y})'

    def __eq__(self, other):
        if not isinstance(other, Coord):
            return NotImplemented
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        if not isinstance(other, Coord):
            return NotImplemented
        return self.x != other.x or self.y != other.y

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ made instances
        # unhashable; hash on the value so equal coords hash equal.
        return hash((self.x, self.y))
def eval_heuristic(curr, final):
    """Squared Euclidean distance between the two coordinates."""
    delta_x = curr.x - final.x
    delta_y = curr.y - final.y
    return delta_x ** 2 + delta_y ** 2
class App:
    """Pygame visualisation of a greedy walk from a start to a goal cell."""

    def __init__(self, s, e):
        self.screen = pygame.display.set_mode((WIDTH, WIDTH))
        self.running = True
        self.start = s   # Coord: walk origin (drawn red)
        self.final = e   # Coord: walk goal (drawn green)
        self.path = []   # Coord steps accepted by the greedy search
        # path is computed eagerly, then the blocking render loop starts
        self.evaluate()
        self.run()

    def evaluate(self):
        # Greedy descent: from the current cell, step to the FIRST of the 8
        # neighbours whose heuristic strictly improves, repeating until the
        # goal is reached.
        # NOTE(review): if no neighbour improves the heuristic, this while
        # loop never terminates -- confirm inputs always allow progress.
        curr = self.start
        while curr != self.final:
            h = eval_heuristic(curr, self.final)
            for i in dx:
                flag = False  # set when an improving neighbour was taken
                for j in dx:
                    if i == j == 0:
                        continue  # skip the current cell itself
                    temp = copy.deepcopy(curr)
                    temp.x = curr.x + i
                    temp.y = curr.y + j
                    sh = eval_heuristic(temp, self.final)
                    if sh < h:
                        self.path.append(temp)
                        curr = copy.deepcopy(temp)
                        if flag is False:
                            flag = True
                        break
                if flag:
                    break

    def run(self):
        # blocking main loop: redraw endpoints at ~10 FPS and poll events
        while self.running:
            clock.tick(10)
            pygame.draw.circle(self.screen, (255, 0, 0), (self.start.x, self.start.y), 3)
            pygame.draw.circle(self.screen, (0, 255, 0), (self.final.x, self.final.y), 3)
            pygame.display.update()
            self.events()

    def events(self):
        # QUIT ends the loop; any key press reveals the computed path
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.running = False
            if event.type == pygame.KEYDOWN:
                # print("event")
                self.update()

    def update(self):
        self.draw()

    def draw(self):
        # render every accepted step of the greedy path in blue
        for i in self.path:
            pygame.draw.circle(self.screen, (0, 0, 255), (i.x, i.y), 3)
        pygame.display.update()
# read the start point from stdin, e.g. "50 50"
start = input().split()
# read the end point from stdin, e.g. "400 450"
end = input().split()
app = App(Coord(start[0], start[1]), Coord(end[0], end[1]))
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 1 09:42:48 2020
@author: akira
"""
import pandas as pd
import numpy as np
from keras.models import model_from_json
# Rebuild the trained classifier from its serialized architecture + weights.
arquivo = open('classificador.json','r')
estrutura_rede=arquivo.read()
arquivo.close()
classificador = model_from_json(estrutura_rede)
classificador.load_weights('classificador.h5')
# Single hand-written sample to classify.
# NOTE(review): the feature count must match the model's input layer -- confirm.
novo = np.array([[15,80,8.34,118,900,0.10,0.26,0.08,0.134,0.178,0.20,0.05,1090,0.07,4500,145,2,0.005,0.04,0.05,0.015,0.03,0.07,23,15,16.64,178.5,2018,0.14,0.185]])
previsao = classificador.predict(novo)
# threshold the sigmoid output into a boolean class label
previsao=(previsao>0.5)
# re-evaluate the restored model on the full dataset
previsores = pd.read_csv('entradas-breast.csv')
classe = pd.read_csv('saidas-breast.csv')
# compile is required before evaluate() on a model restored from JSON
classificador.compile(loss='binary_crossentropy',optimizer='adam',metrics=['binary_accuracy'])
resultado=classificador.evaluate(previsores,classe)
def batch(iterable, n=1):
    """Yield successive slices of size at most *n* from *iterable*.

    Args:
        iterable: any sliceable sequence with a `len` function.
        n (int, optional): batch size; the final batch may be shorter.
    """
    total = len(iterable)
    for start in range(0, total, n):
        # slicing clamps automatically at the sequence end
        yield iterable[start:start + n]
import os
import argparse
import random
from data import Data
import cv2
import time
import sys
from tqdm import tqdm
from bounding_box import bounding_box as bb
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("-r","--root_dir", type=str, default="/mnt/069A453E9A452B8D/Ram/surveillance-data/sdd_train")
parser.add_argument("-t","--type", type=str, default="train", help="train|val|trainval|test")
parser.add_argument("--random_seed", type=int, default=100)
# NOTE(review): argparse's type=bool treats ANY non-empty string as True
# ("--save_images False" still enables saving) -- consider store_true/false.
parser.add_argument("--save_images", type=bool, default=True)
parser.add_argument("-s","--save_dir", type=str, default="output")
parser.add_argument("-l","--line_thickness", type=int, default=2)
args = parser.parse_args()
# deterministic box colours across runs
random.seed(args.random_seed)
# VOC-style directory layout under root_dir
img_dir = os.path.join(args.root_dir, 'JPEGImages')
ann_dir = os.path.join(args.root_dir, 'Annotations')
set_dir = os.path.join(args.root_dir, 'ImageSets', 'Main')
def progressbar(it, prefix="", size=60, file=sys.stdout):
    """Yield items from *it* while rendering a textual progress bar.

    Args:
        it: a sized iterable (must support len()).
        prefix (str): text shown before the bar.
        size (int): bar width in characters.
        file: writable text stream the bar is drawn to.
    """
    count = len(it)

    def show(j):
        # BUG FIX: guard against an empty iterable, which previously
        # raised ZeroDivisionError on the initial show(0) call.
        x = int(size * j / count) if count else size
        file.write("%s[%s%s] %i/%i\r" % (prefix, "#" * x, "." * (size - x), j, count))
        file.flush()

    show(0)
    for i, item in enumerate(it):
        yield item
        show(i + 1)
    file.write("\n")
    file.flush()
def get_image_list(dir, filename):
    """Return the stripped lines of *filename* located in directory *dir*.

    Note: the parameter name ``dir`` shadows the builtin; kept for
    backward compatibility with existing callers.
    """
    # BUG FIX: the original left the file handle open (resource leak);
    # a context manager guarantees it is closed.
    with open(os.path.join(dir, filename)) as f:
        return [image_name.strip() for image_name in f.readlines()]
def process_image(image_data):
    """Load the image for *image_data*, draw its annotations, return it."""
    image = cv2.imread(image_data.image_path)
    # NOTE(review): (5, 5) is the text *baseline*; at scale 1 the label
    # will render mostly above the top edge -- confirm placement.
    image = cv2.putText(image, image_data.image_name, (5, 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    color_list = ["maroon", "green", "yellow", "purple", "fuchsia", "lime", "red", "silver"]
    for ann in image_data.annotations:
        # random colour per box (not stable per class)
        id_color = random.randint(0, 7)
        box_color = color_list[id_color]
        bb.add(image, ann.xmin, ann.ymin, ann.xmax, ann.ymax, ann.name, box_color)
        #image = cv2.rectangle(image, (ann.xmin, ann.ymin), (ann.xmax, ann.ymax), box_color, args.line_thickness)
        #image = cv2.putText(image, ann.name, (ann.xmin, ann.ymin), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
    return image
def main(args):
    """Render annotations for every image in the chosen split; optionally save."""
    image_list = get_image_list(set_dir, args.type + ".txt")
    total_images = len(image_list)
    #for index in progressbar(range(total_images), "Computing: ", 40):
    for index in tqdm(range(total_images)):
        # NOTE(review): the sleep only slows the loop (demo pacing?) --
        # remove it if throughput matters.
        time.sleep(0.1)
        image_data = Data(args.root_dir, image_list[index])
        image = process_image(image_data)
        if args.save_images:
            cv2.imwrite(os.path.join(args.save_dir, image_list[index] + ".jpg"), image)
if __name__ == '__main__':
    # run with the module-level parsed CLI arguments
    main(args)
|
""" Provide toy example classes which we can test PathSelection on. """
import networkx as nx
import numpy as np
from networkx.drawing.nx_agraph import graphviz_layout
import matplotlib.pyplot as plt
class ToyExample(object):
    """ Class as template for all DAG toy examples. These toy examples could
    be used to test whether a path weighting model achieves the expected
    result. """

    def __init__(self):
        # Template base: direct instantiation is forbidden; subclasses
        # override __init__ and set G / sources / targets themselves.
        self.G = None  # Create a graph.
        self.sources = None
        self.targets = None
        raise Exception("Instance of toyexample unexpected.")

    def display(self, path=None):
        """ Display the toy example.

        Args:
            path (str): (optional) Result image file path.
        """
        # colour scheme: blue = ordinary node, green = source, red = target
        node_colors = {n: "b" for n in self.G.nodes()}
        for s in self.sources:
            node_colors[s] = "g"
        for t in self.targets:
            node_colors[t] = "r"
        # bottom-to-top DAG layout via graphviz
        pos = graphviz_layout(self.G, prog="dot", args="-Grankdir=BT")
        # pos = {n: (y, x) for n, (x, y) in pos.items()}
        fig, ax = plt.subplots()
        nx.draw(
            self.G,
            with_labels=True,
            pos=pos,
            node_color=list(node_colors.values()),
            font_color="white",
            edge_color="grey",
            # width=[data["weight"] for _, _, data in self.G.edges(data=True)],
        )
        # annotate every edge with its weight
        nx.draw_networkx_edge_labels(
            self.G,
            pos=pos,
            edge_labels={
                (u, v): data["weight"] for u, v, data in self.G.edges(data=True)
            },
            ax=ax,
            rotate=False
        )
        if path:
            plt.savefig(path, dpi=300)

    def get_graph(self):
        """ Get the connectome, sources and targets.

        Returns:
            G (NetworkX.DiGraph): toy example graph.
            sources (iterable): source nodes.
            target (iterable): target nodes.
        """
        return self.G, self.sources, self.targets

    def test(self, path_centrality):
        """ Run through some tests. Accept the path weights as the input.

        Args:
            path_centrality (dict): node as key and path centrality as value.

        Returns:
            passed (bool): Whether all tests are passed.
            error (str or NoneType): Error message if not passed.
        """
        raise Exception("Test not implemented for object %s" % type(self))
class BackBone(ToyExample):
    """ 3-layer fully connected network, with 4 source nodes,
    4 targets, and 2 intermediate nodes. The edge weight are consistently 1
    among all edges.
    """

    def __init__(self):
        # layers: nodes 0-3 (sources), 4-5 (intermediate), 6-9 (targets)
        self.sources = range(4)
        self.targets = range(6, 10)
        self.G = build_fully_connected_network([4, 2, 4])

    def test(self, path_centrality):
        """ A series of tests on backbone network. """
        # Nodes should share same centrality if they are in the same layer.
        if not all(
            [
                all(
                    [
                        path_centrality[n1] == path_centrality[n2]
                        for n1, n2 in zip(layer[:-1], layer[1:])
                    ]
                )
                for layer in [range(4), range(4, 6), range(6, 10)]
            ]
        ):
            return (
                False,
                "BackBone Test: Nodes in same layer don't have same path centrality.",
            )
        # Source and targets should have same path centrality.
        # NOTE(review): index -1 assumes path_centrality supports negative
        # indexing (list-like); with an int-keyed dict over nodes 0..9 this
        # raises KeyError -- confirm the expected container type.
        if not path_centrality[0] == path_centrality[-1]:
            return (
                False,
                "BackBone Test: Sources and targets don't have same path centrality.",
            )
        # Intermediate nodes should have higher weights
        if not path_centrality[4] > path_centrality[0]:
            return (
                False,
                "BackBone Test: Intermediate nodes don't have higher path centrality.",
            )
        return True, None
class SmallBranch(ToyExample):
    """ Add a non-path forming branch to an intermediate node in the backbone
    shouln't or shouldn't significantly change the path centrality of that node.
    """

    def __init__(self):
        # BackBone topology plus a weight-0.1 leaf hanging off node 5
        self.sources = range(4)
        self.targets = range(6, 10)
        self.G = add_small_branch(BackBone().G, 5)

    def test(self, path_centrality, tolerance=0.9):
        """ Test: The path centrality of intermediate nodes with that branch
        and without that branch should be equal or similar.

        Args:
            path_centrality (dict): node as key and path centrality as value.
            tolerance (float): minimum allowed ratio between the branched
                node's centrality and the unbranched one's.
        """
        # Test if path centrality of two intermediate nodes are equal or the
        # intermediate node with branch have lower path centrality but within
        # tolerance.
        if (
            tolerance * path_centrality[4] <= path_centrality[5]
            and path_centrality[4] >= path_centrality[5]
        ):
            return True, None
        else:
            return (
                False,
                "SmallBranch Test: Small branch shouldn't change the path centrality a lot.",
            )
class IshaanExample(ToyExample):
    """ Toy example used in Ishaan's old draft. """

    def __init__(self):
        self.sources = ["a"]
        self.targets = ["p", "q", "r"]
        self.G = nx.DiGraph()
        # (u, v, weight) triples
        edges = [
            ("a", "b", 2),
            ("a", "c", 2),
            ("a", "d", 4),
            ("a", "e", 4),
            ("b", "f", 1),
            ("c", "f", 1),
            ("d", "g", 1),
            ("e", "g", 1),
            ("f", "h", 5),
            ("f", "i", 1),
            ("f", "j", 1),
            ("f", "k", 1),
            ("g", "l", 1),
            ("g", "m", 4),
            ("h", "p", 2),
            ("i", "n", 1),
            ("j", "n", 1),
            ("k", "n", 1),
            ("n", "p", 1),
            ("n", "q", 1),
            ("l", "o", 1),
            ("o", "q", 1),
            ("o", "r", 1),
            ("m", "r", 2),
        ]
        self.G.add_weighted_edges_from(edges)
class IshaanExampleModified(ToyExample):
    """ Modified version of the toy example from Ishaan's old draft. """

    def __init__(self):
        self.sources = ["a"]
        self.targets = ["l", "m", "n"]
        self.G = nx.DiGraph()
        # (u, v, weight) triples
        edges = [
            ("a", "b", 2),
            ("a", "c", 4),
            ("b", "d", 5),
            ("b", "e", 1),
            ("b", "f", 1),
            ("b", "g", 1),
            ("c", "h", 1),
            ("c", "i", 4),
            ("d", "l", 2),
            ("e", "j", 1),
            ("f", "j", 1),
            ("g", "j", 1),
            ("j", "l", 1),
            ("j", "m", 1),
            ("h", "k", 1),
            ("k", "m", 1),
            ("k", "n", 1),
            ("i", "n", 2),
        ]
        self.G.add_weighted_edges_from(edges)
class QihangExample(ToyExample):
    """ Toy example contributed by Qihang. (The original docstring was a
    copy/paste of the Ishaan examples' docstring.) """

    def __init__(self):
        self.sources = ["a", "b"]
        self.targets = ["h", "i", "j"]
        self.G = nx.DiGraph()
        # (u, v, weight) triples
        edges = [
            ("a", "c", 2),
            ("a", "d", 1),
            ("b", "d", 1),
            ("b", "e", 2),
            ("c", "f", 2),
            ("d", "f", 1),
            ("d", "g", 1),
            ("e", "g", 2),
            ("f", "h", 1),
            ("f", "i", 1),
            ("g", "i", 1),
            ("g", "j", 1),
        ]
        self.G.add_weighted_edges_from(edges)
def build_fully_connected_network(layers):
    """ Build a layered, fully connected DiGraph with unit edge weights.

    Args:
        layers (list): number of neurons in each layer.

    Returns:
        G (NetworkX.DiGraph): The network built.
    """
    G = nx.DiGraph()
    # node ids are consecutive across layers: layer i starts at sum(layers[:i])
    offsets = [sum(layers[:i]) for i in range(len(layers))]
    layer_nodes = [
        list(range(offset, offset + width))
        for offset, width in zip(offsets, layers)
    ]
    # register every node first
    for layer in layer_nodes:
        G.add_nodes_from(layer)
    # then fully connect each layer to the next with weight 1
    weighted_edges = []
    for prev_layer, next_layer in zip(layer_nodes, layer_nodes[1:]):
        for u in prev_layer:
            for v in next_layer:
                weighted_edges.append([u, v, 1])
    G.add_weighted_edges_from(weighted_edges)
    return G
def add_small_branch(G, branch_node):
    """ Example1: attach a low-weight (0.1) leaf to *branch_node* on a copy of G. """
    branched = G.copy()
    leaf = len(branched.nodes)
    branched.add_node(leaf)
    branched.add_edge(branch_node, leaf, weight=0.1)
    return branched
def double_node_connectivity(G, target_node):
    """ Example2: scale the weights of edges connected to a node on a copy of G.

    NOTE(review): despite the name ("double"), the weights are multiplied
    by 3, not 2 -- confirm which factor is intended.
    """
    G = G.copy()
    for edge in G.in_edges(target_node):
        G.edges[edge]["weight"] = G.edges[edge]["weight"] * 3
    for edge in G.out_edges(target_node):
        G.edges[edge]["weight"] = G.edges[edge]["weight"] * 3
    return G
def replace_node_with_edge(G, split_node, weight_agg_func):
    """ Example3: split a node into two connected nodes.

    All outgoing edges of *split_node* are moved onto a fresh node; a single
    bridging edge split_node -> new_node is added, weighted by
    weight_agg_func() over the moved edges' weights.
    """
    G = G.copy()
    new_node = len(G.nodes)
    G.add_node(new_node)
    # snapshot the out-edges before mutating the graph
    out_edges = list(G.out_edges(split_node))
    G.add_edge(
        split_node,
        new_node,
        weight=weight_agg_func([G.edges[e]["weight"] for e in out_edges]),
    )
    for u, v in out_edges:
        # re-attach each successor to the new node, then drop the old edge
        G.add_edge(new_node, v, weight=G.edges[u, v]["weight"])
        G.remove_edge(u, v)
    return G
def display_network(G, sources, targets, path=None):
    """Draw *G* left-to-right with sources in red and targets in green.

    Args:
        G (NetworkX graph): graph to draw.
        sources (iterable): indices of source nodes (colored "r").
        targets (iterable): indices of target nodes (colored "g").
        path (str): optional image file path to save the figure to.
    """
    # BUG FIX: the parameter was misspelled `targest` while the body used
    # `targets`, so every call raised NameError.
    node_colors = np.array(["b" for _ in G.nodes()])
    node_colors[sources] = "r"
    node_colors[targets] = "g"
    pos = graphviz_layout(G, prog="dot", args="-Grankdir=LR")
    # pos = nx.spring_layout(G)
    fig, ax = plt.subplots()
    nx.draw(
        G,
        with_labels=True,
        pos=pos,
        node_color=node_colors,
        font_color="white",
        width=[data["weight"] for _, _, data in G.edges(data=True)],
    )
    if path:
        plt.savefig(path, dpi=300)
|
#!/usr/bin/env python3
# Copyright 2016 The Dart project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
import utils
HOST_OS = utils.GuessOS()  # e.g. 'linux' or 'macos'
SCRIPT_DIR = os.path.dirname(sys.argv[0])
DART_ROOT = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
# Setting this environment variable (to any value) disables generation.
DART_DISABLE_BUILDFILES = "DART_DISABLE_BUILDFILES"
def DisableBuildfiles():
    """Return True when build-file generation is disabled via the environment."""
    return os.environ.get(DART_DISABLE_BUILDFILES) is not None
def Execute(args):
    """Run *args* as a subprocess from DART_ROOT and return its exit code."""
    process = subprocess.Popen(args, cwd=DART_ROOT)
    process.wait()
    return process.returncode
def RunAndroidGn(options):
    """Generate Android (arm/arm64) build files; no-op on unsupported hosts.

    Returns 0 on success or skip, otherwise the gn.py exit code.
    """
    # idiom fix: `HOST_OS not in` instead of `not HOST_OS in`
    if HOST_OS not in ['linux', 'macos']:
        return 0
    gn_command = [
        'python3',
        os.path.join(DART_ROOT, 'tools', 'gn.py'),
        '-m',
        'all',
        '-a',
        'arm,arm64',
        '--os',
        'android',
    ]
    if options.verbose:
        gn_command.append('-v')
    print(' '.join(gn_command))
    return Execute(gn_command)
def RunCrossGn(options):
    """Generate Linux cross-build (arm/arm64) files; no-op on other hosts.

    Returns 0 on success or skip, otherwise the gn.py exit code.
    """
    if HOST_OS != 'linux':
        return 0
    gn_command = ['python3', os.path.join(DART_ROOT, 'tools', 'gn.py')]
    gn_command += ['-m', 'all', '-a', 'arm,arm64']
    if options.verbose:
        gn_command.append('-v')
    print(' '.join(gn_command))
    return Execute(gn_command)
def RunHostGn(options):
    """Generate build files for all modes/architectures on the host.

    Returns the gn.py exit code.
    """
    gn_command = ['python3', os.path.join(DART_ROOT, 'tools', 'gn.py')]
    gn_command += ['-m', 'all', '-a', 'all']
    if options.verbose:
        gn_command.append('-v')
    print(' '.join(gn_command))
    return Execute(gn_command)
def RunGn(options):
    """Run host, cross and Android gn generation, stopping at the first failure.

    Returns 0 when everything succeeds, otherwise the first non-zero status.
    """
    for generate in (RunHostGn, RunCrossGn, RunAndroidGn):
        status = generate(options)
        if status != 0:
            return status
    return 0
def ParseArgs(args):
    """Parse command-line arguments; args[0] (the program name) is dropped."""
    parser = argparse.ArgumentParser(
        description="A script to generate Dart's build files.")
    parser.add_argument(
        "-v",
        "--verbose",
        help='Verbose output.',
        default=False,
        action="store_true")
    # slice off argv[0] right at the parse call
    return parser.parse_args(args[1:])
def main(argv):
    """Entry point: generate build files unless disabled via the environment.

    Returns the status of the gn runs so failures propagate to the process
    exit code.
    """
    # Check the environment and become a no-op if directed.
    if DisableBuildfiles():
        return 0
    options = ParseArgs(argv)
    # BUG FIX: the RunGn status was discarded, so the script always
    # exited 0 even when generation failed.
    return RunGn(options)
if __name__ == '__main__':
    # propagate main's return value as the process exit status
    sys.exit(main(sys.argv))
|
# flake8: noqa: W191,E101
import copy
import gzip
import io
import pickle
import zlib
from pathlib import Path
from typing import Callable, Dict, List, Tuple, Type
import numpy as np
import pandas as pd
import pytest
import polars as pl
from polars import DataType
def test_to_from_buffer(df: pl.DataFrame) -> None:
    # Round-trip the fixture frame through every writer/reader pair and
    # check the result is identical (nulls compared as equal).
    df = df.drop("strings_nulls")
    for to_fn, from_fn in zip(
        [df.to_parquet, df.to_csv, df.to_ipc, df.to_json],
        [pl.read_parquet, pl.read_csv, pl.read_ipc, pl.read_json],
    ):
        f = io.BytesIO()
        to_fn(f)  # type: ignore
        f.seek(0)
        df_1 = from_fn(f)  # type: ignore
        assert df.frame_equal(df_1, null_equal=True)


def test_select_columns_and_projection_from_buffer() -> None:
    # Reading back only columns b/c -- by name and by index -- must give
    # the same projected frame for both parquet and IPC.
    df = pl.DataFrame({"a": [1, 2, 3], "b": [True, False, True], "c": ["a", "b", "c"]})
    expected = pl.DataFrame({"b": [True, False, True], "c": ["a", "b", "c"]})
    for to_fn, from_fn in zip(
        [df.to_parquet, df.to_ipc], [pl.read_parquet, pl.read_ipc]
    ):
        f = io.BytesIO()
        to_fn(f)  # type: ignore
        f.seek(0)
        # select by column name
        df_1 = from_fn(f, columns=["b", "c"], use_pyarrow=False)  # type: ignore
        assert df_1.frame_equal(expected)

    for to_fn, from_fn in zip(
        [df.to_parquet, df.to_ipc], [pl.read_parquet, pl.read_ipc]
    ):
        f = io.BytesIO()
        to_fn(f)  # type: ignore
        f.seek(0)
        # select by positional projection
        df_2 = from_fn(f, projection=[1, 2], use_pyarrow=False)  # type: ignore
        assert df_2.frame_equal(expected)
def test_compressed_to_ipc() -> None:
    # IPC round-trip must be lossless for every supported compression codec.
    df = pl.DataFrame({"a": [1, 2, 3], "b": [True, False, True], "c": ["a", "b", "c"]})
    compressions = ["uncompressed", "lz4", "zstd"]
    for compression in compressions:
        f = io.BytesIO()
        df.to_ipc(f, compression)
        f.seek(0)
        df_read = pl.read_ipc(f, use_pyarrow=False)
        assert df_read.frame_equal(df)


def test_read_web_file() -> None:
    # NOTE(review): requires network access to GitHub; will fail offline.
    url = "https://raw.githubusercontent.com/pola-rs/polars/master/examples/aggregate_multiple_files_in_chunks/datasets/foods1.csv"
    df = pl.read_csv(url)
    assert df.shape == (27, 4)
def test_parquet_chunks() -> None:
    """
    Regression test for https://github.com/pola-rs/polars/issues/545:
    reading parquet files around the 1048576-row chunk boundary.
    """
    cases = [
        1048576,
        1048577,
    ]
    for case in cases:
        f = io.BytesIO()
        # repeat until it has case instances
        df = pd.DataFrame(
            np.tile([1.0, pd.to_datetime("2010-10-10")], [case, 1]),
            columns=["floats", "dates"],
        )
        print(df)
        # write as parquet
        df.to_parquet(f)
        print(f"reading {case} dates with polars...", end="")
        f.seek(0)
        # read it with polars
        polars_df = pl.read_parquet(f)
        assert pl.DataFrame(df).frame_equal(polars_df)


def test_parquet_datetime() -> None:
    """
    Regression test: parquet writers used to cast datetime to Date.
    """
    f = io.BytesIO()
    data = {
        "datetime": [  # unix timestamp in ms
            1618354800000,
            1618354740000,
            1618354680000,
            1618354620000,
            1618354560000,
        ],
        "laf_max": [73.1999969482, 71.0999984741, 74.5, 69.5999984741, 69.6999969482],
        "laf_eq": [59.5999984741, 61.0, 62.2999992371, 56.9000015259, 60.0],
    }
    df = pl.DataFrame(data)
    # cast the integer column to a real Datetime before writing
    df = df.with_column(df["datetime"].cast(pl.Datetime))
    df.to_parquet(f, use_pyarrow=True)
    f.seek(0)
    read = pl.read_parquet(f)
    assert read.frame_equal(df)
def test_csv_null_values() -> None:
    # null_values as a single string: every matching cell becomes null
    csv = """
a,b,c
na,b,c
a,na,c"""
    f = io.StringIO(csv)
    df = pl.read_csv(f, null_values="na")
    assert df[0, "a"] is None
    assert df[1, "b"] is None

    # null_values as a list: any of the listed strings becomes null
    csv = """
a,b,c
na,b,c
a,n/a,c"""
    f = io.StringIO(csv)
    df = pl.read_csv(f, null_values=["na", "n/a"])
    assert df[0, "a"] is None
    assert df[1, "b"] is None

    # null_values as a dict: per-column null markers
    csv = """
a,b,c
na,b,c
a,n/a,c"""
    f = io.StringIO(csv)
    df = pl.read_csv(f, null_values={"a": "na", "b": "n/a"})
    assert df[0, "a"] is None
    assert df[1, "b"] is None


def test_datetime_parsing() -> None:
    # ISO timestamps in the first column should be inferred as Datetime
    csv = """
timestamp,open,high
2021-01-01 00:00:00,0.00305500,0.00306000
2021-01-01 00:15:00,0.00298800,0.00300400
2021-01-01 00:30:00,0.00298300,0.00300100
2021-01-01 00:45:00,0.00299400,0.00304000
"""
    f = io.StringIO(csv)
    df = pl.read_csv(f)
    assert df.dtypes == [pl.Datetime, pl.Float64, pl.Float64]


def test_partial_dtype_overwrite() -> None:
    # a partial dtype list only overrides the leading columns
    csv = """
a,b,c
1,2,3
1,2,3
"""
    f = io.StringIO(csv)
    df = pl.read_csv(f, dtype=[pl.Utf8])
    assert df.dtypes == [pl.Utf8, pl.Int64, pl.Int64]


def test_partial_column_rename() -> None:
    # a partial new_columns list only renames the leading columns
    csv = """
a,b,c
1,2,3
1,2,3
"""
    f = io.StringIO(csv)
    for use in [True, False]:
        f.seek(0)
        df = pl.read_csv(f, new_columns=["foo"], use_pyarrow=use)
        assert df.columns == ["foo", "b", "c"]


def test_column_rename_and_dtype_overwrite() -> None:
    # renames and per-column dtype overrides must compose correctly
    csv = """
a,b,c
1,2,3
1,2,3
"""
    f = io.StringIO(csv)
    df = pl.read_csv(
        f,
        new_columns=["A", "B", "C"],
        dtype={"A": pl.Utf8, "B": pl.Int64, "C": pl.Float32},
    )
    assert df.dtypes == [pl.Utf8, pl.Int64, pl.Float32]

    # combined with a column selection
    f = io.StringIO(csv)
    df = pl.read_csv(
        f,
        columns=["a", "c"],
        new_columns=["A", "C"],
        dtype={"A": pl.Utf8, "C": pl.Float32},
    )
    assert df.dtypes == [pl.Utf8, pl.Float32]

    # headerless input: new_columns supplies the names
    csv = """
1,2,3
1,2,3
"""
    f = io.StringIO(csv)
    df = pl.read_csv(
        f,
        new_columns=["A", "B", "C"],
        dtype={"A": pl.Utf8, "C": pl.Float32},
        has_headers=False,
    )
    assert df.dtypes == [pl.Utf8, pl.Int64, pl.Float32]
def test_compressed_csv() -> None:
    # gzip compression
    # NOTE(review): the "2,b,2.0," row carries a trailing comma -- it appears
    # deliberate (ragged-row tolerance); confirm before "fixing" the fixture.
    csv = """
a,b,c
1,a,1.0
2,b,2.0,
3,c,3.0
"""
    fout = io.BytesIO()
    with gzip.GzipFile(fileobj=fout, mode="w") as f:
        f.write(csv.encode())

    csv_bytes = fout.getvalue()
    out = pl.read_csv(csv_bytes)
    expected = pl.DataFrame(
        {"a": [1, 2, 3], "b": ["a", "b", "c"], "c": [1.0, 2.0, 3.0]}
    )
    assert out.frame_equal(expected)

    # now from disk
    out = pl.read_csv("tests/files/gzipped.csv")
    assert out.frame_equal(expected)

    # now with column projection
    out = pl.read_csv(csv_bytes, columns=["a", "b"])
    expected = pl.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
    assert out.frame_equal(expected)

    # zlib compression
    csv_bytes = zlib.compress(csv.encode())
    out = pl.read_csv(csv_bytes)
    expected = pl.DataFrame(
        {"a": [1, 2, 3], "b": ["a", "b", "c"], "c": [1.0, 2.0, 3.0]}
    )
    assert out.frame_equal(expected)

    # no compression
    f2 = io.BytesIO(b"a, b\n1,2\n")
    out2 = pl.read_csv(f2)
    expected = pl.DataFrame({"a": [1], "b": [2]})
    assert out2.frame_equal(expected)


def test_empty_bytes() -> None:
    # an empty buffer must raise, not return an empty frame
    b = b""
    with pytest.raises(ValueError):
        pl.read_csv(b)
def test_pickle() -> None:
    # Series and DataFrame must survive a pickle round-trip (nulls equal)
    a = pl.Series("a", [1, 2])
    b = pickle.dumps(a)
    out = pickle.loads(b)
    assert a.series_equal(out)
    df = pl.DataFrame({"a": [1, 2], "b": ["a", None], "c": [True, False]})
    b = pickle.dumps(df)
    out = pickle.loads(b)
    assert df.frame_equal(out, null_equal=True)


def test_copy() -> None:
    # copy.copy / copy.deepcopy must preserve data for both containers
    df = pl.DataFrame({"a": [1, 2], "b": ["a", None], "c": [True, False]})
    assert copy.copy(df).frame_equal(df, True)
    assert copy.deepcopy(df).frame_equal(df, True)

    a = pl.Series("a", [1, 2])
    assert copy.copy(a).series_equal(a, True)
    assert copy.deepcopy(a).series_equal(a, True)


def test_to_json() -> None:
    # tests if it runs if no arg given
    df = pl.DataFrame({"a": [1, 2, 3]})
    assert (
        df.to_json() == '{"columns":[{"name":"a","datatype":"Int64","values":[1,2,3]}]}'
    )


def test_ipc_schema() -> None:
    # schema can be read from an IPC buffer without loading the data
    df = pl.DataFrame({"a": [1, 2], "b": ["a", None], "c": [True, False]})
    f = io.BytesIO()
    df.to_ipc(f)
    f.seek(0)
    assert pl.read_ipc_schema(f) == {"a": pl.Int64, "b": pl.Utf8, "c": pl.Boolean}


def test_categorical_round_trip() -> None:
    # Categorical must survive a to_arrow / from_arrow round-trip as dictionary
    df = pl.DataFrame({"ints": [1, 2, 3], "cat": ["a", "b", "c"]})
    df = df.with_column(pl.col("cat").cast(pl.Categorical))

    tbl = df.to_arrow()
    assert "dictionary" in str(tbl["cat"].type)

    df2: pl.DataFrame = pl.from_arrow(tbl)  # type: ignore
    assert df2.dtypes == [pl.Int64, pl.Categorical]
def test_csq_quote_char() -> None:
    # quote_char=None: embedded double-quotes are plain characters
    # NOTE(review): "csq" in the name looks like a typo for "csv";
    # renaming would change the public test id, so it is kept.
    rolling_stones = """
linenum,last_name,first_name
1,Jagger,Mick
2,O"Brian,Mary
3,Richards,Keith
4,L"Etoile,Bennet
5,Watts,Charlie
6,Smith,D"Shawn
7,Wyman,Bill
8,Woods,Ron
9,Jones,Brian
"""
    assert pl.read_csv(rolling_stones.encode(), quote_char=None).shape == (9, 3)


def test_date_list_fmt() -> None:
    # grouped Date values must render as date lists in the Series repr
    df = pl.DataFrame(
        {
            "mydate": ["2020-01-01", "2020-01-02", "2020-01-05", "2020-01-05"],
            "index": [1, 2, 5, 5],
        }
    )
    df = df.with_column(pl.col("mydate").str.strptime(pl.Date, "%Y-%m-%d"))
    assert (
        str(df.groupby("index", maintain_order=True).agg(pl.col("mydate"))["mydate"])
        == """shape: (3,)
Series: 'mydate' [list]
[
	[2020-01-01]
	[2020-01-02]
	[2020-01-05, 2020-01-05]
]"""
    )


def test_csv_empty_quotes_char() -> None:
    # panicked in: https://github.com/pola-rs/polars/issues/1622
    pl.read_csv(b"a,b,c,d\nA1,B1,C1,1\nA2,B2,C2,2\n", quote_char="")


def test_ignore_parse_dates() -> None:
    # forcing Utf8 dtypes must suppress date-like type inference
    csv = """a,b,c
1,i,16200126
2,j,16250130
3,k,17220012
4,l,17290009""".encode()

    headers = ["a", "b", "c"]
    dtypes: Dict[str, Type[DataType]] = {
        k: pl.Utf8 for k in headers
    }  # Forces Utf8 type for every column
    df = pl.read_csv(csv, columns=headers, dtype=dtypes)
    assert df.dtypes == [pl.Utf8, pl.Utf8, pl.Utf8]


def test_scan_csv() -> None:
    # lazy CSV scan collects to the expected shape
    df = pl.scan_csv(Path(__file__).parent / "files" / "small.csv")
    assert df.collect().shape == (4, 3)


def test_scan_parquet() -> None:
    # lazy parquet scan collects to the expected shape
    df = pl.scan_parquet(Path(__file__).parent / "files" / "small.parquet")
    assert df.collect().shape == (4, 3)
|
# import the necessary packages
import numpy as np
import argparse
import imutils
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

# load the image and show it
image = cv2.imread(args["image"])
cv2.imshow("Original", image)

# grab the dimensions of the image and calculate the center of the image
(h, w) = image.shape[:2]
(cX, cY) = (w / 2, h / 2)

# rotate our image by 45 degrees (positive angle = counter-clockwise in OpenCV)
M = cv2.getRotationMatrix2D((cX, cY), 45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("Rotated by 45 Degrees", rotated)

# rotate our image by -90 degrees (clockwise)
M = cv2.getRotationMatrix2D((cX, cY), -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("Rotated by -90 Degrees", rotated)

# rotate our image around an arbitrary point rather than the center
M = cv2.getRotationMatrix2D((cX - 50, cY - 50), 45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("Rotated by Offset & 45 Degrees", rotated)

# finally, let's use our helper function in imutils to rotate the image by
# 180 degrees (flipping it upside down)
rotated = imutils.rotate(image, 180)
cv2.imshow("Rotated by 180 Degrees", rotated)

# 1. Question
# Download the following image: http://pyimg.co/kwy7l
# Then, use OpenCV to rotate the image 30 degrees clockwise. What is the value of the pixel located at x=335 and y=254?
M = cv2.getRotationMatrix2D((cX, cY), -30, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
# numpy indexing is [row, col] == [y, x]
(b, g, r) = rotated[254, 335]
cv2.imshow("Question 1.", rotated)
# BUG FIX: the printed message claimed pixel (0, 0); it samples (x=335, y=254)
print("Pixel at (335, 254) - Red: {r}, Green: {g}, Blue: {b}".format(r=r, g=g, b=b))

# 2. Question
# Now rotate the image 110 degrees counter-clockwise. What is the value of the pixel located at x=312, y=136?
M = cv2.getRotationMatrix2D((cX, cY), 110, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
(b, g, r) = rotated[136, 312]
cv2.imshow("Question 2.", rotated)
# BUG FIX: message corrected to the sampled coordinates (x=312, y=136)
print("Pixel at (312, 136) - Red: {r}, Green: {g}, Blue: {b}".format(r=r, g=g, b=b))

# 3. Question
# Change the call to cv2.getRotationMatrix2D to rotate the image 88 degrees counter-clockwise about coordinate (50, 50). What is the value of the pixel located at point (10, 10)?
M = cv2.getRotationMatrix2D((50, 50), 88, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
(b, g, r) = rotated[10, 10]
cv2.imshow("Question 3.", rotated)
# BUG FIX: message corrected to the sampled coordinates (10, 10)
print("Pixel at (10, 10) - Red: {r}, Green: {g}, Blue: {b}".format(r=r, g=g, b=b))

# block until a key is pressed so all windows stay visible
cv2.waitKey(0)
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class SmbSettingsGlobalSettings(object):
    """
    Swagger model for global SMB server settings.

    NOTE: This class was originally auto generated by the swagger code
    generator. The 21 identical getter/setter pairs have been collapsed into
    a single data-driven property factory; the public interface (attribute
    names, validation rules and error messages, swagger_types,
    attribute_map, to_dict/to_str/__repr__/__eq__/__ne__) is unchanged.
    """

    # One row per model attribute:
    #   (name, swagger type, allowed enum values or None, (min, max) or None)
    _FIELDS = (
        ('access_based_share_enum', 'bool', None, None),
        ('audit_fileshare', 'str', ["all", "success", "failure", "none"], None),
        ('audit_global_sacl',
         'list[SmbSettingsGlobalSettingsAuditGlobalSaclItem]', None, None),
        ('audit_logon', 'str', ["all", "success", "failure", "none"], None),
        ('dot_snap_accessible_child', 'bool', None, None),
        ('dot_snap_accessible_root', 'bool', None, None),
        ('dot_snap_visible_child', 'bool', None, None),
        ('dot_snap_visible_root', 'bool', None, None),
        ('enable_security_signatures', 'bool', None, None),
        ('guest_user', 'str', None, None),
        ('ignore_eas', 'bool', None, None),
        ('onefs_cpu_multiplier', 'int', None, (1.0, 4.0)),
        ('onefs_num_workers', 'int', None, (0.0, 1024.0)),
        ('require_security_signatures', 'bool', None, None),
        ('server_string', 'str', None, None),
        ('service', 'bool', None, None),
        ('srv_cpu_multiplier', 'int', None, (1.0, 8.0)),
        ('srv_num_workers', 'int', None, (0.0, 1024.0)),
        ('support_multichannel', 'bool', None, None),
        ('support_netbios', 'bool', None, None),
        ('support_smb2', 'bool', None, None),
    )

    def __init__(self):
        """
        SmbSettingsGlobalSettings - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {name: typ for name, typ, _, _ in self._FIELDS}
        # Every JSON key matches its attribute name in this model.
        self.attribute_map = {name: name for name, _, _, _ in self._FIELDS}
        # All backing fields start unset (None), exactly like the generated code.
        for name, _, _, _ in self._FIELDS:
            setattr(self, '_' + name, None)

    @staticmethod
    def _build_property(name, allowed, bounds):
        """Create the property (getter + validating setter) for one attribute.

        The setter reproduces the generated validation exactly: enum fields
        reject values outside *allowed*; bounded fields reject values outside
        [min, max]. A value of None is always accepted (attribute unset).
        """
        private = '_' + name

        def fget(self):
            return getattr(self, private)

        def fset(self, value):
            if allowed is not None and value is not None and value not in allowed:
                raise ValueError(
                    "Invalid value for `{0}`, must be one of {1}".format(name, allowed)
                )
            if bounds is not None and value is not None:
                minimum, maximum = bounds
                if value > maximum:
                    raise ValueError(
                        "Invalid value for `{0}`, must be a value less than or "
                        "equal to `{1}`".format(name, maximum)
                    )
                if value < minimum:
                    raise ValueError(
                        "Invalid value for `{0}`, must be a value greater than "
                        "or equal to `{1}`".format(name, minimum)
                    )
            setattr(self, private, value)

        doc = "The {0} of this SmbSettingsGlobalSettings.".format(name)
        return property(fget, fset, doc=doc)

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() replaces six.iteritems(): this module runs on Python 3.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: the generated version compared __dict__ unconditionally,
        which raised AttributeError when compared against objects without a
        __dict__ (e.g. ints). Non-model operands now defer via NotImplemented.
        """
        if not isinstance(other, SmbSettingsGlobalSettings):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result


# Attach one generated-equivalent property per model attribute
# (runs once, at import time).
for _field_name, _, _field_allowed, _field_bounds in SmbSettingsGlobalSettings._FIELDS:
    setattr(
        SmbSettingsGlobalSettings,
        _field_name,
        SmbSettingsGlobalSettings._build_property(
            _field_name, _field_allowed, _field_bounds),
    )
del _field_name, _field_allowed, _field_bounds
|
from flask import Flask  # => wsgi itself

app = Flask(__name__)


@app.route('/')
def hello():
    """Return a plain-text greeting for the site root."""
    return 'merhaba zalım dunya'
# Bug fix: removed the stray module-level `pass` that followed the view
# function (dead no-op statement).
#__date__ = 6/14/18
#__time__ = 4:09 PM
#__author__ = isminilourentzou
import lib
import torch
import os
import logging
# Module-level logger shared by the builder helpers in this module.
logger = logging.getLogger("model")
def build_wordrepr(opt, vocabs):
    """Construct the word-representation model, optionally restoring weights.

    When ``opt.load_from`` is set, the model is rebuilt with the options
    stored in the checkpoint and its saved weights are loaded.
    """
    if opt.load_from is None:
        model = lib.model.WordRepr(opt, vocabs)
    else:
        checkpoint = get_checkpoint(opt.load_from, opt)
        model = lib.model.WordRepr(checkpoint['opt'], vocabs)
        model.load_state_dict(checkpoint['model_state_dict'])
    if opt.cuda:
        model.cuda()  # GPU.
    return model
def build_active_learner(opt, wordrepr, network='dqn'):
    """Build an active-learning policy network and its optimizer.

    :param opt: option namespace (must provide ``cuda`` plus optimizer fields).
    :param wordrepr: word-representation model shared by all policies.
    :param network: one of ``cluster``, ``dqn``, ``imitation``, ``a2c``.
    :return: ``(policy, optim)`` tuple.
    :raises NotImplementedError: for an unknown *network* name.
    """
    if network == 'cluster':
        policy = lib.model.Cluster(opt, wordrepr)
    elif network == 'dqn':
        policy = lib.model.DQN(opt, wordrepr)
    elif network == 'imitation':
        policy = lib.model.Policy(opt, wordrepr)
    elif network == 'a2c':
        policy = lib.model.ActorCritic(opt, wordrepr)
    else:
        # Bug fix: the old message listed "dqn, policy, a2c", omitting the
        # supported 'cluster'/'imitation' choices and naming a non-existent
        # 'policy' choice.
        raise NotImplementedError(
            'Choices for AL network are: cluster, dqn, imitation, a2c')
    optim = _create_optim(policy, opt)
    if opt.cuda:
        policy.cuda()  # GPU.
    return policy, optim
def create_active_learner(opt, wordrepr, network='dqn', load_from=None):
    """Create an active learner plus optimizer, optionally restored from disk.

    *load_from* is resolved relative to ``opt.save_dir``.
    """
    if load_from is None:
        model, optim = build_active_learner(opt, wordrepr, network)
    else:
        checkpoint = get_checkpoint(os.path.join(opt.save_dir, load_from), opt)
        model, optim = build_active_learner(checkpoint['opt'], wordrepr, network)
        load_checkpoint(checkpoint, model, optim, opt)
    finish_creation(opt, model)
    return model, optim
def build_model(opt, wordrepr, ema=False):
    """Build the main model and its optimizer.

    With ``ema=True`` all parameters are frozen (the copy is updated by an
    exponential moving average elsewhere, not by gradients).
    """
    model = lib.model.Model(opt, wordrepr)
    optim = _create_optim(model, opt)
    if opt.cuda:
        model.cuda()  # GPU.
    if ema:
        for param in model.parameters():
            param.requires_grad = False
    return model, optim
def create_model(opt, wordrepr, ema=False):
    """Create either a plain CRF tagger or a torch model (optionally restored).

    NOTE(review): the original source has ambiguous indentation around
    finish_creation; it is applied here to the torch path only — confirm
    against callers that the CRF path should skip it.
    """
    if opt.plainCRF:
        # Plain CRF path: no torch optimizer is involved.
        if opt.load_from:
            model = lib.model.CRFTagger(opt, wordrepr, model_file=opt.load_from)
            model.tagger.open(opt.load_from)
        else:
            model_file = os.path.join(opt.save_dir, opt.lang + '_' + 'CRFplain.pt')
            model = lib.model.CRFTagger(opt, wordrepr, model_file)
        optim = None
    else:
        if opt.load_from is None:
            model, optim = build_model(opt, wordrepr, ema)
        else:
            checkpoint = get_checkpoint(opt.load_from, opt)
            model, optim = build_model(checkpoint['opt'], wordrepr, ema)
            load_checkpoint(checkpoint, model, optim, opt)
        finish_creation(opt, model)
    return model, optim
def _create_optim(model, opt):
    """Build a ``lib.train.Optim`` over the trainable parameters of *model*."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return lib.train.Optim(
        trainable, opt.optim, opt.lr, opt.max_grad_norm,
        lr_decay=opt.learning_rate_decay,
        start_decay_after=opt.start_decay_after,
    )
def get_checkpoint(load_from, opt):
    """Load a torch checkpoint, remapping storages to CPU or the target GPU.

    The checkpoint's stored options are patched so their ``cuda`` flag
    matches the current run.
    """
    logger.info('Loading model from checkpoint at {}'.format(load_from))
    if opt.cuda:
        def location(storage, loc):
            return storage.cuda(opt.gpu)
    else:
        def location(storage, loc):
            return storage
    checkpoint = torch.load(load_from, map_location=location)
    checkpoint['opt'].cuda = opt.cuda
    return checkpoint
def load_checkpoint(checkpoint, model, optim, opt):
    """Restore model and optimizer state from *checkpoint*."""
    for target, key in ((model, 'model_state_dict'), (optim, 'optim_state_dict')):
        target.load_state_dict(checkpoint[key])
def finish_creation(opt, model):
    """Move *model* to GPU if requested, then log its size and structure."""
    if opt.cuda:
        model.cuda()  # GPU.
    total = sum(p.nelement() for p in model.parameters())
    logger.info('* number of parameters: %d' % total)
    logger.info(model)
|
from typing import List
class BrowserHistory:
    """Browser history with back/forward navigation (LeetCode 1472 semantics)."""

    def __init__(self, homepage: str):
        # stack[pointer] is always the page currently displayed.
        self.stack: List[str] = [homepage]
        self.pointer: int = 0

    def visit(self, url: str) -> None:
        """Open *url*, discarding all forward history.

        Truncating first and then appending handles both the "middle of the
        stack" and "at the end" cases uniformly, and guarantees the pointer
        always advances onto the newly visited page.
        """
        del self.stack[self.pointer + 1:]
        self.stack.append(url)
        self.pointer += 1

    def back(self, steps: int) -> str:
        """Move up to *steps* pages back; return the page landed on."""
        self.pointer = max(self.pointer - steps, 0)
        return self.stack[self.pointer]

    def forward(self, steps: int) -> str:
        """Move up to *steps* pages forward; return the page landed on."""
        self.pointer = min(self.pointer + steps, len(self.stack) - 1)
        return self.stack[self.pointer]
# Smoke-test the history against the LeetCode example sequence.
b = BrowserHistory("leetcode.com")
for page in ("google.com", "facebook.com", "youtube.com"):
    b.visit(page)
assert b.back(1) == "facebook.com"
assert b.back(1) == "google.com"
assert b.forward(1) == "facebook.com"
b.visit("linkedin.com")
assert b.forward(2) == "linkedin.com"
assert b.back(2) == "google.com"
assert b.back(7) == "leetcode.com"
|
import pymssql as ms

# Connect to the local BTDB database and dump the first 1000 training rows.
conn = ms.connect(server='localhost', user='bitcamp', password='1234', database='BTDB')
try:
    cursor = conn.cursor()
    cursor.execute('SELECT TOP(1000) * FROM train;')
    row = cursor.fetchone()
    print(type(row))  ## tuple
    while row:
        # print("first col=%s, second col=%s" % (row[0], row[1]))
        print(row)
        row = cursor.fetchone()
finally:
    # Bug fix: the connection leaked whenever execute/fetch raised mid-loop.
    conn.close()
"""
厦一代表队
"""
from os import lstat
import grpc
import contest_pb2
import contest_pb2_grpc
import question_pb2
import question_pb2_grpc
import pickle
import numpy as np
import pandas as pd
import time
import random
import threading
class Client:
    """gRPC contest client.

    Logs in to the contest server, polls daily stock data, computes target
    positions with a pre-trained MLP model, and submits them in a loop.
    """

    # --- class attributes (shared credentials and channels) ---
    ID = 121                 # your ID
    PIN = 's5eouCB3X1'       # your PIN
    CHANNEL_LOGIN_SUBMIT = grpc.insecure_channel('47.100.97.93:40723')
    CHANNEL_GETDATA = grpc.insecure_channel('47.100.97.93:40722')
    stub_contest = contest_pb2_grpc.ContestStub(CHANNEL_LOGIN_SUBMIT)
    stub_question = question_pb2_grpc.QuestionStub(CHANNEL_GETDATA)

    def __init__(self):
        # login
        self.session_key = None        # key used when submitting positions
        self.login_success = None      # whether login succeeded
        # get data
        self.sequence = None           # data index
        self.has_next_question = None  # whether more data follows
        self.capital = None            # total capital
        self.dailystk = None           # daily data for all 500 stocks
        self.positons = None           # current positions (sic: attribute name kept for compatibility)
        # output
        self.is_initialized = False
        # Bug fix: close the model file instead of leaking the handle.
        with open('Strategy/Model/MLP_model_2.sav', 'rb') as model_file:
            self.loaded_model = pickle.load(model_file)  # prediction model
        self.pos_frame = pd.DataFrame(np.zeros([10, 500]))
        self.leverage = 1.5   # leverage ratio
        self.holding = 10     # holding period (rounds)
        self.numnewpos = 25   # number of new longs and new shorts per round
        # submit
        self.accepted = None

    def XOX(self, s, p):
        '''
        Convert absolute levels into growth rates ( e.g. [1,2,3] -> [2,1.5,NA] )
        s : array
        p : look-back period
        '''
        return np.append((s[p:] - s[:-p])/s[:-p], np.repeat(np.nan, p))

    def lagging(self, s, l):
        '''
        Shift a time series forward ( e.g. [1,2,3] -> [2,3,NA] )
        s : array
        l : lagging period
        '''
        return np.append(s[l:], np.repeat(np.nan, l))

    def login(self):
        """Authenticate with the contest server and store the session key."""
        response_login = self.stub_contest.login(contest_pb2.LoginRequest(
            user_id=self.ID,
            user_pin=self.PIN
        ))
        self.session_key = response_login.session_key  # used to submit positions
        self.login_success = response_login.success    # whether login succeeded
        if not self.login_success:
            time.sleep(0.1)

    def getdata(self):
        """Fetch the next batch of daily stock data from the question service."""
        response_question = self.stub_question.get_question(question_pb2.QuestionRequest(
            user_id=self.ID,
            user_pin=self.PIN,
            sequence=0  # first request asks with 0 -- NOTE(review): may risk missing data; confirm protocol
        ))
        # Subsequent requests should use this sequence + 1; -1 signals an error.
        self.sequence = response_question.sequence
        if self.sequence == -1:
            time.sleep(0.1)
        self.has_next_question = response_question.has_next_question  # True - more data follows
        self.capital = response_question.capital    # total capital
        self.dailystk = response_question.dailystk  # daily data for all 500 stocks
        self.positons = response_question.positions # current positions

    def output(self):
        """Compute today's target positions into ``self.submit_pos``."""
        # if self.is_initialized == False:
        #     self.submit_pos = np.random.randint(low=-20, high=30, size=(500))  # random positions
        #     return
        # else:  # write your strategy here ...
        #     pass
        X_pred = self.dailynew.iloc[:, 8:108].values
        pred = self.loaded_model.predict(X_pred)
        longstock = pred.argsort()[-25:]   # highest predictions -> long
        shortstock = pred.argsort()[:25]   # lowest predictions -> short
        newpostoday = np.zeros(500)
        newpostoday[longstock] = self.capital*self.leverage/(2*self.numnewpos) \
            /self.holding/self.dailynew.iloc[:, 5].values[longstock]
        newpostoday[shortstock] = -self.capital*self.leverage/(2*self.numnewpos) \
            /self.holding/self.dailynew.iloc[:, 5].values[shortstock]
        # Bug fix: DataFrame.append was removed in pandas 2.0; pd.concat is
        # the drop-in replacement with identical semantics here.
        self.pos_frame = pd.concat([self.pos_frame, pd.DataFrame([newpostoday])])
        self.pos_frame = self.pos_frame.iloc[1:]
        self.submit_pos = self.pos_frame.sum().values
        return

    def submit(self):
        """Submit ``self.submit_pos``; re-login when the session key expired."""
        response_ansr = self.stub_contest.submit_answer(contest_pb2.AnswerRequest(
            user_id=self.ID,
            user_pin=self.PIN,
            session_key=self.session_key,  # key assigned by the server at login
            sequence=self.sequence,        # sequence obtained by getdata
            positions=self.submit_pos      # positions computed in output
        ))
        self.accepted = response_ansr.accepted  # whether the submission went through
        if not self.accepted:
            print(response_ansr.reason)  # failure reason
            if response_ansr.reason == "session key not match":
                self.login()
                print(f'Retry logging in result: {self.login_success} ...')
                print(f'New session key: {self.session_key}')

    def run(self):
        """Main loop: login, then poll/compute/submit roughly every 5 seconds."""
        last_get = time.time()
        self.login()
        print(f'Log in result: {self.login_success} ...')
        self.getdata()
        print(f'Sequence now: {self.sequence} ...')
        self.dailynew = pd.DataFrame(np.asarray([array.values for array in self.dailystk]))
        if self.sequence != -1:
            self.output()
            self.submit()
            print(f'Submit result: {self.accepted} ...')
        try:
            while True:
                # Wait until ~5s (+ jitter) since the last reference point.
                while time.time() - last_get < 5 + 0.1 * random.randint(0, 5):
                    continue
                # NOTE(review): advances the reference by only 1s although the
                # wait above is ~5s -- confirm this pacing is intentional.
                last_get += 1
                self.login()
                print(f'Log in result: {self.login_success} ...')
                self.getdata()
                print(f'Sequence now: {self.sequence} ...')
                self.dailynew = pd.DataFrame(np.asarray([array.values for array in self.dailystk]))
                self.dailynew.to_csv('data.csv', index=False, mode='a', header=False)
                if self.sequence != -1:
                    self.output()
                    self.submit()
                    print(f'Submit result: {self.accepted} ...')
        except KeyboardInterrupt:
            return
def train_func():
    """Background loop intended to retrain the model every 10 seconds.

    Currently a placeholder: the retraining body is still TODO.
    """
    train_last_get = time.time()
    try:
        while True:
            # Bug fix: sleep until the next 10-second boundary instead of
            # busy-waiting, which pegged a CPU core doing nothing.
            remaining = train_last_get + 10 - time.time()
            if remaining > 0:
                time.sleep(remaining)
            train_last_get += 10
            """TODO: add training script
            data = pd.read('data.csv')
            new_model = sklearn.lr(data)
            self.loaded_model = new_model
            """
    except KeyboardInterrupt:
        return
if __name__ == "__main__":
    # Start the background training thread, then run the trading client.
    c = Client()
    trainer = threading.Thread(target=train_func)
    trainer.start()
    c.run()
|
import os
import wget
from setuptools import setup
from setuptools.command.develop import develop as _develop
from setuptools.command.install import install as _install
# Runtime dependencies are declared once in requirements.txt.
with open("requirements.txt") as requirements_file:
    REQUIRED_PACKAGES = requirements_file.read().splitlines()
def get_config(config_dir=None):
    """Download the config file from GitHub and save it in *config_dir*.

    :param config_dir: destination passed to ``wget.download``; defaults to
        the "pydodo" directory next to this setup.py.
    """
    print("Getting the config file")
    config_url = (
        "https://raw.githubusercontent.com/alan-turing-institute/dodo/develop/config.yml"
    )
    # Bug fix: compare against None with `is`, not `==` (PEP 8); also drop
    # the unused filename component of os.path.split.
    if config_dir is None:
        this_dir = os.path.dirname(os.path.abspath(__file__))
        config_dir = os.path.join(this_dir, "pydodo")
    wget.download(config_url, config_dir)
class develop(_develop):
    """Develop-mode install that also fetches config.yml when it is missing."""

    def run(self):
        _develop.run(self)
        package_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "pydodo")
        if not os.path.exists(os.path.join(package_dir, "config.yml")):
            get_config()
class install(_install):
    """Regular install that also fetches config.yml into the installed package."""

    def run(self):
        _install.run(self)
        package_path = os.path.join(self.install_lib, "pydodo")
        config_path = os.path.join(package_path, "config.yml")
        if not os.path.exists(config_path):
            self.execute(get_config, (package_path,))
# Package metadata; the custom cmdclass hooks run the config-download step
# after the standard install/develop commands.
setup(
    name="PyDodo",
    description="Scaffold for ATC agents to interface with the BlueBird API",
    version="1.0.0",
    author="Radka Jersakova and Ruairidh MacLeod",
    install_requires=REQUIRED_PACKAGES,  # read from requirements.txt above
    packages=["pydodo"],
    # NOTE(review): this URL looks malformed ("dodo/PyDoDo" as a repo sub-path) -- confirm.
    url="https://github.com/alan-turing-institute/dodo/PyDoDo",
    cmdclass={"install": install, "develop": develop},
)
|
from bs4 import BeautifulSoup
import re
import urllib.request
import urllib.parse
import collections
import sys
import os
from FileDownload import *
import time
class Crawler:
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
DIR_PATH_FILES = 'Downloaded_Files'
CRAWLED_FILES = 'Crawled Urls.txt'
def __init__(self):
self.sleep_time = 1
self.urls_crawled = {}
self.exclude_content = ['mailto:', 'favicon', '.ico', '.css', '.js',
'.jpg', '.jpeg', '.png', '.gif', '#', '?',
'.pdf', '.doc', '.JPG', '.svg', ':']
self._crawl_count = 0
self._output = False
self._filename = '{0}\{1}'.format(self.DIR_PATH, self.CRAWLED_FILES)
self._max_depth = 0
@property
def nr(self):
return self._crawl_count
def crawl(self, base_url, filename=None, output=False, max_depth=5):
self._crawl_count = 0
self._max_depth = max_depth
self._output = output
base_url = base_url.strip()
if base_url[:7] == 'http://' or base_url[:8] == 'https://':
pass
else:
base_url = 'http://{}'.format(base_url)
if base_url in self.urls_crawled:
return
return self.breadth_first_search(base_url)
def breadth_first_search(self, base_url):
print(base_url)
queued_urls = collections.deque()
depth = 1
self.urls_crawled[base_url] = 1
queued_urls.append(base_url)
queued_urls.append(depth)
while len(queued_urls):
if depth > self._max_depth:
return True
base_url = queued_urls.popleft()
depth = queued_urls.popleft()
html = self.get_html_content(base_url)
raw_html = self.get_raw_html(base_url)
if not html:
continue
self._crawl_count += 1
if self._crawl_count > 1000:
return True
self.download_file_and_store_url(base_url, raw_html, depth)
urls = self.get_urls_to_crawl(base_url, html)
if self._output:
self._print_output(
self._crawl_count, depth)
depth += 1
for url in urls:
if url not in self.urls_crawled:
self.urls_crawled[url] = 1
queued_urls.append(url)
queued_urls.append(depth)
return True
def download_file_and_store_url(self, base_url, html, depth):
raw_html = BeautifulSoup(html, "html.parser")
page_body=raw_html.body
p=page_body.text
for i in page_body.text:
if ord(i)<65 or ord(i)>122:
p=p.replace(i,' ')
if ord(i)>90 and ord(i)<97:
p=p.replace(i,' ')
create_data_files(self.DIR_PATH_FILES, base_url, p.encode('utf-8'), self._crawl_count)
self._write_to_file(base_url, depth)
raw_html=''
def get_html_content(self, base_url):
html_content = None
try:
html_bytes = urllib.request.urlopen(base_url).read()
html_string = html_bytes.decode("utf-8")
html = collections.namedtuple('HTML', ['html', 'soup'])
return html(html_string, BeautifulSoup(html_string, "html.parser"))
except:
return False
def get_raw_html(self, base_url):
html_content = None
try:
html_bytes = urllib.request.urlopen(base_url).read()
html_string = html_bytes.decode("utf-8")
html = collections.namedtuple('HTML', ['html', 'soup'])
return html_string
except:
return False
def get_urls_to_crawl(self, base_url, html):
    """Collect absolute Wikipedia article links from the page's <a> tags.

    A link is kept when it has an href, is not the current page, contains
    none of the excluded words, and resolves under
    https://en.wikipedia.org/wiki/.
    """
    urls_unique = []
    for anchor in html.soup.find_all('a'):
        href = anchor.get('href')
        if not href:
            continue
        absolute = urllib.parse.urljoin(base_url, href)
        # Deduplicate on the *resolved* URL: the original tested the raw
        # href against a list of resolved URLs, so relative links were
        # never deduplicated.  Also removed two dead assignments (the
        # lowercased anchor text and a constant check_url that was
        # immediately overwritten).
        if (absolute not in urls_unique and href != base_url
                and not any(word in href for word in self.exclude_content)
                and 'https://en.wikipedia.org/wiki/' in absolute.lower()):
            urls_unique.append(absolute)
    return urls_unique
def _write_to_file(self, base_url, depth):
    """Append one crawl-log line (depth, rank, URL) to the log file."""
    line = 'Depth: {0}, Rank: {1}, URL: {2}\n'.format(
        depth, self._crawl_count, base_url)
    with open(self._filename, 'a') as textfile:
        textfile.write(line)
def _print_output(self, nr, depth):
    """Print crawl progress: pages fetched so far and the current depth."""
    message = 'Files Crawled: {0} , Depth: {1}'.format(nr, depth)
    print(message)
def main(self):
    """Entry point: crawl English Wikipedia starting from the 'Sport'
    article, logging crawled URLs to 'Crawled Urls.txt' with progress
    output enabled."""
    self.crawl('https://en.wikipedia.org/wiki/Sport', 'Crawled Urls.txt', output=True)
if __name__ == '__main__':
Crawler().main() |
#!/usr/bin/env python
from scapy.all import *
import sys
import argparse
class Fabric(Packet):
    """Outer fabric header: type/version bits, colour, QoS and destination
    device/port.  Field widths are in bits for BitField entries."""
    name = "Fabric "
    fields_desc = [
        BitField('packetType', 0, 3),
        BitField('headerVersion', 0, 2),
        BitField('packetVersion', 0, 2),
        BitField('pad1', 0, 1),
        BitField('fabricColor', 0, 3),
        BitField('fabricQos', 0, 5),
        ByteField('dstDevice', 0),
        ShortField('dstPortOrGroup', 0)]
class FabricUnicast(Packet):
    """Unicast sub-header: routing/tunnel flags plus the next-hop index."""
    name = "Fabric Unicast"
    fields_desc = [
        BitField('routed', 0, 1),
        BitField('outerRouted', 0, 1),
        BitField('tunnelTerminate', 0, 1),
        BitField('ingressTunnelType', 0, 5),
        ShortField('nexthopIndex', 0)]
class FabricMulticast(Packet):
    """Multicast sub-header: routing/tunnel flags plus ingress interface,
    bridge domain and multicast group identifiers."""
    name = "Fabric Multicast"
    fields_desc = [
        BitField('routed', 0, 1),
        BitField('outerRouted', 0, 1),
        BitField('tunnelTerminate', 0, 1),
        BitField('ingressTunnelType', 0, 5),
        ShortField('ingressIfIndex', 0),
        ShortField('ingressBd', 0),
        ShortField('mcastGrp', 0)]
class FabricMirror(Packet):
    """Mirror sub-header: rewrite index and egress port/queue.
    (Declared but not used by generate() in this file.)"""
    name = "Fabric Mirror"
    fields_desc = [
        ShortField('rewriteIndex', 0),
        BitField('egressPort', 0, 10),
        BitField('egressQueue', 0, 5),
        BitField('pad', 0, 1)]
class FabricPayload(Packet):
    """Payload shim carrying the inner EtherType (defaults to 0x800 / IPv4)."""
    name = "Fabric Payload "
    fields_desc = [ ShortField('etherType', 0x800) ]
def generate(args):
    """Build one unicast and one multicast fabric test packet and write
    them to fabric.pcap (multicast first).

    `args` is accepted for parity with the CLI entry point but is not
    used inside this function.
    """
    # Unicast packet: Fabric(packetType=1) followed by the unicast sub-header.
    p0 = Ether(src="00:00:00:00:00:01", dst="00:00:00:00:00:02", type=0x9000) / \
        Fabric(packetType=1, headerVersion=2, packetVersion=3, fabricColor=3,
               fabricQos=4, dstDevice = 5, dstPortOrGroup=6) / \
        FabricUnicast(routed=1, outerRouted=1, tunnelTerminate=1, ingressTunnelType=5) / \
        FabricPayload(etherType=0x800) / \
        IP(src="192.168.4.95", dst="224.3.29.73") / \
        UDP()
    # Multicast packet: Fabric(packetType=2) followed by the multicast sub-header.
    p1 = Ether(src="00:00:00:00:00:01", dst="00:00:00:00:00:02", type=0x9000) / \
        Fabric(packetType=2, headerVersion=2, packetVersion=3, fabricColor=3,
               fabricQos=4, dstDevice = 5, dstPortOrGroup=6) / \
        FabricMulticast(routed=1, outerRouted=1, tunnelTerminate=1, ingressTunnelType=5,
                        ingressIfIndex=0, ingressBd=2) / \
        FabricPayload(etherType=0x800) / \
        IP(src="192.168.4.95", dst="224.3.29.73") / \
        UDP()
    wrpcap('fabric.pcap', [p1, p0])
def main():
    """Parse CLI arguments and write the fabric test packets to fabric.pcap."""
    # The original description read 'MPLS label generator' — a copy/paste
    # leftover; this script generates fabric-header packets.
    parser = argparse.ArgumentParser(description='Fabric packet generator')
    parser.add_argument("-i", "--interface", default='veth2',
                        help="bind to specified interface")
    args = parser.parse_args()
    generate(args)
if __name__=='__main__':
main()
|
# from selenium import webdriver
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
def webscrap():
    """Scrape NSE's NIFTY option chain and write it to nse_data.csv.

    Three stages: fetch the page, parse the option-chain table into call
    and put rows, and save the combined DataFrame.  Each stage reports
    its own failure and returns early.  The original used bare excepts
    and fell through after a failed fetch, so a connection error cascaded
    into a NameError on `soup` that was then reported as a generic
    "error".
    """
    try:
        symbol="NIFTY"
        url="https://www1.nseindia.com/live_market/dynaContent/live_watch/option_chain/optionKeys.jsp?symbol="+symbol
        print(url)
        headers={"User-Agent": "Mozilla/5.0"}
        page=requests.get(url,headers=headers)
        soup=BeautifulSoup(page.content,'lxml')
    except Exception:
        print("connection error")
        return
    try:
        nse_table = soup.find("table",attrs={'id':'octable'})
        calls_headers=[]
        puts_headers=[]
        # The first 15 header cells belong to calls, the remainder to puts.
        for th,j in zip(nse_table.find_all("th"),range(26)):
            if(j<=14):
                calls_headers.append(th.text.replace('\n',' ').strip())
            else:
                puts_headers.append(th.text.replace('\n',' ').strip())
        del calls_headers[0:4]   # drop the grouped banner cells
        del puts_headers[-1]     # drop the trailing banner cell
        calls_data=[]
        puts_data=[]
        tbody =nse_table.find_all("tr")
        trs=tbody[2:]            # skip the two header rows
        for tr in trs[:-1]:      # the final row is a totals/footer row
            c_row={}
            p_row={}
            tds=tr.find_all("td")
            # Columns 1-11 are call data, 12-24 are put data.
            for td,th in zip(tds[1:12],calls_headers):
                c_row[th] = re.sub(",|\n","",td.text).strip()
            calls_data.append(c_row)
            for td,th in zip(tds[12:25],puts_headers):
                p_row[th] = re.sub(",|\n","",td.text).strip()
            puts_data.append(p_row)
    except Exception:
        print("error")
        return
    try:
        first_df = pd.DataFrame(calls_data)
        sec_df = pd.DataFrame(puts_data)
        final_df=pd.concat([first_df,sec_df],axis=1)
        final_df.to_csv("nse_data.csv",index=False)
        print("successful")
    except Exception:
        print('excel error')
# webscrap()
|
import boto3
import sys
import urllib
from urllib.request import urlopen
# region Variables
#This is github demo
#This is github demo 2
#This is github demo 3
#This is github demo branch 1
# Credentials and target IP-set identifier (placeholders to be filled in).
ACCESS_KEY = 'Your Access Key'
SECRET_KEY = 'Your Secret Key'
IPSetId = 'ID of the IPList that you want to push the IP List'
# Public blacklist source consumed by _FetchDataFrommyipms().
file_url = 'https://myip.ms/files/blacklist/csf/latest_blacklist.txt'
ChangeToken = ''
DataFromWAF = set()          # CIDRs currently present in the WAF IP set
DataFromMyIP = set()         # CIDRs fetched from myip.ms
DataFromLocalFile = set()    # unused in the visible code
DataDiff = set()             # DataFromMyIP - DataFromWAF
DictIPs = {}                 # scratch descriptor dict used by _pushToWAF
arrIP_KVP = []               # unused in the visible code
wafRegionalClient = boto3.client('waf-regional',
                                 aws_access_key_id=ACCESS_KEY,
                                 aws_secret_access_key=SECRET_KEY,
                                 region_name='region ex : us-west-2')
# endregion
def _FetchDataFrommyipms():
    """Download the myip.ms blacklist and return it as a set of CIDR strings.

    Lines containing '#' (comments) are skipped; IPv6 entries get a /128
    suffix, IPv4 entries a /32 suffix.
    """
    entries = set()
    response = urllib.request.urlopen(file_url)
    for raw in response:
        text = raw.decode('utf-8')
        if "#" in str(text):
            continue
        suffix = '/128' if ":" in str(text) else '/32'
        entries.add(str(text).rstrip("\n\r") + suffix)
    return entries
def _FetchDataFromWAFIPlist():
    """Return the set of CIDR values currently stored in the WAF IP set."""
    ip_set_response = wafRegionalClient.get_ip_set(IPSetId=IPSetId)
    print(ip_set_response)
    descriptors = ip_set_response['IPSet']['IPSetDescriptors']
    return {descriptor['Value'] for descriptor in descriptors}
# region Main
def _getChangeToken():
    """Fetch a fresh WAF change token (required by every update call)."""
    return wafRegionalClient.get_change_token()['ChangeToken']
def _pushToWAF():
    """Insert each CIDR in DataDiff into the WAF IP set, one update per IP.

    Failures on individual IPs are reported and skipped so the remaining
    entries are still pushed.
    """
    global DataDiff
    for value in set(DataDiff):
        try:
            if len(str(value)) > 3:  # skip blank/garbage lines
                # Build a fresh descriptor dict per entry.  The original
                # mutated the single shared global DictIPs and appended
                # that same object, so every queued descriptor aliased one
                # dict whose contents changed on each iteration.
                descriptor = {
                    'Type': 'IPV6' if ':' in str(value) else 'IPV4',
                    'Value': value,
                }
                update_sets = [{
                    'Action': 'INSERT',
                    'IPSetDescriptor': descriptor,
                }]
                print(update_sets)
                response = wafRegionalClient.update_ip_set(
                    IPSetId=IPSetId,
                    ChangeToken=_getChangeToken(),
                    Updates=update_sets)
                print(response)
        except Exception as e:
            # Include the exception text; the original bound `e` but never
            # reported it.
            print("Unable to update WAF IP List. Error --> " + str(value)
                  + " : " + str(e))
def main():
    """Fetch the blacklist and the current WAF IP set, then push the delta.

    Exits with status 1 if either fetch fails.
    """
    global DataFromMyIP
    global DataFromWAF
    global DataDiff
    try:
        DataFromMyIP = _FetchDataFrommyipms()
        print('myip.ms Data fetched')
    except Exception as e:
        # str(e): the original concatenated the exception object directly
        # to a str, which itself raises TypeError and masks the real error.
        print("Unable to fetch IP List file. Error --> " + str(e))
        sys.exit(1)
    try:
        DataFromWAF = _FetchDataFromWAFIPlist()
        print('WAF Data fetched')
    except Exception as e:
        print("Unable to fetch WAF IP List. Error --> " + str(e))
        sys.exit(1)
    DataDiff = DataFromMyIP - DataFromWAF
    print("New IPs Count :" + str(len(DataDiff)))
    if len(DataDiff) > 0:
        _pushToWAF()
    else:
        print('Nothing to update.')
# endregion
if __name__ == '__main__':
main()
|
from setuptools import setup, find_packages
import vikid._version
# Package metadata for the vikid distribution.
setup(
    name='vikid',
    version=vikid._version.__version__,
    url='https://vikiautomation.com',
    author='John Shanahan',
    author_email='shanahan.jrs@gmail.com',
    license='Apache',
    # Typos fixed: "reciever" -> "receiver", "assisstant" -> "assistant"
    # (this text is displayed on PyPI).
    description='Viki is a command line web hook receiver and developer assistant '
                'that can execute tasks remotely or on demand.',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'
    ],
    install_requires=[
        'flask',
        'setuptools-git',
        'gunicorn'
    ],
    setup_requires=[
        'pytest-runner',
    ],
    tests_require=[
        'pytest',
    ],
    keywords='deployment setuptools development builds automation webhooks scheduler job-runner',
    zip_safe=False,
    include_package_data=True,
    packages=find_packages(),
    scripts=['bin/vikid'],
)
|
#!/usr/bin/python
import time
from datetime import date, datetime
import json #read json files
import glob #iterate over files in folder
import os #we need to see current working directory.
# Aggregate GitHub commit JSON dumps (in this script's directory) into
# per-ISO-week and per-weekday commit counts, then report the maxima.
files = glob.glob(os.path.dirname(os.path.realpath(__file__)) + '/*.json')
weeks = {}     # ISO week number -> commit count
weekdays = {}  # ISO weekday (1=Mon .. 7=Sun) -> commit count
for f in files:
    # `with` closes the handle; the original's json.load(open(f)) leaked it.
    with open(f) as fp:
        commits = json.load(fp)
    for item in commits:
        # '2014-03-07T12:34:56Z' -> date part only, then y/m/d integers.
        day_part = item['commit']['committer']['date'].split('T', 1)[0]
        year, month, day = (int(x) for x in day_part.split('-'))
        iso = date(year, month, day).isocalendar()  # (year, week, weekday)
        weeks[iso[1]] = weeks.get(iso[1], 0) + 1
        weekdays[iso[2]] = weekdays.get(iso[2], 0) + 1
most_productive_week = max(weeks, key=weeks.get)       # key with highest count
most_productive_weekday = max(weekdays, key=weekdays.get)
# Python 3 fix: a bare `print` is a no-op expression (Python 2 leftover);
# print() actually emits the intended blank line.
print()
print('Most productive week in 2014: ' + str(most_productive_week) + ' had ' + str(weeks[most_productive_week]) + ' commits.')
print('Most productive weekday in 2014: ' + str(most_productive_weekday) + ' with ' + str(weekdays[most_productive_weekday]) + ' commits.')
import serial
import time
s = None
def setup():
    """Open the serial link on /dev/ttyS0 at 57600 baud and store it in the
    module-level `s` used by loop()."""
    global s
    s = serial.Serial("/dev/ttyS0", 57600)
def loop():
    """One blink cycle: send '1', wait 1 s, send '0', wait 1 s.

    Writes bytes literals: pyserial's Serial.write on Python 3 requires
    bytes-like data, so the original s.write("1") raised a TypeError.
    """
    s.write(b"1")
    time.sleep(1)
    s.write(b"0")
    time.sleep(1)
if __name__ == '__main__':
setup()
while True:
loop()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 18:53:29 2019
@author: nico
"""
import sys
sys.path.append('/home/nico/Documentos/facultad/6to_nivel/pds/git/pdstestbench')
import os
import matplotlib.pyplot as plt
import numpy as np
#import seaborn as sns
from pdsmodulos.signals import spectral_estimation as sp
import pandas as pd
os.system ("clear") # limpia la terminal de python
plt.close("all") #cierra todos los graficos
# Simulation parameters for the Bartlett-estimator experiment.
N = 1000         # samples per realization
fs = 1000        # Hz, sampling frequency
df = fs/N        # spectral resolution
Nexp = 200       # number of realizations
mu = 0           # mean (mu)
var = 2          # variance of the white noise
# dtype=int: the np.int alias was deprecated in NumPy 1.20 and removed in
# 1.24; the builtin int produces the same integer array.
K = np.array([2, 5, 10, 20, 50], dtype=int)
#%% Signal generation: Nexp realizations (columns) of N-sample white
#   Gaussian noise with mean 0 and variance `var`.
signal = np.vstack(np.transpose([ np.random.normal(0, np.sqrt(var), N) for j in range(Nexp)]))
#%% Bartlett estimate of every realization, one matrix per segment count K.
#   NOTE(review): sp.barlett comes from the project module
#   pdsmodulos.signals.spectral_estimation; presumably it returns one
#   averaged periodogram of length N/K per realization — confirm there.
Sbar0 = np.vstack(np.transpose([sp.barlett(signal[:,ii], K=K[0], ax=0) for ii in range(Nexp)]))
Sbar1 = np.vstack(np.transpose([sp.barlett(signal[:,ii], K=K[1], ax=0) for ii in range(Nexp)]))
Sbar2 = np.vstack(np.transpose([sp.barlett(signal[:,ii], K=K[2], ax=0) for ii in range(Nexp)]))
Sbar3 = np.vstack(np.transpose([sp.barlett(signal[:,ii], K=K[3], ax=0) for ii in range(Nexp)]))
Sbar4 = np.vstack(np.transpose([sp.barlett(signal[:,ii], K=K[4], ax=0) for ii in range(Nexp)]))
#%% Per-K statistics, computed uniformly over the five Bartlett estimates.
_sbars = [Sbar0, Sbar1, Sbar2, Sbar3, Sbar4]
# Total energy of each realization, normalised by the segment length N/K.
energia0, energia1, energia2, energia3, energia4 = (
    np.sum(S, axis=0) / (N / k) for S, k in zip(_sbars, K))
# Sample mean across the Nexp realizations (frequency-wise average spectrum).
(valor_medio_muestreal0, valor_medio_muestreal1, valor_medio_muestreal2,
 valor_medio_muestreal3, valor_medio_muestreal4) = (
    np.mean(S, axis=1) for S in _sbars)
_medios_muestreales = [valor_medio_muestreal0, valor_medio_muestreal1,
                       valor_medio_muestreal2, valor_medio_muestreal3,
                       valor_medio_muestreal4]
# Scalar mean level of each averaged spectrum.
valor_medio0, valor_medio1, valor_medio2, valor_medio3, valor_medio4 = (
    np.mean(v, axis=0) for v in _medios_muestreales)
# Bias: distance between the estimated mean level and the true variance.
sesgo0, sesgo1, sesgo2, sesgo3, sesgo4 = (
    np.abs(v - var) for v in (valor_medio0, valor_medio1, valor_medio2,
                              valor_medio3, valor_medio4))
# Sample variance across realizations, then its mean as the variance figure.
(var_muestreal0, var_muestreal1, var_muestreal2,
 var_muestreal3, var_muestreal4) = (np.var(S, axis=1) for S in _sbars)
varianza0, varianza1, varianza2, varianza3, varianza4 = (
    np.mean(v, axis=0) for v in (var_muestreal0, var_muestreal1,
                                 var_muestreal2, var_muestreal3,
                                 var_muestreal4))
#%% Axis vectors for the plots.
A = ["2", "5", "10", "20", "50"]          # K values as figure labels
tt = np.linspace(0, (N-1)/fs, N)          # time axis (unused below)
# One normalised frequency axis per K: N/K points spanning the full band.
freq0, freq1, freq2, freq3, freq4 = (
    np.linspace(0, (N-1)*df, int(N/k)) / fs for k in K)
#%% Plots: per-K periodograms (left) and their realization-average (right),
#   one figure per K.  The original repeated this cell five times verbatim
#   (for K=2, 5, 10, 20, 50); the loop below produces the same figures,
#   titles and axes in the same order.
for _lbl, _freq, _sbar, _vmm in zip(
        A,
        (freq0, freq1, freq2, freq3, freq4),
        (Sbar0, Sbar1, Sbar2, Sbar3, Sbar4),
        (valor_medio_muestreal0, valor_medio_muestreal1,
         valor_medio_muestreal2, valor_medio_muestreal3,
         valor_medio_muestreal4)):
    plt.figure("Periodogramas de ruido blanco con K= " + _lbl, constrained_layout=True)
    plt.subplot(1,2,1)
    plt.title("Estimador de Bartlett con K= " + _lbl)
    plt.plot(_freq, _sbar, marker='.')
    plt.xlabel('frecuecnia normalizada f/fs [Hz]')
    plt.ylabel("Amplitud")
    plt.axhline(0, color="black")
    plt.axvline(0, color="black")
    plt.grid()
    plt.subplot(1,2,2)
    plt.title(" Promedio de Bartlett con K= " + _lbl)
    plt.plot(_freq, _vmm, marker='.')
    plt.xlabel('frecuecnia normalizada f/fs [Hz]')
    plt.ylabel("Amplitud")
    plt.axhline(0, color="black")
    plt.axvline(0, color="black")
    plt.ylim(min(_vmm)-0.01, max(_vmm)+0.01)
    plt.grid()
    plt.tight_layout()
#%% Estimator consistency: bias (left) and variance (right) vs. the number
#   of segments K.
varianza = [varianza0, varianza1, varianza2, varianza3, varianza4]
sesgo =[sesgo0, sesgo1, sesgo2, sesgo3, sesgo4]
plt.figure("Consistencia del estimador", constrained_layout=True)
plt.subplot(1,2,1)
plt.title("Sesgo")
plt.plot(K, sesgo, marker='.')
plt.xlabel('número de ventanas K')
plt.ylabel("Sesgo")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.ylim(min(sesgo)-0.01, max(sesgo)+0.01)
plt.grid()
plt.subplot(1,2,2)
plt.title("Varianza ")
plt.plot(K, varianza, marker='.')
plt.xlabel('número de ventanas K')
plt.ylabel("Varianza")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.ylim(min(varianza)-0.01, max(varianza)+0.01)
plt.grid()
plt.tight_layout()
#%% Results table: one (bias, variance) row of the Bartlett estimator per K.
# NOTE(review): `df` here shadows the spectral resolution df = fs/N defined
# at the top of the script; harmless only because nothing runs after this.
tus_resultados_bartlett = [
    [sesgo0, varianza0],
    [sesgo1, varianza1],
    [sesgo2, varianza2],
    [sesgo3, varianza3],
    [sesgo4, varianza4],
]
df = pd.DataFrame(tus_resultados_bartlett, columns=['$s_B$', '$v_B$'],
                  index=K)
print(df)
|
from typing import Union, List
from graph_db.engine.label import Label
from graph_db.engine.types import INVALID_ID
from .property import Property
from .node import Node
class Relationship:
    """ Relationship between two nodes in a Graph.

    Properties form a singly linked list; the relationship itself is kept
    in per-node doubly linked chains via the start/end prev/next pointers.
    """
    def __init__(self,
                 label: Label,
                 start_node: Node,
                 end_node: Node,
                 id: int = INVALID_ID,
                 start_prev_rel: 'Relationship' = None,
                 start_next_rel: 'Relationship' = None,
                 end_prev_rel: 'Relationship' = None,
                 end_next_rel: 'Relationship' = None,
                 properties: List[Property] = None,
                 used: bool = True):
        """Create a relationship and wire it into both endpoint nodes.

        :param properties: initial property list; ``None`` means "no
            properties".  The original default was a shared mutable
            ``list()`` instance, so every relationship created without
            properties aliased ONE list object and add_property() on any
            of them mutated all of them (classic mutable-default bug).
        """
        self._id = id
        self._label = label
        self._start_node = start_node
        self._end_node = end_node
        self._start_prev_rel = start_prev_rel
        self._start_next_rel = start_next_rel
        self._end_prev_rel = end_prev_rel
        self._end_next_rel = end_next_rel
        # Fresh list per instance (fixes the shared-default-argument bug).
        self._properties = properties if properties is not None else []
        self._used = used
        self._init_properties()
        self._init_dependencies()
    def _init_properties(self):
        # Chain the initial properties into a singly linked list.
        for i in range(len(self._properties) - 1):
            self._properties[i].set_next_property(self._properties[i + 1])
    def _init_dependencies(self):
        """Append this relationship to both nodes' relationship chains."""
        start_node = self._start_node
        end_node = self._end_node
        start_prev_rel = start_node.get_last_relationship()
        if start_prev_rel:
            assert start_node == start_prev_rel.get_start_node() \
                or start_node == start_prev_rel.get_end_node()
            # Link from whichever side of the previous relationship the
            # shared node sits on.
            if start_node == start_prev_rel.get_start_node():
                start_prev_rel.set_start_next_rel(self)
            else:
                start_prev_rel.set_end_next_rel(self)
            self.set_start_prev_rel(start_prev_rel)
        start_node.add_relationship(self)
        end_prev_rel = end_node.get_last_relationship()
        if end_prev_rel:
            assert end_node == end_prev_rel.get_start_node() \
                or end_node == end_prev_rel.get_end_node()
            if end_node == end_prev_rel.get_start_node():
                end_prev_rel.set_start_next_rel(self)
            else:
                end_prev_rel.set_end_next_rel(self)
            self.set_end_prev_rel(end_prev_rel)
        end_node.add_relationship(self)
    def set_id(self, id: int):
        self._id = id
    def get_id(self) -> int:
        return self._id
    def get_start_node(self) -> Node:
        return self._start_node
    def get_end_node(self) -> Node:
        return self._end_node
    def get_label(self) -> Label:
        return self._label
    def set_label(self, label: Label):
        self._label = label
    def add_property(self, prop: Property):
        """Append a property, chaining it after the current last one."""
        if self._properties:
            self.get_last_property().set_next_property(prop)
        self._properties.append(prop)
    def get_properties(self) -> List[Property]:
        return self._properties
    def get_first_property(self) -> Union[Property, None]:
        return self._properties[0] if self._properties else None
    def get_last_property(self) -> Union[Property, None]:
        return self._properties[-1] if self._properties else None
    def set_start_prev_rel(self, start_prev_rel):
        self._start_prev_rel = start_prev_rel
    def get_start_prev_rel(self):
        return self._start_prev_rel
    def set_start_next_rel(self, start_next_rel):
        self._start_next_rel = start_next_rel
    def get_start_next_rel(self):
        return self._start_next_rel
    def set_end_prev_rel(self, end_prev_rel):
        self._end_prev_rel = end_prev_rel
    def get_end_prev_rel(self):
        return self._end_prev_rel
    def set_end_next_rel(self, end_next_rel):
        self._end_next_rel = end_next_rel
    def get_end_next_rel(self):
        return self._end_next_rel
    def set_used(self, used: bool):
        self._used = used
    def is_used(self) -> bool:
        return self._used
    def remove_dependencies(self):
        """Splice this relationship out of both nodes' chains and detach it."""
        start_node = self._start_node
        end_node = self._end_node
        start_prev_rel = self.get_start_prev_rel()
        start_next_rel = self.get_start_next_rel()
        end_prev_rel = self.get_end_prev_rel()
        end_next_rel = self.get_end_next_rel()
        # Bypass this relationship on the start node's chain.
        if start_prev_rel:
            if start_node == start_prev_rel.get_start_node():
                start_prev_rel.set_start_next_rel(start_next_rel)
            else:
                start_prev_rel.set_end_next_rel(start_next_rel)
        if start_next_rel:
            if start_node == start_next_rel.get_start_node():
                start_next_rel.set_start_prev_rel(start_prev_rel)
            else:
                start_next_rel.set_end_prev_rel(start_prev_rel)
        # Bypass this relationship on the end node's chain.
        if end_prev_rel:
            if end_node == end_prev_rel.get_start_node():
                end_prev_rel.set_start_next_rel(end_next_rel)
            else:
                end_prev_rel.set_end_next_rel(end_next_rel)
        if end_next_rel:
            if end_node == end_next_rel.get_start_node():
                end_next_rel.set_start_prev_rel(end_prev_rel)
            else:
                end_next_rel.set_end_prev_rel(end_prev_rel)
        self.set_start_prev_rel(None)
        self.set_start_next_rel(None)
        self.set_end_prev_rel(None)
        self.set_end_next_rel(None)
        start_node.remove_relationship(self)
        end_node.remove_relationship(self)
    def __str__(self) -> str:
        properties_str = " ".join(map(str, self._properties)) if self._properties else None
        return f'Edge #{self._id} = {{' \
               f'label: {self._label}, ' \
               f'properties: {properties_str}, ' \
               f'start_node: {self.get_start_node()}, ' \
               f'end_node: {self.get_end_node()}, ' \
               f'used: {self._used}' \
               f'}}'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.