seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18 values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1 value | lang stringclasses 93 values | doc_type stringclasses 1 value | stars int64 0 179k ⌀ | dataset stringclasses 3 values | pt stringclasses 78 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
29965694924 | import pyowm
import telebot
# NOTE(review): the OWM API key and the bot token are hard-coded secrets —
# they should be moved to environment variables and rotated.
owm = pyowm.OWM('6d00d1d4e704068d70191bad2673e0cc', language = "ru")
bot = telebot.TeleBot( "1031233548:AAFfUXO0e8bDuOTWaQbHQCCuA_YJwRbqQlY" )


@bot.message_handler(content_types=['text'])
def send_echo(message):
    """Reply to any text message with the current weather for that city (Russian text)."""
    # The whole message text is treated as a place name for the lookup.
    observation = owm.weather_at_place( message.text )
    w = observation.get_weather()
    # pyowm 2.x-style accessors: get_temperature returns a dict keyed by 'temp'.
    # TODO confirm the installed pyowm version (3.x renamed this API).
    temp = w.get_temperature('celsius')["temp"]
    answer = " В городе " + message.text + " сейчас " + w.get_detailed_status() + "\n"
    answer += " Температура сейчас в районе " + str(temp) + "\n\n"
    # Clothing advice bucketed by temperature.
    if temp < 0:
        answer += " Очень холодно "
    elif temp < 10:
        answer += " Одевайся теплее"
    elif temp < 20:
        answer +=" Прохладно "
    else:
        answer += " Можно и в шортиках "
    bot.send_message(message.chat.id, answer)
bot.polling( none_stop = True) | Neynara/witherin | Bot.py | Bot.py | py | 886 | python | ru | code | 0 | github-code | 6 |
17510284873 | """
find all a,b,c,d such that sum(a3,b3,c3,d3) < 1000
"""
def four_cube_sums(limit):
    """Yield every (a, b, c, d, total) with a <= b <= c <= d and
    a**3 + b**3 + c**3 + d**3 < limit, in lexicographic order.

    Fixed: the original iterated a..d over range(limit) while the cube
    cache only covered range(ncuberoot), so it raised KeyError once an
    index reached the cache bound. Bases are now capped at the cube root.
    """
    # Smallest base whose cube reaches `limit`, plus slack for float error.
    max_base = 1 + int(round(limit ** (1 / 3)))
    cubes = [i ** 3 for i in range(max_base)]
    for a in range(max_base):
        for b in range(a, max_base):
            for c in range(b, max_base):
                for d in range(c, max_base):
                    total = cubes[a] + cubes[b] + cubes[c] + cubes[d]
                    if total >= limit:
                        # Cubes grow with d, so larger d only increases total.
                        break
                    yield a, b, c, d, total


n = 1000
for a, b, c, d, total in four_cube_sums(n):
    print(a, b, c, d, total)
| soji-omiwade/cs | dsa/before_rubrik/sum_a_b_c_d_cubes_less_than_1000.py | sum_a_b_c_d_cubes_less_than_1000.py | py | 444 | python | en | code | 0 | github-code | 6 |
22360354761 | from dateutil.relativedelta import relativedelta
from odoo.tests import common
from odoo import fields
class TestContractPriceRevision(common.SavepointCase):
    """Tests for the contract price-revision wizard on recurring contract lines."""

    @classmethod
    def setUpClass(cls):
        # One monthly contract with two recurring lines: the first follows the
        # product price automatically, the second has a fixed manual price (25.0).
        super(TestContractPriceRevision, cls).setUpClass()
        partner = cls.env['res.partner'].create({
            'name': 'Partner test',
        })
        product = cls.env['product.product'].create({
            'name': 'Test Product',
        })
        cls.contract = cls.env['account.analytic.account'].create({
            'name': 'Contract test',
            'partner_id': partner.id,
            'date_start': fields.Date.today(),
            'recurring_next_date': fields.Date.to_string(
                fields.date.today() + relativedelta(days=7)),
            'recurring_rule_type': 'monthly',
            'recurring_invoice_line_ids': [(0, 0, {
                'product_id': product.id,
                'quantity': 1.0,
                'uom_id': product.uom_id.id,
                'name': product.name,
                'price_unit': 33.0,
                'automatic_price': True,
            }), (0, 0, {
                'product_id': product.id,
                'quantity': 1.0,
                'uom_id': product.uom_id.id,
                'name': product.name,
                'price_unit': 25.0,
                'automatic_price': False,
            })]
        })

    def execute_wizard(self):
        # Apply a +100% revision, valid for one year starting today,
        # to this contract only.
        wizard = self.env['create.revision.line.wizard'].create({
            'date_start': fields.Date.today(),
            'date_end': fields.Date.to_string(
                fields.date.today() + relativedelta(years=1)),
            'variation_percent': 100.0,
        })
        wizard.with_context(
            {'active_ids': [self.contract.id]}).action_apply()

    def test_contract_price_revision_wizard(self):
        # The wizard should add exactly one revised line: the 25.0 manual line
        # doubled to 50.0 (2 lines before, 3 after).
        self.assertEqual(len(self.contract.recurring_invoice_line_ids.ids), 2)
        self.execute_wizard()
        self.assertEqual(len(self.contract.recurring_invoice_line_ids.ids), 3)
        lines = self.contract.mapped('recurring_invoice_line_ids').filtered(
            lambda x: x.price_unit == 50.0)
        self.assertEqual(len(lines), 1)

    def test_contract_price_revision_invoicing(self):
        # After the revision, invoicing must produce one invoice with two
        # lines, exactly one of them at the revised price of 50.0.
        self.execute_wizard()
        self.contract.recurring_create_invoice()
        invoices = self.env['account.invoice'].search([
            ('contract_id', '=', self.contract.id)])
        self.assertEqual(len(invoices), 1)
        lines = invoices.mapped('invoice_line_ids')
        self.assertEqual(len(lines), 2)
        lines = lines.filtered(lambda x: x.price_unit == 50.0)
        self.assertEqual(len(lines), 1)
| detian08/bsp_addons | contract-11.0/contract_price_revision/tests/test_contract_price_revision.py | test_contract_price_revision.py | py | 2,670 | python | en | code | 1 | github-code | 6 |
23248264647 | # Usage:
# python advertising_email.py username email_text.txt csv_of_emails.csv attachment1 attachment2 ...
import smtplib
from getpass import getpass
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import sys
import csv
import time
SMTP_SERVER = "outgoing.mit.edu"
SMTP_SERVER_PORT = '465'
REPLY_TO = "physgaap@mit.edu"

# Usage: argv[1] = account name, argv[2] = HTML body template,
# argv[3] = CSV of (university, email, contact), argv[4:] = attachments.
email_account_name = sys.argv[1]

# Body template; {{University}} / {{Recipient}} placeholders are substituted per row.
with open(sys.argv[2], 'r') as file:
    message_content = file.read()

message_subject = "MIT Physics Graduate Application Assistance Program 2021"
message_cc = ""
message_bcc = "npaladin@mit.edu"

# Build the MIME attachment parts once; they are attached to every message.
attachments = []
for arg in sys.argv[4:]:
    # Fixed: correct MIME subtype is 'octet-stream' (was 'octate-stream').
    payload = MIMEBase('application', 'octet-stream')
    payload.set_payload(open(arg, 'rb').read())
    encoders.encode_base64(payload)
    # Fixed: the header is 'Content-Disposition' (was 'Content-Decomposition');
    # without it, mail clients do not treat the part as a named attachment.
    payload.add_header('Content-Disposition', 'attachment', filename=arg)
    attachments.append(payload)

server = smtplib.SMTP_SSL('%s:%s' % (SMTP_SERVER, SMTP_SERVER_PORT))
server.login(email_account_name, getpass(prompt="Email Password: "))

with open(sys.argv[3],'r') as csv_file:
    data = csv.reader(csv_file)
    next(data)  # skip the CSV header row
    for row in data:
        uni_name = row[0]
        contact_name = row[2]
        # Per-row placeholder substitution in subject and body.
        custom_message_subject = message_subject.replace('{{University}}', uni_name).replace('{{university}}', uni_name).replace('{{Recipient}}', contact_name).replace('{{recipient}}', contact_name)
        custom_message_content = message_content.replace('{{University}}', uni_name).replace('{{university}}', uni_name).replace('{{Recipient}}', contact_name).replace('{{recipient}}', contact_name)
        message_to = row[1]
        message_from = "%s@mit.edu" % email_account_name
        message_to_all = message_to.split(",") + message_cc.split(",") + message_bcc.split(",")
        message = MIMEMultipart()
        message.attach(MIMEText(custom_message_content, 'html'))
        message['From'] = message_from
        message['Reply-To'] = REPLY_TO
        message['To'] = message_to
        message['Cc'] = message_cc
        message['Subject'] = custom_message_subject
        for attachment in attachments:
            message.attach(attachment)
        server.send_message(message, message_from, message_to_all)
        time.sleep(2)  # throttle to stay under SMTP rate limits
server.quit() | ngpaladi/PhysGAAP-Tools | mailer/advertising_email.py | advertising_email.py | py | 2,321 | python | en | code | 0 | github-code | 6 |
73176268028 | import pandas as pd
file_name = r"C:\Users\Sen\Desktop\Raw-Curves Files\Logan-7C_w12_TDDB_25C_Compiled Raw.txt"
File = pd.read_csv(file_name, sep ='\t', header = 0)
Columns = File.columns
# Collects the first "low resistance" row of each group; groups are runs of
# rows sharing the same value in column index 2.
Result = pd.DataFrame(columns = Columns)

Ini_key = File.iat[0,2]      # current group key (column index 2)
criteria = 0                 # set to 1 once a row has been captured for this group
Low_resistance = 1000        # threshold applied to column index 11
for i in range(1, len(File.index)):
    if File.iat[i,2] != Ini_key:
        # New group starts: remember its key and allow one capture again.
        # NOTE(review): the row that *starts* a new group is never itself
        # tested against the threshold — confirm this is intended.
        Ini_key = File.iat[i,2]
        criteria = 0
    else:
        # First row of the group below the resistance threshold is kept.
        if criteria == 0 and File.iat[i,11] < Low_resistance:
            Result.loc[Result.shape[0]] = File.iloc[i]
            criteria = 1
        else:
            continue
pd.DataFrame(Result).to_csv('Low_resistance'+'.txt', index=False)
| Masoniced/Reliabity-Prediction-Model-on-Devices | FinFET Stochastic/Read_File.py | Read_File.py | py | 610 | python | en | code | 1 | github-code | 6 |
17657890511 | import time
import speech_recognition as sr
import pyttsx3
engine = pyttsx3.init()
r = sr.Recognizer()
voices = engine.getProperty('voices')
# to check the voices available in the system
'''for voice in voices:
print("Voice:")
print("ID: %s" %voice.id)
print("Name: %s" %voice.name)
print("Age: %s" %voice.age)
print("Gender: %s" %voice.gender)
print("Languages Known: %s" %voice.languages) '''

# Demo the two Windows SAPI5 voices by their registry tokens.
# female
engine.setProperty('voice', "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0")
engine.say("Hello, I am Zira.")
engine.runAndWait()
# male
engine.setProperty('voice', "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_DAVID_11.0")
engine.say("Hello,I am David.")
engine.runAndWait()

# Voice menu: each pass reads a language choice, records one utterance,
# and prints the Google Speech transcription in that language.
while(1):
    engine.say("Choose the language you want to speak in")
    engine.runAndWait()
    time.sleep(0.2)
    print("1. English")
    engine.say("1. English")
    engine.runAndWait()
    time.sleep(0.2)
    print("2. Hindi")
    engine.say("2. Hindi")
    engine.runAndWait()
    time.sleep(0.2)
    print("3. Kannada")
    engine.say("3. Kannada")
    engine.runAndWait()
    time.sleep(0.2)
    print("4. Bengali")
    engine.say("4. Bengali")
    engine.runAndWait()
    time.sleep(0.2)
    print("5. Malayalam")
    engine.say("5. Malayalam")
    engine.runAndWait()
    time.sleep(0.2)
    print("6. Marathi")
    engine.say("6. Marathi")
    engine.runAndWait()
    time.sleep(0.2)
    print("7. Urdu")
    engine.say("7. Urdu")
    engine.runAndWait()
    time.sleep(0.2)
    print("8. Others")
    engine.say("8. Others")
    engine.runAndWait()
    time.sleep(0.2)
    n = int(input("\nEnter your choice:"))
    # Map the menu choice to a Google speech-recognition language code.
    lang = 'en'
    if n == 1:
        lang = 'en'
    elif n == 2:
        lang = 'hi-IN'
    elif n == 3:
        lang = 'kn-IN'
    elif n == 4:
        lang = 'bn-IN'
    elif n == 5:
        lang = 'ml-IN'
    elif n == 6:
        lang = 'mr-IN'
    elif n == 7:
        lang = 'ur'
    elif n == 8:
        lang = input("Enter the google language code of the language you want to see the output in: ")
    elif n == 0:
        exit(0)
    with sr.Microphone() as source:
        engine.say("Mic testing..")
        engine.runAndWait()
        # Calibrate for ambient noise, then record one phrase.
        audio = r.adjust_for_ambient_noise(source)
        print("Say something")
        audio = r.listen(source)
        engine.say("Time is over. Thanks.")
        engine.runAndWait()
        try:
            print("You said: ' " + r.recognize_google(audio, language=lang) + "'")
            time.sleep(5)
        # NOTE(review): current speech_recognition versions raise
        # sr.UnknownValueError / sr.RequestError here, not LookupError —
        # confirm the installed version, otherwise this handler never fires.
        except LookupError:
            engine.say("Could not understand audio. Do you want to try again?")
            engine.runAndWait()
    engine.say("Do you want to continue?")
    engine.runAndWait()
    y = int(input("Enter 0 to quit"))
    if y == 0:
        exit(0)
engine.runAndWait() | prakritisharma/Voice-recognition | voice_recognition.py | voice_recognition.py | py | 2,928 | python | en | code | 0 | github-code | 6 |
2500846027 | # author: Tran Quang Loc (darkkcyan)
# editorial: https://codeforces.com/blog/entry/8166
# Note: I switched to python for this problem because I want my check function to always use integer number
# I tried to solve this problem using C++ and got overflow even with long long number
# (and really, never change it to unsigned long long because there are subtractions in the checking equation).
from collections import deque
class line:
    """A linear function y = k*x + b — one segment of the convex hull."""

    def __init__(self, k, b):
        self.k, self.b = k, b

    def get(self, x):
        """Evaluate the line at x."""
        return self.b + x * self.k
def check(l1, l2, nl):
    """Return True when l2 becomes useless once nl is appended after l1.

    Integer cross-multiplication form of the intersection comparison, so no
    floating point (and no overflow issues) is involved.
    """
    lhs = (nl.b - l2.b) * (l1.k - l2.k)
    rhs = (nl.k - l2.k) * (l1.b - l2.b)
    return lhs <= rhs
n = int(input())
a = list(map(int, input().split()))
b = list(map(int, input().split()))

# we don't even need the entire dp array, because the deque stores the value
# of all previous ones; this variable holds the current computed dp value.
ans = 0
# Convex hull of candidate lines (Convex Hull Trick).  Queries are answered
# from the front; presumably a[] is monotone so popped lines never come back —
# see the editorial linked in the header for the argument.
hull = deque()
hull.append(line(b[0], 0))
for i in range(1, n):
    arg = a[i]
    # Advance past front lines that are no longer optimal for this query.
    while len(hull) > 1 and hull[0].get(arg) >= hull[1].get(arg):
        hull.popleft()
    ans = hull[0].get(arg)
    # New candidate line; drop back lines it makes redundant.
    nl = line(b[i], ans)
    while len(hull) > 1 and check(hull[-2], hull[-1], nl):
        hull.pop()
    hull.append(nl)
print(ans)
| quangloc99/CompetitiveProgramming | Codeforces/CF319-D1-C.py | CF319-D1-C.py | py | 1,270 | python | en | code | 2 | github-code | 6 |
16304395489 | import cv2
import numpy as np
import apriltag
import collections
apriltag_detect_error_thres = 0.07
def draw_pose(overlay, camera_params, tag_size, pose, z_sign=1, color=(0, 255, 0)):
    """Project a tag-sized wireframe cube at `pose` and draw it onto `overlay`.

    camera_params is (fx, fy, cx, cy); pose is a 4x4 homogeneous transform.
    """
    # Cube corners in tag coordinates; z is extruded to one side via z_sign.
    opoints = np.array([
        -1, -1, 0,
        1, -1, 0,
        1, 1, 0,
        -1, 1, 0,
        -1, -1, -2 * z_sign,
        1, -1, -2 * z_sign,
        1, 1, -2 * z_sign,
        -1, 1, -2 * z_sign,
    ]).reshape(-1, 1, 3) * 0.5 * tag_size
    # Index pairs into opoints forming the 12 cube edges.
    edges = np.array([
        0, 1,
        1, 2,
        2, 3,
        3, 0,
        0, 4,
        1, 5,
        2, 6,
        3, 7,
        4, 5,
        5, 6,
        6, 7,
        7, 4
    ]).reshape(-1, 2)
    fx, fy, cx, cy = camera_params
    K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
    # Split the pose into a Rodrigues rotation vector and translation
    # as required by cv2.projectPoints.
    rvec, _ = cv2.Rodrigues(pose[:3, :3])
    tvec = pose[:3, 3]
    dcoeffs = np.zeros(5)  # no lens distortion assumed
    ipoints, _ = cv2.projectPoints(opoints, rvec, tvec, K, dcoeffs)
    ipoints = np.round(ipoints).astype(int)
    ipoints = [tuple(pt) for pt in ipoints.reshape(-1, 2)]
    for i, j in edges:
        cv2.line(overlay, ipoints[i], ipoints[j], color, 1, 16)
def draw_pose_axes(overlay, camera_params, tag_size, pose, center):
    """Project and draw the tag's three coordinate axes starting at `center`."""
    fx, fy, cx, cy = camera_params
    K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
    rvec, _ = cv2.Rodrigues(pose[:3, :3])
    tvec = pose[:3, 3]
    dcoeffs = np.zeros(5)  # no lens distortion assumed
    # Axis endpoints in tag coordinates, scaled to the tag size.
    opoints = np.float32([[1, 0, 0],
                          [0, -1, 0],
                          [0, 0, -1]]).reshape(-1, 3) * tag_size
    ipoints, _ = cv2.projectPoints(opoints, rvec, tvec, K, dcoeffs)
    ipoints = np.round(ipoints).astype(int)
    center = np.round(center).astype(int)
    center = tuple(center.ravel())
    # One line per axis, each in a distinct colour.
    cv2.line(overlay, center, tuple(ipoints[0].ravel()), (0, 0, 255), 2)
    cv2.line(overlay, center, tuple(ipoints[1].ravel()), (0, 255, 0), 2)
    cv2.line(overlay, center, tuple(ipoints[2].ravel()), (255, 0, 0), 2)
def annotate_detection(overlay, detection, center):
    """Draw the tag id at the tag centre, scaled to the tag's pixel size.

    `center` is accepted for signature compatibility but is not used; the
    text is positioned from detection.center.
    """
    label = str(detection.tag_id)
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Tag edge length in pixels (corner 0 to corner 1).
    dx = detection.corners[1][0] - detection.corners[0][0]
    dy = detection.corners[1][1] - detection.corners[0][1]
    side_px = np.sqrt(dx ** 2 + dy ** 2)
    font_scale = side_px / 22
    text_w, text_h = cv2.getTextSize(label, font, font_scale, 2)[0]
    cx, cy = detection.center[0], detection.center[1]
    anchor = (int(cx - text_w / 2), int(cy + text_h / 2))
    cv2.putText(overlay, label, anchor, font, font_scale, (0, 255, 255), 2)
def detect_april_tag(orig, camera_params, tag_size, visualize=False, save_path=None, verbose=False):
    """Detect AprilTags in the image array `orig`.

    Returns (poses, overlay): poses is a list of (tag_id, 4x4 pose,
    final_error) tuples (empty when camera_params is None), overlay is the
    annotated image.

    NOTE(review): if `orig` is already 2-D (grayscale), `gray` is never
    assigned and the detect() call raises NameError — confirm callers always
    pass 3-channel images.
    """
    if len(orig.shape) == 3:
        gray = cv2.cvtColor(orig, cv2.COLOR_RGB2GRAY)
    detector = apriltag.Detector()
    detections, dimg = detector.detect(gray, return_image=True)
    num_detections = len(detections)
    if verbose:
        print(f'Detected {num_detections} tags')
    # Overlay: blend the original with the detector's debug image.
    if num_detections == 0:
        overlay = orig
    elif len(orig.shape) == 3:
        overlay = orig // 2 + dimg[:, :, None] // 2
    else:
        overlay = orig // 2 + dimg // 2
    poses = []
    for i, detection in enumerate(detections):
        if verbose:
            print()
            print('Detection {} of {}:'.format(i + 1, num_detections))
            print(detection.tostring(indent=2))
        if camera_params is not None:
            # Recover the 6-DoF tag pose, then draw cube, axes and tag id.
            pose, e0, ef = detector.detection_pose(detection, camera_params, tag_size)
            poses.append((detection.tag_id, pose, ef))
            draw_pose(overlay, camera_params, tag_size, pose)
            draw_pose_axes(overlay, camera_params, tag_size, pose, detection.center)
            annotate_detection(overlay, detection, tag_size)
            if verbose:
                print(detection.tostring(collections.OrderedDict([('Pose', pose),
                    ('InitError', e0), ('FinalError', ef)]), indent=2))
    if visualize:
        cv2.imshow('apriltag', overlay)
        while cv2.waitKey(5) < 0:  # Press any key to load subsequent image
            continue
        cv2.destroyAllWindows()
    if save_path is not None:
        cv2.imwrite(save_path, overlay)
    return poses, overlay
if __name__ == '__main__':
    imagepath = '/home/gdk/Documents/data/1652826411/827312071624/000000_color.png'
    camera_params = (765.00, 764.18, 393.72, 304.66)
    tag_size = 0.06
    # Fixed: detect_april_tag expects an image array (it reads `orig.shape`),
    # not a path — load the file first.
    image = cv2.imread(imagepath)
    detect_april_tag(image, camera_params, tag_size, visualize=True, save_path=None, verbose=True)
| dkguo/Pushing-Imitation | apriltag_detection.py | apriltag_detection.py | py | 4,540 | python | en | code | 0 | github-code | 6 |
306333387 | from fastapi import status, HTTPException, Depends, APIRouter
from database import SessionLocal
import models, schemas, utils
# All endpoints below are served under /merchants and grouped under the
# "Merchants" tag in the OpenAPI docs.
router = APIRouter(
    prefix="/merchants",
    tags=['Merchants']
)
@router.post("/", status_code=status.HTTP_201_CREATED, response_model=schemas.MerchantResponse)
def create_merchant(merchant: schemas.MerchantCreate):
    """Create a merchant; the plaintext password is hashed before storage."""
    # hash the password - user.password
    merchant.password = utils.hash(merchant.password)
    new_merchant = models.Merchant(**merchant.dict())
    # NOTE(review): SessionLocal is used here as a ready Session instance;
    # with the usual sessionmaker pattern this would need SessionLocal().
    # Confirm what database.SessionLocal actually exports.
    SessionLocal.add(new_merchant)
    SessionLocal.commit()
    SessionLocal.refresh(new_merchant)
    return new_merchant
@router.get("/{id}", response_model=schemas.MerchantResponse)
def get_merchant(id: int):
    """Return the merchant with the given id, or raise 404 if absent."""
    merchant = SessionLocal.query(models.Merchant).filter(models.Merchant.id == id).first()
    if not merchant:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"Merchant with id:{id} was not found")
    return merchant
@router.put("/{id}",response_model=schemas.MerchantResponse)
def update_merchant(id: int, updated_merchant: schemas.MerchantCreate):
    """Replace a merchant's fields with `updated_merchant`; raise 404 if absent."""
    merchant_query = SessionLocal.query(models.Merchant).filter(models.Merchant.id == id)
    merchant = merchant_query.first()
    if merchant == None:  # NOTE(review): prefer `is None`
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"Merchant with id:{id} does not exit")
    # Bulk UPDATE on the query; session objects are not refreshed (False).
    merchant_query.update(updated_merchant.dict(),synchronize_session=False)
    SessionLocal.commit()
return merchant_query.first() | Roshankattel/RFID2 | rfiddemo/routers/merchants.py | merchants.py | py | 1,553 | python | en | code | 0 | github-code | 6 |
24247317201 | import tkinter
import customtkinter
import random
# Card values: ace counted as 11, tens/faces as 10.
cards = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 11]


def draw():
    """Deal one card to the player (and to the dealer while it sits at 16
    or below), then refresh the score labels."""
    global player_score
    global enemy_score
    win_label.configure(text=" ")
    # NOTE: the local name shadows random.randint after this assignment.
    randint = random.randint(0, len(cards) - 1)
    player_cards.append(cards[randint])
    player_score += int(cards[randint])
    # Dealer draws only at 16 or below (stands on 17+).
    if enemy_score <= 16:
        randint = random.randint(0, len(cards) - 1)
        enemy_cards.append(cards[randint])
        enemy_score += int(cards[randint])
    # A bust on either side ends the round immediately.
    # NOTE(review): end_game() resets the global scores, so the labels
    # below then show 0 — confirm this is the intended display.
    if player_score > 21 or enemy_score > 21:
        end_game()
    player_score_text = "Your score: " + str(player_score)
    enemy_score_text = "Enemy score: " + str(enemy_score)
    player_card_label.configure(text=player_score_text)
    enemy_card_label.configure(text=enemy_score_text)
def round_outcome(player_score, enemy_score):
    """Return the result string for a finished round.

    Fixed: the original compared scores before checking whether the dealer
    had busted, so a standing player "lost" to a busted dealer with the
    higher total (e.g. 20 vs 22). Busts are now resolved first: a player
    bust always loses, otherwise a dealer bust always wins.
    """
    if player_score > 21:
        return "You Lose!"
    if enemy_score > 21:
        return "You Win!"
    if player_score < enemy_score:
        return "You Lose!"
    if player_score > enemy_score:
        return "You Win!"
    return "Tie!"


def end_game():
    """Show the round result in the window and reset both hands."""
    global player_score
    global enemy_score
    global player_cards
    global enemy_cards
    win_label.configure(text=round_outcome(player_score, enemy_score))
    # Reset shared game state for the next round.
    player_score = 0
    enemy_score = 0
    player_cards = []
    enemy_cards = []
if __name__ == '__main__':
    # Game state lives in module globals shared with draw() / end_game().
    player_score = 0
    player_cards = []
    enemy_score = 0
    enemy_cards = []

    root_tk = customtkinter.CTk()
    root_tk.geometry("400x300")
    root_tk.title("Blackjack")
    customtkinter.set_appearance_mode("dark")
    customtkinter.set_default_color_theme("dark-blue")

    # Controls: draw a card / stand, plus score and result labels.
    draw_button = customtkinter.CTkButton(master=root_tk, text="Draw Card", command=draw)
    draw_button.place(relx=0.3, rely=0.8, anchor=tkinter.CENTER)
    stand_button = customtkinter.CTkButton(master=root_tk, text="Stand", command=end_game)
    stand_button.place(relx=0.7, rely=0.8, anchor=tkinter.CENTER)
    player_card_label = customtkinter.CTkLabel(master=root_tk, text="Blank")
    player_card_label.place(relx=0.5, rely=0.6, anchor=tkinter.CENTER)
    enemy_card_label = customtkinter.CTkLabel(master=root_tk, text="Blank")
    enemy_card_label.place(relx=0.5, rely=0.25, anchor=tkinter.CENTER)
    win_label = customtkinter.CTkLabel(master=root_tk, text=" ")
    win_label.place(relx=0.5, rely=0.35, anchor=tkinter.CENTER)
    root_tk.mainloop()
| anarkitty8/gui-blackjack | blackjack_gui.py | blackjack_gui.py | py | 2,420 | python | en | code | 0 | github-code | 6 |
37149926407 | # 21. Se cuenta con una lista de películas de cada una de estas se dispone de los siguientes datos:
# nombre, valoración del público –es un valor comprendido entre 0-10–, año de estreno y recaudación.
# Desarrolle los algoritmos necesarios para realizar las siguientes tareas:
from random import randint
from lista import Lista
class Pelicula:
    """A movie record: name, audience rating (0-10), release year, box office."""

    def __init__(self, nombre, valoracion, estreno, recaudacion):
        self.nombre = nombre
        self.valoracion = valoracion
        self.estreno = estreno
        self.recaudacion = recaudacion

    def __str__(self):
        campos = (self.nombre,
                  'VL: {}'.format(self.valoracion),
                  'EST: {}'.format(self.estreno),
                  'RN: {}'.format(self.recaudacion))
        return ' | '.join(campos)
peliculas = Lista()
nombres = ['Thor 1', 'Star Wars 8', 'Transformers 2', 'Iron Man 2', 'Capitán América 3', 'Star Wars 5']
# Seed the list with random rating / year / box office, inserting by name.
for nombre in nombres:
    peliculas.insertar(Pelicula(nombre, randint(0, 10), randint(2010, 2022), randint(10000*1000, 10000*1500)), 'nombre')

# a. filter the movies by year — print every movie released in a given year
anio = int(input('Ingrese el anio a mostrar: '))
peliculas.barrido_filtro(anio, 'estreno')
print()

# b. show the data of the movie with the highest box office
mayor = peliculas.mayor_de_lista('recaudacion')
print(mayor.info)
print()

# c. list the movies with the highest audience rating (may be more than one)
peliculas.mayores_de_lista('valoracion')
print()

# d. print the list under the following orderings — only one auxiliary list allowed:
auxiliar = Lista()

# I. by name (insertions above already used the 'nombre' key)
peliculas.barrido()
print()

# II. by box office: drain into the auxiliary list re-inserting by that key
while(not peliculas.lista_vacia()):
    elemento = peliculas.obtener_elemento(0)
    peliculas.eliminar(elemento.nombre, 'nombre')
    auxiliar.insertar(elemento, 'recaudacion')
auxiliar.barrido()
print()

# III. by release year: drain back re-inserting by year
while(not auxiliar.lista_vacia()):
    elemento = auxiliar.obtener_elemento(0)
    auxiliar.eliminar(elemento.nombre, 'nombre')
    peliculas.insertar(elemento, 'estreno')
peliculas.barrido()
print()

# IV. by audience rating
while(not peliculas.lista_vacia()):
    elemento = peliculas.obtener_elemento(0)
    peliculas.eliminar(elemento.nombre, 'nombre')
    auxiliar.insertar(elemento, 'valoracion')
auxiliar.barrido()
| GabiC15/TPs-Algoritmos | TP4/ejercicio_21.py | ejercicio_21.py | py | 2,324 | python | es | code | 0 | github-code | 6 |
23431004467 | import numpy as np
import math
# 0 ~ 26 : blocks (color, shape, pattern)
# 27 : empty
# 28 ~ 30 : dummy blocks
def circle_dist(x, y):
    """Euclidean distance of (x, y) from the origin (circular level sets)."""
    squared = x * x + y * y
    return math.sqrt(squared)
def square_dist(x, y):
    """Chebyshev distance of (x, y) from the origin (square level sets)."""
    return max(abs(x), abs(y))
def triangle_dist(x, y):
    """Distance-like field whose unit level set is a triangle (max of three
    half-plane expressions)."""
    edge_a = (6 * x - 3 * y + 1) / 4
    edge_b = (- 6 * x - 3 * y + 1) / 4
    edge_c = (3 * y - 1) / 2
    return max(edge_a, edge_b, edge_c)
class BHB_renderer(object):
    """Renders a BHB board state to an RGB pixel array.

    Cell codes: 0-26 are blocks encoded as color*9 + shape*3 + pattern,
    27 is an empty cell, 28-30 are dummy blocks (diagonal stripes).
    """

    def __init__(self, size):
        self.size = size
        self.cell_w = 20   # cell width in pixels
        self.cell_h = 20   # cell height in pixels
        self.w = self.cell_w * self.size
        # Two extra rows: gauge row plus current-block row.
        self.h = self.cell_h * (self.size + 2)
        self.low = 0       # dark channel intensity
        self.high = 255    # bright channel intensity
        # Pre-render one template image per cell code (index == code).
        self.block_templates = []
        for i in range(31):
            self.block_templates.append(self.block_template(i))
        self.gauge_templates = []  # 0 : small, 1 : big
        for i in range(2):
            self.gauge_templates.append(self.gauge_template(i))

    def gauge_template(self, gauge):
        """Render one gauge cell: a small dot (0) or a big filled square (1)."""
        canvas = np.ones([self.cell_w, self.cell_h, 3])
        center = [self.cell_w // 2, self.cell_h // 2]
        if gauge == 0:
            r = self.cell_w // 10
            # NOTE(review): the y range ends at center[1] // 2 + r, not
            # center[1] + r — looks asymmetric; confirm intended.
            for x in range(center[0] - r, center[0] + r):
                for y in range(center[1] - r, center[1] // 2 + r):
                    if circle_dist(x - center[0], y - center[1]) <= r:
                        canvas[x, y] = [self.low, self.low, self.low]
        elif gauge == 1:
            r = self.cell_w // 4
            for x in range(center[0] - r, center[0] + r):
                for y in range(center[1] - r, center[1] + r):
                    canvas[x, y] = [self.low, self.low, self.low]
        return canvas

    def block_template(self, block):
        """Render one cell template for cell code `block` (see class docstring)."""
        canvas = np.zeros([self.cell_w, self.cell_h, 3])
        # Bright border of thickness mw/mh around every cell.
        mw = self.cell_w // 20
        mh = self.cell_h // 20
        for x in range(0, self.cell_w):
            for y in range(mh):
                canvas[x, y] = [self.high, self.high, self.high]
                canvas[x, self.cell_h - 1 - y] = [self.high, self.high, self.high]
        for y in range(0, self.cell_h):
            for x in range(mw):
                canvas[x, y] = [self.high, self.high, self.high]
                canvas[self.cell_w - 1 - x, y] = [self.high, self.high, self.high]
        if block == 27:
            # Empty cell: border only.
            return canvas
        if block > 27:
            # Dummy blocks: bright diagonal stripes, period depends on the code.
            period = 4 if block == 28 else 3 if block == 29 else 1
            for x in range(mw * 2, self.cell_w - mw * 2 + 1):
                for y in range(mh * 2, self.cell_h - mh * 2 + 1):
                    if (y - x) % period == 0:
                        canvas[x, y] = [self.high, self.high, self.high]
            return canvas
        # Regular block: decompose the code into its three attributes.
        color_ = block // 9        # R, G, B
        shape = (block % 9) // 3   # circle, square, triangle
        pattern = block % 3        # empty, stripe, full
        color = [self.low, self.low, self.low]
        color[color_] = self.high
        dist_func = circle_dist if shape == 0 else square_dist if shape == 1 else triangle_dist
        center = [float(self.cell_w) / 2, float(self.cell_h) / 2]
        # Paint bands of the normalized distance field according to the
        # pattern: outline only, concentric stripes, or solid fill.
        for x in range(mw * 2, self.cell_w - mw * 2 + 1):
            for y in range(mh * 2, self.cell_h - mh * 2 + 1):
                d = dist_func((x - center[0]) / (self.cell_w / 2 - mw * 2), (y - center[1]) / (self.cell_h / 2 - mh * 2))
                if d <= 1 and ((pattern == 0 and 0.8 <= d) or
                               (pattern == 1 and (d <= 0.2 or (0.4 <= d and d <= 0.6) or 0.8 <= d)) or
                               (pattern == 2)):
                    canvas[x,y] = color
        return canvas

    def draw_cell(self, canvas, x, y, id_, is_block=True):
        """Blit the template for `id_` into grid cell (x, y) of `canvas`."""
        bw, bh = self.cell_w, self.cell_h
        if(is_block):
            canvas[bw * x : bw * (x + 1), bh * y : bh * (y + 1)] = self.block_templates[id_]
        else:
            canvas[bw * x : bw * (x + 1), bh * y : bh * (y + 1)] = self.gauge_templates[id_]

    def render(self, state):
        """Render `state` (blocks grid, current_block, gauge) to a full canvas."""
        canvas = np.ones([self.w, self.h, 3])
        # Current-block preview in the extra last row, column 3.
        self.draw_cell(canvas, 3, self.size + 1, state.current_block)
        # Board cells occupy rows 1..size (row 0 is the gauge row).
        for x in range(0, self.size):
            for y in range(0, self.size):
                self.draw_cell(canvas, x, y + 1, state.blocks[x][y])
        # Gauge row: the last `state.gauge` cells get template 0 (small),
        # the rest template 1 (big).
        for x in range(0, self.size):
            if x > self.size - 1 - state.gauge:
                self.draw_cell(canvas, x, 0, 0, False)
            else:
                self.draw_cell(canvas, x, 0, 1, False)
        return canvas
| yskim5892/gym_BHB | gym_BHB/envs/BHB_renderer.py | BHB_renderer.py | py | 4,404 | python | en | code | 0 | github-code | 6 |
16463309857 | # web template maker
import os
# templates
# Skeleton index.html written into each new project root.
HTML_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
<link rel="stylesheet" href="css/style.css">
</head>
<body>
<!--HEADER-->
<header>
</header>
<!--MAIN-->
<main>
</main>
<!--FOOTER-->
<footer>
</footer>
</body>
</html>
"""

# Starter SCSS (variables + empty section rules), compiled to css/index.css
# by the sass watcher started in scssWatcher().
# NOTE(review): the HTML links css/style.css while the watcher emits
# css/index.css — one of the two names looks wrong; confirm which.
SCSS_TEMPLATE = """
$primary: #000;
$secondary: #000;
$shadow: rgba(0,0,0,0.5);
$white: #fff;
$gray: #888;
* {
margin: 0;
padding: 0;
}
body {
}
header {
}
main {
}
footer {
}
"""
def scssWatcher(root):
    """Start `sass --watch` mapping scss/index.scss -> css/index.css.

    Blocks the terminal until the watcher process is terminated.
    """
    cmd = "sass --watch {0}/scss/index.scss {0}/css/index.css".format(root)
    print("[issuing command] " + cmd + "\nLeave the terminal open ...")
    os.system(cmd)  # blocking call
def writeFile(ext, root):
    """Create/populate index.<ext> under the project root (html/scss/css)."""
    # NOTE(review): strips the first two characters of `root` — presumably to
    # drop a leading "./"; with the "~/..." roots built by createTemplate this
    # removes "~/" and makes the path relative to the cwd instead. Confirm.
    root = root[2:]
    print(root)
    if ext == "css":
        # css is only created empty: sass generates its content later.
        fileName = root + "/" + ext + "/index." + ext
        os.system("touch " + fileName)
        print(fileName + " created.")
        return
    elif ext == "html":
        fileName = root + "/index." + ext
        fileTemplate = HTML_TEMPLATE
    elif ext == "scss":
        fileName = root + "/" + ext + "/index." + ext
        fileTemplate = SCSS_TEMPLATE
    with open(fileName, "w") as f:
        f.write(fileTemplate)
    print(fileName + " template generated.")
def createTemplate(dirName):
    """Create <root>/{scss,css}, write the template files, start the sass watcher."""
    root = "~/documents/github/{}".format(dirName)
    scss = "/scss"
    css = "/css"
    htmlExt = "html"
    scssExt = "scss"
    cssExt = "css"
    print("Creating directories ...")
    # NOTE(review): "~" is expanded by the shell for these os.system calls,
    # but writeFile later open()s a derived path where it is not. Confirm
    # both actually target the same directory.
    os.system("mkdir " + root)
    print(root)
    os.system("mkdir " + root + scss)
    print(root + scss)
    os.system("mkdir " + root + css)
    print(root + css)
    print("\nDirectories created.\nGenerating file templates ...")
    writeFile(htmlExt, root)
    writeFile(scssExt, root)
    writeFile(cssExt, root)
    print("\nTemplate generated ...\n")
    scssWatcher(root)
def main():
    """Prompt for a project directory name and scaffold it."""
    while True:
        dirName = input("Enter root directory name...\n")
        # NOTE(review): isfile() on an unexpanded "~" path is always False
        # here; os.path.isdir(os.path.expanduser(...)) is probably what was
        # intended. Confirm.
        if os.path.isfile("~/documents/github/" + dirName):
            print("Directory already exists ...")
            continue
        break
    createTemplate(dirName)
if __name__ == "__main__":
main() | jtriley-eth/python-scripts | scripts/web-template-generator.py | web-template-generator.py | py | 2,324 | python | en | code | 0 | github-code | 6 |
27390337341 | # Catalan Numbers and RNA Secondary Structures
# http://rosalind.info/problems/cat/
from collections import defaultdict
from utilities import get_file, read_FASTA, get_answer_file
# Noncrossing Perfect Matching
def catalan(strand):
    """Count noncrossing perfect matchings of the RNA strand, memoised in the
    module-level `cache` (pre-seeded with all 0/1/2-character substrands)."""
    if (strand not in cache):
        # A perfect matching needs equal counts of complementary bases.
        if strand.count('C') != strand.count('G') or strand.count('A') != strand.count('U'):
            cache[strand] = 0
        else:
            # Pair base 0 with each odd position i; the pair factor comes from
            # the seeded 2-character entries, and the two independent
            # substrands on either side multiply.
            cache[strand] = sum([catalan(strand[1:i]) * cache[strand[0]+strand[i]] * catalan(strand[i+1:]) for i in range(1, len(strand), 2)])
    return cache[strand]
with get_file() as file:
    _, strand_array = read_FASTA(file)

with get_answer_file() as file:
    # Memo seed: empty strand -> 1, single bases -> 0, and all base pairs
    # (1 only for the Watson-Crick pairs AU/UA/CG/GC).
    cache = {'':1, 'A':0, 'C':0, 'G':0, 'U':0, 'AA':0, 'AC':0, 'AG':0, 'AU':1, 'CA':0, 'CC':0, 'CG':1, 'CU':0,
             'GA':0, 'GC':1, 'GG':0, 'GU':0, 'UA':1, 'UC':0, 'UG':0, 'UU':0}
    NPM_num = catalan(strand_array[0])
    # The problem asks for the count modulo 1,000,000.
    print(NPM_num % 1000000, file=file)
| Delta-Life/Bioinformatics | Rosalind/Bioinformatics Stronghold/code/CAT.py | CAT.py | py | 932 | python | en | code | 0 | github-code | 6 |
35136983267 |
import Forge.core.Process
import Anvil.core
import WindowExecute
class WindowOpen( WindowExecute.WindowExecute ):
    """Modal window for opening an entity, with Open/Abort buttons."""

    def __init__( self, title=None, iconPath=None, size=[ 400, 100 ], entity=None, cmd=None, arg={}, ui=None ):
        # NOTE(review): mutable defaults (size=[...], arg={}) are shared
        # across calls — safe only if they are never mutated; confirm.
        if not title:
            title = 'Open entity : %i' %( entity['entityId'] )
        self.init( title=title, iconPath=iconPath, size=size )
        self._cmd = cmd
        # define class alias
        Abutton = Anvil.core.Button
        # layout init
        self.layout_main = Anvil.core.Layout( parent=self.window )
        # custom UI init
        self._buildCustom( arg=arg )
        # buttons init: Open runs self.execute with the given ui, Abort closes.
        button_open = Abutton( name='Open', cmd=Forge.core.Process.partial( self.execute, ui ), w=size[0]/2 - 15, h=25 )
        button_abort = Abutton( name='Abort', cmd=self.window.close, w=size[0]/2 - 15, h=25 )
        # define layouts content
        self.layout_main.add( [ button_open, button_abort ] )
| Black-Cog/Hammer | ui/WindowOpen.py | WindowOpen.py | py | 855 | python | en | code | 0 | github-code | 6 |
3373850562 |
import random
def get_lottery_numbers(amount = 5):
    """Draw `amount` random integers in [0, 100] (duplicates allowed)."""
    return [random.randint(0, 100) for _ in range(amount)]
def get_user_entry(amount = 5):
    """Prompt for `amount` integers in [0, 100] and return them as a list.

    Re-prompts on out-of-range or non-numeric input. Fixed: the original
    tested `int(number)` for truthiness, which rejected a legitimate entry
    of 0, and crashed with ValueError on non-numeric input.
    """
    user_numbers = []
    while len(user_numbers) < amount:
        try:
            number = int(input("Enter a number between 0 and 100: "))
        except ValueError:
            print("Please enter a valid number between 0 and 100")
            continue
        if 0 <= number <= 100:
            user_numbers.append(number)
        else:
            print("Please enter a valid number between 0 and 100")
    return user_numbers
def compare_numbers(user_numbers, lottery_numbers, ordered = True):
    """Count matches between the two number lists.

    With ordered=True, positions must line up against the drawn numbers;
    with ordered=False, any occurrence of a user number among the lottery
    numbers counts, regardless of position.
    """
    count = 0
    if ordered:
        for idx in range(len(user_numbers)):
            if lottery_numbers[idx] == user_numbers[idx]:
                count += 1
    else:
        for value in user_numbers:
            if value in lottery_numbers:
                count += 1
    return count
def main():
    """Play one round: draw numbers, read the player's picks, report both scorings."""
    lottery_numbers = get_lottery_numbers()
    user_numbers = get_user_entry()

    # Matching based on the order.
    # NOTE(review): arguments are passed as (lottery, user) while the
    # signature reads (user, lottery); the positional comparison is
    # symmetric so the count is unaffected.
    matches = compare_numbers(lottery_numbers, user_numbers)
    if matches == len(lottery_numbers):
        print("You won the lottery!")
    else:
        print("You matched %d out of the %d numbers. Better luck next time!" \
            % (matches, len(lottery_numbers)))

    # Matching based on the user's selected numbers (order ignored).
    matches = compare_numbers(lottery_numbers, user_numbers, False)
    if matches == len(lottery_numbers):
        print("You won the lottery!")
    else:
        print("You matched %d out of the %d numbers! You won a small prize!" \
            % (matches, len(lottery_numbers)))
main() | usman-tahir/python-snippets | python-games/lottery.py | lottery.py | py | 1,813 | python | en | code | 0 | github-code | 6 |
28359703116 | import csv
import DBN
import matplotlib.pyplot as plt
def getData(inp="../ABP_data_11traces_1min/dataset7.txt"):
    """Lazily yield the first three space-separated fields of each line as floats.

    NOTE(review): uses the Python 2 `file()` builtin — this module does not
    run under Python 3 as written.
    """
    f = file(inp)
    lines = f.readlines()
    # Generator of map objects, one per line (consumed by the caller).
    data = (map(float,l.split(" ")[:3]) for l in lines)
    # end = lines.index('\n')
    # obs = lines[1:end]
    # data = map(lambda x: tuple(map(float,x.split(','))),obs)
    return data
def main():
    """Run the DBN filter over the trace and plot tracked vs. observed pressures."""
    data = list(getData())
    bayesNet = DBN.DBN()
    dataOut = []
    count = 0
    # Filtering loop: observe each sample, advance one timestep, and
    # record the posterior statistics.
    for each in data:
        # for i in range(1000):
        print("timestep: " + str(count) + " Observation: " + str(each))
        # if (bayesNet.observe(each) != False):
        bayesNet.observe(each)
        bayesNet.elapseTime()
        dataOut.append(bayesNet.getStats())
        count += 1
    # getStats() entries are indexed [0] = estimate, [1] = error bound
    # for each tracked variable.
    DiaObserved = [d["dia_bp"][0] for d in dataOut]
    MeanObserved = [d["mean_bp"][0] for d in dataOut]
    SysObserved = [d["sys_bp"][0] for d in dataOut]
    BagPressure = [d["bag_pressure"][0] for d in dataOut]
    DiaObservedErr = [d["dia_bp"][1] for d in dataOut]
    MeanObservedErr = [d["mean_bp"][1] for d in dataOut]
    SysObservedErr = [d["sys_bp"][1] for d in dataOut]
    BagPressureErr = [d["bag_pressure"][1] for d in dataOut]
    DiaData = map(lambda x: x[2], data)
    MeanData = map(lambda x: x[0], data)
    SysData = map(lambda x: x[1], data)
    # x axis: hard-coded 31 samples — assumes the trace length; TODO derive
    # from len(dataOut) instead.
    l = list(range(31))
    # For each pressure: raw data, filtered estimate, and an error band.
    plt.plot(l,DiaData)
    plt.plot(l,DiaObserved)
    plt.fill_between(l,list(x[0] - x[1] for x in zip(DiaObserved,DiaObservedErr)),list(x[0] + x[1] for x in zip(DiaObserved,DiaObservedErr)),interpolate=True)
    plt.plot(l,MeanData)
    plt.plot(l,MeanObserved)
    plt.fill_between(l,list(x[0] - x[1] for x in zip(MeanObserved,MeanObservedErr)),list(x[0] + x[1] for x in zip(MeanObserved,MeanObservedErr)),interpolate=True)
    plt.plot(l,SysData)
    plt.plot(l,SysObserved)
    plt.fill_between(l,list(x[0] - x[1] for x in zip(SysObserved,SysObservedErr)),list(x[0] + x[1] for x in zip(SysObserved,SysObservedErr)),interpolate=True)
    # plt.plot(l,BagPressure)
    # plt.fill_between(l,list(x[0] - x[1] for x in zip(BagPressure,BagPressureErr)),list(x[0] + x[1] for x in zip(BagPressure,BagPressureErr)),interpolate=True)
    plt.show()
    # return dataOut
if __name__ == "__main__":
main()
| romiphadte/ICU-Artifact-Detection-via-Bayesian-Inference | ABP_DBN/run.py | run.py | py | 2,090 | python | en | code | 0 | github-code | 6 |
17112178327 | '''Write a function that takes a character (i.e. a string of length 1)
and returns True if it is a vowel, False otherwise.'''
def Vowel(x):
    """Return True when `x` is exactly one of the lowercase vowels a/e/i/o/u."""
    return x in ('a', 'e', 'i', 'o', 'u')
# Read a character from the console and report whether it is a vowel.
x = input("Enter The string :")  # expects a single character
obj = Vowel(x)  # boolean verdict from Vowel()
print(obj)
| abhinav319/abhinav_code | Question4.py | Question4.py | py | 319 | python | en | code | 0 | github-code | 6 |
3982866631 | import math
origin = [399809, 4881610]
end = [989876, 4291543] # 590067x590067m intermediate point -> 4586576,5
def calculate_tile(x, y, z):
    """
    Calculate tile number from the coordinates passed as parameter.

    Normal test
    >>> calculate_tile(650000, 4400000, 9)
    (217, 417)

    Limit test
    >>> calculate_tile(989876, 4291543, 9)
    (511, 511)

    Over limit test
    >>> calculate_tile(990000, 4400000, 9)
    ('null', 'null')
    """
    # Reject coordinates outside the bounding box spanned by `origin`/`end`.
    if x < origin[0] or x > end[0] or y < end[1] or y > origin[1]:
        return ('null', 'null')
    # Points sitting exactly on the east/south edge are nudged back inside
    # so they map to the last tile rather than an out-of-range index.
    if x == end[0]:
        x -= 1
    if y == end[1]:
        y += 1
    n = 2 ** z  # tiles per axis at zoom level z
    xtile = int(n * (x - origin[0]) / (end[0] - origin[0]))
    ytile = int(n * (origin[1] - y) / (origin[1] - end[1]))
    return (xtile, ytile)
def calculate_coordinates(xtile, ytile, z):
    """
    Calculate the coordinates of the upper left corner of the tile passed as parameter.

    Normal test
    >>> calculate_coordinates(1314, 1413, 11)
    (778396.9091796875, 4474498.344238281)

    Over limit test
    >>> calculate_coordinates(1314, 1413, 8)
    'null'
    """
    n = 2 ** z  # tiles per axis at zoom level z
    # Tile indices must lie in [0, n); otherwise signal the error with 'null'.
    if xtile >= n or ytile >= n or xtile < 0 or ytile < 0:
        return ('null')
    # Linear interpolation from tile index back to map coordinates.
    x = origin[0] + (xtile * (end[0] - origin[0]) / n)
    y = origin[1] - (ytile * (origin[1] - end[1]) / n)
    return (x, y)
def tile_to_south(tile, z):
    """
    Transform tile number to number for south point of view.

    Normal test
    >>> tile_to_south((325, 785), 10)
    (698, 238)

    Over limit test
    >>> tile_to_south((600, 300), 9)
    'null'
    """
    col, row = tile
    last = (1 << z) - 1  # highest valid index at this zoom level
    if not (0 <= col <= last and 0 <= row <= last):
        return 'null'
    return (last - col, last - row)
def tile_to_east(tile, z):
    """
    Transform tile number to number for east point of view.

    Normal test
    >>> tile_to_east((325, 785), 10)
    (785, 698)

    Over limit test
    >>> tile_to_east((600, 300), 9)
    'null'
    """
    col, row = tile
    last = (1 << z) - 1  # highest valid index at this zoom level
    if not (0 <= col <= last and 0 <= row <= last):
        return 'null'
    return (row, last - col)
def tile_to_west(tile, z):
    """
    Transform tile number to number for west point of view.

    Normal test
    >>> tile_to_west((325, 785), 10)
    (238, 325)

    Over limit test
    >>> tile_to_west((600, 300), 9)
    'null'
    """
    col, row = tile
    last = (1 << z) - 1  # highest valid index at this zoom level
    if not (0 <= col <= last and 0 <= row <= last):
        return 'null'
    return (last - row, col)
def tile_from_south(tile, z):
    """
    Transform tile number to number for north point of view from south.

    Normal test
    >>> tile_from_south((133, 42), 8)
    (122, 213)

    Over limit test
    >>> tile_from_south((600, 300), 9)
    'null'
    """
    col, row = tile
    last = (1 << z) - 1  # highest valid index at this zoom level
    if not (0 <= col <= last and 0 <= row <= last):
        return 'null'
    return (last - col, last - row)
def tile_from_east(tile, z):
    """
    Transform tile number to number for north point of view from east.

    Normal test
    >>> tile_from_east((133, 42), 8)
    (213, 133)

    Over limit test
    >>> tile_from_east((600, 300), 9)
    'null'
    """
    col, row = tile
    last = (1 << z) - 1  # highest valid index at this zoom level
    if not (0 <= col <= last and 0 <= row <= last):
        return 'null'
    return (last - row, col)
def tile_from_west(tile, z):
    """
    Transform tile number to number for north point of view from west.

    Normal test
    >>> tile_from_west((133, 42), 8)
    (42, 122)

    Over limit test
    >>> tile_from_west((600, 300), 9)
    'null'
    """
    col, row = tile
    last = (1 << z) - 1  # highest valid index at this zoom level
    if not (0 <= col <= last and 0 <= row <= last):
        return 'null'
    return (row, last - col)
| strummerTFIU/TFG-IsometricMaps | src/calculate_tile.py | calculate_tile.py | py | 3,553 | python | en | code | 0 | github-code | 6 |
15143899898 | # Created: 2022-07-06
# Link: https://open.kattis.com/problems/helpaphd
#Recommended from the guide: https://github.com/VictorieeMan/kattis-guide/blob/master/input.md
import sys


def solve(query):
    """Answer one query: 'skipped' for the literal 'P=NP', else the sum a+b as text."""
    if query == "P=NP":
        return "skipped"
    a, b = query.split("+")
    return str(int(a) + int(b))


def main():
    """Read the whole Kattis input from stdin and print one answer per query.

    Fixes: the original rebound the builtin ``input`` to the raw stdin text,
    and the ``[1:-1]`` slice silently dropped the last query whenever the
    input lacked a trailing newline; the count on the first line is the
    reliable delimiter.
    """
    lines = sys.stdin.read().split("\n")
    n = int(lines[0])  # first line: number of queries
    for query in lines[1 : n + 1]:
        print(solve(query))


# NOTE(review): the trailing print("Done!") below this block is extra output
# that the Kattis judge will mark Wrong Answer -- remove before submitting.
if __name__ == "__main__":
    main()
print("Done!") | VictorieeMan/Kattis_Solutions | Python 3/problems/@Solved/helpaphd/helpaphd.py | helpaphd.py | py | 468 | python | en | code | 0 | github-code | 6 |
72902347389 | """
run.py
Autor: Juan Pablo
"""
from misVariable import *  # star import; expected to provide `mensaje`
# Simple conditional example: print `mensaje` once for each grade of 18 or more.
nota= input("Ingrese la nota 1: \n")
nota2 = input("Ingrese nota 2: \n")
nota = int(nota)
nota2 = int(nota2)
if nota >= 18:
    print(mensaje)
if nota2 >= 18:
    print(mensaje)
| concpetosfundamentalesprogramacionaa19/ejercicios-clases5-020519-jleon1234 | miProyecto/run.py | run.py | py | 268 | python | es | code | 0 | github-code | 6 |
39425074538 | import discord
class DiscordClient(discord.Client):
    """Discord bot bound to a single channel, announcing a fixed player roster."""

    def __init__(self, channel: int, players: list):
        self.channel: int = channel  # ID of the only channel the bot talks in
        self.players: list = players  # player names announced on startup
        super().__init__()

    async def on_ready(self):
        """Announce the bot and its player list in the configured channel."""
        print(f"{self.user} is connected!")
        channel = self.get_channel(self.channel)
        lines = [
            "Hello! Ready to bring you RaiderIO information 🎉",
            "I'll only list the following characters: If I'm missing any, please add them here: TBD",
            "",
            "\n".join(self.players),
        ]
        await channel.send("\n".join(lines))

    async def on_member_join(self, member):
        # Currently only logs the joining member to stdout.
        print(member)

    async def on_message(self, message):
        """Reply to '!rio rank' in the configured channel, ignoring own messages."""
        if message.author == self.user:
            return
        # str() on both sides tolerates `channel` arriving as str or int.
        if str(message.channel.id) != str(self.channel):
            return
        if message.content.lower().startswith("!rio rank"):
            channel = self.get_channel(self.channel)
            await channel.send("TBD, but Sylphyl Rocks!!! sozz Krugdir")
return
| kevinrobayna/rio_discord_bot | rio_discord_bot/discord_client.py | discord_client.py | py | 1,073 | python | en | code | 0 | github-code | 6 |
19499854601 | # -*- coding: utf-8 -*-
import pytest
from mdye_leetcode.solution_28 import Solution
# makes a Solution object b/c that's how leetcode rolls
@pytest.fixture(scope="module")
def sol():
    """Provide one shared Solution instance for every test in this module."""
    yield Solution()
def test_solution_28_basic(sol: Solution):
    """strStr returns the index of the first occurrence of the needle, or -1."""
    assert sol.strStr("mississippi", "issip") == 4
    assert sol.strStr("foogzon", "zon") == 4
    assert sol.strStr("sadbutsad", "sad") == 0
    assert sol.strStr("leetcode", "leeto") == -1
# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4
| michaeldye/mdye-python-samples | src/mdye_leetcode/test/test_solution_28.py | test_solution_28.py | py | 513 | python | en | code | 0 | github-code | 6 |
29546248670 | import sys
import threading
# Use Thread to Speed up to log(n) - WRONG WAY
# WRONG! WRONG! WRONG!!!
class TreeNode(object):
    """Binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
threadLock = threading.Lock()  # guards `result` and `threads` across workers
threads = []  # every spawned worker, so the caller can join them all
result = True  # shared verdict; any violating subtree flips it to False
class myThread(threading.Thread):
    """One worker per subtree, communicating through module globals.

    Deliberately BROKEN (see run()): children are appended to `threads`
    after the caller's join loop may already be iterating the list.
    """
    def __init__(self, root, lower, upper):
        threading.Thread.__init__(self)
        self.root = root  # subtree to validate
        self.lower = lower  # exclusive lower bound for every value below
        self.upper = upper  # exclusive upper bound for every value below
    def run(self):
        # NOTE(review): `is None` is the idiomatic test here.
        if self.root == None:
            return
        threadLock.acquire()
        if self.root.val <= self.lower or self.root.val >= self.upper:
            global result
            result = False
            threadLock.release()
            return
        # Create new threads
        thread1 = myThread(self.root.left, self.lower, self.root.val)
        thread2 = myThread(self.root.right, self.root.val, self.upper)
        # Add threads to thread list
        threads.append(thread1)
        threads.append(thread2)
        # !!!!!!! WRONG, since when thread is added to threads,
        # for t in threads:
        #     t.join()
        # May has been executed
        threadLock.release()
        # Start new Threads
        thread1.start()
        thread2.start()
def isValidBST(root):
    """Kick off the (broken) global-state worker tree and collect the verdict.

    The join loop below races with workers that append to `threads` while it
    iterates -- see the WRONG note inside myThread.run().
    """
    global result
    threads.clear()
    result = True
    thread = myThread(root, -sys.maxsize, sys.maxsize)
    threads.append(thread)
    thread.start()
    # Wait for all threads to complete
    for t in threads:
        t.join()
    return result
# Smoke test: root 1 with children 0 and 2 is a valid BST.
test_node1 = TreeNode(1)
test_node2 = TreeNode(0)
test_node3 = TreeNode(2)
test_node1.left = test_node2
test_node1.right = test_node3
assert isValidBST(test_node1)
# Use Thread to Speed up to log(n) – Correct:
class TreeNode(object):
    """Binary-tree node (redefined for the corrected example below)."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
import sys
import threading
from queue import Queue
class myThread(threading.Thread):
    """Worker that validates its subtree and reports via a private Queue.

    Each parent joins both children before combining their results, so no
    shared mutable globals are needed (unlike the broken version above).
    """
    def __init__(self, root, lower, upper, return_queue):
        threading.Thread.__init__(self)
        self.root = root  # subtree to validate
        self.lower = lower  # exclusive lower bound
        self.upper = upper  # exclusive upper bound
        self.return_queue = return_queue  # receives this subtree's verdict
    def run(self):
        if self.root == None:
            self.return_queue.put(True)
            return
        if self.root.val <= self.lower or self.root.val >= self.upper:
            self.return_queue.put(False)
            return
        # Create new threads
        queue1 = Queue()
        thread1 = myThread(self.root.left, self.lower, self.root.val, queue1)
        queue2 = Queue()
        thread2 = myThread(self.root.right, self.root.val, self.upper, queue2)
        thread1.start()
        thread2.start()
        thread1.join() # wait both
        thread2.join()
        result1 = queue1.get()
        result2 = queue2.get()
        self.return_queue.put(True if result1 and result2 else False)
def isValidBST(root):
    """Validate `root` with the queue-based worker tree; blocks until done."""
    queue_result = Queue()
    thread = myThread(root, -sys.maxsize, sys.maxsize, queue_result)
    thread.start()
    thread.join() # wait to complete
    return queue_result.get()
# Smoke test: right child equal to the root (1) violates the strict BST rule.
test_node1 = TreeNode(1)
test_node2 = TreeNode(0)
test_node3 = TreeNode(1)
test_node1.left = test_node2
test_node1.right = test_node3
assert (not isValidBST(test_node1))
class Solution(object):
    def isValidBST(self, root):
        """Iteratively check the strict BST property with an explicit stack.

        Each stack entry is (node, minv, maxv): the open interval every value
        in the subtree rooted at `node` must lie strictly inside.

            2          stack evolves as
           / \         [(2, -M, M)]
          1   3        [(3, 2, M), (1, -M, 2)]
                       [(3, 2, M), (None, -M, 1), (None, 1, M)]

        :type root: TreeNode
        :rtype: bool
        """
        # sys.maxint was removed in Python 3; sys.maxsize is the sentinel bound.
        stack = [(root, -sys.maxsize, sys.maxsize)]
        while stack:
            node, minv, maxv = stack.pop()
            if not node:
                continue
            if not minv < node.val < maxv:
                return False
            stack += (node.right, node.val, maxv), (node.left, minv, node.val)
        return True
class SolutionRec(object):
    """Recursive BST validation: pass down the open interval each subtree
    must stay strictly inside.

        root, min_val, max_val
        2  - sys.maxint, sys.maxint
        1  - sys.maxint, 2
        3    2,  sys.maxint

    NOTE(review): ``sys.maxint`` (used by isValidBST below) was removed in
    Python 3 -- switch to ``sys.maxsize`` to port this class.
    """
    def isValid(self, root, min_val, max_val):
        """Return True iff every value under `root` lies in (min_val, max_val)."""
        if not root:
            return True
        if min_val < root.val < max_val:
            # NOTE(review): all([...]) evaluates both recursive calls eagerly;
            # a plain `and` would short-circuit on the first failing subtree.
            return all([self.isValid(root.left, min_val, root.val), self.isValid(root.right, root.val, max_val)])
        else:
            return False

    def isValidBST(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
return self.isValid(root, - sys.maxint, sys.maxint) | HeliWang/upstream | Concurrency/validate-bst.py | validate-bst.py | py | 4,552 | python | en | code | 0 | github-code | 6 |
28705163606 | # Написать программу, которая состоит 4 из этапов:
# - создает список из рандомных четырехзначных чисел
# - принимает с консоли цифру и удаляет ее из всех элементов списка
# - цифры каждого элемента суммирует пока результат не станет однозначным числом
# - из финального списка убирает все дублирующиеся элементы
# - после каждого этапа выводить результат в консоль
# Пример:
# - 1 этап: [2634, 6934, 7286, 3353, 4602, 3176, 3796]
# - 2 этап: Введите цифру: 3
# - 2 этап: [264, 694, 7286, 5, 4602, 176, 796]
# - 3 этап: 264 -> 2+6+4 -> 12 -> 1+2 -> 3
# - 3 этап: [3, 1, 5, 5, 3, 5, 4]
# - 4 этап: [3, 1, 5, 4]
# import random
#
# k = 10 # количество 4значных чисел
# num_array = []
#
# for _ in range(0, k + 1):
# num_array.append(random.randint(1000, 10000))
# print('1 этап: ', num_array)
#
# # 2
# num = int(input('Введите цифру: '))
# for i in range(0, len(num_array)):
# temp = num_array[i]
# temp2 = 0
# while temp > 0:
# if temp % 10 == num:
# pass
# else:
# temp2 = temp2 * 10 + temp % 10
# temp //= 10
# num_array[i] = temp2
# print(num_array)
#
# # 3
# for i in range(0, len(num_array)):
# while num_array[i] > 9:
# num_array[i] = int(num_array[i] / 100) + (num_array[i] % 100 - num_array[i] % 10) // 10 + num_array[i] % 10
# print('сумма цифр = ', num_array)
#
# #4
# num_array = set(num_array)
# print(num_array)
import random

# Stage 1: ten random four-digit numbers.
my_list = [random.randint(1000, 9999) for _ in range(10)]
print(my_list)
# Stage 2: strip every occurrence of the chosen digit from each element.
number = input('Введите число: ')
for i in range(len(my_list)):
    my_list[i] = str(my_list[i]).replace(number, '')
print(my_list)
# Stage 3: replace each element by its digit sum until one digit remains.
for i in range(len(my_list)):
    while len(my_list[i]) > 1:
        summa = 0
        # my_list[i] = str(sum(list(map(int, list(my_list[i])))))  # map() variant: applies int to every digit
        for elem in my_list[i]:
            summa += int(elem)
        my_list[i] = str(summa)
print(my_list)
# Stage 4: drop duplicates (a set keeps only distinct results).
print(set(my_list))
| MihailOgorodov/python_courses | seminar4/3.py | 3.py | py | 2,395 | python | ru | code | 0 | github-code | 6 |
16838118388 | from typing import List
from urllib.parse import urlparse
import pandas as pd
from pathlib import Path
from behave import Given, When, Then, Step
from csvcubeddevtools.behaviour.file import get_context_temp_dir_path
from csvcubeddevtools.helpers.file import get_test_cases_dir
from rdflib import Graph
from csvcubed.models.cube import *
from csvcubed.models.cube import (
ExistingQbAttribute,
NewQbAttribute,
NewQbConcept,
QbMultiMeasureDimension,
QbMultiUnits,
)
from csvcubed.models.validationerror import ValidationError
from csvcubed.models.cube.uristyle import URIStyle
from csvcubed.writers.qbwriter import QbWriter
from csvcubed.utils.qb.validation.cube import validate_qb_component_constraints
from csvcubed.utils.csvw import get_first_table_schema
from csvcubed.utils.pandas import read_csv
_test_case_dir = get_test_cases_dir()
def get_standard_catalog_metadata_for_name(
    name: str, identifier: Optional[str] = None
) -> CatalogMetadata:
    """Build the standard CatalogMetadata fixture shared by every cube in these steps."""
    return CatalogMetadata(
        name,
        summary="Summary",
        identifier=identifier,
        description="Description",
        creator_uri="https://www.gov.uk/government/organisations/office-for-national-statistics",
        publisher_uri="https://www.gov.uk/government/organisations/office-for-national-statistics",
        theme_uris=["http://gss-data.org.uk/def/gdp#some-test-theme"],
        keywords=["Key word one", "Key word two"],
        landing_page_uris=["http://example.org/landing-page"],
        license_uri="http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/",
        public_contact_point_uri="mailto:something@example.org",
    )
# Shared three-row fixture: two dimension columns (A, D) and an integer value.
_standard_data = pd.DataFrame(
    {"A": ["a", "b", "c"], "D": ["e", "f", "g"], "Value": [1, 2, 3]}
)
@Given('a single-measure QbCube named "{cube_name}"')
def step_impl(context, cube_name: str):
    """Store a standard single-measure cube on the behave context."""
    context.cube = _get_single_measure_cube_with_name_and_id(cube_name, None)
@Given('a single-measure QbCube named "{cube_name}" with missing observation values')
def step_impl(context, cube_name: str):
cube = _get_single_measure_cube_with_name_and_id(cube_name, None)
cube.data["Value"] = [1, None, 3]
context.cube = cube
@Given(
'a single-measure QbCube named "{cube_name}" with missing observation values and `sdmxa:obsStatus` replacements'
)
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"A": ["a", "b", "c"],
"D": ["e", "f", "g"],
"Marker": ["Suppressed", None, None],
"Value": [None, 2, 3],
}
)
columns = [
QbColumn("A", NewQbDimension.from_data(label="A", data=data["A"])),
QbColumn("D", NewQbDimension.from_data(label="D", data=data["D"])),
QbColumn(
"Marker",
NewQbAttribute.from_data(
"Marker",
data["Marker"],
parent_attribute_uri="http://purl.org/linked-data/sdmx/2009/attribute#obsStatus",
),
),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), data, columns
)
@Given(
'a single-measure QbCube named "{cube_name}" with missing observation values and missing `sdmxa:obsStatus` replacements'
)
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"A": ["a", "b", "c"],
"D": ["e", "f", "g"],
"Marker": [None, "Provisional", None],
"Value": [None, 2, 3],
}
)
columns = [
QbColumn("A", NewQbDimension.from_data(label="A", data=data["A"])),
QbColumn("D", NewQbDimension.from_data(label="D", data=data["D"])),
QbColumn(
"Marker",
NewQbAttribute.from_data(
"Marker",
data["Marker"],
parent_attribute_uri="http://purl.org/linked-data/sdmx/2009/attribute#obsStatus",
),
),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), data, columns
)
@Given(
'a QbCube named "{cube_name}" with code-list defined in an existing CSV-W "{csvw_file_path}"'
)
def step_impl(context, cube_name: str, csvw_file_path: str):
tmp_dir = get_context_temp_dir_path(context)
csvw_path = tmp_dir / csvw_file_path
columns = [
QbColumn("A", NewQbDimension.from_data("A code list", _standard_data["A"])),
QbColumn(
"D",
NewQbDimension("D code list", code_list=NewQbCodeListInCsvW(csvw_path)),
),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
),
]
csv_path, _ = get_first_table_schema(csvw_path)
code_list_data, data_loading_errors = read_csv(csv=csvw_path.parent / csv_path)
code_list_values = code_list_data["Notation"].sample(3, random_state=1)
context.data_loading_errors = data_loading_errors
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name, None),
pd.DataFrame({"A": ["a", "b", "c"], "D": code_list_values, "Value": [1, 2, 3]}),
columns,
)
@Given('a single-measure QbCube with identifier "{cube_id}" named "{cube_name}"')
def step_impl(context, cube_name: str, cube_id: str):
context.cube = _get_single_measure_cube_with_name_and_id(cube_name, cube_id)
def _get_single_measure_cube_with_name_and_id(
    cube_name: str, cube_id: Optional[str], uri_style: URIStyle = URIStyle.Standard
) -> Cube:
    """Create the standard 3-row, single-measure cube with new dimensions A and D."""
    columns = [
        QbColumn("A", NewQbDimension.from_data("A code list", _standard_data["A"])),
        QbColumn("D", NewQbDimension.from_data("D code list", _standard_data["D"])),
        QbColumn(
            "Value",
            QbSingleMeasureObservationValue(
                NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
            ),
        ),
    ]
    return Cube(
        get_standard_catalog_metadata_for_name(cube_name, cube_id),
        _standard_data,
        columns,
        uri_style=uri_style,
    )
@Given('a single-measure QbCube named "{cube_name}" with existing dimensions')
def step_impl(context, cube_name: str):
columns = [
QbColumn(
"A",
ExistingQbDimension("http://example.org/some/dimension/a"),
csv_column_uri_template="http://example.org/some/codelist/a",
),
QbColumn(
"D",
ExistingQbDimension("http://example.org/some/dimension/d"),
csv_column_uri_template="http://example.org/some/codelist/d",
),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), _standard_data, columns
)
@Given('a single-measure QbCube named "{cube_name}" with duplicate rows')
def step_impl(context, cube_name: str):
data = pd.DataFrame({"A": ["a", "a"], "Value": [1, 1]})
columns = [
QbColumn("A", NewQbDimension.from_data("A Dimension", data["A"])),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), data, columns
)
@Given(
'a single-measure QbCube named "{cube_name}" with codes not defined in the code-list'
)
def step_impl(context, cube_name: str):
columns = [
QbColumn(
"A",
NewQbDimension(
"A code list",
code_list=NewQbCodeList(
get_standard_catalog_metadata_for_name("A code list"),
[NewQbConcept("a"), NewQbConcept("b")], # Deliberately missing "c"
),
),
),
QbColumn("D", NewQbDimension.from_data("D code list", _standard_data["D"])),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), _standard_data, columns
)
@Given(
'a single-measure QbCube named "{cube_name}" with optional attribute values missing'
)
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"Some Dimension": ["a", "b", "c"],
"Some Attribute": ["attr-a", float("nan"), "attr-c"],
"Value": [1, 2, 3],
}
)
columns = [
QbColumn(
"Some Dimension",
NewQbDimension.from_data("Some Dimension", data["Some Dimension"]),
),
QbColumn(
"Some Attribute",
NewQbAttribute.from_data("Some Attribute", data["Some Attribute"]),
),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), data, columns
)
@Given('a multi-measure QbCube named "{cube_name}"')
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"A": ["a_height", "a_length"],
"Measure": ["height", "length"],
"Value": [1, 20],
}
)
columns = [
QbColumn("A", NewQbDimension.from_data("A Dimension", data["A"])),
QbColumn(
"Measure", QbMultiMeasureDimension.new_measures_from_data(data["Measure"])
),
QbColumn(
"Value",
QbMultiMeasureObservationValue(unit=NewQbUnit("meters")),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), data, columns
)
@Given('a multi-measure QbCube named "{cube_name}" with duplicate rows')
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"A": ["a_height", "a_height", "a_length"],
"Measure": ["height", "height", "length"],
"Value": [1, 1, 20],
}
)
columns = [
QbColumn("A", NewQbDimension.from_data("A Dimension", data["A"])),
QbColumn(
"Measure", QbMultiMeasureDimension.new_measures_from_data(data["Measure"])
),
QbColumn(
"Value",
QbMultiMeasureObservationValue(unit=NewQbUnit("meters")),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), data, columns
)
@Given(
'a single-measure QbCube named "{cube_name}" with new attribute values and units'
)
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"Existing Dimension": ["a", "b", "c"],
"New Attribute": ["pending", "final", "in-review"],
"Value": [2, 2, 2],
}
)
columns = [
QbColumn(
"Existing Dimension", ExistingQbDimension("http://existing-dimension")
),
QbColumn(
"New Attribute",
NewQbAttribute.from_data("New Attribute", data["New Attribute"]),
),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), data, columns
)
@Given(
'a single-measure QbCube named "{cube_name}" with one new unit extending another new unit'
)
def step_impl(context, cube_name: str):
columns = [
QbColumn(
"A",
ExistingQbDimension("http://example.org/some/dimension/a"),
csv_column_uri_template="http://example.org/some/codelist/a",
),
QbColumn(
"D",
ExistingQbDimension("http://example.org/some/dimension/d"),
csv_column_uri_template="http://example.org/some/codelist/d",
),
QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"),
NewQbUnit(
"Some Extending Unit",
base_unit=NewQbUnit("Some Base Unit"),
base_unit_scaling_factor=1000,
qudt_quantity_kind_uri="http://some-quantity-kind",
si_base_unit_conversion_multiplier=25.123123,
),
),
),
]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), _standard_data, columns
)
@Then('turtle should be written to "{file}"')
def step_impl(context, file: str):
    """Dump the turtle accumulated on the context into a file in the temp dir."""
    temp_dir = get_context_temp_dir_path(context)
    with open(Path(temp_dir / file), "w") as ttl_file:
        ttl_file.write(context.turtle)
@When("the cube is serialised to CSV-W")
def step_impl(context):
writer = QbWriter(context.cube)
temp_dir = get_context_temp_dir_path(context)
writer.write(temp_dir)
context.csv_file_name = writer.csv_file_name
@When("the cube is serialised to CSV-W (suppressing missing uri value exceptions)")
def step_impl(context):
writer = QbWriter(context.cube, raise_missing_uri_safe_value_exceptions=False)
temp_dir = get_context_temp_dir_path(context)
writer.write(temp_dir)
@Step('the CSVqb should fail validation with "{validation_error}"')
def step_impl(context, validation_error: str):
    """Assert at least one validation error message contains the expected text."""
    cube: Cube = context.cube
    errors = cube.validate() + validate_qb_component_constraints(context.cube)
    messages = [e.message for e in errors]
    assert any(validation_error in message for message in messages), messages
@Step("the CSVqb should pass all validations")
def step_impl(context):
cube: QbCube = context.cube
data_loading_errors: List[ValidationError] = (
context.data_loading_errors if hasattr(context, "data_loading_errors") else []
)
errors = cube.validate() + data_loading_errors
errors += validate_qb_component_constraints(context.cube)
assert len(errors) == 0, [e.message for e in errors]
assert (
len(data_loading_errors) == 0
), f"Errors were found in the csv: {[e.message for e in errors]}"
@Given(
'a single-measure QbCube named "{cube_name}" with "{type}" "{data_type}" attribute'
)
def step_impl(context, cube_name: str, type: str, data_type: str):
data = pd.DataFrame(
{
"A": ["uss-cerritos", "uss-titan"],
"Value": [1, 1],
"Reg": [75567, 80102],
"Appeared": ["2020-08-06", "2020-10-08"],
"First_Captain": ["William Riker", "Carol Freeman"],
}
)
dim = QbColumn("A", NewQbDimension.from_data("A Dimension", data["A"]))
val = QbColumn(
"Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Some Measure"), NewQbUnit("Some Unit")
),
)
if data_type == "int":
if type == "new":
att = QbColumn(
"Reg",
NewQbAttributeLiteral(data_type="int", label="Reg"),
)
else:
att = QbColumn(
"Reg",
ExistingQbAttributeLiteral(
data_type="int", attribute_uri="http://some-uri"
),
)
sp1 = SuppressedCsvColumn("Appeared")
sp2 = SuppressedCsvColumn("First_Captain")
columns = [dim, val, att, sp1, sp2]
elif data_type == "date":
sp1 = SuppressedCsvColumn("Reg")
if type == "new":
att = QbColumn(
"Appeared", NewQbAttributeLiteral(data_type="date", label="Appeared")
)
else:
att = QbColumn(
"Appeared",
ExistingQbAttributeLiteral(
data_type="date", attribute_uri="http://some-uri"
),
)
sp2 = SuppressedCsvColumn("First_Captain")
columns = [dim, val, sp1, att, sp2]
elif data_type == "string":
sp1 = SuppressedCsvColumn("Reg")
sp2 = SuppressedCsvColumn("Appeared")
if type == "new":
att = QbColumn(
"First_Captain",
NewQbAttributeLiteral(data_type="string", label="First Captain"),
)
else:
att = QbColumn(
"First_Captain",
ExistingQbAttributeLiteral(
data_type="string", attribute_uri="http://some-uri"
),
)
columns = [dim, val, sp1, sp2, att]
context.cube = Cube(
get_standard_catalog_metadata_for_name(cube_name), data, columns
)
@Given(
'a single-measure QbCube named "{cube_name}" with all new units/measures/dimensions/attributes/codelists'
)
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"New Dimension": ["a", "b", "c"],
"New Attribute": ["university", "students", "masters"],
"Observed Value": [1, 2, 3],
}
)
columns = [
QbColumn(
"New Dimension",
NewQbDimension(
"a new codelist",
code_list=NewQbCodeList(
get_standard_catalog_metadata_for_name("a new codelist"),
[NewQbConcept("a"), NewQbConcept("b"), NewQbConcept("c")],
),
),
),
QbColumn(
"New Attribute",
NewQbAttribute.from_data("new_Qb_attribute", data["New Attribute"]),
),
QbColumn(
"Observed Value",
QbSingleMeasureObservationValue(
NewQbMeasure("Part-time"), NewQbUnit("Num of Students")
),
),
]
cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
errors = cube.validate()
errors += validate_qb_component_constraints(cube)
assert len(errors) == 0, [e.message for e in errors]
context.cube = cube
@Given(
'a multi-measure QbCube named "{cube_name}" with all new units/measures/dimensions/attributes/codelists'
)
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"New Dimension": ["a", "b", "c"],
"New Attribute": ["university", "students", "masters"],
"Observed Value": [1, 2, 3],
"Measure": ["part-time", "full-time", "flex-time"],
}
)
columns = [
QbColumn(
"New Dimension",
NewQbDimension(
"New Dimension",
code_list=NewQbCodeList(
get_standard_catalog_metadata_for_name("a new codelist"),
[NewQbConcept("a"), NewQbConcept("b"), NewQbConcept("c")],
),
),
),
QbColumn(
"New Attribute",
NewQbAttribute.from_data("New Attribute", data["New Attribute"]),
),
QbColumn(
"Observed Value",
QbMultiMeasureObservationValue(unit=NewQbUnit("Num of students")),
),
QbColumn(
"Measure", QbMultiMeasureDimension.new_measures_from_data(data["Measure"])
),
]
cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
errors = cube.validate()
errors += validate_qb_component_constraints(cube)
assert len(errors) == 0, [e.message for e in errors]
context.cube = cube
@Given(
'a single measure QbCube named "{cube_name}" with existing units/measure/dimensions/attribute/codelists'
)
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"Existing Dimension": ["a", "b", "c"],
"New Dimension": ["d", "e", "f"],
"Existing Attribute": ["university", "students", "masters"],
"Observed Value": [1, 2, 3],
}
)
columns = [
QbColumn(
csv_column_title="Existing Dimension",
structural_definition=ExistingQbDimension("http://existing/dimension"),
csv_column_uri_template="http://existing/dimension/code-list/{+existing_dimension}",
),
QbColumn(
csv_column_title="New Dimension",
structural_definition=NewQbDimension(
label="existing codelist",
code_list=ExistingQbCodeList(
concept_scheme_uri="http://existing/concept/scheme/uri"
),
),
),
QbColumn(
csv_column_title="Existing Attribute",
structural_definition=ExistingQbAttribute("http://existing/attribute"),
csv_column_uri_template="http://existing/attribute/{+existing_attribute}",
),
QbColumn(
csv_column_title="Observed Value",
structural_definition=QbSingleMeasureObservationValue(
ExistingQbMeasure("http://existing/measure"),
ExistingQbUnit("http://exisiting/unit"),
),
),
]
cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
errors = cube.validate()
errors += validate_qb_component_constraints(cube)
assert len(errors) == 0, [e.message for e in errors]
context.cube = cube
@Given(
'a multi measure QbCube named "{cube_name}" with existing units/measure/dimensions/attribute/codelists'
)
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"Existing Dimension": ["a", "b", "c"],
"New Dimension": ["d", "e", "f"],
"Existing Attribute": ["university", "students", "masters"],
"Observed Value": [1, 2, 3],
"Units": ["gbp", "count", "count"],
"Existing Measures": ["part-time", "full-time", "flex-time"],
}
)
columns = [
QbColumn(
"Existing Dimension",
ExistingQbDimension("http://existing/dimension"),
csv_column_uri_template="http://existing/dimension/code-list/{+existing_dimension}",
),
QbColumn(
csv_column_title="New Dimension",
structural_definition=NewQbDimension(
label="existing codelist",
code_list=ExistingQbCodeList(
concept_scheme_uri="http://gss-data.org.uk/def/concept-scheme/some-existing-codelist"
),
),
),
QbColumn(
csv_column_title="Existing Attribute",
structural_definition=ExistingQbAttribute("http://existing/attribute"),
csv_column_uri_template="http://existing/attribute/{+existing_attribute}",
),
QbColumn(
"Observed Value",
QbMultiMeasureObservationValue("number"),
),
QbColumn(
"Units",
QbMultiUnits(
[
ExistingQbUnit("http://existing/unit/gbp"),
ExistingQbUnit("http://existing/unit/count"),
]
),
csv_column_uri_template="http://existing/unit/{+units}",
),
QbColumn(
"Existing Measures",
QbMultiMeasureDimension(
[
ExistingQbMeasure("http://existing/measure/part-time"),
ExistingQbMeasure("http://existing/measure/full-time"),
ExistingQbMeasure("http://existing/measure/flex-time"),
]
),
csv_column_uri_template="http://existing/measure/{+existing_measures}",
),
]
cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
errors = cube.validate()
errors += validate_qb_component_constraints(cube)
assert len(errors) == 0, [e.message for e in errors]
context.cube = cube
@Given('a QbCube named "{cube_name}" which references a legacy composite code-list')
def step_impl(context, cube_name: str):
data = pd.DataFrame(
{
"Location": [
"http://data.europa.eu/nuts/code/UKC",
"http://data.europa.eu/nuts/code/UKL",
"http://data.europa.eu/nuts/code/UKD",
],
"Observed Value": [1, 2, 3],
}
)
columns = [
QbColumn(
"Location",
NewQbDimension(
"Location",
code_list=NewQbCodeListInCsvW(
_test_case_dir
/ "readers"
/ "skoscodelistreader"
/ "location.csv-metadata.json"
),
),
),
QbColumn(
"Observed Value",
QbSingleMeasureObservationValue(
unit=NewQbUnit("Num of students"), measure=NewQbMeasure("Total")
),
),
]
cube = Cube(get_standard_catalog_metadata_for_name(cube_name), data, columns)
errors = cube.validate()
errors += validate_qb_component_constraints(cube)
assert len(errors) == 0, [e.message for e in errors]
context.cube = cube
@Then("some additional turtle is appended to the resulting RDF")
def step_impl(context):
    """Append the scenario's docstring (whitespace-stripped) to the
    accumulated turtle serialisation on the context."""
    context.turtle += context.text.strip()
@Then("the cube's metadata should contain URLs with file endings")
def step_impl(context):
    """Assert that the generated CSV-W metadata uses standard-style URIs
    (i.e. keeping .csv/.json file extensions)."""
    assertURIStyle(URIStyle.Standard, get_context_temp_dir_path(context), context.csv_file_name)
@Then("the cube's metadata should contain URLs without file endings")
def step_impl(context):
    """Assert that the generated CSV-W metadata uses extensionless URIs."""
    assertURIStyle(URIStyle.WithoutFileExtensions, get_context_temp_dir_path(context), context.csv_file_name)
@Given(
    'a single-measure QbCube named "{cube_name}" configured with "{uri_style}" URI style'
)
def step_impl(context, cube_name: str, uri_style: str):
    """Store a single-measure cube built with the named URIStyle member on
    the behave context."""
    chosen_style = URIStyle[uri_style]
    context.cube = _get_single_measure_cube_with_name_and_id(cube_name, None, chosen_style)
def assertURIStyle(uri_style: URIStyle, temp_dir: Path, csv_file_name: str):
    """Parse the cube's CSV-W metadata and assert that every subject and
    predicate URI minted under the relative base follows `uri_style`."""
    relative_base = "file://relative-uris/"
    metadata_file = temp_dir.joinpath(f"{csv_file_name}-metadata.json")
    graph = Graph()
    graph.parse(metadata_file, publicID=relative_base)
    for triple in graph:
        subject, predicate, _ = triple
        # Objects are deliberately not checked — only locally-minted
        # subjects/predicates must follow the configured style.
        for node in (subject, predicate):
            if node.startswith(relative_base):
                assert_uri_style_for_uri(uri_style, node, triple)
def assert_uri_style_for_uri(uri_style: URIStyle, uri: str, node):
    """Assert a single URI's path component matches the expected URI style.

    `node` is only used in the failure message for context."""
    uri_path = urlparse(uri).path
    if uri_style == URIStyle.WithoutFileExtensions:
        assert not uri_path.endswith(
            ".csv"
        ), f"expected {node} to end without a CSV file extension"
    else:
        # Standard style keeps the generated file extensions.
        assert uri_path.endswith(
            (".csv", ".json")
        ), f"expected {node} to end with .csv or .json"
| GDonRanasinghe/csvcubed-models-test-5 | csvcubed/tests/behaviour/steps/qbwriter.py | qbwriter.py | py | 27,298 | python | en | code | 0 | github-code | 6 |
42560657012 | # 4.3
# List of Depths: Given a binary tree, design an algorithm which creates a linked list of all the nodes
# at each depth (e.g., if you have a tree with depth D, you'll have D linked lists).
import unittest
from Chapter_4_TreesAndGraphs import BinaryTree
from Chapter_4_TreesAndGraphs.Node import Node
from Chapter_4_TreesAndGraphs.ex_4_2_MinimalTree import minimal_tree
def list_of_depths(tree: BinaryTree):
    """Return a list with one singly linked list per tree depth.

    Index d of the returned list holds the head `Node` of a linked list of
    all values at depth d (pre-order within a depth's traversal order).

    Performance fix: the original re-walked each depth's linked list to find
    its tail on every insertion (O(n) per append, O(n^2) overall for a
    balanced tree's deepest level); a per-depth tail pointer makes every
    append O(1).
    """
    list_by_depths = [None] * tree.height()
    # tails[d] is the last Node appended at depth d (None until first append).
    tails = [None] * len(list_by_depths)

    def add_tree_to_list_by_depths(current_tree: BinaryTree, current_depth: int):
        # Append this node's data to the linked list for its depth.
        node = Node(current_tree.data)
        if list_by_depths[current_depth] is None:
            list_by_depths[current_depth] = node
        else:
            tails[current_depth].next = node
        tails[current_depth] = node
        if current_tree.left is not None:
            add_tree_to_list_by_depths(current_tree.left, current_depth + 1)
        if current_tree.right is not None:
            add_tree_to_list_by_depths(current_tree.right, current_depth + 1)

    add_tree_to_list_by_depths(tree, 0)
    return list_by_depths
class TestCase(unittest.TestCase):
def test(self):
tree = minimal_tree(range(20))
self.assertEqual('[15 -> None, 7 -> 19 -> None, 3 -> 11 -> 17 -> None, 1 -> 5 -> 9 -> 13 -> 16 -> 18 -> None, 0 -> 2 -> 4 -> 6 -> 8 -> 10 -> 12 -> 14 -> None]',\
list_of_depths(tree).__repr__()) | JSchoreels/CrackingTheCodingInterview | Chapter_4_TreesAndGraphs/ex_4_3_ListOfDepths.py | ex_4_3_ListOfDepths.py | py | 1,460 | python | en | code | 0 | github-code | 6 |
def nearestValidPoint(x, y, points):
    """Return the index of the closest "valid" point to (x, y).

    A point [a, b] is valid when it shares a coordinate with the query
    (a == x or b == y). Distance is the Manhattan distance; ties are broken
    by the smallest index. Returns -1 when no valid point exists.

    Fixes: removed leftover debug prints (one of which dumped the entire
    `points` list on every iteration) and computes each Manhattan distance
    once instead of twice.
    """
    smallest_seen = float('inf')
    index = -1
    for i, (a, b) in enumerate(points):
        if x == a or y == b:
            dist = abs(x - a) + abs(y - b)
            if dist < smallest_seen:
                smallest_seen = dist
                index = i
    return index
nearestValidPoint(3, 4, [[1,2],[3,1],[2,4],[2,3],[4,4]]) | MichelleGray78/LeetCode_Problems | LeetCode_Problems/nearest_point_with_same_x_or_y/main.py | main.py | py | 600 | python | en | code | 0 | github-code | 6 |
39920879314 | """
This module implement the ServiceProxy class.
This class is used to provide a local proxy to a remote service for a ZeroRobot.
When a service or robot ask the creation of a service to another robot, a proxy class is created locally
so the robot see the service as if it as local to him while in reality the service is managed by another robot.
"""
import urllib
from requests.exceptions import HTTPError
from jose import jwt
from js9 import j
from zerorobot.task import (TASK_STATE_ERROR, TASK_STATE_NEW, TASK_STATE_OK,
TASK_STATE_RUNNING, Task, TaskNotFoundError)
from zerorobot.template.state import ServiceState
class ServiceProxy():
    """
    This class is used to provide a local proxy to a remote service for a ZeroRobot.
    When a service or robot ask the creation of a service to another robot, a proxy class is created locally
    so the robot see the service as if it as local to him while in reality the service is managed by another robot.
    """

    def __init__(self, name, guid, zrobot_client):
        """
        @param name: name of the service
        @param guid: guid of the service
        @param zrobot_client: Instance of ZeroRobotClient that talks to the robot on which the
                              service is actually running
        """
        self._zrobot_client = zrobot_client
        self.name = name
        self.guid = guid
        # filled in by the code that creates the proxy
        self.template_uid = None
        # a proxy service doesn't have direct access to the data of it's remote homologue
        # cause data are always only accessible by the service itself and locally
        self._data = None
        self.task_list = TaskListProxy(self)

    def __repr__(self):
        # Provide a nice representation in tools like IPython / js9
        return "robot://%s/%s?%s" % (self._zrobot_client.instance, self.template_uid, urllib.parse.urlencode(dict(name=self.name, guid=self.guid)))

    @property
    def data(self):
        # Always None on a proxy: service data never leaves the owning robot.
        return self._data

    @property
    def state(self):
        """Fetch the remote service state and rebuild it as a local ServiceState."""
        # TODO: handle exceptions
        service, _ = self._zrobot_client.api.services.GetService(self.guid)
        s = ServiceState()
        for state in service.state:
            s.set(state.category, state.tag, state.state.value)
        return s

    @property
    def actions(self):
        """
        list available actions of the services
        """
        actions, _ = self._zrobot_client.api.services.ListActions(self.guid)
        return sorted([a.name for a in actions])

    @property
    def logs(self):
        """Return the remote service's logs; a 400 response is surfaced as a
        RuntimeError carrying the server's message."""
        try:
            logs, resp = self._zrobot_client.api.services.GetLogs(self.guid)
        except HTTPError as err:
            if err.response.status_code == 400:
                raise RuntimeError(err.response.json()['message'])
            raise err
        return logs.logs

    def schedule_action(self, action, args=None):
        """
        Do a call on a remote ZeroRobot to add an action to the task list of
        the corresponding service

        @param action: action is the name of the action to add to the task list
        @param args: dictionnary of the argument to pass to the action
        """
        req = {
            "action_name": action,
        }
        if args:
            req["args"] = args
        task, _ = self._zrobot_client.api.services.AddTaskToList(req, service_guid=self.guid)
        return _task_proxy_from_api(task, self)

    def delete(self):
        """Delete the remote service, then remove the first matching service
        secret (JWT whose `service_guid` claim equals ours) from the local
        zrobot client configuration."""
        self._zrobot_client.api.services.DeleteService(self.guid)
        # clean up secret from zrobot client
        for secret in list(self._zrobot_client.config.data['secrets_']):
            try:
                claims = jwt.get_unverified_claims(secret)
            except Exception:
                # Bug fix: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; only malformed tokens
                # should be skipped here.
                continue
            else:
                if claims.get('service_guid') == self.guid:
                    self._zrobot_client.config.data['secrets_'].remove(secret)
                    self._zrobot_client.config.save()
                    # at most one secret per service, so stop at the first hit
                    return
class TaskListProxy:
    """Local, read-only view over the task list of a remote service."""

    def __init__(self, service_proxy):
        self._service = service_proxy

    def empty(self):
        """Return True when the remote service has no waiting task."""
        client = self._service._zrobot_client
        tasks, _ = client.api.services.getTaskList(service_guid=self._service.guid, query_params={'all': False})
        return len(tasks) <= 0

    def list_tasks(self, all=False):
        """List the remote tasks as TaskProxy objects; `all` includes finished ones."""
        client = self._service._zrobot_client
        tasks, _ = client.api.services.getTaskList(service_guid=self._service.guid, query_params={'all': all})
        return [_task_proxy_from_api(each, self._service) for each in tasks]

    def get_task_by_guid(self, guid):
        """
        return a task from the list by it's guid
        """
        try:
            task, _ = self._service._zrobot_client.api.services.GetTask(service_guid=self._service.guid, task_guid=guid)
        except HTTPError as err:
            if err.response.status_code == 404:
                raise TaskNotFoundError("no task with guid %s found" % guid)
            raise err
        return _task_proxy_from_api(task, self._service)
class TaskProxy(Task):
    """
    class that represent a task on a remote service
    the state attribute is an property that do an API call to get the
    actual state of the task on the remote ZeroRobot
    """

    def __init__(self, guid, service, action_name, args, created):
        super().__init__(func=None, args=args)
        self.action_name = action_name
        self.service = service
        self.guid = guid
        self._created = created

    def execute(self):
        # A proxy only mirrors a remote task; execution happens on the remote robot.
        raise RuntimeError("a TaskProxy should never be executed")

    @property
    def result(self):
        """Lazily fetch and cache the JSON-decoded result of the remote task."""
        if self._result is None:
            task, _ = self.service._zrobot_client.api.services.GetTask(task_guid=self.guid, service_guid=self.service.guid)
            if task.result:
                self._result = j.data.serializer.json.loads(task.result)
        return self._result

    @property
    def duration(self):
        """Lazily fetch and cache the execution duration of the remote task."""
        if self._duration is None:
            task, _ = self.service._zrobot_client.api.services.GetTask(task_guid=self.guid, service_guid=self.service.guid)
            self._duration = task.duration
        return self._duration

    @property
    def state(self):
        # Never cached: the remote task's state can change between reads.
        task, _ = self.service._zrobot_client.api.services.GetTask(task_guid=self.guid, service_guid=self.service.guid)
        return task.state.value

    @state.setter
    def state(self, value):
        # Bug fix: corrected the "statet" typo in the error message.
        raise RuntimeError("you can't change the state of a TaskProxy")

    @property
    def eco(self):
        """Lazily fetch and cache the error condition object (if any) of the remote task."""
        if self._eco is None:
            task, _ = self.service._zrobot_client.api.services.GetTask(task_guid=self.guid, service_guid=self.service.guid)
            if task.eco:
                d_eco = task.eco.as_dict()
                d_eco['_traceback'] = task.eco._traceback
                self._eco = j.core.errorhandler.getErrorConditionObject(ddict=d_eco)
        return self._eco
def _task_proxy_from_api(task, service):
    """Build a TaskProxy from a raw API task object, pre-populating the
    cached duration and error-condition object when already present."""
    proxy = TaskProxy(task.guid, service, task.action_name, task.args, task.created)
    if task.duration:
        proxy._duration = task.duration
    if task.eco:
        eco_dict = task.eco.as_dict()
        eco_dict['_traceback'] = task.eco._traceback
        proxy._eco = j.core.errorhandler.getErrorConditionObject(ddict=eco_dict)
    return proxy
| BolaNasr/0-robot | zerorobot/service_proxy.py | service_proxy.py | py | 7,252 | python | en | code | 0 | github-code | 6 |
73026312507 | from typing import DefaultDict
import sys
import os
import csv
# Make the project packages importable when this file is run as a script.
# Bug fix: `list.append()` takes exactly one argument — `sys.path.append(0, x)`
# raised TypeError at import time. Prepending a search path is `insert(0, x)`.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('./src'))
sys.path.insert(0, os.path.abspath('./src/utilities'))
from src.utilities import SCRIPT_HOME
from src.utilities.post_process import post_proc_timeseries
from net_sim import Attack_Sim
from time import time
from sys import argv
def main(argv):
    """Run one attack-simulation event.

    With no CLI arguments a hard-coded Comcast demo configuration is used;
    otherwise all parameters are unpacked positionally from ``argv``.
    Per-key results from the simulation are accumulated into ``data``
    (retained for the commented-out CSV export below).
    """
    from collections import defaultdict  # local: keeps the file-level imports untouched

    if len(argv) == 1:
        network = 'Comcast'
        hosts = 149
        te_method = "-semimcfraeke"
        traffic_type = "FlashCrowd"
        iter_i = 1
        experiment_tag = "NoFailures"
        network_instance = network
        repeat = "False"
        traffic_file = "/home/mhall/OLTE/data/traffic/{}_x100_10000_pareto-matrix.txt".format(network)
        n_fallow_transponders = "20"
        # Bug fix: these three were only assigned in the CLI branch, so the
        # no-argument path crashed with NameError at the Attack_Sim() call.
        # "baseline" mirrors the previously hard-coded strategy comment;
        # TODO confirm the allocation defaults against Attack_Sim's API.
        optical_strategy = "baseline"
        fallow_tx_allocation = "static"
        ftx_file = ""
    else:
        (_, network, hosts, te_method, traffic_type, iter_i, experiment_tag,
         network_instance, repeat, traffic_file, n_fallow_transponders,
         optical_strategy, fallow_tx_allocation, ftx_file) = argv

    iter_i = int(iter_i)
    hosts = int(hosts)
    # NOTE(review): `wc -l` via os.popen assumes a POSIX shell and does no
    # quoting of traffic_file — acceptable only for trusted local paths.
    iterations = int(os.popen('wc -l ' + traffic_file).read().split()[0])
    data = defaultdict(list)

    # ########################## Baseline ########################
    attack_sim = Attack_Sim(network_instance,
                            hosts,
                            "_".join([traffic_type, experiment_tag]),
                            iterations=iterations,
                            te_method=te_method,
                            method="none",
                            traffic_file=traffic_file,
                            strategy=optical_strategy,
                            use_heuristic='no',
                            fallow_transponders=n_fallow_transponders,
                            fallow_tx_allocation_strategy=fallow_tx_allocation,
                            fallow_tx_allocation_file=ftx_file,
                            salt=str(iter_i))
    if repeat == "repeat":
        result = attack_sim.perform_sim(circuits=1, start_iter=iter_i, end_iter=iter_i, repeat=True)
    else:
        result = attack_sim.perform_sim(circuits=1, start_iter=iter_i, end_iter=iter_i)
    for key in result:
        data[key].extend(result[key])

    # results_file = SCRIPT_HOME + "/data/results/{}_coremelt_every_link_{}_{}_{}".format(
    #     network, attack, net_iter.split('/')[1], iter_i) + ".csv"
    # print("writing results to: " + results_file)
    # with open(results_file, "w") as outfile:
    #     writer = csv.writer(outfile)
    #     writer.writerow(data.keys())
    #     writer.writerows(zip(*data.values()))
if __name__ == "__main__":
    # Script entry point: forwards the raw argv list (imported above via
    # ``from sys import argv``) straight to main().
    main(argv)
| mattall/topology-programming | scripts/TDSC/sim_event.py | sim_event.py | py | 3,375 | python | en | code | 0 | github-code | 6 |
9224589864 | from prediction.M2I.predictor import M2IPredictor
import numpy as np
import math
import logging
import copy
import random
import time
import interactive_sim.envs.util as utils
import plan.helper as plan_helper
import agents.car as car
# ---- Planner tuning constants (IDM-style following & trajectory planning) ----
# NOTE(review): units are mostly implicit — presumably meters / frames; confirm.
S0 = 2
T = 0.25 #1.5 # reaction time when following
DELTA = 4 # the power term in IDM
PLANNING_HORIZON = 5 # in frames
PREDICTION_HTZ = 10 # prediction_htz
T_HEADWAY = 0.2
A_SPEEDUP_DESIRE = 0.3 # A
A_SLOWDOWN_DESIRE = 1.5 # B
# "SHRESHOLD" is a typo for THRESHOLD kept for compatibility with callers.
XPT_SHRESHOLD = 0.7
MINIMAL_DISTANCE_PER_STEP = 0.05
MINIMAL_DISTANCE_TO_TRAVEL = 4
# MINIMAL_DISTANCE_TO_RESCALE = -999 #0.1
REACTION_AFTER = 200 # in frames
MINIMAL_SCALE = 0.3
MAX_DEVIATION_FOR_PREDICTION = 4
TRAFFIC_LIGHT_COLLISION_SIZE = 2
MINIMAL_SPEED_TO_TRACK_ORG_GOAL = 5
MINIMAL_DISTANCE_TO_GOAL = 15
OFF_ROAD_DIST = 30
# Debug switches.
PRINT_TIMER = False
DRAW_CBC_PTS = False
def get_angle(x, y):
    """Angle in radians, in (-pi, pi], of the vector (x, y) measured
    counter-clockwise from the positive x axis."""
    return math.atan2(y, x)
def euclidean_distance(pt1, pt2):
    """Euclidean (L2) distance between two 2-D points given as (x, y) pairs.

    Uses math.hypot, which avoids intermediate overflow/underflow compared
    to sqrt of summed squares.
    """
    x_1, y_1 = pt1
    x_2, y_2 = pt2
    return math.hypot(x_1 - x_2, y_1 - y_2)
def get_angle_of_a_line(pt1, pt2):
    """Heading (radians) of the directed segment pt1 -> pt2, measured
    counter-clockwise from the positive x axis."""
    start_x, start_y = pt1
    end_x, end_y = pt2
    return math.atan2(end_y - start_y, end_x - start_x)
def calculate_yaw_from_states(trajectory, default_yaw):
    """Derive a per-frame yaw sequence from consecutive (x, y) positions.

    A frame's yaw is the heading of its displacement to the next frame when
    that displacement exceeds 1 unit; otherwise the most recent confident
    yaw (seeded with `default_yaw`) is reused. The final frame is left at 0,
    matching the original behaviour.
    """
    time_frames, _ = trajectory.shape
    pred_yaw = np.zeros([time_frames])
    last_confident_yaw = default_yaw
    for i in range(time_frames - 1):
        dx = trajectory[i + 1][0] - trajectory[i][0]
        dy = trajectory[i + 1][1] - trajectory[i][1]
        if np.sqrt(dx * dx + dy * dy) > 1:
            # Large enough motion to trust the displacement heading
            # (atan2(y, x) — same convention as get_angle above, inlined).
            last_confident_yaw = math.atan2(dy, dx)
        pred_yaw[i] = last_confident_yaw
    return pred_yaw
def change_axis(yaw):
    """Convert a yaw angle between the two axis conventions used in this
    module: negate and rotate by -pi/2."""
    return -(yaw + math.pi / 2)
def get_current_pose_and_v(current_state, agent_id, current_frame_idx):
    """Return the agent's pose at the previous frame plus its per-step speed.

    Speed is estimated from the original (ground-truth) trajectory as the
    distance covered over the last 5 frames divided by 5. Invalid past
    poses (x == -1 sentinel) force the speed to 0.
    """
    original = current_state['predicting']['original_trajectory'][agent_id]
    pose_now = original['pose'][current_frame_idx - 1]
    if original['pose'][current_frame_idx - 1, 0] == -1 or original['pose'][current_frame_idx - 6, 0] == -1:
        v_per_step = 0
        print("Past invalid for ", agent_id, " and setting v to 0")
    else:
        v_per_step = euclidean_distance(original['pose'][current_frame_idx - 1, :2],
                                        original['pose'][current_frame_idx - 6, :2]) / 5
    return pose_now, v_per_step
class EnvPlanner:
"""
EnvPlanner is capable of using as much information as it can to satisfy its loss like avoiding collisions.
EnvPlanner can assume it's controlling all agents around if it does not exacerbate the sim-2-real gap.
While the baseline planner or any planner controlling the ego vehicle can only use the prediction or past data
"""
    def __init__(self, env_config, predictor, dataset='Waymo', map_api=None):
        """Configure the environment planner.

        Args:
            env_config: config object; all planner knobs are read from env_config.env.
            predictor: M2I-style online predictor driving relation/marginal predictions.
            dataset: 'Waymo' or 'NuPlan' — selects lane/vehicle type encodings below.
            map_api: NuPlan map API instance (unused for Waymo).
        """
        # Planning window and cadence, all expressed in frames.
        self.planning_from = env_config.env.planning_from
        self.planning_interval = env_config.env.planning_interval
        self.planning_horizon = env_config.env.planning_horizon
        self.planning_to = env_config.env.planning_to
        self.scenario_frame_number = 0
        self.online_predictor = predictor
        self.method_testing = env_config.env.testing_method # 0=densetnt with dropout, 1=0+post-processing, 2=1+relation
        self.test_task = env_config.env.test_task
        self.all_relevant = env_config.env.all_relevant
        self.follow_loaded_relation = env_config.env.follow_loaded_relation
        self.follow_prediction_traj = env_config.env.follow_prediction
        self.target_lanes = [0, 0] # lane_index, point_index
        self.routed_traj = {}
        self.follow_gt_first = env_config.env.follow_gt_first
        self.predict_env_for_ego_collisions = env_config.env.predict_env_for_ego_collisions
        self.predict_relations_for_ego = env_config.env.predict_relations_for_ego
        self.predict_with_rules = env_config.env.predict_with_rules
        self.frame_rate = env_config.env.frame_rate
        self.current_on_road = True
        self.dataset = dataset
        self.online_predictor.dataset = dataset
        # Dataset-specific integer encodings for traversable lane types.
        self.valid_lane_types = [1, 2] if self.dataset == 'Waymo' else [0, 11]
        self.vehicle_types = [1] if self.dataset == 'Waymo' else [0, 7] # Waymo: Unset=0, Vehicle=1, Pedestrian=2, Cyclist=3, Other=4
        self.map_api = map_api # NuPlan only
        # Per-agent history of lanes already traversed (keyed by agent id).
        self.past_lanes = {}
def reset(self, *args, **kwargs):
time1 = time.perf_counter()
self.online_predictor(new_data=kwargs['new_data'], model_path=kwargs['model_path'],
time_horizon=kwargs['time_horizon'], predict_device=kwargs['predict_device'],
use_prediction=(self.follow_prediction_traj or self.predict_env_for_ego_collisions) and kwargs['ego_planner'],
predictor_list=kwargs['predictor_list'])
time2 = time.perf_counter()
self.online_predictor.setting_goal_points(current_data=kwargs['new_data'])
self.current_on_road = True
print(f"predictor reset with {time2-time1:04f}s")
# self.data = self.online_predictor.data
def is_planning(self, current_frame_idx):
self.scenario_frame_number = current_frame_idx
frame_diff = self.scenario_frame_number - self.planning_from
if frame_diff >= 0 and frame_diff % self.planning_interval == 0:
return True
return False
def is_first_planning(self, current_frame_idx):
self.scenario_frame_number = current_frame_idx
frame_diff = self.scenario_frame_number - self.planning_from
if frame_diff >= 0 and frame_diff == 0: # frame_diff % self.planning_interval == 0:
return True
return False
    def collision_based_relevant_detection(self, current_frame_idx, current_state, predict_ego=True):
        """Grow the set of 'relevant' agents by chasing future collisions.

        Starting from the ego (plus any previously marked relevant agents),
        each agent's future ground-truth poses are checked for collisions
        against every other agent. On a collision, a relation prediction
        decides whether the newly hit agent yields; yielding agents (or all
        hits when method_testing < 2) become relevant and are themselves
        expanded. Results are written back into
        current_state['predicting']['relevant_agents'/'colliding_pairs'].

        NOTE(review): `Agent` is not among this file's visible imports —
        presumably `car.Agent` or defined later in the module; confirm.
        `predict_ego` is currently unused.
        """
        ego_agent = current_state['predicting']['ego_id'][1]
        # print("before: ", current_state['predicting']['relevant_agents'], bool(current_state['predicting']['relevant_agents']))
        if not current_state['predicting']['relevant_agents']:
            relevant_agents = [ego_agent]
            undetected_piles = [ego_agent]
        else:
            relevant_agents = current_state['predicting']['relevant_agents'].copy()
            if ego_agent not in relevant_agents:
                relevant_agents += [ego_agent]
            undetected_piles = relevant_agents.copy()
        colliding_pairs = []
        # `undetected_piles` is a work stack of agents whose collisions have
        # not been expanded yet.
        while len(undetected_piles) > 0:
            if self.all_relevant:
                # hard force all agents as relevant
                current_agent = undetected_piles.pop()
                for each_agent_id in current_state['agent']:
                    if each_agent_id != current_agent:
                        relevant_agents.append(each_agent_id)
                break
            current_agent = undetected_piles.pop()
            ego_poses = current_state['agent'][current_agent]['pose']
            ego_shape = current_state['agent'][current_agent]['shape'][0]
            detected_pairs = []
            ego_agent_0 = None
            # Scan only future frames of the expanding agent.
            for idx, each_pose in enumerate(ego_poses):
                if idx <= current_frame_idx:
                    continue
                ego_agent_packed =Agent(x=each_pose[0],
                                        y=each_pose[1],
                                        yaw=each_pose[3],
                                        length=max(1, ego_shape[1]),
                                        width=max(1, ego_shape[0]),
                                        agent_id=current_agent)
                # Remember the first future pose for forward-collision checks.
                if ego_agent_0 is None:
                    ego_agent_0 = ego_agent_packed
                for each_agent_id in current_state['agent']:
                    if [current_agent, each_agent_id] in detected_pairs:
                        continue
                    if each_agent_id == current_agent or each_agent_id in relevant_agents:
                        continue
                    each_agent_frame_num = current_state['agent'][each_agent_id]['pose'].shape[0]
                    if idx >= each_agent_frame_num:
                        continue
                    target_agent_packed =Agent(x=current_state['agent'][each_agent_id]['pose'][idx, 0],
                                               y=current_state['agent'][each_agent_id]['pose'][idx, 1],
                                               yaw=current_state['agent'][each_agent_id]['pose'][idx, 3],
                                               length=current_state['agent'][each_agent_id]['shape'][0][1],
                                               width=current_state['agent'][each_agent_id]['shape'][0][0],
                                               agent_id=each_agent_id)
                    # Skip frames with the -1 "invalid pose" sentinel on either side.
                    if each_pose[0] == -1 or each_pose[1] == -1 or current_state['agent'][each_agent_id]['pose'][idx, 0] == -1 or current_state['agent'][each_agent_id]['pose'][idx, 1] == -1:
                        continue
                    collision = utils.check_collision(ego_agent_packed, target_agent_packed)
                    if collision:
                        detected_pairs.append([current_agent, each_agent_id])
                        yield_ego = True
                        # FORWARD COLLISION CHECKINGS
                        collision_0 = utils.check_collision(ego_agent_0, target_agent_packed)
                        if collision_0:
                            # Already colliding at the first future frame:
                            # treat as an immediate relation without predicting.
                            detected_relation = [[ego_agent_0, target_agent_packed]]
                        else:
                            # check relation
                            # print(f"In: {current_agent} {each_agent_id} {undetected_piles} {current_state['predicting']['relation']}")
                            self.online_predictor.relation_pred_onetime(each_pair=[current_agent, each_agent_id],
                                                                        current_frame=current_frame_idx,
                                                                        clear_history=True,
                                                                        current_data=current_state)
                            # print(f"Out: {current_agent} {each_agent_id} {undetected_piles} {current_state['predicting']['relation']}")
                            detected_relation = current_state['predicting']['relation']
                        if [each_agent_id, current_agent] in detected_relation:
                            if [current_agent, each_agent_id] in detected_relation:
                                # bi-directional relations, still yield
                                pass
                            else:
                                yield_ego = False
                        if yield_ego or self.method_testing < 2:
                            relevant_agents.append(each_agent_id)
                            undetected_piles.append(each_agent_id)
                            if [current_agent, each_agent_id] not in colliding_pairs and [each_agent_id, current_agent] not in colliding_pairs:
                                colliding_pairs.append([current_agent, each_agent_id])
            # print(f"Detected for {current_agent} with {undetected_piles}")
        if self.test_task != 1:
            # don't predict ego
            relevant_agents.remove(ego_agent)
        current_state['predicting']['relevant_agents'] = relevant_agents
        current_state['predicting']['colliding_pairs'] = colliding_pairs
        # print(f"Collision based relevant agent detected finished: \n{relevant_agents} \n{colliding_pairs}")
def clear_markers_per_step(self, current_state, current_frame_idx):
if self.is_planning(current_frame_idx):
current_state['predicting']['relation'] = []
current_state['predicting']['points_to_mark'] = []
current_state['predicting']['trajectory_to_mark'] = []
def get_prediction_trajectories(self, current_frame_idx, current_state=None, time_horizon=80):
if self.is_planning(current_frame_idx):
frame_diff = self.scenario_frame_number - self.planning_from
self.collision_based_relevant_detection(current_frame_idx, current_state)
current_state['predicting']['relation'] = []
for each_pair in current_state['predicting']['colliding_pairs']:
self.online_predictor.relation_pred_onetime(each_pair=each_pair, current_data=current_state,
current_frame=current_frame_idx)
if self.follow_prediction_traj and len(current_state['predicting']['relevant_agents']) > 0:
if self.method_testing < 0:
self.online_predictor.variety_predict(frame_diff)
else:
self.online_predictor.marginal_predict(frame_diff)
self.online_predictor.last_predict_frame = frame_diff + 5
return True
else:
return False
    # def update_env_trajectory_speed_only(self, current_frame_idx, relevant_only=True, current_state=None):
    def update_env_trajectory_for_sudo_base_planner(self, current_frame_idx, current_state=None):
        """
        Pseudo ("sudo" in this codebase) base planner for the ego vehicle.

        On the first planning frame, re-projects the ego onto its original
        ground-truth path and rewrites all remaining ego poses in place by
        integrating a decelerating speed (capped by the recorded per-step
        speed) along an interpolator over the original path. On every
        planning frame, the (possibly rewritten) remaining trajectory is
        appended to the visualisation buffer. Returns `current_state`.

        NOTE(review): `SudoInterpolator` is defined elsewhere in this module
        (outside the visible window); its constructor takes the path points
        and the projected starting pose — confirm.
        """
        if self.test_task in [1, 2]:
            # predict ego
            return current_state
        # self.scenario_frame_number = current_frame_idx
        ego_id = current_state['predicting']['ego_id'][1]
        # for each_agent in current_state['agent']:
        #     if each_agent in [748, 781, 735]:
        #         current_state['predicting']['trajectory_to_mark'].append(
        #             current_state['predicting']['original_trajectory'][each_agent]['pose'][:, :])
        # frame_diff = self.scenario_frame_number - self.planning_from
        # if frame_diff >= 0 and frame_diff == 0:  # frame_diff % self.planning_interval == 0:
        if self.is_first_planning(current_frame_idx):
            # print("updating ego trajectory: ", self.planning_interval, self.scenario_frame_number)
            # current_state['predicting']['trajectory_to_mark'].append(
            #     current_state['predicting']['original_trajectory'][ego_id]['pose'][current_frame_idx:, :])
            my_current_pose = current_state['agent'][ego_id]['pose'][current_frame_idx - 1]
            # One-frame speed estimate from the last two simulated poses.
            my_current_v_per_step = euclidean_distance(
                current_state['agent'][ego_id]['pose'][current_frame_idx - 1, :2],
                current_state['agent'][ego_id]['pose'][current_frame_idx - 2, :2])
            org_pose = current_state['predicting']['original_trajectory'][ego_id]['pose'].copy()
            # Project the current pose onto the closest point of the
            # original ground-truth trajectory.
            projected_pose_on_original = my_current_pose
            closest_distance = 999999
            closest_index = 0
            for idx, each_pose in enumerate(org_pose):
                dist = euclidean_distance(each_pose[:2], my_current_pose[:2])
                if dist < closest_distance:
                    closest_distance = dist
                    projected_pose_on_original = each_pose
                    closest_index = idx
            my_interpolator = SudoInterpolator(org_pose[closest_index:, :2], projected_pose_on_original)
            # my_current_pose = projected_pose_on_original
            total_frames = current_state['agent'][ego_id]['pose'].shape[0]
            total_distance_traveled = 0
            # Rewrite every remaining frame: decelerate each step, never
            # exceeding the ground-truth per-step speed, and advance along
            # the interpolated original path.
            for i in range(total_frames - current_frame_idx):
                my_current_v_per_step -= A_SLOWDOWN_DESIRE/self.frame_rate/self.frame_rate
                step_speed = euclidean_distance(
                    current_state['agent'][ego_id]['pose'][current_frame_idx+i - 1, :2],
                    current_state['agent'][ego_id]['pose'][current_frame_idx+i - 2, :2])
                my_current_v_per_step = max(0, min(my_current_v_per_step, step_speed))
                current_state['agent'][ego_id]['pose'][current_frame_idx+i, :] = my_interpolator.interpolate(total_distance_traveled + my_current_v_per_step)
                total_distance_traveled += my_current_v_per_step
        if self.is_planning(self.scenario_frame_number):
            # current_state['predicting']['trajectory_to_mark'].append(
            #     current_state['predicting']['original_trajectory'][ego_id]['pose'][current_frame_idx:, :])
            current_state['predicting']['trajectory_to_mark'].append(current_state['agent'][ego_id]['pose'][current_frame_idx:, :])
        return current_state
    def find_closes_lane(self, current_state, agent_id, my_current_v_per_step, my_current_pose, no_unparallel=False,
                         return_list=False, current_route=[]):
        """Find the closest lane (or lanes) for an agent to trace.

        Scans every valid-typed lane (optionally restricted to
        `current_route`), tracking (a) the closest lane point regardless of
        heading, (b) the closest lane point whose heading is within 20 deg
        and 5 m, and (c) nearby parallel same-direction lanes (0.5-3.2 m
        away, non-turning) used to classify potential lane changers.

        Returns (lane_id, point_index, dist_to_lane, distance_threshold), or
        list-valued first two elements when `return_list` is True. Elements
        may be None when no lane qualifies.

        NOTE(review): `current_route=[]` is a mutable default argument — it
        is only read here, so no state leaks, but worth fixing to `None`.
        """
        # find a closest lane to trace
        closest_dist = 999999
        closest_dist_no_yaw = 999999
        closest_dist_threshold = 5
        closest_lane = None
        closest_lane_no_yaw = None
        closest_lane_pt_no_yaw_idx = None
        closest_lane_pt_idx = None
        current_lane = None
        current_closest_pt_idx = None
        dist_to_lane = None
        distance_threshold = None
        closest_lanes_same_dir = []
        closest_lanes_idx_same_dir = []
        for each_lane in current_state['road']:
            if len(current_route) > 0 and each_lane not in current_route:
                continue
            # Lane 'type' may be a scalar or a sequence depending on dataset.
            if isinstance(current_state['road'][each_lane]['type'], int):
                if current_state['road'][each_lane]['type'] not in self.valid_lane_types:
                    continue
            else:
                if current_state['road'][each_lane]['type'][0] not in self.valid_lane_types:
                    continue
            road_xy = current_state['road'][each_lane]['xyz'][:, :2]
            if road_xy.shape[0] < 3:
                continue
            current_lane_closest_dist = 999999
            current_lane_closest_idx = None
            for j, each_xy in enumerate(road_xy):
                road_yaw = current_state['road'][each_lane]['dir'][j]
                dist = euclidean_distance(each_xy, my_current_pose[:2])
                yaw_diff = abs(utils.normalize_angle(my_current_pose[3] - road_yaw))
                if dist < closest_dist_no_yaw:
                    closest_lane_no_yaw = each_lane
                    closest_dist_no_yaw = dist
                    closest_lane_pt_no_yaw_idx = j
                # Heading within 20 degrees and within 5 m: candidate lane.
                if yaw_diff < math.pi / 180 * 20 and dist < closest_dist_threshold:
                    if dist < closest_dist:
                        closest_lane = each_lane
                        closest_dist = dist
                        closest_lane_pt_idx = j
                    if dist < current_lane_closest_dist:
                        current_lane_closest_dist = dist
                        current_lane_closest_idx = j
            # classify current agent as a lane changer or not:
            if my_current_v_per_step > 0.1 and 0.5 < current_lane_closest_dist < 3.2 and each_lane not in closest_lanes_same_dir and current_state['road'][each_lane]['turning'] == 0:
                closest_lanes_same_dir.append(each_lane)
                closest_lanes_idx_same_dir.append(current_lane_closest_idx)
        # If the best aligned lane is not in the lane-change band, the agent
        # is not changing lanes — discard the parallel candidates.
        if closest_lane is not None and not 0.5 < closest_dist < 3.2:
            closest_lanes_same_dir = []
            closest_lanes_idx_same_dir = []
        if closest_lane is not None:
            current_lane = closest_lane
            current_closest_pt_idx = closest_lane_pt_idx
            dist_to_lane = closest_dist
            distance_threshold = max(7, max(7 * my_current_v_per_step, dist_to_lane))
        elif closest_lane_no_yaw is not None and not no_unparallel:
            # Fall back to the nearest lane ignoring heading unless the
            # caller forbids unparallel matches.
            current_lane = closest_lane_no_yaw
            current_closest_pt_idx = closest_lane_pt_no_yaw_idx
            dist_to_lane = closest_dist_no_yaw
            distance_threshold = max(10, dist_to_lane)
        else:
            logging.warning(f'No current lane founded: {agent_id}')
            # return
        if return_list:
            if len(closest_lanes_same_dir) > 0:
                return closest_lanes_same_dir, closest_lanes_idx_same_dir, dist_to_lane, distance_threshold
            else:
                return [current_lane], [current_closest_pt_idx], dist_to_lane, distance_threshold
        else:
            return current_lane, current_closest_pt_idx, dist_to_lane, distance_threshold
    def set_route(self, goal_pt, road_dic, current_pose=None, previous_routes=None, max_number_of_routes=50, route_roadblock_check=None, agent_id=None):
        """Enumerate lane-level routes (NuPlan) from the current pose to a goal.

        Resolves the closest lane to `current_pose` and to `goal_pt` via the
        NuPlan map API (falling back to lane connectors), then runs a BFS
        over `road_dic` successor lanes, collecting up to
        `max_number_of_routes` routes (each a list of lane ids). Suffixes of
        `previous_routes` containing the start lane are reused, and partial
        routes are stitched onto already-found routes sharing a lane.

        For the ego (`agent_id == 'ego'`) with `route_roadblock_check`
        given, the start lane is constrained to lanes of those roadblocks.
        """
        from nuplan.common.actor_state.state_representation import Point2D
        from nuplan.common.maps.maps_datatypes import SemanticMapLayer
        closest_lane_id, dist_to_lane = self.map_api.get_distance_to_nearest_map_object(point=Point2D(current_pose[0], current_pose[1]),
                                                                                        layer=SemanticMapLayer.LANE)
        target_lane_id, dist_to_lane = self.map_api.get_distance_to_nearest_map_object(point=Point2D(goal_pt[0], goal_pt[1]),
                                                                                       layer=SemanticMapLayer.LANE)
        if route_roadblock_check is not None and agent_id == 'ego':
            # Constrain the start lane to lanes belonging to the scenario's
            # original route roadblocks.
            route_lanes = []
            for each_roadbloack in route_roadblock_check:
                if each_roadbloack not in road_dic:
                    continue
                route_lanes += road_dic[each_roadbloack]['lower_level']
            if closest_lane_id not in route_lanes:
                closest_lane_id, dist_to_lane = self.map_api.get_distance_to_nearest_map_object(
                    point=Point2D(current_pose[0], current_pose[1]),
                    layer=SemanticMapLayer.LANE_CONNECTOR)
                if closest_lane_id not in route_lanes:
                    # Last resort: pick the first route lane not already traversed.
                    for each_lane in route_lanes:
                        if each_lane not in self.past_lanes:
                            print("[env planner] WARNING: closest lane/connector in original route not found with closest lanes for ego")
                            closest_lane_id = each_lane
                            dist_to_lane = 1
                            break
        # Goal too far from any lane (or non-int distance): try connectors.
        if not isinstance(dist_to_lane, int) or dist_to_lane > 30:
            target_lane_id, dist_to_lane = self.map_api.get_distance_to_nearest_map_object(
                point=Point2D(goal_pt[0], goal_pt[1]),
                layer=SemanticMapLayer.LANE_CONNECTOR)
        closest_lane_id = int(closest_lane_id)
        target_lane_id = int(target_lane_id)
        available_routes = []
        checking_pile = [[closest_lane_id]]
        lanes_visited = []
        if previous_routes is not None:
            # Reuse the suffix of any previous route passing through the start lane.
            for each_route in previous_routes:
                if closest_lane_id in each_route:
                    closest_lane_idx = each_route.index(closest_lane_id)
                    available_routes.append(each_route[closest_lane_idx:])
        while len(checking_pile) > 0 and len(available_routes) < max_number_of_routes:
            # BFS
            next_pile = []
            for each_route in checking_pile:
                latest_lane = each_route[-1]
                if latest_lane not in road_dic:
                    continue
                if latest_lane == target_lane_id:
                    available_routes.append(each_route+[target_lane_id])
                    # Restart the search from scratch to find alternatives.
                    next_pile = [[closest_lane_id]]
                    lanes_visited = []
                else:
                    all_next_lanes = road_dic[latest_lane]['next_lanes']
                    uppder_roadblock = road_dic[latest_lane]['upper_level'][0]
                    # Lane changing during route search is disabled.
                    ENVCHANGE_LANE = False
                    if uppder_roadblock in road_dic and ENVCHANGE_LANE:
                        parallel_lanes = road_dic[uppder_roadblock]['lower_level']
                    else:
                        parallel_lanes = []
                    all_next_lanes += parallel_lanes
                    # all_next_lanes += self.road_dic[latest_lane]['upper_level']
                    # if len(all_next_lanes) == 0 and len(each_route) == 1:
                    #     # starting from a dead end, turn around
                    #     all_next_lanes = road_dic[latest_lane]['previous_lanes']
                    for each_next_lane in all_next_lanes:
                        if each_next_lane in each_route:
                            # avoid circles
                            continue
                        if each_next_lane not in lanes_visited:
                            next_pile.append(each_route+[each_next_lane])
                            lanes_visited.append(each_next_lane)
                        else:
                            # Lane already expanded elsewhere: stitch this
                            # prefix onto a known route through that lane.
                            # NOTE(review): each_available_route[idx] is
                            # each_next_lane itself, so the lane appears
                            # twice in route_to_add — confirm intended.
                            for each_available_route in available_routes:
                                if each_next_lane in each_available_route:
                                    idx = each_available_route.index(each_next_lane)
                                    if idx != 0:
                                        route_to_add = each_route + [each_next_lane] + each_available_route[idx:]
                                        if route_to_add not in available_routes:
                                            available_routes.append(route_to_add)
                                    break
            checking_pile = next_pile
        return available_routes
def get_reroute_traj(self, current_state, agent_id, current_frame_idx,
follow_org_route=False, dynamic_turnings=True, current_route=[], is_ego=False):
"""
return a marginal planned trajectory with a simple lane follower
for NuPlan, use route_roadbloacks. a list of road bloacks
for Waymo, use route, a list of lane_ids, and prior, a list of lane_ids detected from the original gt trajectories
"""
assert self.routed_traj is not None, self.routed_traj
# generate a trajectory based on the route
# 1. get the route for relevant agents
# find the closest lane to trace
my_current_pose, my_current_v_per_step = plan_helper.get_current_pose_and_v(current_state=current_state,
agent_id=agent_id,
current_frame_idx=current_frame_idx)
my_current_v_per_step = np.clip(my_current_v_per_step, a_min=0, a_max=7)
goal_pt, goal_yaw = self.online_predictor.goal_setter.get_goal(current_data=current_state,
agent_id=agent_id,
dataset=self.dataset)
if PRINT_TIMER:
last_tic = time.perf_counter()
if agent_id not in self.past_lanes:
self.past_lanes[agent_id] = []
if self.dataset == 'NuPlan' and is_ego:
goal_lane, _, _ = plan_helper.find_closest_lane(
current_state=current_state,
my_current_pose=[goal_pt[0], goal_pt[1], -1, goal_yaw],
valid_lane_types=self.valid_lane_types,
)
# current_route is a list of multiple routes to choose
if len(current_route) == 0:
lanes_in_route = []
route_roadblocks = current_state['route'] if 'route' in current_state else None
for each_block in route_roadblocks:
if each_block not in current_state['road']:
continue
lanes_in_route += current_state['road'][each_block]['lower_level']
current_lanes, current_closest_pt_indices, dist_to_lane = plan_helper.find_closest_lane(
current_state=current_state,
my_current_pose=my_current_pose,
selected_lanes=lanes_in_route,
valid_lane_types=self.valid_lane_types,
excluded_lanes=self.past_lanes[agent_id]
)
else:
selected_lanes = []
for each_route in current_route:
selected_lanes += each_route
current_lanes, current_closest_pt_indices, dist_to_lane = plan_helper.find_closest_lane(
current_state=current_state,
my_current_pose=my_current_pose,
selected_lanes=selected_lanes,
valid_lane_types=self.valid_lane_types,
excluded_lanes=self.past_lanes[agent_id]
)
else:
if len(current_route) > 0:
current_route = current_route[0]
current_lanes, current_closest_pt_indices, dist_to_lane = plan_helper.find_closest_lane(
current_state=current_state,
my_current_pose=my_current_pose,
selected_lanes=current_route,
valid_lane_types=self.valid_lane_types,
excluded_lanes=self.past_lanes[agent_id]
)
if dist_to_lane is not None:
distance_threshold = max(self.frame_rate, max(self.frame_rate * my_current_v_per_step, dist_to_lane))
else:
dist_to_lane = 999
self.current_on_road = not (dist_to_lane > OFF_ROAD_DIST)
if self.dataset == 'NuPlan' and len(current_route) == 0 and is_ego:
pass
# route_roadblocks = current_state['route'] if 'route' in current_state else None
# current_routes = self.set_route(road_dic=current_state['road'],
# goal_pt=[goal_pt[0], goal_pt[1], 0, goal_yaw], current_pose=my_current_pose,
# previous_routes=[current_route], max_number_of_routes=1,
# route_roadblock_check=route_roadblocks,
# agent_id=agent_id)
# print(f"Got {len(current_routes)} for {agent_id} with {goal_pt} and {my_current_pose} given route {route_roadblocks}")
# current_route = current_routes[0] if len(current_routes) > 0 else []
else:
if current_lanes in current_route and not isinstance(current_lanes, list):
for each_past_lane in current_route[:current_route.index(current_lanes)]:
if each_past_lane not in self.past_lanes[agent_id]:
self.past_lanes[agent_id].append(each_past_lane)
if isinstance(current_lanes, list):
# deprecated
lane_found_in_route = False
for each_lane in current_lanes:
if each_lane in current_route:
current_lane = each_lane
lane_found_in_route = True
break
if not lane_found_in_route:
current_lane = random.choice(current_lanes)
idx = current_lanes.index(current_lane)
current_closest_pt_idx = current_closest_pt_indices[idx]
else:
current_lane = current_lanes
current_closest_pt_idx = current_closest_pt_indices
if PRINT_TIMER:
print(f"Time spent on first lane search: {time.perf_counter() - last_tic:04f}s")
last_tic = time.perf_counter()
if self.dataset == 'NuPlan' and is_ego:
# use route_roadblocks
prior_lanes = []
if current_lane is None:
print("WARNING: Ego Current Lane not found")
elif len(current_route) == 0:
# get route from the original trajectory, this route does not have to be neither accurate nor connected
prior_lanes = []
org_closest_pt_idx = []
for i in range(50):
if i + current_frame_idx > 90:
break
if i == 0:
continue
if i % 10 != 0:
continue
looping_pose, looping_v = get_current_pose_and_v(current_state=current_state,
agent_id=agent_id,
current_frame_idx=current_frame_idx + i)
# looping_lane, looping_closest_idx, _, _ = self.find_closes_lane(current_state=current_state,
# agent_id=agent_id,
# my_current_v_per_step=looping_v,
# my_current_pose=looping_pose,
# no_unparallel=follow_org_route,
# return_list=False)
looping_lane, looping_closest_idx, dist_to_lane = plan_helper.find_closest_lane(
current_state=current_state,
my_current_pose=looping_pose,
# include_unparallel=not follow_org_route
include_unparallel=False,
valid_lane_types=self.valid_lane_types,
excluded_lanes=self.past_lanes[agent_id]
)
if looping_lane is not None and looping_lane not in prior_lanes and dist_to_lane < 5:
prior_lanes.append(looping_lane)
org_closest_pt_idx.append(looping_closest_idx)
if PRINT_TIMER:
print(f"Time spent on loop lane search: {time.perf_counter() - last_tic:04f}s")
last_tic = time.perf_counter()
else:
prior_lanes = current_route
# 2. find a spot to enter
# Make connection with BC
accum_dist = -0.0001
p4 = None
cuttin_lane_id = None
cuttin_lane_idx = None
first_lane = True
def search_lanes(current_lane, route_roadblocks):
result_lanes = []
if goal_lane not in self.past_lanes['ego']:
goal_roadblock = current_state['road'][goal_lane]['upper_level'][0]
current_roadblock = current_state['road'][current_lane]['upper_level'][0]
if goal_roadblock == current_roadblock:
current_lane = goal_lane
lanes_to_loop = [[current_lane]]
visited_lanes = [current_lane]
while len(lanes_to_loop) > 0:
looping_lanes = lanes_to_loop.pop()
if len(looping_lanes) >= 3:
result_lanes.append(looping_lanes)
continue
looping_lane = looping_lanes[-1]
looping_roadblock = current_state['road'][looping_lane]['upper_level'][0]
if looping_roadblock not in route_roadblocks:
continue
# no lane changing
# all_lanes_in_block = current_state['road'][looping_roadblock]['lower_level']
# for each_lane in all_lanes_in_block:
# if each_lane not in visited_lanes:
# visited_lanes.append(each_lane)
# lanes_to_loop.append(looping_lanes[:-1]+[each_lane])
next_lanes = current_state['road'][looping_lane]['next_lanes']
for each_lane in next_lanes:
if each_lane not in visited_lanes:
visited_lanes.append(each_lane)
if each_lane not in current_state['road']:
result_lanes.append(looping_lanes)
continue
each_block = current_state['road'][each_lane]['upper_level'][0]
if each_block not in route_roadblocks:
continue
lanes_to_loop.append(looping_lanes+[each_lane])
if len(lanes_to_loop) == 0 and len(looping_lanes) > 0:
result_lanes.append(looping_lanes)
return result_lanes
if self.dataset == 'NuPlan' and is_ego and current_lane is not None:
route_roadblocks = current_state['route'] if 'route' in current_state else None
current_upper_roadblock = current_state['road'][current_lane]['upper_level'][0]
if current_upper_roadblock not in route_roadblocks:
route_roadblocks.insert(0, current_upper_roadblock)
while len(route_roadblocks) < 3 and route_roadblocks[-1] in current_state['road']:
next_roadblocks = current_state['road'][route_roadblocks[-1]]['next_lanes']
if len(next_roadblocks) == 0 or next_roadblocks[0] not in current_state['road']:
break
route_roadblocks.append(current_state['road'][route_roadblocks[-1]]['next_lanes'][0])
# assumption: not far from current lane
result_lanes = search_lanes(current_lane, route_roadblocks)
if len(result_lanes) == 0:
# choose a random lane from the first roadblock
print("WARNING: No available route found")
assert False, 'No Available Route Found for ego'
result_traj = []
for each_route in result_lanes:
current_trajectory = None
reference_trajectory = None
reference_yaw = None
for each_lane in each_route:
if each_lane not in current_state['road']:
break
if reference_trajectory is None:
reference_trajectory = current_state['road'][each_lane]['xyz'][current_closest_pt_idx:, :2].copy()
reference_yaw = current_state['road'][each_lane]['dir'][current_closest_pt_idx:].copy()
else:
reference_trajectory = np.concatenate((reference_trajectory,
current_state['road'][each_lane]['xyz'][:, :2].copy()))
reference_yaw = np.concatenate((reference_yaw,
current_state['road'][each_lane]['dir'].copy()))
# get CBC
if reference_trajectory.shape[0] < 2:
p1 = my_current_pose[:2]
yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2)
delta = self.planning_horizon
x, y = -math.sin(yaw) * delta + my_current_pose[0], -math.cos(yaw) * delta + \
my_current_pose[1]
p2 = [x, y]
p3 = p2
x, y = -math.sin(yaw) * delta + p2[0], -math.cos(yaw) * delta + p2[1]
p4 = [x, y]
# 4. generate a curve with cubic BC
if my_current_v_per_step < 1:
proper_v_for_cbc = (my_current_v_per_step + 1) / 2
else:
proper_v_for_cbc = my_current_v_per_step
if euclidean_distance(p4, p1) > 1:
print(f"No lanes found for route of {agent_id} {proper_v_for_cbc} {my_current_pose}")
connection_traj = self.trajectory_from_cubic_BC(p1=p1, p2=p2, p3=p3, p4=p4, v=proper_v_for_cbc)
else:
assert False, f"Error: P4, P1 overlapping {p4, p1}"
assert connection_traj.shape[0] > 0, connection_traj.shape
result_traj.append(connection_traj)
current_state['predicting']['trajectory_to_mark'].append(current_trajectory)
else:
starting_index = int(my_current_v_per_step * self.frame_rate * 2)
starting_index = min(starting_index, reference_trajectory.shape[0] - 1)
p4 = reference_trajectory[starting_index, :2]
starting_yaw = -utils.normalize_angle(reference_yaw[starting_index] + math.pi / 2)
delta = euclidean_distance(p4, my_current_pose[:2]) / 4
x, y = math.sin(starting_yaw) * delta + p4[0], math.cos(starting_yaw) * delta + p4[1]
p3 = [x, y]
p1 = my_current_pose[:2]
yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2)
# delta = euclidean_distance(p4, my_current_pose[:2]) / 4
delta = min(70/self.frame_rate, euclidean_distance(p4, my_current_pose[:2]) / 2)
x, y = -math.sin(yaw) * delta + my_current_pose[0], -math.cos(yaw) * delta + my_current_pose[1]
p2 = [x, y]
if euclidean_distance(p4, p1) > 2:
if my_current_v_per_step < 1:
proper_v_for_cbc = (my_current_v_per_step + 1) / 2
else:
proper_v_for_cbc = my_current_v_per_step
connection_traj = self.trajectory_from_cubic_BC(p1=p1, p2=p2, p3=p3, p4=p4, v=proper_v_for_cbc)
current_trajectory = np.concatenate((connection_traj, reference_trajectory[starting_index:, :2]))
else:
current_trajectory = reference_trajectory[starting_index:, :2]
result_traj.append(current_trajectory)
current_state['predicting']['trajectory_to_mark'].append(current_trajectory)
assert len(result_traj) == len(result_lanes), f'unmatched shape {len(result_traj)} {len(result_lanes)}'
self.routed_traj[agent_id] = result_traj
return self.routed_traj[agent_id], result_lanes
if current_lane is not None:
current_looping_lane = current_lane
while_counter = 0
if distance_threshold > 100:
print("Closest lane detection failded: ", agent_id, current_looping_lane, distance_threshold, my_current_v_per_step, dist_to_lane, current_route)
else:
distance_threshold = max(distance_threshold, self.frame_rate * my_current_v_per_step)
while accum_dist < distance_threshold and distance_threshold <= 100:
if while_counter > 100:
print("ERROR: Infinite looping lanes")
break
while_counter += 1
# turning: 1=left turn, 2=right turn, 3=UTurn
# UTurn -> Skip
# Left/Right check distance, if < 15 then skip, else not skip
if current_looping_lane not in current_state['road']:
break
current_looping_lane_turning = current_state['road'][current_looping_lane]['turning']
if dynamic_turnings and current_looping_lane_turning == 3 or (current_looping_lane_turning in [1, 2] and euclidean_distance(current_state['road'][current_looping_lane]['xyz'][-1, :2], my_current_pose[:2]) < 15):
# skip turning lanes
# accum_dist = distance_threshold - 0.1
pass
elif while_counter > 50:
print("Inifinite looping lanes (agent_id/current_lane): ", agent_id, current_looping_lane)
accum_dist = distance_threshold - 0.1
else:
if first_lane:
road_xy = current_state['road'][current_looping_lane]['xyz'][current_closest_pt_idx:, :2].copy()
else:
road_xy = current_state['road'][current_looping_lane]['xyz'][:, :2].copy()
for j, each_xy in enumerate(road_xy):
if j == 0:
continue
accum_dist += euclidean_distance(each_xy, road_xy[j - 1])
if accum_dist >= distance_threshold:
p4 = each_xy
if first_lane:
yaw = - utils.normalize_angle(
current_state['road'][current_looping_lane]['dir'][j + current_closest_pt_idx] + math.pi / 2)
else:
yaw = - utils.normalize_angle(
current_state['road'][current_looping_lane]['dir'][j] + math.pi / 2)
delta = euclidean_distance(p4, my_current_pose[:2]) / 4
x, y = math.sin(yaw) * delta + p4[0], math.cos(yaw) * delta + p4[1]
p3 = [x, y]
cuttin_lane_id = current_looping_lane
if first_lane:
cuttin_lane_idx = j + current_closest_pt_idx
else:
cuttin_lane_idx = j
break
if p4 is None:
if current_looping_lane in prior_lanes and current_looping_lane != prior_lanes[-1]:
# if already has route, then use previous route
current_lane_route_idx = prior_lanes.index(current_looping_lane)
current_looping_lane = prior_lanes[current_lane_route_idx+1]
else:
# if not, try to loop a new route
next_lanes = current_state['road'][current_looping_lane]['next_lanes']
next_lane_found = False
if follow_org_route:
if current_looping_lane in prior_lanes: # True:
# follow original lanes
current_idx = prior_lanes.index(current_looping_lane)
if current_idx < len(prior_lanes) - 1:
next_lane = prior_lanes[current_idx + 1]
next_lane_found = True
if next_lane in next_lanes:
# next lane connected, loop this next lane and continue next loop
current_looping_lane = next_lane
else:
# next lane not connected
# 1. find closest point
road_xy = current_state['road'][current_looping_lane]['xyz'][:, :2].copy()
closest_dist = 999999
closest_lane_idx = None
turning_yaw = None
for j, each_xy in enumerate(road_xy):
dist = euclidean_distance(each_xy[:2], my_current_pose[:2])
if dist < closest_dist:
closest_lane_idx = j
closest_dist = dist
turning_yaw = utils.normalize_angle(my_current_pose[3] - current_state['road'][current_looping_lane]['dir'][j])
if closest_lane_idx is None:
# follow no next lane logic below
next_lane_found = False
else:
max_turning_dist = 120 / math.pi
if closest_dist >= max_turning_dist:
# too far for max turning speed 15m/s
if turning_yaw > math.pi / 2:
# turn towards target lane first on the right
yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2) + math / 2
delta = 180 / math.pi
x, y = math.sin(yaw) * delta + my_current_pose[0], math.cos(yaw) * delta + my_current_pose[1]
p4 = [x, y]
yaw = yaw - math / 2
delta = delta / 2
x, y = math.sin(yaw) * delta + my_current_pose[0], math.cos(yaw) * delta + my_current_pose[1]
p3 = [x, y]
break
if turning_yaw <= math.pi / 2:
# turn towards target lane first on the right
yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2) - math / 2
delta = 180 / math.pi
x, y = math.sin(yaw) * delta + my_current_pose[0], math.cos(yaw) * delta + my_current_pose[1]
p4 = [x, y]
yaw = yaw + math / 2
delta = delta / 2
x, y = math.sin(yaw) * delta + my_current_pose[0], math.cos(yaw) * delta + my_current_pose[1]
p3 = [x, y]
break
else:
accum_dist = distance_threshold - 0.1
if not next_lane_found:
# follow prior or choose a random one as the next
if len(next_lanes) > 0:
current_looping_lane_changes = False
for each_lane in next_lanes:
if each_lane in prior_lanes:
current_looping_lane = each_lane
current_looping_lane_changes = True
if not current_looping_lane_changes:
# random choose one lane as route
current_looping_lane = random.choice(next_lanes)
else:
print("warning: no next lane found with breaking the lane finding loop")
break
# return
else:
break
first_lane = False
if PRINT_TIMER:
print(f"Time spent on while loop: {time.perf_counter() - last_tic:04f}s")
last_tic = time.perf_counter()
if p4 is None:
# not found any lane at all, generate a linear line forward
# 3. gennerate p1 and p2
p1 = my_current_pose[:2]
yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2)
delta = self.planning_horizon
x, y = -math.sin(yaw) * delta + my_current_pose[0], -math.cos(yaw) * delta + \
my_current_pose[1]
p2 = [x, y]
p3 = p2
x, y = -math.sin(yaw) * delta + p2[0], -math.cos(yaw) * delta + p2[1]
p4 = [x, y]
# 4. generate a curve with cubic BC
if my_current_v_per_step < 1:
proper_v_for_cbc = (my_current_v_per_step + 1) / 2
else:
proper_v_for_cbc = my_current_v_per_step
if euclidean_distance(p4, p1) > 1:
print(f"No lanes found for route of {agent_id} {proper_v_for_cbc} {my_current_pose}")
connection_traj = self.trajectory_from_cubic_BC(p1=p1, p2=p2, p3=p3, p4=p4, v=proper_v_for_cbc)
else:
assert False, f"Error: P4, P1 overlapping {p4, p1}"
assert connection_traj.shape[0] > 0, connection_traj.shape
self.routed_traj[agent_id] = connection_traj
else:
assert cuttin_lane_id is not None
# 3. gennerate p1 and p2
p1 = my_current_pose[:2]
yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2)
# delta = euclidean_distance(p4, my_current_pose[:2]) / 4
delta = min(7, euclidean_distance(p4, my_current_pose[:2]) / 2)
x, y = -math.sin(yaw) * delta + my_current_pose[0], -math.cos(yaw) * delta + \
my_current_pose[1]
p2 = [x, y]
if my_current_v_per_step < 1:
proper_v_for_cbc = (my_current_v_per_step + 1) / 2
else:
proper_v_for_cbc = my_current_v_per_step
connection_traj = self.trajectory_from_cubic_BC(p1=p1, p2=p2, p3=p3, p4=p4, v=proper_v_for_cbc)
# loop out a route
current_looping_lane = cuttin_lane_id
lanes_in_a_route = [current_looping_lane]
route_traj_left = np.array(current_state['road'][current_looping_lane]['xyz'][cuttin_lane_idx:, :2], ndmin=2)
next_lanes = current_state['road'][current_looping_lane]['next_lanes']
while len(next_lanes) > 0 and len(lanes_in_a_route) < 10:
any_lane_in_route = False
if len(prior_lanes) > 0:
for each_next_lane in next_lanes:
if each_next_lane in prior_lanes:
any_lane_in_route = True
current_looping_lane = each_next_lane
break
if not any_lane_in_route:
# try to follow original route
current_lane_changed = False
lanes_to_choose = []
for each_next_lane in next_lanes:
if each_next_lane in prior_lanes:
current_looping_lane = each_next_lane
current_lane_changed = True
break
if each_next_lane in current_state['road']:
lanes_to_choose.append(each_next_lane)
if current_lane_changed:
pass
elif len(lanes_to_choose) == 0:
print("NO VALID NEXT LANE TO CHOOSE from env_planner for ", agent_id)
break
else:
# random choose one lane as route
current_looping_lane = random.choice(lanes_to_choose)
# amend route manually for scenario 54 file 00000
# if current_looping_lane == 109:
# current_looping_lane = 112
# if current_looping_lane == 131:
# current_looping_lane = 132
if current_looping_lane not in current_state['road']:
print("selected lane not found in road dic")
break
lanes_in_a_route.append(current_looping_lane)
next_lanes = current_state['road'][current_looping_lane]['next_lanes']
# route_traj_left = np.concatenate(
# (route_traj_left, current_state['road'][current_looping_lane]['xyz'][:, :2]))
route_traj_left = np.concatenate(
(route_traj_left, current_state['road'][current_looping_lane]['xyz'][10:, :2])) # start with a margin to avoid overlapping ends and starts
if len(current_route) == 0:
# initiation the route and return
current_route = lanes_in_a_route
if is_ego:
goal_pt, goal_yaw = self.online_predictor.goal_setter.get_goal(current_data=current_state,
agent_id=agent_id,
dataset=self.dataset)
assert goal_pt is not None and goal_yaw is not None, goal_pt
ending_lane, ending_lane_idx, dist_to_ending_lane = plan_helper.find_closest_lane(
current_state=current_state,
my_current_pose=[goal_pt[0], goal_pt[1], 0, goal_yaw],
valid_lane_types=self.valid_lane_types
)
if ending_lane is not None:
if dist_to_ending_lane > 30:
logging.warning('Goal Point Off Road')
self.target_lanes = [ending_lane, ending_lane_idx]
if ending_lane not in lanes_in_a_route:
back_looping_counter = 0
back_to_loop_lanes = [ending_lane]
target_lane = ending_lane
while back_looping_counter < 10:
back_looping_counter += 1
current_back_looping_lane = back_to_loop_lanes.pop()
_, _, distance_to_ending_lane = plan_helper.find_closest_lane(
current_state=current_state,
my_current_pose=my_current_pose,
selected_lanes=[current_back_looping_lane],
valid_lane_types=self.valid_lane_types
)
if distance_to_ending_lane < OFF_ROAD_DIST:
target_lane = current_back_looping_lane
break
else:
if current_back_looping_lane not in current_state['road']:
break
prev_lanes = current_state['road'][current_back_looping_lane]['previous_lanes']
if not isinstance(prev_lanes, list):
prev_lanes = prev_lanes.tolist()
if len(prev_lanes) == 0:
break
back_to_loop_lanes += prev_lanes
current_route = [target_lane]
else:
logging.warning('No Lane Found for Goal Point at all')
route_traj_left = np.array(route_traj_left, ndmin=2)
# 4. generate a curve with cubic BC
if euclidean_distance(p4, p1) > 2:
if len(route_traj_left.shape) < 2:
print(route_traj_left.shape, route_traj_left)
self.routed_traj[agent_id] = connection_traj
else:
if euclidean_distance(p4, p1) > 1 and len(connection_traj.shape) > 0 and connection_traj.shape[0] > 1:
# concatenate org_traj, connection_traj, route_traj_left
self.routed_traj[agent_id] = np.concatenate(
(connection_traj, route_traj_left))
else:
self.routed_traj[agent_id] = route_traj_left
else:
self.routed_traj[agent_id] = route_traj_left
if PRINT_TIMER:
print(f"Time spent on CBC: {time.perf_counter() - last_tic:04f}s")
last_tic = time.perf_counter()
if DRAW_CBC_PTS:
current_state['predicting']['mark_pts'] = [p4, p3, p2, p1]
if is_ego:
if self.dataset == 'NuPlan':
return [self.routed_traj[agent_id]], current_route
else:
return [self.routed_traj[agent_id]], [current_route]
else:
return self.routed_traj[agent_id], current_route
def adjust_speed_for_collision(self, interpolator, distance_to_end, current_v, end_point_v, reschedule_speed_profile=False):
# constant deceleration
time_to_collision = min(self.planning_horizon, distance_to_end / (current_v + end_point_v + 0.0001) * 2)
time_to_decelerate = abs(current_v - end_point_v) / (0.1/self.frame_rate)
traj_to_return = []
desired_deceleration = 0.2 /self.frame_rate
if time_to_collision < time_to_decelerate:
# decelerate more than 3m/ss
deceleration = (end_point_v - current_v) / time_to_collision
dist_travelled = 0
for i in range(int(time_to_collision)):
current_v += deceleration * 1.2
current_v = max(0, current_v)
dist_travelled += current_v
traj_to_return.append(interpolator.interpolate(dist_travelled))
current_len = len(traj_to_return)
while current_len < 100:
dist_travelled += current_v
traj_to_return.append(interpolator.interpolate(dist_travelled))
current_len = len(traj_to_return)
else:
# decelerate with 2.5m/ss
time_for_current_speed = np.clip(((distance_to_end - 3 - (current_v+end_point_v)/2*time_to_decelerate) / (current_v + 0.0001)), 0, self.frame_rate*self.frame_rate)
dist_travelled = 0
if time_for_current_speed > 1:
for i in range(int(time_for_current_speed)):
if reschedule_speed_profile:
dist_travelled += current_v
else:
if i == 0:
dist_travelled += current_v
elif i >= interpolator.trajectory.shape[0]:
dist_travelled += current_v
else:
current_v_hat = interpolator.get_speed_with_index(i)
if abs(current_v_hat - current_v) > 2 / self.frame_rate:
print("WARNING: sharp speed changing", current_v, current_v_hat)
current_v = current_v_hat
dist_travelled += current_v
traj_to_return.append(interpolator.interpolate(dist_travelled))
for i in range(int(time_to_decelerate)):
current_v -= desired_deceleration
current_v = max(0, current_v)
dist_travelled += current_v
traj_to_return.append(interpolator.interpolate(dist_travelled))
current_len = len(traj_to_return)
while current_len < 100:
dist_travelled += current_v
traj_to_return.append(interpolator.interpolate(dist_travelled))
current_len = len(traj_to_return)
if len(traj_to_return) > 0:
short = self.planning_horizon - len(traj_to_return)
for _ in range(short):
traj_to_return.append(traj_to_return[-1])
else:
for _ in range(self.planning_horizon):
traj_to_return.append(interpolator.interpolate(0))
return np.array(traj_to_return, ndmin=2)
def get_traffic_light_collision_pts(self, current_state, current_frame_idx,
continue_time_threshold=5):
tl_dics = current_state['traffic_light']
road_dics = current_state['road']
traffic_light_ending_pts = []
for lane_id in tl_dics.keys():
if lane_id == -1:
continue
tl = tl_dics[lane_id]
# get the position of the end of this lane
# Unknown = 0, Arrow_Stop = 1, Arrow_Caution = 2, Arrow_Go = 3, Stop = 4, Caution = 5, Go = 6, Flashing_Stop = 7, Flashing_Caution = 8
try:
tl_state = tl["state"][current_frame_idx]
except:
tl_state = tl["state"][0]
if tl_state in [1, 4, 7]:
end_of_tf_checking = min(len(tl["state"]), current_frame_idx + continue_time_threshold)
all_red = True
for k in range(current_frame_idx, end_of_tf_checking):
if tl["state"][k] not in [1, 4, 7]:
all_red = False
break
if all_red:
for seg_id in road_dics.keys():
if lane_id == seg_id:
road_seg = road_dics[seg_id]
if self.dataset == 'Waymo':
if road_seg["type"] in [1, 2, 3]:
if len(road_seg["dir"].shape) < 1:
continue
if road_seg['turning'] == 1 and tl_state in [4, 7]:
# can do right turn with red light
continue
end_point = road_seg["xyz"][0][:2]
traffic_light_ending_pts.append(end_point)
break
elif self.dataset == 'NuPlan':
end_point = road_seg["xyz"][0][:2]
traffic_light_ending_pts.append(end_point)
break
else:
assert False, f'Unknown dataset in env planner - {self.dataset}'
return traffic_light_ending_pts
def check_past_goal(self, traj, current_idx, current_state, agent_id):
# if 'follow_goal' in current_state['predicting'] and agent_id in current_state['predicting']['follow_goal'] and not current_state['predicting']['follow_goal'][agent_id]:
# return True
# detect by angle
index = 1
valid = abs(current_state['predicting']['original_trajectory'][agent_id]['pose'][-1, :2][0] + 1) > 0.01
while not valid:
index += 1
valid = abs(current_state['predicting']['original_trajectory'][agent_id]['pose'][-index, :2][0] + 1) > 0.01
original_goal = current_state['predicting']['original_trajectory'][agent_id]['pose'][-index, :2]
total_frame = traj.shape[0]
if current_idx + self.planning_interval * 2 > total_frame - 1 or current_idx + self.planning_interval + self.frame_rate > total_frame - 1:
return False
next_checking_pt = traj[current_idx+self.planning_interval*2, :2]
angle_to_goal = get_angle_of_a_line(next_checking_pt, original_goal)
goal_yaw = current_state['predicting']['original_trajectory'][agent_id]['pose'][-1, 3]
past_goal = False
normalized_angle = utils.normalize_angle(angle_to_goal - goal_yaw)
if normalized_angle > math.pi / 2 or normalized_angle < -math.pi / 2:
past_goal = True
# detect by distance for low speed trajectories
two_point_dist = euclidean_distance(original_goal, next_checking_pt)
if two_point_dist < MINIMAL_DISTANCE_TO_GOAL:
past_goal = True
# goal_distance2 = euclidean_distance(marginal_traj[self.planning_interval + 20, :2], origial_goal)
two_point_dist = euclidean_distance(traj[current_idx+self.planning_interval, :2],
traj[current_idx+self.planning_interval+self.frame_rate, :2])
if two_point_dist < MINIMAL_SPEED_TO_TRACK_ORG_GOAL:
past_goal = True
if past_goal:
current_state['predicting']['follow_goal'][agent_id] = False
else:
current_state['predicting']['follow_goal'][agent_id] = True
return past_goal
def get_trajectory_from_interpolator(self, my_interpolator, my_current_speed, a_per_step=None,
check_turning_dynamics=True, desired_speed=7,
emergency_stop=False, hold_still=False,
agent_id=None, a_scale_turning=0.7, a_scale_not_turning=0.9):
total_frames = self.planning_horizon
total_pts_in_interpolator = my_interpolator.trajectory.shape[0]
trajectory = np.ones((total_frames, 4)) * -1
# get proper speed for turning
largest_yaw_change = -1
largest_yaw_change_idx = None
if check_turning_dynamics and not emergency_stop:
for i in range(min(200, total_pts_in_interpolator - 2)):
if my_interpolator.trajectory[i, 0] == -1.0 or my_interpolator.trajectory[i+1, 0] == -1.0 or my_interpolator.trajectory[i+2, 0] == -1.0:
continue
current_yaw = utils.normalize_angle(get_angle_of_a_line(pt1=my_interpolator.trajectory[i, :2], pt2=my_interpolator.trajectory[i+1, :2]))
next_yaw = utils.normalize_angle(get_angle_of_a_line(pt1=my_interpolator.trajectory[i+1, :2], pt2=my_interpolator.trajectory[i+2, :2]))
dist = utils.euclidean_distance(pt1=my_interpolator.trajectory[i, :2], pt2=my_interpolator.trajectory[i+1, :2])
yaw_diff = abs(utils.normalize_angle(next_yaw - current_yaw))
if yaw_diff > largest_yaw_change and 0.04 < yaw_diff < math.pi / 2 * 0.9 and 100 > dist > 0.3:
largest_yaw_change = yaw_diff
largest_yaw_change_idx = i
proper_speed_minimal = max(5, math.pi / 3 / largest_yaw_change) # calculate based on 20m/s turning for 12s a whole round with a 10hz data in m/s
proper_speed_minimal_per_frame = proper_speed_minimal / self.frame_rate
if largest_yaw_change_idx is not None:
deceleration_frames = max(0, largest_yaw_change_idx - abs(my_current_speed - proper_speed_minimal_per_frame) / (A_SLOWDOWN_DESIRE / self.frame_rate / self.frame_rate / 2))
else:
deceleration_frames = 99999
if agent_id is not None:
pass
dist_past = 0
current_speed = my_current_speed
for i in range(total_frames):
if current_speed < 0.1:
low_speed_a_scale = 1 * self.frame_rate
else:
low_speed_a_scale = 0.1 * self.frame_rate
if hold_still:
trajectory[i] = my_interpolator.interpolate(0)
continue
elif emergency_stop:
current_speed -= A_SLOWDOWN_DESIRE / self.frame_rate
elif largest_yaw_change_idx is not None:
proper_speed_minimal_per_frame = max(0.5, min(proper_speed_minimal_per_frame, 5))
if largest_yaw_change_idx >= i >= deceleration_frames:
if current_speed > proper_speed_minimal_per_frame:
current_speed -= A_SLOWDOWN_DESIRE / self.frame_rate / 2
else:
current_speed += A_SPEEDUP_DESIRE / self.frame_rate * a_scale_not_turning * low_speed_a_scale
elif i < deceleration_frames:
if current_speed < desired_speed / 4.7:
# if far away from the turnings and current speed is smaller than 15m/s, then speed up
# else keep current speed
if a_per_step is not None:
current_speed += max(-A_SLOWDOWN_DESIRE / self.frame_rate, min(A_SPEEDUP_DESIRE / self.frame_rate * low_speed_a_scale, a_per_step))
else:
current_speed += A_SPEEDUP_DESIRE / self.frame_rate * a_scale_turning * low_speed_a_scale
elif i > largest_yaw_change_idx:
if current_speed > proper_speed_minimal_per_frame:
current_speed -= A_SLOWDOWN_DESIRE / self.frame_rate
else:
if a_per_step is not None:
current_speed += max(-A_SLOWDOWN_DESIRE / self.frame_rate, min(A_SPEEDUP_DESIRE / self.frame_rate * low_speed_a_scale, a_per_step))
else:
current_speed += A_SPEEDUP_DESIRE / self.frame_rate * a_scale_turning * low_speed_a_scale
else:
if current_speed < desired_speed:
if a_per_step is not None:
current_speed += max(-A_SLOWDOWN_DESIRE / self.frame_rate, min(A_SPEEDUP_DESIRE / self.frame_rate * low_speed_a_scale, a_per_step))
else:
current_speed += A_SPEEDUP_DESIRE / self.frame_rate * a_scale_not_turning * low_speed_a_scale # accelerate with 0.2 of desired acceleration
current_speed = max(0, current_speed)
dist_past += current_speed
trajectory[i] = my_interpolator.interpolate(dist_past)
return trajectory
    def update_env_trajectory_reguild(self, current_frame_idx, relevant_only=True,
                                      current_state=None, plan_for_ego=False, dynamic_env=True):
        """
        Plan and commit trajectories for (relevant) environment agents.

        current_frame_idx: 1,2,3,...,11 (first frame to plan)
        relevant_only: only re-plan agents listed in current_state['predicting']['relevant_agents']
        current_state: scenario data dictionary; read from and updated in place (its 'agent'
            entry is replaced with a deep copy holding the committed trajectories)
        plan_for_ego: if False, the ego agent's planned pose is not committed
        dynamic_env: if False the environment is static and current_state is returned untouched

        Returns the (possibly updated) current_state, or None if no state was given
        while a planning step was due.
        """
        # if self.online_predictor.prediction_data is None:
        #     logging.warning('Skip planning: Planning before making a prediction')
        #     return
        if not dynamic_env:
            return current_state
        # self.scenario_frame_number = current_frame_idx
        # frame_diff = self.scenario_frame_number - self.planning_from
        if self.is_planning(current_frame_idx):
            # if frame_diff >= 0 and frame_diff % self.planning_interval == 0:
            # load scenario data
            if current_state is None:
                return
            agents = current_state['agent']
            relevant_agents = current_state['predicting']['relevant_agents']
            edges = current_state['predicting']['relation']
            # XPts = current_state['predicting']['XPt']
            # select marginal prediction traj
            # prediction_traj_dic_m = current_state['predicting']['marginal_trajectory']
            # prediction_traj_dic_c = current_state['predicting']['conditional_trajectory']
            # prediction_traj_dic_m = prediction_traj_dic_c
            ego_id = current_state['predicting']['ego_id'][1]
            # plan against a deep copy so collision checks below still read the
            # uncommitted poses of other agents for this planning round
            agents_dic_copy = copy.deepcopy(current_state['agent'])
            for agent_id in agents:
                # loop each relevant agent
                if relevant_only and agent_id not in relevant_agents:
                    continue
                current_state['agent'][agent_id]['action'] = None
                total_time_frame = current_state['agent'][agent_id]['pose'].shape[0]
                goal_point = current_state['predicting']['goal_pts'][agent_id]
                my_current_pose = current_state['agent'][agent_id]['pose'][current_frame_idx - 1]
                # speed estimate: average per-frame displacement over the last 5 frames
                my_current_v_per_step = euclidean_distance(current_state['agent'][agent_id]['pose'][current_frame_idx - 1, :2],
                                                           current_state['agent'][agent_id]['pose'][current_frame_idx - 6, :2])/5
                my_target_speed = 70 / self.frame_rate
                # implausibly large estimate (e.g. teleport in logged data) -> reset to a sane default
                if my_current_v_per_step > 100 / self.frame_rate:
                    my_current_v_per_step = 10 / self.frame_rate
                org_pose = current_state['predicting']['original_trajectory'][agent_id]['pose'].copy()
                # for non-vehicle types agent, skip
                if int(current_state['agent'][agent_id]['type']) not in self.vehicle_types:
                    continue
                # rst = prediction_traj_dic_m[agent_id]['rst']
                # score = np.exp(prediction_traj_dic_m[agent_id]['score'])
                # score /= np.sum(score)
                # best_idx = np.argmax(score)
                # prediction_traj_m = rst[best_idx]
                # use_rules = 0  # 0=hybird, 1=use rules only
                # info: always use rules for env agents
                use_rules = not self.follow_prediction_traj
                if use_rules:
                    # past_goal = self.check_past_goal(traj=current_state['agent'][agent_id]['pose'],
                    #                                  current_idx=current_frame_idx,
                    #                                  current_state=current_state,
                    #                                  agent_id=agent_id)
                    my_traj, _ = self.get_reroute_traj(current_state=current_state,
                                                       agent_id=agent_id,
                                                       current_frame_idx=current_frame_idx)
                else:
                    routed_traj, _ = self.get_reroute_traj(current_state=current_state,
                                                           agent_id=agent_id,
                                                           current_frame_idx=current_frame_idx)
                    marginal_trajs = current_state['predicting']['marginal_trajectory'][agent_id]['rst'][0]
                    # worst-case closest distance between the routed and the predicted
                    # trajectory over the first 50 frames (computed but currently unused
                    # because of the `if True` below)
                    x_dist = []
                    for r_p in routed_traj[:50, :2]:
                        line_dist = []
                        for m_p in marginal_trajs[:50, :2]:
                            dist = euclidean_distance(r_p, m_p)
                            line_dist.append(dist)
                        x_dist.append(min(line_dist))
                    minimal_distance = max(x_dist)
                    if True:
                    # if minimal_distance < 3:
                        my_traj = marginal_trajs
                    else:
                        my_traj = routed_traj
                    # current_state['predicting']['routed_trajectory'][agent_id]
                # if False:
                #     # use prediction trajectory
                #     target_lanes = org_pose
                #     if agent_id in current_state['lanes_traveled']:
                #         lane_traveled_list = current_state['lanes_traveled'][agent_id]
                #         if len(lane_traveled_list) > 0:
                #             for i, each_lane_id in enumerate(lane_traveled_list):
                #                 if i == 0:
                #                     target_lanes = current_state['road'][each_lane_id]['xyz'][:, :2].copy()
                #                 else:
                #                     target_lanes = np.concatenate(
                #                         (target_lanes, current_state['road'][each_lane_id]['xyz'][:, :2])).copy()
                #     prediction_traj_m, follow_org = self.select_trajectory_from_prediction(prediction_traj_dic_m, agent_id,
                #                                                                            goal_point,
                #                                                                            original_trajectory=target_lanes, #org_pose,
                #                                                                            remaining_frames=min(10, total_time_frame - current_frame_idx),
                #                                                                            follow_goal=
                #                                                                            current_state['predicting'][
                #                                                                                'follow_goal'][
                #                                                                                agent_id],
                #                                                                            follow_original_as_default=follow_org_as_default)
                #     assert prediction_traj_m is not None, f'{agent_id} / {relevant_agents}'
                # action = 0 # 0=No Action, 1=Follow, 2=Yield
                # my_traj = prediction_traj_m.copy()
                # detect trajectory collisions
                # after collision detection, we have earliest_collision_idx, earliest_target_id, latest_collision_idx(for that earliest collision detected
                my_interpolator = SudoInterpolator(my_traj.copy(), my_current_pose)
                interpolated_trajectory = self.get_trajectory_from_interpolator(my_interpolator=my_interpolator,
                                                                                my_current_speed=my_current_v_per_step,
                                                                                agent_id=agent_id)
                my_interpolator = SudoInterpolator(interpolated_trajectory.copy(), my_current_pose)
                earliest_collision_idx = None
                earliest_target_agent = None
                collision_point = None
                traffic_light_ending_pts = self.get_traffic_light_collision_pts(current_state=current_state,
                                                                                current_frame_idx=current_frame_idx)
                tl_checked = False
                running_red_light = False
                if self.method_testing < 1:
                    continue
                # check collisions for ego from frame 1 of the prediction trajectory
                ego_index_checking = 1 # current_frame_idx+1
                collision_detected_now = False
                latest_collision_id = None
                end_checking_frame = np.clip(current_frame_idx + REACTION_AFTER, 0, total_time_frame)
                end_checking_frame = min(end_checking_frame, current_frame_idx+self.planning_horizon)
                # pack an Agent object for collision detection
                my_reactors = []
                for i in range(current_frame_idx, end_checking_frame):
                    ego_index_checking = i - current_frame_idx
                    ego_pose2_valid = False
                    if i - current_frame_idx > 0:
                        ego_pose2 = interpolated_trajectory[ego_index_checking - 1]
                        if abs(ego_pose2[0]) < 1.1 and abs(ego_pose2[1]) < 1.1:
                            pass
                        else:
                            # a stretched box covering the segment swept since the previous frame
                            # NOTE(review): ego_pose here carries over from the previous
                            # iteration's inner loop — confirm it is always bound before use
                            ego_agent2 =Agent(x=(ego_pose2[0] + ego_pose[0]) / 2,
                                              y=(ego_pose2[1] + ego_pose[1]) / 2,
                                              yaw=get_angle_of_a_line(ego_pose2[:2], ego_pose[:2]),
                                              length=euclidean_distance(ego_pose2[:2], ego_pose[:2]),
                                              width=max(1, current_state['agent'][agent_id]['shape'][0][0]),
                                              agent_id=agent_id)
                            ego_pose2_valid = True
                    for each_other_agent in agents:
                        if each_other_agent == agent_id:
                            continue
                        if each_other_agent in my_reactors:
                            continue
                        if current_state['agent'][each_other_agent]['shape'][0][1] == -1:
                            continue
                        if ego_index_checking >= interpolated_trajectory.shape[0]:
                            continue
                        ego_pose = interpolated_trajectory[ego_index_checking, :] # ego start checking from frame 0
                        if abs(ego_pose[0]) < 1.1 and abs(ego_pose[1]) < 1.1:
                            # print("WARNING invalid pose for collision detection: ", pose_in_pred)
                            continue
                        ego_agent =Agent(x=ego_pose[0],
                                         y=ego_pose[1],
                                         yaw=ego_pose[3],
                                         length=max(1, current_state['agent'][agent_id]['shape'][0][1]),
                                         width=max(1, current_state['agent'][agent_id]['shape'][0][0]),
                                         agent_id=agent_id)
                        # check traffic light violation
                        for tl_pt in traffic_light_ending_pts:
                            dummy_tf_agent = Agent(x=tl_pt[0], y=tl_pt[1], yaw=0,
                                                   length=TRAFFIC_LIGHT_COLLISION_SIZE,
                                                   width=TRAFFIC_LIGHT_COLLISION_SIZE, agent_id=99999)
                            running = utils.check_collision(
                                checking_agent=ego_agent,
                                target_agent=dummy_tf_agent)
                            if ego_pose2_valid:
                                running |= utils.check_collision(
                                    checking_agent=ego_agent2,
                                    target_agent=dummy_tf_agent)
                            if running:
                                running_red_light = True
                                earliest_collision_idx = ego_index_checking
                                collision_point = [ego_pose[0], ego_pose[1]]
                                earliest_target_agent = 99999
                                target_speed = 0
                                # break collision detection
                                break
                        if running_red_light:
                            to_yield = True
                            break
                        each_other_agent_pose_array = current_state['agent'][each_other_agent]['pose']
                        target_current_pose = each_other_agent_pose_array[i]
                        target_agent =Agent(x=target_current_pose[0],
                                            y=target_current_pose[1],
                                            yaw=target_current_pose[3],
                                            length=max(1, current_state['agent'][each_other_agent]['shape'][0][1]),
                                            width=max(1, current_state['agent'][each_other_agent]['shape'][0][0]),
                                            agent_id=each_other_agent)
                        has_collision = utils.check_collision(checking_agent=ego_agent,
                                                              target_agent=target_agent)
                        if ego_pose2_valid:
                            has_collision |= utils.check_collision(checking_agent=ego_agent2,
                                                                   target_agent=target_agent)
                        to_yield = False
                        if has_collision:
                            to_yield = True
                            # solve this conflict
                            found_in_loaded = False
                            if self.follow_loaded_relation:
                                detected_relation = []
                                for edge in current_state['edges']:
                                    if agent_id == edge[0] and each_other_agent == edge[1]:
                                        to_yield = False
                                        found_in_loaded = True
                                        break
                                    # NOTE(review): this extends 'relation' with two scalar ids per
                                    # non-matching edge, unlike detected_relation's list-of-pairs
                                    # format elsewhere — confirm this is intended
                                    current_state['predicting']['relation'] += [agent_id, each_other_agent]
                            if not found_in_loaded:
                                # FORWARD COLLISION CHECKINGS
                                target_pose_0 = each_other_agent_pose_array[current_frame_idx]
                                target_agent_0 =Agent(x=target_pose_0[0],
                                                      y=target_pose_0[1],
                                                      yaw=target_pose_0[3],
                                                      length=max(1, current_state['agent'][each_other_agent]['shape'][0][1]),
                                                      width=max(1, current_state['agent'][each_other_agent]['shape'][0][0]),
                                                      agent_id=each_other_agent)
                                collision_0 = utils.check_collision(ego_agent, target_agent_0)
                                if ego_pose2_valid:
                                    collision_0 |= utils.check_collision(ego_agent2, target_agent_0)
                                if collision_0:
                                    # yield
                                    detected_relation = [[each_other_agent, agent_id]]
                                else:
                                    # FCC backwards
                                    ego_agent_0 =Agent(
                                        x=interpolated_trajectory[0, 0],
                                        y=interpolated_trajectory[0, 1],
                                        yaw=interpolated_trajectory[0, 3],
                                        length=max(1, current_state['agent'][agent_id]['shape'][0][1]),
                                        width=max(1, current_state['agent'][agent_id]['shape'][0][0]),
                                        agent_id=agent_id)
                                    collision_back = utils.check_collision(ego_agent_0, target_agent)
                                    if collision_back:
                                        # not yield
                                        detected_relation = [[agent_id, each_other_agent]]
                                    else:
                                        # check relation
                                        self.online_predictor.relation_pred_onetime(each_pair=[agent_id, each_other_agent],
                                                                                    current_frame=current_frame_idx,
                                                                                    clear_history=True,
                                                                                    current_data=current_state)
                                        detected_relation = current_state['predicting']['relation']
                            # data to save
                            if 'relations_per_frame_env' not in current_state['predicting']:
                                current_state['predicting']['relations_per_frame_env'] = {}
                            for dt in range(self.planning_interval):
                                if (current_frame_idx + dt) not in current_state['predicting']['relations_per_frame_env']:
                                    current_state['predicting']['relations_per_frame_env'][current_frame_idx + dt] = []
                                current_state['predicting']['relations_per_frame_env'][current_frame_idx + dt] += detected_relation
                            if [agent_id, each_other_agent] in detected_relation:
                                if [each_other_agent, agent_id] in detected_relation:
                                    # bi-directional relations, still yield
                                    pass
                                else:
                                    # the other agent reacts to us; never yield to it again
                                    my_reactors.append(each_other_agent)
                                    to_yield = False
                        if to_yield:
                            earliest_collision_idx = ego_index_checking
                            collision_point = [ego_pose[0], ego_pose[1]]
                            earliest_target_agent = each_other_agent
                            # -1 in x marks an invalid logged pose; treat the target as stopped
                            if abs(each_other_agent_pose_array[i, 0] + 1) < 0.1 or abs(each_other_agent_pose_array[i-5, 0] + 1) < 0.1:
                                target_speed = 0
                            else:
                                target_speed = euclidean_distance(each_other_agent_pose_array[i, :2], each_other_agent_pose_array[i-5, :2]) / 5
                            break
                    if earliest_collision_idx is not None:
                        break
                if earliest_collision_idx is not None or self.method_testing < 2:
                    # a conflict was found: brake to the stopping point before it
                    distance_to_travel = my_interpolator.get_distance_with_index(earliest_collision_idx) - S0
                    stopping_point = my_interpolator.interpolate(max(0, distance_to_travel - S0))[:2]
                    if euclidean_distance(interpolated_trajectory[0, :2],
                                          stopping_point) < MINIMAL_DISTANCE_TO_TRAVEL or distance_to_travel < MINIMAL_DISTANCE_TO_TRAVEL or my_current_v_per_step < 0.1:
                        planed_traj = self.get_trajectory_from_interpolator(my_interpolator=my_interpolator,
                                                                            my_current_speed=my_current_v_per_step,
                                                                            desired_speed=my_target_speed,
                                                                            emergency_stop=True)
                        agents_dic_copy[agent_id]['action'] = 'stop'
                    else:
                        planed_traj = self.adjust_speed_for_collision(interpolator=my_interpolator,
                                                                      distance_to_end=distance_to_travel,
                                                                      current_v=my_current_v_per_step,
                                                                      end_point_v=min(my_current_v_per_step * 0.8,
                                                                                      target_speed))
                        assert len(planed_traj.shape) > 1, planed_traj.shape
                        agents_dic_copy[agent_id]['action'] = 'yield'
                        # print("Yielding log: ", agent_id, each_other_agent, earliest_target_agent, earliest_collision_idx, distance_to_travel)
                else:
                    # no conflicts to yield
                    if euclidean_distance(interpolated_trajectory[0, :2], interpolated_trajectory[-1, :2]) < MINIMAL_DISTANCE_TO_TRAVEL:
                        planed_traj = self.get_trajectory_from_interpolator(my_interpolator=my_interpolator,
                                                                            my_current_speed=my_current_v_per_step,
                                                                            desired_speed=my_target_speed,
                                                                            hold_still=True)
                    else:
                        planed_traj = interpolated_trajectory
                    agents_dic_copy[agent_id]['action'] = 'controlled'
                if self.test_task == 1:
                    plan_for_ego = True
                if not plan_for_ego and ego_id == agent_id:
                    agents_dic_copy[agent_id]['action'] = None
                else:
                    if self.test_task != 2:
                        if collision_point is not None:
                            current_state['predicting']['points_to_mark'].append(collision_point)
                        current_state['predicting']['trajectory_to_mark'].append(planed_traj)
                    # if agent_id == 181:
                    #     for each_traj in prediction_traj_dic_m[agent_id]['rst']:
                    #         current_state['predicting']['trajectory_to_mark'].append(each_traj)
                    # replace the trajectory
                    planning_horizon, _ = planed_traj.shape
                    agents_dic_copy[agent_id]['pose'][current_frame_idx:planning_horizon+current_frame_idx, :] = planed_traj[:total_time_frame - current_frame_idx, :]
            current_state['agent'] = agents_dic_copy
        return current_state
def trajectory_from_cubic_BC(self, p1, p2, p3, p4, v):
# form a Bezier Curve
total_dist = utils.euclidean_distance(p4, p1)
total_t = min(93, int(total_dist/max(1, v)))
traj_to_return = []
for i in range(total_t):
if i >= 92:
break
t = (i+1)/total_t
p0_x = pow((1 - t), 3) * p1[0]
p0_y = pow((1 - t), 3) * p1[1]
p1_x = 3 * pow((1 - t), 2) * t * p2[0]
p1_y = 3 * pow((1 - t), 2) * t * p2[1]
p2_x = 3 * (1 - t) * pow(t, 2) * p3[0]
p2_y = 3 * (1 - t) * pow(t, 2) * p3[1]
p3_x = pow(t, 3) * p4[0]
p3_y = pow(t, 3) * p4[1]
traj_to_return.append((p0_x+p1_x+p2_x+p3_x, p0_y+p1_y+p2_y+p3_y))
return np.array(traj_to_return, ndmin=2)
    def select_trajectory_from_prediction(self, prediction_dic, agent_id, goal_point, original_trajectory,
                                          remaining_frames, follow_goal=False, follow_original_as_default=True):
        """Pick one predicted trajectory for `agent_id` from `prediction_dic`.

        Candidates are scored by softmax of the predictor's scores; when
        `follow_original_as_default` is set, the score is divided by each
        candidate's deviation from `original_trajectory` so the closest
        candidate wins. Returns (trajectory, follow_goal_flag), or None when
        the agent has no prediction.
        """
        if agent_id not in prediction_dic:
            return None
        # if always follow original as default
        if follow_original_as_default:
            follow_original = True
        else:
            follow_original = False
        rst = prediction_dic[agent_id]['rst']
        # softmax over the raw prediction scores
        score = np.exp(prediction_dic[agent_id]['score'])
        score /= np.sum(score)
        # rst may be a plain list of trajectories or a stacked array
        if isinstance(rst, type([])):
            total_rst = len(rst)
        else:
            total_rst = rst.shape[0]
        if self.method_testing < 0:
            # SimNet variety does not follow original path
            return rst[0], False
        if follow_original:
            # select the closest prediction and return
            distance = np.zeros_like(score)
            for i in range(total_rst):
                distance[i] = self.get_l2_regulate_distance_for_two_trajectories(original_trajectory, rst[i], remaining_frames)
            # high score and small deviation both favour a candidate
            best_idx = np.argmax(score/distance)
        else:
            best_idx = np.argmax(score)
            follow_goal = False
        return rst[best_idx], follow_goal
        # if follow_goal:
        #     distance = np.zeros_like(score)
        #     for i in range(total_rst):
        #         distance[i] = self.get_l2_regulate_distance_for_two_trajectories(original_trajectory, rst[i], remaining_frames)
        #     if min(distance) > MAX_DEVIATION_FOR_PREDICTION and remaining_frames > 5:
        #         follow_original = True
        #         best_idx = np.argmax(score/distance)
        #     else:
        #         best_idx = np.argmax(score)
        #
        #     distance_from_current_pose = self.get_l2_regulate_distance_for_two_trajectories(original_trajectory, [rst[best_idx, 0, :]], remaining_frames)
        #     current_v = euclidean_distance(rst[best_idx, 0, :2], rst[best_idx, 1, :2])
        #     if distance_from_current_pose > current_v:
        #         # too far to project back
        #         follow_original = False
        #     yaw_diff = utils.normalize_angle(original_trajectory[0, 3] - original_trajectory[-1, 3])
        #     if abs(yaw_diff) < math.pi/180*45:
        #         if current_v < MINIMAL_SPEED_TO_TRACK_ORG_GOAL:
        #             follow_original = False
        #     elif follow_goal:
        #         follow_original = True
        #
        #     return rst[best_idx], follow_original
def get_l2_regulate_distance_for_two_trajectories(self, original_trajectory, compared_trajectory, comparing_frames):
distance = []
for idx1, each_pose in enumerate(compared_trajectory):
if idx1 > comparing_frames:
break
distances_across = []
for idx2, each_in_org in enumerate(original_trajectory):
l2 = euclidean_distance(each_pose[:2], each_in_org[:2])
distances_across.append(l2)
distance.append(min(distances_across))
# return distance
return max(distance)
    def get_rescale_trajectory(self, reactor_current_pose, reactor_traj, reactor_interpolator, scale, debug=False,
                               current_v_per_step=None, constant_speed=False, current_a_per_step=None, target_speed=7,
                               follow_lanes=False):
        """Re-time `reactor_traj` along `reactor_interpolator`'s path.

        Per-frame travel distances are taken from the original trajectory and
        scaled by `scale` (or held constant / integrated from an acceleration
        when `constant_speed` is set), then clipped so speed changes stay within
        the desired accelerate/decelerate limits. The cumulative distances are
        interpolated back into poses. Returns a (total_time, 4) array; at most
        150 frames are produced. `follow_lanes` is currently unused here.
        """
        total_time = min(150, reactor_traj.shape[0])
        traj_to_return = np.zeros([total_time, 4])
        total_distance_traveled = []
        # starting speed: explicit value if given, otherwise the gap to the first pose
        if current_v_per_step is not None:
            current_v = current_v_per_step
        else:
            current_v = euclidean_distance(reactor_current_pose[:2], reactor_traj[0, :2])
        for i in range(total_time):
            if constant_speed:
                if current_a_per_step is None:
                    dist = current_v
                else:
                    # integrate acceleration, clipped to the per-frame desire limits
                    current_v += max(-A_SLOWDOWN_DESIRE/self.frame_rate, min(A_SPEEDUP_DESIRE/self.frame_rate, current_a_per_step))
                    current_v = max(0, current_v)
                    dist = current_v
            else:
                # scaled per-frame distance from the original trajectory
                if i == 0:
                    dist = utils.euclidean_distance(reactor_current_pose[:2], reactor_traj[i, :2])*scale
                else:
                    dist = utils.euclidean_distance(reactor_traj[i-1, :2], reactor_traj[i, :2])*scale
                # limit how fast the speed may change frame to frame
                if dist > current_v + A_SPEEDUP_DESIRE/self.frame_rate:
                    current_v += A_SPEEDUP_DESIRE/self.frame_rate
                    current_v = min(target_speed, current_v)
                    dist = current_v
                elif dist < current_v - A_SLOWDOWN_DESIRE/self.frame_rate:
                    current_v -= A_SLOWDOWN_DESIRE/self.frame_rate
                    current_v = max(0, current_v)
                    dist = current_v
            total_distance_traveled.append(dist)
        # convert per-frame distances into cumulative arc length, then back into poses
        total_distance_traveled = np.cumsum(total_distance_traveled)
        for i in range(len(total_distance_traveled)):
            traj_to_return[i, :] = reactor_interpolator.interpolate(total_distance_traveled[i], debug=debug)
        return traj_to_return
def filter_trajectory_after_goal_point(self, traj, goal_point):
last_pose = None
last_distance = 999999
traj_to_returen = traj.copy()
for idx, each_pose in enumerate(traj):
if last_pose is not None:
traj_to_returen[idx, :] = last_pose
continue
next_distance = euclidean_distance(each_pose[:2], goal_point)
if next_distance < last_distance + 0.001:
last_distance = next_distance
else:
last_pose = each_pose
return traj_to_returen
def get_action(self):
return 0
    def assert_traj(self, traj):
        """Sanity-check a trajectory for sudden speed jumps between frames.

        Compares 5-frame average speeds at consecutive offsets over the first
        ~20 frames; returns the first index where the speed changes by more
        than 5.0/frame_rate (printing a warning), or -1 if the trajectory is
        smooth or shorter than 30 frames.
        """
        total_time, _ = traj.shape
        # too short to evaluate the 5-frame windows safely
        if total_time < 30:
            return -1
        for i in range(total_time):
            if i == 0:
                continue
            # only the first ~20 frames are checked; also keep 6+i in bounds
            if i >= total_time - 3 or i >= 20:
                break
            # average per-frame speed over two overlapping 5-frame windows
            dist_1 = euclidean_distance(traj[6+i, :2], traj[1+i, :2]) / 5
            dist_2 = euclidean_distance(traj[5+i, :2], traj[i, :2]) / 5
            if abs(dist_1 - dist_2) > 5.0/self.frame_rate:
                print("Warning: frame jumping at: ", i, abs(dist_1 - dist_2))
                return i
        return -1
class SudoInterpolator:
    """Piecewise-linear arc-length interpolator over a pose trajectory.

    Wraps an (N, 4) trajectory (x, y, _, yaw) together with the agent's current
    pose and answers "where is the agent after travelling `distance` along this
    path?" queries. Segments shorter than MINIMAL_DISTANCE_PER_STEP are treated
    as noise and skipped.
    """

    def __init__(self, trajectory, current_pose):
        # trajectory: (N, >=2) array of future poses; current_pose: the pose
        # the arc length is measured from
        self.trajectory = trajectory
        self.current_pose = current_pose

    def interpolate(self, distance: float, starting_from=None, debug=False):
        """Return the pose [x, y, 0, yaw] reached after `distance` along the path.

        Returns the current pose unchanged for non-positive/tiny distances or a
        degenerate trajectory; overshooting past the end extrapolates along the
        last valid segment.
        """
        if starting_from is not None:
            assert False, 'not implemented'
        else:
            pose = self.trajectory.copy()
        if distance <= MINIMAL_DISTANCE_PER_STEP:
            return self.current_pose
        if pose.shape is None or len(pose.shape) < 2:
            return self.current_pose
        total_frame, _ = pose.shape
        # assert distance > 0, distance
        distance_input = distance
        for i in range(total_frame):
            # segment endpoints: current pose -> first point, then consecutive points
            if i == 0:
                pose1 = self.current_pose[:2]
                pose2 = pose[0, :2]
            else:
                pose1 = pose[i - 1, :2]
                pose2 = pose[i, :2]
            next_step = euclidean_distance(pose1, pose2)
            if debug:
                print(f"{i} {next_step} {distance} {total_frame} {self.current_pose}")
            if next_step >= MINIMAL_DISTANCE_PER_STEP:
                if distance > next_step and i != total_frame - 1:
                    # target lies beyond this segment: consume it and continue
                    distance -= next_step
                    continue
                else:
                    return self.get_state_from_poses(pose1, pose2, distance, next_step)
                    # x = (pose2[0] - pose1[0]) * distance / next_step + pose1[0]
                    # y = (pose2[1] - pose1[1]) * distance / next_step + pose1[1]
                    # yaw = utils.normalize_angle(get_angle_of_a_line(pt1=pose1, pt2=pose2))
                    # return [x, y, 0, yaw]
        # only reached when every segment was shorter than the minimal step
        if distance_input - 2 > distance:
            # hide it outshoot
            # logging.warning(f'Over shooting while planning!!!!!!!!!')
            return self.get_state_from_poses(pose1, pose2, distance, next_step)
        else:
            # return current pose if trajectory not moved at all
            return self.current_pose
        # pose1 = self.current_pose[:2]
        # pose2 = pose[0, :2]
        # return self.get_state_from_poses(pose1, pose2, 0, 0.001)

    def get_state_from_poses(self, pose1, pose2, mul, divider):
        """Linearly interpolate (or extrapolate) between two xy points.

        Returns [x, y, 0, yaw] at fraction mul/divider along pose1->pose2; the
        +0.0001 guards against a zero-length segment.
        """
        x = (pose2[0] - pose1[0]) * mul / (divider + 0.0001) + pose1[0]
        y = (pose2[1] - pose1[1]) * mul / (divider + 0.0001) + pose1[1]
        yaw = utils.normalize_angle(get_angle_of_a_line(pt1=pose1, pt2=pose2))
        return [x, y, 0, yaw]

    def get_distance_with_index(self, index: int):
        """Cumulative arc length from the current pose up to `index` (exclusive).

        Pass -1 to sum over the whole trajectory; segments shorter than
        MINIMAL_DISTANCE_PER_STEP are ignored. Index 0 yields 0.
        """
        distance = 0
        if index != 0:
            pose = self.trajectory.copy()
            total_frame, _ = pose.shape
            for i in range(total_frame):
                if i >= index != -1:
                    # pass -1 to travel through all indices
                    break
                elif i == 0:
                    step = euclidean_distance(self.current_pose[:2], pose[i, :2])
                else:
                    step = euclidean_distance(pose[i, :2], pose[i-1, :2])
                if step > MINIMAL_DISTANCE_PER_STEP:
                    distance += step
        return distance

    def get_speed_with_index(self, index: int):
        """Per-step speed at `index` (distance from the previous pose), or None for index 0."""
        if index != 0:
            p_t = self.trajectory[index, :2]
            p_t1 = self.trajectory[index - 1, :2]
            speed_per_step = utils.euclidean_distance(p_t, p_t1)
            return speed_per_step
        else:
            return None
class Agent(car.Agent):
    """Planner-side agent; adapts yaw angles from the car.Agent convention."""

    def yaw_changer(self, yaw):
        # negate first, then map through the project's axis-convention helper
        return change_axis(-yaw)
| Tsinghua-MARS-Lab/InterSim | simulator/plan/env_planner.py | env_planner.py | py | 107,809 | python | en | code | 119 | github-code | 6 |
21928800747 | # Coroutines
import time
def coroutine():
    """Generator-based coroutine: receives words via .send() and prints whether
    each occurs in a fixed sample sentence.

    Prime it with next()/__next__() before the first send(); close() ends it.
    """
    text = "Hey guys! Welcome to Parallax Coders. How are you ? Have a great day"
    while True:
        checking_text = (yield)  # suspend until a word is sent in
        if checking_text in text:
            print("Your word has been found !")
        else:
            # fixed: the miss message previously said "book" instead of "word";
            # also dropped a pointless time.sleep(3) that stalled priming
            print("Your word has not been found !")
# Drive the coroutine: prime it once, then push words in with send().
searcher = coroutine()
print("Initializing function...")
searcher.__next__()  # prime the coroutine so it pauses at the first (yield)
searcher.send("guys")
print("Sent !")
searcher.send("Parallax Coders")
print("Sent again !")
searcher.send("How")
print("Again sent !")
searcher.close()  # raises GeneratorExit inside the coroutine, ending it
| MahbinAhmed/Learning | Python/Python Learning/Revision/56. Coroutines.py | 56. Coroutines.py | py | 573 | python | en | code | 0 | github-code | 6 |
18003867595 | import torch
import torch.nn as nn
import torch.nn.functional as F
from algo.pn_utils.maniskill_learn.networks import build_model, hard_update, soft_update
from algo.pn_utils.maniskill_learn.optimizers import build_optimizer
from algo.pn_utils.maniskill_learn.utils.data import to_torch
from ..builder import MFRL
from algo.pn_utils.maniskill_learn.utils.torch import BaseAgent
@MFRL.register_module()
class TD3(BaseAgent):
def __init__(self, policy_cfg, value_cfg, obs_shape, action_shape, action_space, batch_size=128, gamma=0.99,
update_coeff=0.005, action_noise=0.2, noise_clip=0.5, policy_update_interval=2):
super(TD3, self).__init__()
policy_optim_cfg = policy_cfg.pop("optim_cfg")
value_optim_cfg = value_cfg.pop("optim_cfg")
self.gamma = gamma
self.batch_size = batch_size
self.update_coeff = update_coeff
self.policy_update_interval = policy_update_interval
self.action_noise = action_noise
self.noise_clip = noise_clip
policy_cfg['obs_shape'] = obs_shape
policy_cfg['action_shape'] = action_shape
policy_cfg['action_space'] = action_space
value_cfg['obs_shape'] = obs_shape
value_cfg['action_shape'] = action_shape
self.policy = build_model(policy_cfg)
self.critic = build_model(value_cfg)
self.target_policy = build_model(policy_cfg)
self.target_critic = build_model(value_cfg)
hard_update(self.target_critic, self.critic)
hard_update(self.target_policy, self.policy)
self.policy_optim = build_optimizer(self.policy, policy_optim_cfg)
self.critic_optim = build_optimizer(self.critic, value_optim_cfg)
def update_parameters(self, memory, updates):
sampled_batch = memory.sample(self.batch_size)
sampled_batch = to_torch(sampled_batch, dtype='float32', device=self.device, non_blocking=True)
for key in sampled_batch:
if not isinstance(sampled_batch[key], dict) and sampled_batch[key].ndim == 1:
sampled_batch[key] = sampled_batch[key][..., None]
with torch.no_grad():
_, _, next_mean_action, _, _ = self.target_policy(sampled_batch['next_obs'], mode='all')
noise = (torch.randn_like(next_mean_action) * self.action_noise).clamp(-self.noise_clip, self.noise_clip)
next_action = self.target_policy['policy_head'].clamp_action(next_mean_action + noise)
q_next_target = self.target_critic(sampled_batch['next_obs'], next_action)
min_q_next_target = torch.min(q_next_target, dim=-1, keepdim=True).values
q_target = sampled_batch['rewards'] + (1 - sampled_batch['dones']) * self.gamma * min_q_next_target
q = self.critic(sampled_batch['obs'], sampled_batch['actions'])
critic_loss = F.mse_loss(q, q_target.repeat(1, q.shape[-1])) * q.shape[-1]
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
if updates % self.policy_update_interval == 0:
policy_loss = -self.critic(sampled_batch['obs'], self.policy(sampled_batch['obs'], mode='eval'))[
..., 0].mean()
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
soft_update(self.target_critic, self.critic, self.update_coeff)
soft_update(self.target_policy, self.policy, self.update_coeff)
else:
policy_loss = torch.zeros(1)
return {
'critic_loss': critic_loss.item(),
'q': torch.min(q, dim=-1).values.mean().item(),
'q_target': torch.mean(q_target).item(),
'policy_loss': policy_loss.item(),
}
| PKU-EPIC/UniDexGrasp | dexgrasp_policy/dexgrasp/algo/pn_utils/maniskill_learn/methods/mfrl/td3.py | td3.py | py | 3,767 | python | en | code | 63 | github-code | 6 |
34421435243 | import numpy as np
import pandas as pd
import json
import argparse
import catboost
from catboost import CatBoostClassifier, Pool, metrics, cv
from catboost.utils import get_roc_curve, get_confusion_matrix, eval_metric
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
def build_model(**kwargs):
    """Create a CatBoostClassifier that also tracks Accuracy as a custom metric.

    Any keyword arguments are forwarded to the CatBoostClassifier constructor.
    """
    return CatBoostClassifier(custom_loss=[metrics.Accuracy()], **kwargs)
def plot_roc_curves(model_list, X, Y, labels, cat_features_indices):
    """Plot ROC curves for up to three fitted models and save the figure.

    Args:
        model_list: fitted CatBoost models (at most 3 — one per color).
        X, Y: evaluation features and labels.
        labels: legend label per model.
        cat_features_indices: indices of categorical feature columns.

    Raises:
        ValueError: if more than three models are passed.
    Saves the plot to 'roc_curve_result.png'.
    """
    k = len(model_list)
    if k > 3:
        # was `assert(k <= 3)`: asserts are stripped under `python -O`,
        # so validate explicitly instead
        raise ValueError(f"plot_roc_curves supports at most 3 models, got {k}")
    color_list = ['blue', 'green', 'red']
    plt.title('ROC for models')
    # the Pool does not depend on the model: build it once outside the loop
    pool = Pool(X, Y, cat_features=cat_features_indices)
    # zip stops at the shortest sequence, so explicit [:k] slices are unnecessary
    for m, color, label in zip(model_list, color_list, labels):
        fpr, tpr, _ = get_roc_curve(m, pool)
        plt.plot(fpr, tpr, color=color, label=label, linewidth=0.5)
    plt.legend()
    plt.grid(True, linewidth=0.75)
    plt.savefig('roc_curve_result.png', dpi=150)
def stats(model_list, X_test, Y_test, cat_features_indices):
    """Print per-model evaluation info and return the AUC scores.

    For each model: confusion matrix, AUC, number of correct predictions,
    and the feature-importance table.

    Args:
        model_list: fitted CatBoost models.
        X_test, Y_test: held-out features and labels (Y_test is a pandas Series).
        cat_features_indices: indices of categorical feature columns.

    Returns:
        List of AUC scores, one per model (same order as model_list).
    """
    auc_scores = []
    # invariant across models: convert the labels once, not per iteration
    ans = Y_test.to_numpy()
    print('Models info:')
    for k, m in enumerate(model_list):  # enumerate instead of range(len(...))
        pr_prob = m.predict_proba(X_test)
        pr = m.predict(X_test)
        n_correct = sum(1 for i, j in zip(ans, pr) if i == j)
        cm = get_confusion_matrix(m, Pool(X_test, Y_test, cat_features=cat_features_indices))
        auc_scores.append(eval_metric(ans, pr_prob, 'AUC')[0])
        print(f'\nModel {k} confusion_matrix:\n', cm)
        print('AUC:', auc_scores[k])
        print(f'Correct predictions: {n_correct}/{Y_test.shape[0]}\n')
        print(m.get_feature_importance(prettified=True))
    return auc_scores
def cv_models(model_list, X, Y, cat_features_indices):
    """Cross-validate each model's parameters with Logloss and print summary stats.

    For every model, runs catboost.cv on (X, Y) using the model's own parameters
    (loss switched to Logloss) and prints mean/std test accuracy at the best
    iteration plus that iteration's index.
    """
    print('CV:')
    # iterate models directly; the old range(len(...)) index was never used
    for m in model_list:
        cv_params = m.get_params()
        cv_params.update({'loss_function': metrics.Logloss()})
        cv_data = cv(Pool(X, Y, cat_features=cat_features_indices), cv_params, logging_level='Silent')
        best_step = np.argmax(cv_data['test-Accuracy-mean'])
        print('- Mean: ', cv_data['test-Accuracy-mean'][best_step])
        print('- Std: ', cv_data['test-Accuracy-std'][best_step])
        print('- Best step: ', best_step)
def split_dataset(df):
    """Split the labelled frame into 80/10/10 train/val/test partitions.

    Drops the raw 'score' column from the features and uses 'class' as the
    label; the random seed is fixed so splits are reproducible.
    """
    seed = 123
    labels = df['class']
    features = df.drop(['class', 'score'], axis=1)
    # 80% train, then the remaining 20% is halved into validation and test
    X_train, X_rest, Y_train, Y_rest = train_test_split(features, labels, train_size=0.8, random_state=seed)
    X_val, X_test, Y_val, Y_test = train_test_split(X_rest, Y_rest, train_size=0.5, random_state=seed)
    return X_train, X_val, X_test, Y_train, Y_val, Y_test
def build_dataset(dataset_filename):
    """Load the scored CSV and derive the target column.

    Reads the file with its first column as index and adds a 'class' column
    equal to score // 50 (integer bucket per 50 score points).
    """
    threshold_score = 50
    frame = pd.read_csv(dataset_filename, index_col=0)
    frame['class'] = frame['score'] // threshold_score
    return frame
if __name__ == "__main__":
    # CLI: --dataset_file is the scored CSV, --model_params_file is a JSON
    # mapping model label -> CatBoost constructor kwargs.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_file', type=str, required=True)
    parser.add_argument('--model_params_file', type=str, required=True)
    args = parser.parse_args()
    dataset_filename = args.dataset_file
    models_params_filename = args.model_params_file
    df = build_dataset(dataset_filename)
    X_train, X_val, X_test, Y_train, Y_val, Y_test = split_dataset(df)
    # treat every non-float column as categorical for CatBoost
    cat_features_indices = np.where(X_test.dtypes != float)[0]
    labels, params, models = [], {}, []
    with open(models_params_filename, 'r') as f:
        params = json.load(f)
    labels = list(params.keys())
    # one model per parameter set in the JSON file
    for label in labels:
        models.append(build_model(**params[label]))
    for m in models:
        m.fit(X_train, Y_train, eval_set=(X_val, Y_val), cat_features=cat_features_indices)
    auc_scores = stats(models, X_test, Y_test, cat_features_indices)
    # append each model's AUC to its legend label before plotting
    for i in range(0, len(labels)):
        labels[i] = labels[i] + f': AUC={auc_scores[i]:.3f}'
    plot_roc_curves(models, X_test, Y_test, labels, cat_features_indices)
    cv_models(models, X_train, Y_train, cat_features_indices)
| mihael-tunik/SteppingStonesCatboost | classifier.py | classifier.py | py | 4,248 | python | en | code | 0 | github-code | 6 |
23185192152 | #!/usr/bin/env python3
"""
Download the name of all games in the bundle.
Download their info and scrore from opencritic if they exist there.
Sort by score.
"""
import json
import urllib.request
import urllib.parse
from typing import List
from bs4 import BeautifulSoup
from typing_extensions import TypedDict
# Record for one bundle game; stub entries start with -1 scores and empty strings
# until get_opencritic_info() fills them in.
Game = TypedDict(
    "Game",
    {
        "name": str,          # game title from the itch.io bundle page
        "itch": str,          # itch.io page URL
        "opencritic": str,    # opencritic page URL ("" until matched)
        "steam": str,         # steam store URL ("" until matched)
        "score": int,         # opencritic median score, -1 when unknown
        "correct": float,     # search distance of the match, -1 when unmatched
        "description": str,   # opencritic description text
        "genres": List[str],  # genre names from opencritic
    },
)
def get_game_list() -> List[Game]:
    """Parse the saved bundle page and return one stub Game entry per title."""
    # bundle_url = "https://itch.io/b/520/bundle-for-racial-justice-and-equality"
    # The live page loads games while scrolling, so a fully-scrolled copy was
    # saved to disk with a bit of javascript and is parsed from there instead.
    with open("itchio_520.html", "r",) as inf:
        soup = BeautifulSoup(inf, "html.parser")

    cells = soup.find_all("div", class_="index_game_cell_widget game_cell")
    games: List[Game] = []
    for cell in cells:
        attrs = cell.find("div", class_="label").a.attrs
        entry: Game = {
            "name": attrs["title"],
            "itch": attrs["href"],
            # everything below is filled in later by get_opencritic_info()
            "opencritic": "",
            "steam": "",
            "score": -1,
            "correct": -1,
            "description": "",
            "genres": [],
        }
        games.append(entry)
    return games
def get_opencritic_info(games: List[Game]) -> List[Game]:
    """
    Fill each game's opencritic fields (score, links, description, genres) in place.

    For every game: query the opencritic search API by name, take the first hit,
    skip games whose match distance exceeds 0.3, then fetch the game's detail
    record and copy the interesting fields into the Game dict. Returns the same
    list (mutated).
    """
    url_api_search = "https://api.opencritic.com/api/game/search?"
    url_api_game = "https://api.opencritic.com/api/game/{}"
    url_opencritic = "https://www.opencritic.com/game/{}/{}"
    url_steam = "https://store.steampowered.com/app/{}"
    # a browser-like User-Agent; the API rejects the default urllib agent
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0"
    }
    for game in games:
        print("Getting info for: {}".format(game["name"]))
        # Search for the game, Assume the first respond is the correct one
        url1 = url_api_search + urllib.parse.urlencode({"criteria": game["name"]})
        request1 = urllib.request.Request(url1, None, headers)
        respond1 = json.loads(
            urllib.request.urlopen(request1).read().decode("utf-8")
        )[0]
        # From 0.2 to 0.3 some games were correct some not
        if respond1["dist"] > 0.3:
            continue
        # Get game info
        url2 = url_api_game.format(respond1["id"])
        request2 = urllib.request.Request(url2, None, headers)
        respond2 = json.loads(
            urllib.request.urlopen(request2).read().decode("utf-8")
        )
        game["correct"] = respond1["dist"]
        game["score"] = respond2.get("medianScore", -1)
        game["opencritic"] = url_opencritic.format(respond1["id"], respond1["name"])
        game["steam"] = url_steam.format(respond2.get("steamId", ""))
        game["description"] = respond2.get("description", "")
        game["genres"] = [val["name"] for val in respond2.get("Genres", [])]
    return games
def sort_by_score(games: List[Game]) -> List[Game]:
    """Return the games ordered by opencritic score, best first.

    Unmatched games keep score -1 and therefore sink to the end of the list.
    """
    # local import keeps the module's dependency list unchanged;
    # itemgetter is the idiomatic (and faster) key for dict field access
    from operator import itemgetter
    return sorted(games, key=itemgetter('score'), reverse=True)
if __name__ == "__main__":
    # Pipeline: scrape the bundle list, snapshot it, enrich with opencritic
    # data, sort by score, and write the final list.
    print("Getting the game list")
    my_games = get_game_list()
    # snapshot before the slow network enrichment step
    with open("all_games.json", "w") as outf:
        json.dump(my_games, outf, indent=2)
    print("Getting info from opencritic")
    my_games = get_opencritic_info(my_games)
    print("Sorting games")
    my_games = sort_by_score(my_games)
    with open("games.json", "w") as outf:
        json.dump(my_games, outf, indent=2)
| Hyerfatos/itchio_bundle_games | itch.py | itch.py | py | 3,789 | python | en | code | 0 | github-code | 6 |
26039112676 | from __future__ import annotations
from dataclasses import dataclass
from pants.core.goals.package import BuiltPackageArtifact
from pants.util.strutil import bullet_list, pluralize
@dataclass(frozen=True)
class BuiltDockerImage(BuiltPackageArtifact):
    """A built Docker image: its image ID, the tags applied, and the metadata file path."""

    # We don't really want a default for this field, but the superclass has a field with
    # a default, so all subsequent fields must have one too. The `create()` method below
    # will ensure that this field is properly populated in practice.
    image_id: str = ""
    tags: tuple[str, ...] = ()

    @classmethod
    def create(
        cls, image_id: str, tags: tuple[str, ...], metadata_filename: str
    ) -> BuiltDockerImage:
        # Preferred constructor: derives the human-readable `extra_log_lines`
        # from the tags. A single tag renders inline; several render as a
        # bullet list on the following lines.
        tags_string = tags[0] if len(tags) == 1 else f"\n{bullet_list(tags)}"
        return cls(
            image_id=image_id,
            tags=tags,
            relpath=metadata_filename,
            extra_log_lines=(
                f"Built docker {pluralize(len(tags), 'image', False)}: {tags_string}",
                f"Docker image ID: {image_id}",
            ),
        )
| pantsbuild/pants | src/python/pants/backend/docker/package_types.py | package_types.py | py | 1,072 | python | en | code | 2,896 | github-code | 6 |
10159706498 | # 6) Write a Script to sum of prime numbers in a given number
number = int(input("Enter a no:"))
sum = 0
while number != 0:
rem = number % 10
number = number // 10
if rem != 4 and rem != 6 and rem != 8 and rem != 9:
sum = sum + rem
print("the sum is:", sum)
| suchishree/django_assignment1 | python/looping/while loop/demo8.py | demo8.py | py | 280 | python | en | code | 0 | github-code | 6 |
35708774977 | import random
def local_search(items, capacity):
    """
    Solves the knapsack problem using a local search approach.

    Starts from the solution containing every item, then repeatedly tries
    swapping one chosen item for a random item and keeps the neighbour
    whenever it evaluates better.

    Args:
        items: A list of items, where each item is a tuple of (value, weight).
        capacity: The capacity of the knapsack.

    Returns:
        A list of items that are included in the knapsack.
    """
    solution = list(items)

    while True:
        best_solution = solution
        for i in range(len(items)):
            new_solution = solution[:]
            # Bug fix: once an item has been swapped out of the solution in a
            # previous round, list.remove() on it raised ValueError; only
            # remove the item when it is actually present.
            if items[i] in new_solution:
                new_solution.remove(items[i])
            new_solution.append(items[random.randint(0, len(items) - 1)])
            if evaluate_solution(new_solution, capacity) > evaluate_solution(best_solution, capacity):
                best_solution = new_solution
        if best_solution == solution:
            break
        solution = best_solution

    return solution


def evaluate_solution(solution, capacity):
    """
    Evaluates a solution to the knapsack problem.

    Args:
        solution: A list of items that are included in the knapsack.
        capacity: The capacity of the knapsack.

    Returns:
        The total value of the included items, or 0 when their combined
        weight exceeds the capacity.
    """
    value = 0
    weight = 0
    for item in solution:
        value += item[0]
        weight += item[1]
    if weight > capacity:
        return 0
    return value
def main():
    """Demo entry point: solve a small knapsack instance and print the picks."""
    demo_items = [(10, 5), (20, 10), (30, 15)]
    knapsack_capacity = 20
    chosen = local_search(demo_items, knapsack_capacity)
    print("The following items are included in the knapsack:")
    for chosen_item in chosen:
        print(chosen_item)


if __name__ == "__main__":
    main()
# This code first defines a function called local_search(). This function takes two arguments: items and capacity. items is a list of items, where each item is a tuple of (value, weight). capacity is the capacity of the knapsack.
#The local_search() function then works by iteratively improving the solution. At each iteration, the function randomly removes one item from the solution and then randomly adds another item to the solution. The function then evaluates the new solution and keeps it if it is better than the old solution.
#The local_search() function then returns the best solution that it finds.
#The evaluate_solution() function works by calculating the value of a solution. The value of a solution is the sum of the values of the items that are included in the solution.
#The main() function then calls the local_search() function with the items and capacity. The main() function then prints the list of items that are included in the best solution.
#To run this code, you can save it as a Python file and then run it from the command line. For example, if you save the code as local_search.py, you can run it by typing the following command into the command line:
| Jonathanseng/Algorithm-Design-Methods | 13. Local Search/13.7 Local Search Pattern.py | 13.7 Local Search Pattern.py | py | 2,691 | python | en | code | 3 | github-code | 6 |
28773188393 | """
examples
@when('the user searches for "{phrase}"')
def step_impl(context, phrase):
search_input = context.browser.find_element_by_name('q')
search_input.send_keys(phrase + Keys.RETURN)
@then('results are shown for "{phrase}"')
def step_impl(context, phrase):
links_div = context.browser.find_element_by_id('links')
assert len(links_div.find_elements_by_xpath('//div')) > 0
search_input = context.browser.find_element_by_name('q')
assert search_input.get_attribute('value') == phrase
"""
import time
from behave import *
from selenium.common.exceptions import (NoAlertPresentException,
NoSuchElementException,
TimeoutException)
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as cond
from selenium.webdriver.support.ui import WebDriverWait
ROCKETMILES_HOME = "https://www.rocketmiles.com"
LOCATION_TO_SELECT = "Peoria, IL, USA"
OPENING_MODAL_CLOSE_XPATH = '//div[@id="new-sign-up-modal"]//button[@class="close"]'
SUBMIT_BUTTON = (
'//button[@class="rm-btn-orange search-submit-btn"]/span[@class="ng-scope"]'
)
@given("a web browser is at the rocketmiles home page")
def step_impl(context):
context.browser.get(ROCKETMILES_HOME)
time.sleep(20)
try:
# Remove initial modal if present
context.browser.find_element(By.XPATH, '//div[@id="new-sign-up-modal"]')
context.browser.find_element(By.XPATH, OPENING_MODAL_CLOSE_XPATH).click()
except NoSuchElementException:
# This modal is only displayed on first startup so
# we don't expect to see it for subsequent tests
pass
@given("the location is blank")
def step_impl(context):
location_dropdown = context.browser.find_element(By.NAME, "locationSearch")
location_dropdown.clear()
wait_for_it(context.browser, location_dropdown)
selected_location = location_dropdown.text
assert selected_location == ""
@when("a search is initiated")
def step_impl(context):
submit_btn = context.browser.find_element(By.XPATH, SUBMIT_BUTTON)
click_on_it(context.browser, submit_btn)
time.sleep(4)
@then("the missing location error is shown")
def step_impl(context):
location_error_modal = context.browser.find_element(
By.XPATH, '//div[@class="popover-inner"]//div[@class="popover-content ng-binding"]'
)
wait_for_it(context.browser, location_error_modal)
time.sleep(4)
@given("the reward program is blank")
def step_impl(context):
rewards_dropdown = context.browser.find_element(By.NAME, "programAutosuggest")
rewards_dropdown.clear()
wait_for_it(context.browser, rewards_dropdown)
selected_program = rewards_dropdown.text
assert selected_program == ""
@given("the location is not blank")
def step_impl(context):
location_dropdown = context.browser.find_element(By.NAME, "locationSearch")
location_dropdown.click()
time.sleep(4)
location_dropdown.send_keys(LOCATION_TO_SELECT[:-4])
time.sleep(4)
location_dropdown.send_keys(Keys.ARROW_DOWN + Keys.RETURN)
time.sleep(4)
selected_location = location_dropdown.text
print("selected_location is {}".format(selected_location))
assert selected_location == LOCATION_TO_SELECT
@then("the missing reward program error is shown")
def step_impl(context):
    """Assert the 'reward program required' popover becomes visible."""
    # Fixed XPath: the original read '//div[@class-="popover-title"]a//...',
    # which is syntactically invalid ('class-=' and a stray 'a' step) and
    # could never locate the popover content.
    reward_error_modal = context.browser.find_element(
        By.XPATH, '//div[@class="popover-title"]//div[@class="popover-content"]'
    )
    wait_for_it(context.browser, reward_error_modal)
@When("blank")
def step_impl(context):
raise NotImplementedError("STEP: blank")
pass
def click_on_it(driver, element):
    """Move the pointer onto *element* and click it via an action chain."""
    ActionChains(driver).move_to_element(element).click().perform()
def wait_for_it(driver, element):
    """Wait up to 10s for *element* to become visible; log failures and continue."""
    try:
        WebDriverWait(driver, 10).until(cond.visibility_of(element))
    except (NoAlertPresentException, TimeoutException) as py_ex:
        # Fixed log text: the original printed "Alert not present" even when
        # the failure was a visibility timeout, which made the logs misleading.
        print("Element not visible within timeout (or alert not present)")
        print(py_ex)
        print(py_ex.args)
return element
| kevindvaf/rocketmiles | features/steps/search.py | search.py | py | 4,218 | python | en | code | 0 | github-code | 6 |
20602544780 | from sqlalchemy import create_engine, Column, Integer, String, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///test.db', echo=True)
Base = declarative_base(engine)
########################################################################
class CompanyModel(Base):
    """Declarative model for the COMPANY table."""
    __tablename__ = 'COMPANY'
    # __table_args__ = {'autoload':True} # when auto is enabled manual mapping is not required
    ############BEGIN MANUAL MAPPING ################################################
    ID = Column(Integer, primary_key=True)
    NAME = Column(String)
    AGE = Column(Integer)
    ADDRESS = Column(String)
    SALARY = Column(Float)

    def __init__(self, id, name, age, address, salary):
        # Bug fixes: the parameter was misspelled 'addreess' (so `address`
        # raised NameError), and values were assigned to unmapped lowercase
        # attributes (self.id, ...) instead of the mapped columns, leaving
        # every inserted row with NULL column values.
        self.ID = id
        self.NAME = name
        self.AGE = age
        self.ADDRESS = address
        self.SALARY = salary

    def __repr__(self):
        # Use the mapped attributes; instances loaded from the database never
        # run __init__, so self.id/self.name did not exist on queried rows.
        return "<Company - '%s': '%s' >" % (self.ID, self.NAME)
# ######################END MANUAL MAPPING #############################################
#----------------------------------------------------------------------
def loadSession():
    """Create and return a new SQLAlchemy session bound to the module-level engine."""
    # Removed the dead local `metadata = Base.metadata`; it was never used.
    Session = sessionmaker(bind=engine)
    session = Session()
    return session
if __name__ == "__main__":
    # Open a session and dump every COMPANY row as "NAME AGE ADDRESS SALARY".
    session = loadSession()
    res = session.query(CompanyModel).all()
    for i in res:
        print (i.NAME+' '+ str(i.AGE) + ' ' + i.ADDRESS + ' ' + str(i.SALARY))
'''
could also do it using mapper
from sqlalchemy import create_engine, Column, MetaData, Table
from sqlalchemy import Integer, String, Text
from sqlalchemy.orm import mapper, sessionmaker
class Bookmarks(object):
pass
#----------------------------------------------------------------------
def loadSession():
""""""
dbPath = 'places.sqlite'
engine = create_engine('sqlite:///%s' % dbPath, echo=True)
metadata = MetaData(engine)
moz_bookmarks = Table('moz_bookmarks', metadata,
Column('id', Integer, primary_key=True),
Column('type', Integer),
Column('fk', Integer),
Column('parent', Integer),
Column('position', Integer),
Column('title', String),
Column('keyword_id', Integer),
Column('folder_type', Text),
Column('dateAdded', Integer),
Column('lastModified', Integer)
)
mapper(Bookmarks, moz_bookmarks)
Session = sessionmaker(bind=engine)
session = Session()
if __name__ == "__main__":
session = loadSession()
res = session.query(Bookmarks).all()
print res[1].title
'''
| BhujayKumarBhatta/flask-learning | flaskr/db/mysqlalchemy.py | mysqlalchemy.py | py | 2,971 | python | en | code | 1 | github-code | 6 |
37158346153 | import squarify
import matplotlib.pyplot as plt
import matplotlib.cm
import numpy as np
x = 0.
y = 0.
width = 950
height = 733
fig = plt.figure(figsize=(15, 12))
ax = fig.add_subplot(111, axisbg='white')
values = [285.4, 188.4, 173, 140.6, 91.4, 75.5, 62.3, 39.6, 29.4, 28.5, 26.2, 22.2]
labels = ['South Africa', 'Egypt', 'Nigeria', 'Algeria', 'Morocco', 'Angola', 'Libya', 'Tunisia', 'Kenya', 'Ethiopia', 'Ghana', 'Cameron']
colors = [0]*11
for i in range(11):
colors[i] = tuple(np.random.randint(0,255,3)/255.0)
initvalues = values
values = squarify.normalize_sizes(values, width, height)
rects = squarify.padded_squarify(values, x, y, width, height)
cmap = matplotlib.cm.get_cmap()
color = [cmap(random.random()) for i in range(len(values))]
x = [rect['x'] for rect in rects]
y = [rect['y'] for rect in rects]
dx = [rect['dx'] for rect in rects]
dy = [rect['dy'] for rect in rects]
ax.bar(x, dy, width=dx, bottom=y, color=colors, label=labels, align='edge')
va = 'center'
idx=1
for l,r,v in zip(labels, rects, initvalues):
x,y,dx,dy = r['x'], r['y'], r['dx'], r['dy']
ax.text(x+dx/2, y+dy/2+10, str(idx)+'--> '+l, va=va, ha='center', color='white', fontsize=14)
ax.text(x+dx/2, y+dy/2-12, '($'+str(v)+'b)', va=va, ha='center', color='white', fontsize=12)
idx=idx+1
ax.set_xlim(0, 1000)
ax.set_ylim(0, 1000)
plt.title('Top 12 GDP Africa Country', fontsize=20)
plt.savefig('datavis/Africa-GDP') | QiliWu/Python-datavis | datavis/Africa GDP.py | Africa GDP.py | py | 1,435 | python | en | code | 2 | github-code | 6 |
71819476349 | n=int(input("Enter digit:"))
if(n<=1):
print(n)
else:
n1=0
n2=1
for x in range(n-1):
feb=n1+n2
n1=n2
n2=feb
print(feb)
#recursive methode
| P111111111/DAA_Lab-Manual | A1_withot_recurssion.py | A1_withot_recurssion.py | py | 182 | python | en | code | 0 | github-code | 6 |
23313489127 | import bpy
class SFX_Socket_Float(bpy.types.NodeSocket):
'''SFX Socket for Float'''
bl_idname = 'SFX_Socket_Float'
bl_label = "Float"
float: bpy.props.FloatProperty(name = "Float",
description = "Float",
default = 0.0)
def draw(self, context, layout, node, text):
if self.is_output:
col = layout.column(align = True)
col1 = col.split(factor = 0.85)
col2 = col1.split(factor = 0.85)
col3 = col2.split(factor = 1)
col1.prop(self, "float", text='')
col2.label(text = text)
else:
col = layout.column(align = True)
col1 = col.split(factor = 0.30)
col2 = col1.split(factor = 0.5)
col3 = col2.split(factor = 1)
col1.label(text = '')
col3.prop(self, "float", text='')
col2.label(text = text)
# Socket color
def draw_color(self, context, node):
return (1.0, 0.4, 0.216, 0.5) | wiredworks/wiredworks_winches | sockets/SFX_Socket_Float.py | SFX_Socket_Float.py | py | 1,073 | python | en | code | 12 | github-code | 6 |
71979063548 | #not sure how to go about this one????
#I can create a loop craeteing a list of coordinates i have been to for both wires
#then find intersections based on those coordinates
#also the main port will be zero zero
import math
intersections = []
#keep in mind the main port is (0,0)
f = open('data.txt')
wireOne = f.readline()
wireTwo = f.readline()
def wireCounter(wire):
myArray = []
coordinates = [0,0]
wireList = wire.split(",")
for i in wireList:
letter = i[0:1]
number = int(i[1:])
temp = []
if letter == 'R':
for i in range(number):
coordinates[0] = coordinates[0] + 1
temptemp = coordinates.copy()
temp.append(temptemp)
elif letter == 'L':
for i in range(number):
coordinates[0] = coordinates[0] - 1
temptemp = coordinates.copy()
temp.append(temptemp)
elif letter == 'U':
for i in range(number):
coordinates[1] = coordinates[1] + 1
temptemp = coordinates.copy()
temp.append(temptemp)
elif letter == 'D':
for i in range(number):
coordinates[1] = coordinates[1] - 1
temptemp = coordinates.copy()
temp.append(temptemp)
myArray = myArray + temp
print(myArray, '============')
return myArray
firstWire = wireCounter(wireOne)
secondWire = wireCounter(wireTwo)
for i in firstWire:
if i in secondWire:
intersections.append(i)
shortestPath = abs(intersections[0][0]) + abs(intersections[0][1])
for i in intersections:
mathy = abs(i[0]) + abs(i[1])
if mathy < shortestPath:
shortestPath = mathy
print("done: ", shortestPath)
| Hector-bit/adventOfCode | year2019/crossedWiresDAY3/parteUno.py | parteUno.py | py | 1,779 | python | en | code | 0 | github-code | 6 |
41939702684 | # translate exercise in python
# translate the file in japanese
# so use ' pip install translate'
from translate import Translator
translator = Translator(to_lang='ja')
try:
with open('test.txt', mode='r') as my_file:
text = my_file.read()
translation = translator.translate(text)
with open('./test-ja.txt', mode='w') as my_file2:
my_file2.write(translation)
except FileNotFoundError as e:
print('check your file silly!')
raise e
| hyraja/python-starter | 09.FILE I-O python/03.exercise_translator.py | 03.exercise_translator.py | py | 481 | python | en | code | 0 | github-code | 6 |
74160534269 | import datetime
import enum
import os
import signal
import subprocess
import sys
import time
import typing
from logging import getLogger
from threading import Thread
import requests
from slugify import slugify
from config import Setting, client_id, client_secret
from util import file_size_mb, get_setting
logger = getLogger(__name__)
class TwitchResponseStatus(enum.Enum):
    """Outcome of querying the Twitch Helix API for a streamer's state."""
    ONLINE = 0
    OFFLINE = 1
    NOT_FOUND = 2
    UNAUTHORIZED = 3
    ERROR = 4
class RecordingDetail(typing.TypedDict):
    """Shape of one entry in the per-streamer lists built by Twitch.get_recordings()."""
    path: str  # file name of the recording inside the streamer's folder
    processed: bool  # True once a cleaned copy exists under processed/
    # get_recordings() also emits this key; it was missing from the declaration.
    size: float  # file size in MB
class Twitch:
    """Records live Twitch streams with streamlink and post-processes the files with ffmpeg."""

    def __init__(self) -> None:
        # twitch stuff
        self.client_id = client_id
        self.client_secret = client_secret
        self.api_url = "https://api.twitch.tv/helix/streams"
        self.token_url = f"https://id.twitch.tv/oauth2/token?client_id={self.client_id}&client_secret={self.client_secret}&grant_type=client_credentials"
        self.access_token = self.fetch_access_token()

        # streams
        self.root_path = os.getcwd()
        # username -> streamlink subprocess currently recording that stream
        self.streams: dict[str, subprocess.Popen[bytes]] = {}
        self.processing_loop = self.init_process_loop()

        # setup directories
        [os.makedirs(path) for path in [os.path.join(self.root_path, "recorded"), os.path.join(self.root_path, "processed")] if not os.path.isdir(path)]

    def fetch_access_token(self) -> str:
        """
        Fetch a fresh OAuth2 access token from Twitch

        NOTE(review): on a request failure this only logs the error and falls
        through, implicitly returning None instead of a token string.
        """
        try:
            token_response = requests.post(self.token_url, timeout=15)
            token_response.raise_for_status()
            token = token_response.json()
            logger.info(f'Connected to Twitch with client_id={self.client_id}')
            return token["access_token"]
        except requests.exceptions.RequestException as e:
            logger.error(e)

    def stream_status(self, username: str):
        """
        Determine if a Twitch user is streaming

        Returns a (TwitchResponseStatus, helix-response-dict-or-None) tuple.
        """
        info = None
        status = TwitchResponseStatus.ERROR
        try:
            headers = {"Client-ID": self.client_id,
                       "Authorization": f"Bearer {self.access_token}"}
            r = requests.get(
                f"{self.api_url}?user_login={username}", headers=headers, timeout=15)
            r.raise_for_status()
            info = r.json()
            if info is None or not info["data"]:
                status = TwitchResponseStatus.OFFLINE
                logger.info(f"Streamer {username} is offline")
            else:
                status = TwitchResponseStatus.ONLINE
                logger.info(f"Streamer {username} is online and streaming")
        except requests.exceptions.RequestException as e:
            if e.response:
                if e.response.status_code == 401:
                    status = TwitchResponseStatus.UNAUTHORIZED
                if e.response.status_code == 404:
                    status = TwitchResponseStatus.NOT_FOUND
        return status, info

    def start_watching(self, username: str):
        """
        Start watching a Twitch stream

        Returns:
            bool indicate if stream started recording or not
            str | None any errors if bool was false
        """
        status, info = self.stream_status(username)
        if status is not TwitchResponseStatus.ONLINE:
            logger.error('{} is not online'.format(username))
            return False, f"{username} is not streaming"
        else:
            recorded_path = os.path.join(self.root_path, "recorded", username)
            if not os.path.isdir(recorded_path):
                os.makedirs(recorded_path)
            channels = info["data"]
            channel = next(iter(channels), None)
            # File name: timestamp plus slugified stream title.
            filename = '_'.join([datetime.datetime.now().strftime(
                "%Y-%m-%d_%H-%M-%S"), slugify(channel.get("title"))]) + '.mp4'
            recorded_filename = os.path.join(recorded_path, filename)
            self.streams[username] = subprocess.Popen(
                ["streamlink", "--twitch-disable-ads", f"twitch.tv/{username}", "best", "-o", recorded_filename])
            # keep checking until file exists or timer runs out
            start = time.time()
            while True:
                if os.path.isfile(recorded_filename) or time.time() - start > 5:
                    break
                else:
                    time.sleep(0.5)
                    continue
            return True, None

    def stop_watching(self, username):
        """
        Stop watching a Twitch stream
        """
        if username not in self.streams.keys():
            logger.error('could not stop, not watching {}'.format(username))
            return False, f"Not watching {username}"
        else:
            # stop recording process
            proc = self.streams[username]
            proc.send_signal(signal.SIGTERM)
            self.streams.pop(username)
            time.sleep(0.5)

            # process recording
            if get_setting(Setting.AutoProcessRecordings) == True:
                recordings = self.get_recordings()
                if username in recordings.keys():
                    # Process the most recent recording for this streamer.
                    video = recordings[username][len(recordings[username]) - 1]
                    self.process_recording(f"{username}/{video['path']}")
            return True, None

    def get_recordings(self):
        """Return a mapping of streamer name -> list of RecordingDetail-style dicts."""
        recordings: dict[str, list[RecordingDetail]] = {}
        recordings_dir = os.path.join(self.root_path, "recorded")

        def is_processed(user, video):
            # A recording counts as processed once a copy exists under processed/.
            return os.path.isfile(os.path.join(self.root_path, "processed", user, video))

        def size_of(user, video):
            # Prefer the processed file's size when it exists.
            if is_processed(user, video):
                return file_size_mb(os.path.join(self.root_path, "processed", user, video))
            else:
                return file_size_mb(os.path.join(self.root_path, "recorded", user, video))

        for user in os.listdir(recordings_dir):
            # ugh this is annoying! i hate mac sometimes
            if user != '.DS_Store':
                recordings[user] = [dict(path=f, processed=is_processed(user, f), size=size_of(user, f)) for f in os.listdir(
                    os.path.join(recordings_dir, user)) if str(f).endswith('.mp4')]
        return recordings

    def process_recording(self, file_path: str):
        """
        Expect file_path to be like tsm_imperialhal/some_video_name.mp4
        """
        source = os.path.join(self.root_path, "recorded", file_path)
        if not os.path.isfile(source):
            raise FileNotFoundError(
                f"source file {source} does not exist, cannot process")
        if not os.path.isdir(os.path.join(self.root_path, "processed", file_path.split('/')[0])):
            os.makedirs(os.path.join(self.root_path,
                        "processed", file_path.split('/')[0]))
        dest = os.path.join(self.root_path, "processed", file_path)
        try:
            # Re-mux the recording, ignoring decode errors from truncated streams.
            subprocess.run(['ffmpeg', '-err_detect', 'ignore_err',
                            '-i', source, '-c', 'copy', dest, '-y'])
        except Exception as e:
            logger.error(e)

    def delete_recording(self, file_path: str):
        """
        Expect file_path to be like tsm_imperialhal/some_video_name.mp4

        Removes both the raw and the processed copy when present.
        """
        recorded = os.path.join(self.root_path, "recorded", file_path)
        if os.path.isfile(recorded):
            os.remove(recorded)
        processed = os.path.join(self.root_path, "processed", file_path)
        if os.path.isfile(processed):
            os.remove(processed)

    def init_process_loop(self):
        """Start the background thread that re-muxes finished recordings (if enabled)."""
        if get_setting(Setting.AutoProcessRecordings) == True:
            def run_loop():
                logger.info('starting background processing loop...')
                while True:
                    for streamer, recordings in self.get_recordings().items():
                        for r in recordings:
                            # Only touch recordings that are finished (no active
                            # stream for that user) and not yet processed.
                            if streamer not in self.streams.keys() and not r["processed"]:
                                logger.info(
                                    f"processing saved stream: [{streamer}] -> {r['path']}")
                                self.process_recording(
                                    f"{streamer}/{r['path']}")
                    time.sleep(10)
            self.processing_loop = Thread(target=run_loop)
            self.processing_loop.start()
| bcla22/twitch-multistream-recorder | twitch.py | twitch.py | py | 8,358 | python | en | code | 0 | github-code | 6 |
37056623803 | import os
import numpy as np
from sklearn import datasets
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.svm import SVC
from sklearn.externals import joblib
from utils import save_answer
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
MODEL_DUMP_PATH = os.path.join(BASE_DIR, 'clf.joblib')
ANSWER_PATH = os.path.join(BASE_DIR, 'anwser.txt')
newsgroups = datasets.fetch_20newsgroups(
subset='all',
categories=['alt.atheism', 'sci.space']
)
def initialize_classifier(X, y) -> GridSearchCV:
    """Grid-search C in 10^-5..10^5 for a linear SVM, fit on (X, y), and dump to disk."""
    grid = {'C': np.power(10.0, np.arange(-5, 6))}
    cv = KFold(n_splits=5, shuffle=True, random_state=241)
    clf = SVC(kernel='linear', random_state=241)
    gs = GridSearchCV(clf, grid, scoring='accuracy', cv=cv)
    gs.fit(X, y)
    # Cache the fitted search so later runs can skip refitting.
    joblib.dump(gs, MODEL_DUMP_PATH)
    return gs
def load_classifier() -> GridSearchCV:
    """Restore a previously fitted grid search from MODEL_DUMP_PATH."""
    return joblib.load(MODEL_DUMP_PATH)
def find_best_C(X, y):
    """Print grid-search params and mean CV accuracies, reusing a cached model when present."""
    if os.path.exists(MODEL_DUMP_PATH):
        clf = load_classifier()
    else:
        clf = initialize_classifier(X, y)
    print(clf.cv_results_.__getitem__('params'))
    print(clf.cv_results_.__getitem__('mean_test_score'))
def run():
    """Fit a linear SVM on TF-IDF features and save the 10 highest-|weight| words."""
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(newsgroups.data)
    y = newsgroups.target
    clf = SVC(kernel='linear', random_state=241)
    clf.fit(X, y)
    names = vectorizer.get_feature_names()
    arr = clf.coef_.toarray()
    # Work with absolute weights so sign does not affect the ranking.
    arr[0] = [abs(v) for v in arr[0]]
    # Columns sorted by descending absolute weight.
    sorted_weights = arr[::, arr[0, :].argsort()[::-1]]
    top_10_weights = sorted_weights[0, :10]
    words = list()
    for w in top_10_weights:
        # NOTE(review): np.where picks the first column matching this weight;
        # two features with identical weights would map to the same word.
        index = np.where(arr == w)
        word_index = index[1][0]
        words.append(names[word_index])
    words.sort()
    print('Most weight words are:', words)
    save_answer(os.path.join(BASE_DIR, 'answer.txt'), ','.join(words))
| Nick-Omen/coursera-yandex-introduce-ml | lessons/article/main.py | main.py | py | 1,903 | python | en | code | 0 | github-code | 6 |
22480925936 | from fastai.vision import *
from fastai.widgets import*
import numpy as np
classes = ['ac','as','cb','cc','ci','cs','cuCon','cu','ns','sc','st']
# %%
path = Path('images/')
#for name in classes:
# folder = name
# file = name + '.csv'
# dest = path/folder
# dest.mkdir(parents = True, exist_ok = True)
# download_images(path/file, dest, max_pics = 1000)
# %%
for c in classes:
print(c)
verify_images(path/c, delete = True, max_size = 500)
# %%
np.random.seed()
data = ImageDataBunch.from_folder(path, train = ".", valid_pct = 0.2, ds_tfms = get_transforms(), size = 224, num_workers = 4).normalize(imagenet_stats)
# %%
data.classes
data.show_batch(rows = 3, figsize = (7, 8))
data.classes, data.c, len(data.train_ds), len(data.valid_ds)
# %%
learn = cnn_learner(data, models.resnet34, metrics = error_rate)
learn.fit_one_cycle(50)
learn.save('stage-1')
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
# %%
learn.fit_one_cycle(50, max_lr = slice(1e-6, 1e-4))
learn.save('stage-2')
# %%
learn.load('stage-2')
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
# %% Testing on new images
learn.export()
defaults.device = torch.device('cpu')
# %%
img = open_image('test/test5.jpg')
learn = load_learner(path)
pred_class,pred_idx,outputs = learn.predict(img)
pred_class.obj | DrDJIng/CloudIdentifier | classifyClouds.py | classifyClouds.py | py | 1,353 | python | en | code | 0 | github-code | 6 |
30066696464 | import logging
import os
from PIL import Image
from PIL.ExifTags import TAGS
class Utils:
@staticmethod
def extract_exif_data(image: Image) -> {}:
map_tag_dict = {}
exif_data = image.getexif()
for tag_id in exif_data:
tag = TAGS.get(tag_id, tag_id)
data = exif_data.get(tag_id)
map_tag_dict[tag] = data
return map_tag_dict
@staticmethod
def gather_images_from_path(path: str) -> []:
images = []
valid_images = [".jpg", ".gif", ".png", ".tga"]
for file in os.listdir(path):
extension = os.path.splitext(file)[1]
if extension.lower() not in valid_images:
continue
images.append(os.path.join(path, file))
return images
class CustomFormatter(logging.Formatter):
grey = "\x1b[38;21m"
yellow = "\x1b[33;21m"
red = "\x1b[31;21m"
blue = "\x1b[34m"
bold_red = "\x1b[31;1m"
reset = "\x1b[0m"
format = "[%(filename)s:%(lineno)d] %(levelname)s: %(message)s "
FORMATS = {
logging.DEBUG: blue + format + reset,
logging.INFO: grey + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def get_logging_handler():
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(CustomFormatter())
return stream_handler
| greencashew/image-captioner | imagecaptioner/utils.py | utils.py | py | 1,630 | python | en | code | 0 | github-code | 6 |
10633670979 | def isPrime(n):
if n == 1:
return False
for i in range(2, int(n**(1/2))+1):
if n%i == 0:
return False
return True
def dec2n(n, k):
result = ""
while n>0:
n, i = divmod(n, k)
result += str(i)
return result[::-1]
def solution(n, k):
arr = dec2n(n, k).split('0')
arr = [x for x in arr if len(x) != 0]
arr = list(map(int, arr))
# print(arr)
answer = 0
for i in arr:
if isPrime(i):
answer += 1
return answer | eastdh/CT-algorithm | programmers_school/lv2/23.01.15 - k진수에서 소수 개수 구하기, 압축/k진수에서 소수 개수 구하기.py | k진수에서 소수 개수 구하기.py | py | 525 | python | en | code | 0 | github-code | 6 |
35665366566 | import UploadgramPyAPI
from core import logger
class Uploadgram:
@staticmethod
def upload(path_to_file):
try:
up_file = UploadgramPyAPI.NewFile(path_to_file)
response: dict = up_file.upload()
logger.info(response)
return response
except UploadgramPyAPI.UploadgramConnectionError as e:
logger.info(e.args)
return e.args
@staticmethod
def delete(uploaded_file_data: dict[str, str, str]):
file_id = str(uploaded_file_data.get('url')).split('/')[-1]
delete_key = uploaded_file_data.get('delete_key')
try:
up_file = UploadgramPyAPI.File(file_id, delete_key)
response: dict = up_file.delete()
return response
except UploadgramPyAPI.UploadgramConnectionError as e:
logger.info(e.args)
return e.args
| YuryVetrov/Media-file-downloader-bot | cloud_storage/Uploadgram.py | Uploadgram.py | py | 892 | python | en | code | 0 | github-code | 6 |
11814433027 | from get_url import GetUrl
import requests
from bs4 import BeautifulSoup
class GetText():
    """Fetches the weather-overview paragraph for a Japanese region."""

    def __init__(self, area):
        # Resolve the forecast-page URL for the requested area (None when unknown).
        self.got_url = GetUrl()
        self.url = self.got_url.get_url(area)

    def get_url(self):
        # Accessor for the URL resolved in __init__.
        url = self.url
        return url

    def get_text(self):
        # User-facing error text (Japanese) listing the valid region names.
        err_text = '以下の地域から選んでください\n北海道\n東北\n関東\n信越・北陸\n東海\n近畿\n中国\n四国\n九州\n沖縄\n'
        url = self.get_url()
        if url is None:
            return err_text
        # NOTE(review): when the URL is valid this falls through and returns
        # None — the success branch looks unfinished (see the commented else).
        # else:

    def get_info(self, url):
        # Scrape the overview paragraph (#main > div > p.gaikyo) from the page.
        html = requests.get(url)
        soup = BeautifulSoup(html.text, "html.parser")
        text_list = soup.select("#main > div > p")
        text_sorce = str(text_list[0])
        text = text_sorce.strip('<p class="gaikyo">').strip('</p>').replace('<br/>\n', '')
        return text
| yutatakaba/weather_apr | get_text.py | get_text.py | py | 761 | python | en | code | 0 | github-code | 6 |
1776827146 | from pandas.tseries.frequencies import to_offset
import numpy as np
import pandas as pd
def get_numeric_frequency(freq):
    """
    Return the frequency of a time series in numeric format.

    The function returns the frequency of a time series in numeric format. This is useful when working with
    forecasting libraries that require the frequency to be a number instead of a string.

    If frequency has multiple seasonalities, for example Daily and Hourly, returns a list with all periods.

    Args:
        freq (str): A string specifying the frequency of the time series.
            Valid values are:
            'Y' (yearly), 'A' (annually), 'Q' (quarterly), 'M' (monthly), 'W' (weekly), 'D' (daily), or 'H' (hourly).
            A multiple prefix such as '2M' divides every period accordingly.

    Returns:
        int: The frequency of the time series in numeric format if frequency has only one seasonality.
        list: A list with all periods if frequency has multiple seasonalities.

    References:
        - https://otexts.com/fpp3/tsibbles.html

    Example:
        >>> get_numeric_frequency('M')
        12
        >>> get_numeric_frequency('W')
        52
        >>> get_numeric_frequency('D')
        [7, 30, 364]
    """
    # Fix: the previous docstring examples ('M' -> 1, 'W' -> 13, 'D' -> 365)
    # did not match what the function actually returns (12, 52, [7, 30, 364]).
    # Periods per seasonal cycle; daily and hourly carry several candidates.
    keys = ["Y", "A", "Q", "M", "W", "D", "H"]
    vals = [1, 1, 4, 12, 52, [7, 30, 364], [24, 168, 720, 8760]]

    freq_dictionary = dict(zip(keys, vals))

    # Getting the period and the frequency
    period = to_offset(freq).n

    # Taking the first letter of the frequency in case we have MS for month start etc
    freq = to_offset(freq).name[0]

    # Initializing the dictionary
    numeric_freq = freq_dictionary[freq]

    # Dividing with the period:
    # For example for a 2M frequency, instead of 12 months we have 6 periods.
    numeric_freq = (
        int(freq_dictionary[freq] / period)
        if isinstance(numeric_freq, int)
        else [int(i / period) for i in numeric_freq]
    )

    return numeric_freq
# A functions that extends input features to the extended format.
def add_missing_values(input_features, input_transformations=None):
    """
    Fill the features and transformations dictionaries with default values.

    Any key the caller did not provide is added in place with its default
    (None/False/empty), so downstream code can rely on every key existing.

    Args:
        input_features (dict):
            Features configuration supplied by the caller.
        input_transformations (dict):
            Transformations configuration supplied by the caller
            (a fresh dict is used when None).

    Returns:
        input_features: (dict):
            The features dict, completed in place.
        input_transformations: (dict):
            The transformations dict, completed in place.
    """
    if input_transformations is None:
        input_transformations = {}

    default_features = {
        "lags": None,
        "rolling_features": None,
        "rolling_lags": None,
        "seasonal_features": None,
        "fourier_terms": None,
        "positional_features": False,
        "time_series_id": False,
        "level_information": None,
    }

    default_transformations = {
        "stationarity": False,
        "logarithm": False,
        "normalize": None,
        "custom_no_reverse_1": None,
        "custom_no_reverse_2": None,
        "custom_no_reverse_3": None,
        "custom_reverse_1": [None, None],
        "custom_reverse_2": [None, None],
        "custom_reverse_3": [None, None],
    }

    # setdefault leaves caller-provided values untouched and fills in the rest.
    for name, default in default_features.items():
        input_features.setdefault(name, default)
    for name, default in default_transformations.items():
        input_transformations.setdefault(name, default)

    return input_features, input_transformations
def augment(ts, window_size):
    """
    Augment a time series by slicing it into windows of `window_size` days.

    A series shorter than the window is left-padded with NaN so that one
    full-width window can still be produced.

    Args:
        ts (np.array):
            time series data
        window_size (int):
            size of the windows in days

    Returns:
        np.array:
            2-D array of windows (augmented time series data)
    """
    n_obs = len(ts)
    if n_obs >= window_size:
        # Long enough: delegate to the sliding-window helper.
        return window(ts, window_size)
    # Too short: left-pad with NaN and return a single 1 x window_size row.
    pad_width = window_size - n_obs
    padded = np.pad(ts, pad_width=(pad_width, 0), constant_values=np.nan)
    return padded.reshape(1, -1)
def window(a, window_size):
    """
    Create all contiguous windows of size `window_size` from the 1-D array `a`.

    Windows that consist entirely of zeros are discarded.

    Args:
        a (np.array):
            1D array
        window_size (int):
            size of the windows

    Returns:
        view (np.array):
            2D array of shape (n_windows, window_size)
    """
    # Convert window size to int
    w = int(window_size)
    # sliding_window_view (NumPy >= 1.20) is the documented, bounds-safe
    # replacement for the previous manual as_strided construction; it also
    # removes the no-op `[0::1]` step slice the old code carried.
    view = np.lib.stride_tricks.sliding_window_view(a, w)
    # Discard windows with all zeros (no usable signal); boolean indexing
    # returns a fresh writable array, matching the old behaviour.
    view = view[~np.all(view == 0, axis=1)]
    return view
def create_lags(df, lags):
    """
    Build the lagged representation for every time series (row) of `df`.

    Each row is windowed with `augment` and the resulting window array is
    stored in a single-column dataframe.

    Args:
        df (pd.DataFrame):
            A dataframe containing the time series data (one series per row).
        lags (list):
            A list containing the lags to be used.

    Returns:
        pd.DataFrame:
            A dataframe with one column, "lag_windows", holding the windows.
    """
    windows = df.apply(lambda row: augment(row.values, lags).squeeze(), axis=1)
    return windows.to_frame(name="lag_windows")
def construct_single_rolling_feature(df, rolling_aggregation, original_lags, rolling_windows, rolling_lags=1):
    """Build lagged rolling-aggregation features for every series (row) of `df`.

    For each window size in `rolling_windows`, applies `rolling_aggregation`
    along the rows, slices the result into lag windows, and keeps the last
    `rolling_lags` values per sub-window, rounded to 3 decimals.

    NOTE(review): `df.rolling(..., axis=1)` is deprecated in recent pandas —
    confirm the pinned pandas version still supports it.
    """
    # Check if rolling_window is integer and convert to list
    rolling_windows = [rolling_windows] if isinstance(rolling_windows, int) else rolling_windows
    # In case rolling_lags has a single value, repeat it for the number of rolling windows
    rolling_lags = np.repeat(rolling_lags, len(rolling_windows)) if isinstance(rolling_lags, int) else rolling_lags
    # Initialize a dataframe to collect all rolling features
    rolling_df = pd.DataFrame()
    # Iterate over every (rolling window, lag count) pair
    # (the loop variable shadows the module-level `window` function, but that
    # function is not used here)
    for window, temp_lag in zip(rolling_windows, rolling_lags):
        # Create the name of the rolling feature column
        name = f"rolling_{rolling_aggregation}_{window}"
        # Construct the rolling aggregation for the time series
        temp_df = df.rolling(window, axis=1).agg(rolling_aggregation)
        # Slice into lag windows, using the same lags as the original series
        temp_df = create_lags(temp_df, original_lags)
        # Keep only the last `temp_lag` values of each sub-window, rounded
        # temp_df['lag_windows'] = [subwindows[:, -temp_lag:] for subwindows in temp_df['lag_windows'].values]
        temp_df["lag_windows"] = [subwindows[:, -temp_lag:].round(3) for subwindows in temp_df["lag_windows"].values]
        # Rename the generic column to the feature-specific name
        temp_df = temp_df.rename(columns={"lag_windows": name})
        # Append this feature to the combined frame
        rolling_df = pd.concat([rolling_df, temp_df], axis=1)
    # Return the combined rolling-feature frame
    return rolling_df
def split_lag_targets(df, test_size=1):
    """
    Splits the lagged dataframe into targets and lagged values.

    Adds two columns: "targets" (the last `test_size` values of every
    window, as column vectors) and "lagged_values" (everything before them).

    Args:
        df (pd.DataFrame):
            A dataframe containing the lagged time series data.
        test_size (int):
            The number of windows to be used for testing.
    Returns:
        df (pd.DataFrame):
            A dataframe containing the lagged time series data with the targets and lagged values.
    """
    # dimension of targets: (windows, 1)
    # dimension of lags: (windows, lags)
    # Fix an issue for when we have just a single lag: ensure every entry is 2-D
    if len(df["lag_windows"].values[0].shape) == 1:
        # reshape all the lag windows to (1, n_lags)
        df["lag_windows"] = df["lag_windows"].apply(lambda x: x.reshape(1, -1))
    # targets are the last value for each window
    if test_size == 1:
        # df["targets"] = [subwindows[:, -1].reshape(-1, 1) for subwindows in df["lag_windows"].values]
        # The 1-D fallback below guards rows that are still flat arrays.
        df["targets"] = [
            subwindows[:, -1].reshape(-1, 1)
            if len(subwindows.shape) == 2
            else subwindows.reshape(1, -1)[:, -1].reshape(-1, 1)
            for subwindows in df["lag_windows"].values
        ]
    else:
        # df["targets"] = [subwindows[:, -test_size:] for subwindows in df["lag_windows"].values]
        df["targets"] = [
            subwindows[:, -test_size:].reshape(-1, 1)
            if len(subwindows.shape) == 2
            else subwindows.reshape(1, -1)[:, -test_size:].reshape(-1, 1)
            for subwindows in df["lag_windows"].values
        ]
    # lagged values are all values before the target slice
    # df["lagged_values"] = [subwindows[:, :-test_size] for subwindows in df["lag_windows"].values]
    df["lagged_values"] = [
        subwindows[:, :-test_size]
        if len(subwindows.shape) == 2
        else subwindows.reshape(1, -1)[:, :-test_size].reshape(-1, 1)
        for subwindows in df["lag_windows"].values
    ]
    return df
def standard_scaler_custom(df, mode="train"):
    """
    A custom standard-scaler normalization method.

    Normalizes the lagged windows (and any rolling-feature columns) per
    sub-window, storing the means/stds so predictions can be un-scaled later.

    Args:
        df (pd.DataFrame):
            A dataframe containing the lagged time series data.
        mode (str):
            A string indicating the mode of the normalization.
            If mode is 'train' then the normalization is performed on the lagged windows and the targets.
            If mode is 'test' then the normalization is performed only on the lagged windows.
    Returns:
        df (pd.DataFrame):
            A dataframe containing the lagged time series data with the normalized lagged windows and targets.
    """
    # Take the mean and the std of each subwindow (stored as column vectors)
    df["mus"] = [
        np.array([np.mean(subwindow) for subwindow in windows]).reshape(-1, 1).tolist()
        for windows in df["lagged_values"].values
    ]
    df["stds"] = [
        np.array([np.std(subwindow) for subwindow in windows]).reshape(-1, 1).tolist()
        for windows in df["lagged_values"].values
    ]
    # Normalize the lagged values by subtracting the mean and dividing by the std of every window.
    # If std is zero or nan, skip the division (center only).
    df["normalized_lagged_values"] = [
        np.array(
            [(subwindow - mu) / std if std[0] > 0 else subwindow - mu for subwindow, mu, std in zip(windows, mus, stds)]
        ).tolist()
        for windows, mus, stds in zip(df["lagged_values"].values, df["mus"].values, df["stds"].values)
    ]
    # If we have rolling-feature columns, normalize them with the same mus/stds
    rolling_columns = [col for col in df.columns if "rolling" in col]
    if len(rolling_columns) > 0:
        # Normalize these as well
        for rolling_col in rolling_columns:
            new_rolling_col = "normalized_" + rolling_col
            df[new_rolling_col] = [
                np.array(
                    [
                        (subwindow - mu) / std if std[0] > 0 else subwindow - mu
                        for subwindow, mu, std in zip(windows, mus, stds)
                    ]
                ).tolist()
                for windows, mus, stds in zip(df[rolling_col].values, df["mus"].values, df["stds"].values)
            ]
            # drop the old (un-normalized) rolling column
            df = df.drop(columns=rolling_col)
    # Normalize the targets in the same way (training mode only)
    if mode == "train":
        df["normalized_targets"] = [
            np.array(
                [(target - mu) / std if std[0] > 0 else target - mu for target, mu, std in zip(targets, mus, stds)]
            )
            .reshape(-1, 1)
            .tolist()
            for targets, mus, stds in zip(df["targets"].values, df["mus"].values, df["stds"].values)
        ]
    else:
        # Squeeze the mus and stds columns down to scalars for inverse-scaling
        df["mus"] = df["mus"].apply(lambda x: x[0][0])
        df["stds"] = df["stds"].apply(lambda x: x[0][0])
    return df
| yForecasting/DeepRetail | DeepRetail/forecasting/utils.py | utils.py | py | 12,762 | python | en | code | 0 | github-code | 6 |
13412454502 | # 自己设计的CNN模型
import torch.nn as nn
import torch.nn.functional as F
class ConvolutionalNetwork(nn.Module):
    """Small CNN for 6-way classification of RGB images.

    Expects 3x224x224 inputs: two conv+relu+maxpool stages reduce them to
    16 feature maps of 54x54, followed by three fully connected layers and
    a log-softmax output.
    """
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 3, 1)   # 3 RGB channels -> 6 feature maps
        self.conv2 = nn.Conv2d(6, 16, 3, 1)  # 6 -> 16 feature maps
        self.fc1 = nn.Linear(54 * 54 * 16, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 6)          # 6 output classes

    def forward(self, X):
        """Run one forward pass; returns per-class log-probabilities."""
        X = F.max_pool2d(F.relu(self.conv1(X)), 2, 2)
        X = F.max_pool2d(F.relu(self.conv2(X)), 2, 2)
        X = X.view(-1, 54 * 54 * 16)         # flatten for the dense layers
        X = F.relu(self.fc1(X))
        X = F.relu(self.fc2(X))
        return F.log_softmax(self.fc3(X), dim=1)
26848770395 | import sys
from PySide6.QtWidgets import QApplication, QPushButton
from PySide6.QtCore import Slot
# This example demonstrates Qt's Signals and Slots mechanism.
# @Slot() would mark this function as a slot explicitly; the signal
# connection also works without the decorator.
def say_hello():
    """Slot invoked when the button is clicked: prints a greeting."""
    greeting = "Button, clicked, hello!"
    print(greeting)
# Create the Qt application and a single push button.
app = QApplication([])
# The QPushButton argument is the text shown on the button.
button = QPushButton("点我!")
# Wire the button's clicked signal to the say_hello slot.
button.clicked.connect(say_hello)
button.show()
# Start the Qt event loop; blocks until the window is closed.
app.exec()
| RamboKingder/PySide6 | button-2.py | button-2.py | py | 524 | python | zh | code | 2 | github-code | 6 |
16194751087 | import urllib
import json
import pandas as pd
from pandas.io.json import json_normalize
from rdflib import URIRef, BNode, Literal, Graph
from rdflib import Namespace
from rdflib.namespace import RDF, FOAF, RDFS, XSD
from datetime import datetime
#api key = 57ab2bbab8dda80e00969c4ea12d6debcaddd956 for jsdeux api
# Build an RDF graph (Turtle) of JCDecaux bike-sharing stations ------------
# Namespaces we will use
ex = Namespace('http://www.semweb.com/2001-schema#')
mobVoc = Namespace('http://schema.mobivoc.org/')
geoNames = Namespace('http://www.geonames.org/ontology#')
addr = Namespace('http://schemas.tails.com/2005#adresss/schema#')
geo = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
vcard = Namespace('http://www.w3.org/2006/vcard/ns#')
stPty = Namespace('http://www.semweb.org/2006/BycicleStation/property#')
# Create the default graph that will collect every station
g = Graph()
cities = ['valence', 'marseille', 'lyon', 'nantes', 'toulouse']
for city in cities:
    # Request the station list for this contract (city) from the API
    url = urllib.request.urlopen('https://api.jcdecaux.com/vls/v1/stations?contract='+str(city)+'&apiKey=57ab2bbab8dda80e00969c4ea12d6debcaddd956')
    # Decode the JSON payload
    data = json.loads(url.read().decode(url.info().get_param('charset') or 'utf-8'))
    # Parse the stations and generate RDF triples
    # NOTE(review): the inner loop rebinds `city` to a Literal, shadowing the
    # loop variable; it is re-assigned on the next iteration so the URL is
    # unaffected, but the shadowing is confusing — consider renaming.
    for i in range(len(data)):
        URIReff = URIRef('http://www.semweb.com/URIRef/'+data[i]['contract_name']+'/'+str(data[i]['number']))
        name = Literal(data[i]['name'], datatype=XSD.string)
        city = Literal(data[i]['contract_name'], lang='fr')
        address = Literal(data[i]['address'], lang="fr")
        lat = Literal(data[i]['position']['lat'], datatype = XSD.decimal)
        lon = Literal(data[i]['position']['lng'], datatype = XSD.decimal)
        avaibility = BNode()
        avail_bikes = Literal(data[i]['available_bikes'], datatype = XSD.integer)
        total_bikes = Literal(data[i]['bike_stands'], datatype = XSD.integer)
        banking = Literal(data[i]['banking'], datatype = XSD.boolean)
        date = Literal("12-09-2019T13:05", datatype = XSD.date)
        status = Literal(data[i]['status'], datatype = XSD.string)
        last_update = Literal(datetime.fromtimestamp(data[i]['last_update']/1000).strftime('%Y-%m-%dT%I:%M:%S'), datatype = XSD.dateTime)
        # Bind readable prefixes for serialization (no-op after the first pass)
        g.namespace_manager.bind('geo', geo, override=False)
        g.namespace_manager.bind('vcard', vcard, override=False)
        g.namespace_manager.bind('geoNames', geoNames, override=False)
        g.namespace_manager.bind('addr', addr, override=False)
        g.namespace_manager.bind('mobVoc', mobVoc, override=False)
        g.namespace_manager.bind('ex', ex, override=False)
        g.namespace_manager.bind('stPty', stPty, override=False)
        # Add the static station data to the graph
        g.add((URIReff, RDF.type, mobVoc.BikeParkingStation))
        g.add((URIReff, RDFS.label, name))
        g.add((URIReff, addr.streetAdress, address))
        g.add((URIReff, vcard.inCity, city))
        g.add((URIReff, geo.lat, lat))
        g.add((URIReff, geo.lon, lon))
        # Add the dynamic availability data via a blank node
        g.add((URIReff, ex.hasAvaibility, avaibility))
        g.add((avaibility, RDF.type, mobVoc.Avaibility))
        g.add((avaibility, stPty.avBicyce, avail_bikes))
        g.add((avaibility, stPty.totBicycle, total_bikes))
        g.add((avaibility, stPty.paymentCard, banking))
        g.add((avaibility, stPty.status, status))
        g.add((avaibility, stPty.lastUpdate, last_update))
g.serialize(destination='byke_data.ttl',format="turtle")
print('byke_data.ttl generated')
# ----------------- Allowed "terrasse" (terrace) data for Toulouse.
# The source data is sparse; this section still needs improvement.
with open('terrasses-autorisees-ville-de-toulouse.geojson') as f:
    data = json.load(f)
ex = Namespace('http://www.semweb.com/2001-schema#')
tur = Namespace('http://schema.tur.org/')
addr = Namespace('http://schemas.tails.com/2005#adresss/schema#')
geo = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
vcard = Namespace('http://www.w3.org/2006/vcard/ns#')
g = Graph()
from urllib.parse import quote
for i in range(len(data[0]['features'])):
    try:
        # NOTE(review): the literal '<'/'>' characters are percent-encoded into
        # the URI by quote(); that is unusual for a URIRef — confirm intended.
        URIReff = URIRef(quote('<http://semweb.com/get/'+data[0]['features'][i]['properties']['code_int']['id']+'>'))
    except KeyError:
        continue
    city = Literal(data[0]['features'][i]['properties']['commune'], lang='fr')
    domain_activite = (data[0]['features'][i]['properties']['domaine_activite'])
    address = Literal(data[0]['features'][i]['properties']['nom_voie'], lang="fr")
    try:
        nature_activite = Literal(data[0]['features'][i]['properties']['nature_activite'], lang='fr')
    except KeyError:
        continue
    lat = Literal(data[0]['features'][i]['properties']['x'], datatype = XSD.decimal)
    lon = Literal(data[0]['features'][i]['properties']['y'], datatype = XSD.decimal)
    # Bind readable prefixes for serialization
    g.namespace_manager.bind('geo', geo, override=False)
    g.namespace_manager.bind('vcard', vcard, override=False)
    g.namespace_manager.bind('addr', addr, override=False)
    g.namespace_manager.bind('tur', tur, override=False)
    g.namespace_manager.bind('ex', ex, override=False)
    # Add the terrace/restaurant triples to the graph
    g.add((URIReff, RDF.type, tur.Restraunte))
    g.add((URIReff, addr.streetAdress, address))
    g.add((URIReff, vcard.inCity, city))
    g.add((URIReff, geo.lat, lat))
    g.add((URIReff, geo.lon, lon))
    g.add((URIReff, RDFS.comment, nature_activite))
# Serialize the terrace data to a Turtle file
g.serialize(destination='terasse.ttl',format="turtle")
| zhantileuov/rdf_project | generate.py | generate.py | py | 5,906 | python | en | code | 0 | github-code | 6 |
11463544163 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 11 12:08:53 2022
@author: sampasmann
"""
import sys
sys.path.append("../../")
import os
from src.init_files.mg_init import MultiGroupInit
import numpy as np
import matplotlib.pyplot as plt
# Load the multigroup HDPE problem definitions for 12, 70 and 618 groups.
Nx = 1
data12 = MultiGroupInit(numGroups=12, Nx=Nx)
data70 = MultiGroupInit(numGroups=70, Nx=Nx)
data618 = MultiGroupInit(numGroups=618, Nx=Nx)
# Resolve the HDPE material data directory relative to this script.
script_dir = os.path.dirname(__file__)
rel_path = "../../src/materials/HDPE/"
abs_file_path = os.path.join(script_dir, rel_path)
# Energy-group centers and edges for each group structure (MeV).
centers12 = np.genfromtxt(abs_file_path+"group_centers_12G_HDPE.csv", delimiter=",")
centers70 = np.genfromtxt(abs_file_path+"group_centers_70G_HDPE.csv", delimiter=",")
centers618 = np.genfromtxt(abs_file_path+"group_centers_618G_HDPE.csv", delimiter=",")
edges12 = np.genfromtxt(abs_file_path+"group_edges_12G_HDPE.csv", delimiter=",")
edges70 = np.genfromtxt(abs_file_path+"group_edges_70G_HDPE.csv", delimiter=",")
edges618 = np.genfromtxt(abs_file_path+"group_edges_618G_HDPE.csv", delimiter=",")
# Energy bin widths per group.
dE12 = abs(edges12[1:] - edges12[:-1])
dE70 = abs(edges70[1:] - edges70[:-1])
dE618 = abs(edges618[1:] - edges618[:-1])
# Flux per unit energy, normalized by the total flux of each structure.
y12 = (data12.true_flux[0,:]/dE12)
y12 /= np.sum(data12.true_flux[0,:])
y70 = (data70.true_flux[0,:]/dE70)
y70 /= np.sum(data70.true_flux[0,:])
y618 = (data618.true_flux[0,:]/dE618)
y618 /= np.sum(data618.true_flux[0,:])
# Clamp the lowest group center to the finest structure's minimum so the
# log-scale x axes line up.
centers12[-1] = np.min(centers618)
centers70[-1] = np.min(centers618)
# Step plot of phi(E)/E for the three group structures (log-log axes).
plt.figure(dpi=300)
plt.suptitle('HDPE Group Centers Divided by Energy Bin Width')
size = 3
where = 'mid'
drawstyle='steps-mid'
plt.step(centers12, y12, '-o', where=where,drawstyle=drawstyle, markersize=size,label='G=12')
plt.step(centers70, y70, where=where, drawstyle=drawstyle, markersize=size,label='G=70')
plt.step(centers618, y618,where=where, drawstyle=drawstyle, markersize=size,label='G=618')
plt.legend()
plt.xscale('log')
plt.yscale('log')
plt.xlabel('E (MeV)')
plt.ylabel(r'$\phi(E)/E$')
| spasmann/iQMC | post_process/plotting/mg_solutions.py | mg_solutions.py | py | 1,957 | python | en | code | 2 | github-code | 6 |
35835057096 | from re import compile
from utils import BasicError
# Class for Tokens
class Token():
    """A single lexical token: a type name, the matched text, and its span."""
    def __init__(self, type_name, value, pos_start, pos_end):
        self.type = type_name        # token category name
        self.value = value           # matched source text (possibly modified later)
        self.pos_start = pos_start   # index of the first matched character
        self.pos_end = pos_end       # index of the last matched character

    def __repr__(self):
        return "{}:{}".format(self.type, self.value)
# Class for Lexer
class Lexer():
    """Regex-driven tokenizer.

    Token types are registered with `register` and tried in registration
    order; the `skip` pattern (whitespace by default) is ignored between
    tokens. Unknown input raises BasicError.
    """
    # Initializes lexer
    # Skip refers to any stream of chars to be ignored by lexer
    def __init__(self, token_types=None, skip=r"\s+"):
        # BUG FIX: the previous default `token_types=list()` was a mutable
        # default argument evaluated once, so every Lexer created without an
        # explicit list shared the SAME list and `register` leaked token
        # types across instances. Build a fresh list per instance instead.
        self.token_types = token_types if token_types is not None else []
        self.skip = compile(skip)

    # Goes to next token
    def next_token(self):
        """Return the next Token, or None at end of input.

        Raises:
            BasicError: when no registered token type matches.
        """
        # Ignore whitespace (or whatever the skip pattern matches)
        skip_exist = self.skip.match(self.program, self.position)
        if skip_exist:
            self.position = skip_exist.end()
        # Checks if we are at the very end of the program to be lexed
        if self.position >= len(self.program):
            return None
        # Iterates through token_types to check if any token is found
        for tkn_t in self.token_types:
            result = tkn_t["regx"].match(self.program, self.position)
            if result:
                # Create a Token object holding the matched text and its span
                tkn = Token(tkn_t["name"], result.group(0),
                            self.position,
                            self.position + len(result.group(0)) - 1)
                # Apply the user-provided modifier function, if any
                if tkn_t["mod"]:
                    tkn.value = tkn_t["mod"](tkn.value)
                self.position = result.end()
                return tkn
        raise BasicError(f"Lexer Error: Unknown Token at {self.position + 1}",
                         self.position, self.position)

    # Return list of Tokens
    def tokenize(self, program):
        """Tokenize `program` from the start and return the list of Tokens."""
        self.program = program
        self.position = 0
        list_token = []
        while True:
            # Walk through the string, generating tokens until end of input
            tkn = self.next_token()
            if tkn is None:
                break
            list_token.append(tkn)
        return list_token

    # Register a token type for this lexer
    def register(self, name, regx, modifier=None):
        """Register a token type: a name, a regex, and an optional value modifier."""
        self.token_types.append({"name": name, "regx": compile(regx),
                                 "mod": modifier})
| shaleen111/pyqb | lexer.py | lexer.py | py | 2,416 | python | en | code | 0 | github-code | 6 |
42283221124 | class FpMethodResult():
    clone_parts_1: list  # borrowed (cloned) fragments found in the first file
    clone_parts_2: list  # borrowed (cloned) fragments found in the second file
    clone_pct: float  # share of borrowed code, as a percentage

    def __init__(self, cl_pt1, cl_pt2, clp_pct) -> None:
        """Store the clone fragments of both files and the overall clone percentage."""
        self.clone_parts_1 = cl_pt1
        self.clone_parts_2 = cl_pt2
        self.clone_pct = clp_pct

    def print(self) -> None:
        """Dump both fragment lists and the clone percentage to stdout, one per line."""
        print(self.clone_parts_1)
        print(self.clone_parts_2)
        print(self.clone_pct)
| Urdeney/Diploma | clonus/methods/fp_method.py | fp_method.py | py | 596 | python | ru | code | 0 | github-code | 6 |
36667789639 | import numpy as np
import scipy.stats as stats
# 1 -----------------------
# Task 1: given borrowers' salaries (zp) and their behavioural credit
# scores (ks), compute the covariance by hand and with numpy's cov();
# both must match. Then compute the Pearson correlation coefficient from
# the covariance and standard deviations, and verify it with numpy/pandas.
# BUG FIX: pandas was used below (pd.DataFrame) but never imported,
# which crashed the script with a NameError at the last step.
import pandas as pd

print("Task 1")
zp = [35, 45, 190, 200, 40, 70, 54, 150, 120, 110]
ks = [401, 574, 874, 919, 459, 739, 653, 902, 746, 832]
# Covariance
zp_ks = []
for i in range(len(zp)):
    zp_ks.append(zp[i]*ks[i])
print(zp_ks)
M_zp_ks = np.mean(zp_ks)  # 81141.7
M_zp = np.mean(zp)  # 101.4
M_ks = np.mean(ks)  # 709.9
print(M_zp_ks, M_zp, M_ks)
# cov(X, Y) = E[XY] - E[X] * E[Y]
cov = M_zp_ks - M_zp * M_ks  # 9157.84
print(cov)
print(np.cov(zp,ks))  # sample covariance (ddof=1): 10175.3778
print(np.cov(zp,ks,ddof=0))  # population covariance: 9157.84, matches the manual value
# Correlation
sigma_zp = np.std(zp,ddof=0)
sigma_ks = np.std(ks,ddof=0)
print( sigma_zp, sigma_ks)
cov = np.cov(zp,ks,ddof=0)
# r = cov / (sigma_x * sigma_y); the off-diagonal entries are Pearson's r
r = cov/(sigma_ks*sigma_zp)  # 0.8875
print(r)
print(np.corrcoef(zp,ks))  # 0.8875
df = pd.DataFrame({'zp':zp,'ks':ks})
print(df['zp'].corr(df['ks']))  # 0.8875
21610135351 | import requests
import re
import json
from nonebot import on_command, CommandSession
@on_command('lol新闻', aliases=('lol新闻'))
async def weather(session: CommandSession):
    """Scrape the LoL news page and send the top five 'title link' lines to the chat.

    NOTE(review): the handler is named `weather`, apparently copied from a
    weather plugin; it actually fetches game news.
    """
    url = "http://l.zhangyoubao.com/news/"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.6) Gecko/20040206 Firefox/0.8'
    }
    # NOTE(review): 'reponse' is a typo for 'response' (kept as-is in this doc pass)
    reponse = requests.get(url, headers=headers)
    reponse.encoding = "utf-8"
    html = reponse.text
    if reponse.status_code == 200:
        # Capture (href, title) from every headline anchor on the page
        pattern = re.compile('<h2><a class="omit" target="_blank" href="(.*)" title=".*">(.*)</a></h2>')
        #html = reponse.text
        items = re.findall(pattern, html)
        #print(items)
        LMG = [];
        for item in items:
            Lmg = item[1].strip()   # headline title
            Lmg1 = item[0]          # headline URL
            Lmg2 = Lmg + " " + Lmg1
            LMG.append(Lmg2)
        Lmg4 = LMG[0:5]  # keep only the first five headlines
        Lmg3 = '\n'.join(Lmg4)
        #print(Lmg3)
        await session.send(Lmg3)
8662253484 | import logging
import sys
import tarfile
import tempfile
from urllib.request import urlopen
from zipfile import ZipFile
from pathlib import Path
# Pinned source archives for every third-party dependency (exact commits/tags).
TF = "https://github.com/tensorflow/tflite-micro/archive/80cb11b131e9738dc60b2db3e2f1f8e2425ded52.zip"
CMSIS = "https://github.com/ARM-software/CMSIS_5/archive/a75f01746df18bb5b929dfb8dc6c9407fac3a0f3.zip"
CMSIS_DSP = "https://github.com/ARM-software/CMSIS-DSP/archive/refs/tags/v1.15.0.zip"
CMSIS_NN = "https://github.com/ARM-software/CMSIS-NN/archive/refs/tags/23.08.zip"
CMSIS_ENSEMBLE = "https://github.com/alifsemi/alif_ensemble-cmsis-dfp/archive/5bfce4020fa27d91fcd950725d35ecee8ba364ad.zip"
CMSIS_ENSEMBLE_B = "https://github.com/alifsemi/alif_ensemble-cmsis-dfp/archive/refs/tags/v0.9.6.zip"
BOARDLIB = "https://github.com/alifsemi/alif_boardlib/archive/64067e673171fb7a80272421e537a5f8064bb323.zip"
ETHOS_U_CORE_DRIVER = "https://git.mlplatform.org/ml/ethos-u/ethos-u-core-driver.git/snapshot/ethos-u-core-driver-23.08.tar.gz"
ETHOS_U_CORE_PLATFORM = "https://git.mlplatform.org/ml/ethos-u/ethos-u-core-platform.git/snapshot/ethos-u-core-platform-23.08.tar.gz"
LVGL = "https://github.com/lvgl/lvgl/archive/refs/tags/v8.3.7.zip"
ARM2D = "https://github.com/ARM-software/Arm-2D/archive/refs/tags/v1.1.3.zip"
def download(url_file: str, post_process=None):
    """Fetch *url_file* into a temporary file and optionally post-process it.

    The temporary file is deleted when the function returns, so any
    unpacking must happen inside *post_process*.
    """
    with urlopen(url_file) as reply, tempfile.NamedTemporaryFile() as archive:
        logging.info(f"Downloading {url_file} ...")
        archive.write(reply.read())
        archive.seek(0)
        logging.info(f"Finished downloading {url_file}.")
        if post_process is not None:
            post_process(archive)
def unzip(file, to_path):
    """Extract a zip archive into *to_path*, stripping the top-level folder.

    File permissions stored in the archive (external_attr) are restored
    after extraction.
    """
    with ZipFile(file) as archive:
        for info in archive.infolist():
            # Drop everything up to and including the first '/' so the
            # archive's single root directory is not reproduced on disk.
            info.filename = info.filename[info.filename.find("/") + 1:]
            if not info.filename:
                continue
            archive.extract(info, to_path)
            mode = info.external_attr >> 16
            if mode:
                (to_path / info.filename).chmod(mode)
def untar(file, to_path):
    """Extract a tar archive into *to_path*, stripping the top-level folder.

    Entries without a '/' in their name (the root folder itself) are skipped.
    """
    with tarfile.open(file) as archive:
        for member in archive.getmembers():
            separator = member.name.find("/")
            if separator < 0:
                continue  # top-level entry: nothing left after stripping
            member.name = member.name[separator + 1:]
            if member.name:
                archive.extract(member, to_path)
def main(dependencies_path: Path):
    """Download and unpack every dependency into its own sub-directory.

    Downloads happen in the same order as before; each archive is handed to
    the matching extractor (zip or tar) while the temp file is still open.
    """
    plan = [
        (CMSIS, "cmsis", unzip),
        (CMSIS_DSP, "cmsis-dsp", unzip),
        (CMSIS_NN, "cmsis-nn", unzip),
        (CMSIS_ENSEMBLE, "cmsis-ensemble", unzip),
        (CMSIS_ENSEMBLE_B, "cmsis-ensemble-b", unzip),
        (BOARDLIB, "boardlib", unzip),
        (ETHOS_U_CORE_DRIVER, "core-driver", untar),
        (ETHOS_U_CORE_PLATFORM, "core-platform", untar),
        (TF, "tensorflow", unzip),
        (LVGL, "lvgl", unzip),
        (ARM2D, "Arm-2D", unzip),
    ]
    for url, subdir, extract in plan:
        target = dependencies_path / subdir
        # Bind extract/target as defaults so each lambda keeps its own pair.
        download(url, lambda file, extract=extract, target=target: extract(file.name, to_path=target))
if __name__ == '__main__':
    # Log everything to a file and mirror the messages to stdout.
    logging.basicConfig(filename='download_dependencies.log', level=logging.DEBUG, filemode='w')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    download_dir = Path(__file__).parent.resolve() / "dependencies"
    if download_dir.is_dir():
        # An existing directory is treated as "already downloaded".
        logging.info(f'{download_dir} exists. Skipping download.')
    else:
        main(download_dir)
| alifsemi/alif_ml-embedded-evaluation-kit | download_dependencies.py | download_dependencies.py | py | 4,091 | python | en | code | 1 | github-code | 6 |
24332065584 | # We import pandas into Python
import pandas as pd
# Load the adjusted close price of each stock, indexed by trading date.
google_stock = pd.read_csv(r'learn_python\pandas\statistics_from_stock_data\GOOG.csv', index_col=['Date'], usecols=['Date', 'Adj Close'], parse_dates=True)
apple_stock = pd.read_csv(r'learn_python\pandas\statistics_from_stock_data\AAPL.csv', index_col=['Date'], usecols=['Date', 'Adj Close'], parse_dates=True)
amazon_stock = pd.read_csv(r'learn_python\pandas\statistics_from_stock_data\AMZN.csv', index_col=['Date'], usecols=['Date', 'Adj Close'], parse_dates=True)
# We create calendar dates between '2004-08-19' and '2016-12-31'
dates = pd.date_range('2004-08-19', '2016-12-31')
# We create an empty DataFrame that uses the above dates as indices
all_stocks = pd.DataFrame(index = dates)
# Change the Adj Close column label to Google
google_stock = google_stock.rename(columns = {'Adj Close' : 'Google'})
#print(google_stock.head())
# Change the Adj Close column label to Apple
apple_stock = apple_stock.rename(columns = {'Adj Close' : 'Apple'})
#print(all_stocks.head())
# Change the Adj Close column label to Amazon
amazon_stock = amazon_stock.rename(columns = {'Adj Close' : 'Amazon'})
# We join the Google stock to all_stocks (non-trading days become NaN)
all_stocks = all_stocks.join(google_stock)
# We join the Apple stock to all_stocks
all_stocks = all_stocks.join(apple_stock)
# We join the Amazon stock to all_stocks
all_stocks = all_stocks.join(amazon_stock)
print(all_stocks.head())
| funnyfeet434/Learn_AI | learn_python/pandas/statistics_from_stock_data/statistics_from_stock.py | statistics_from_stock.py | py | 1,431 | python | en | code | 0 | github-code | 6 |
20025251905 | import os
import re
def create_current_file_message(filename):
    """Return the file's basename in a 30-character left-aligned column, followed by ' => '."""
    name = os.path.basename(filename)
    # :<30.30 pads short names to 30 chars and truncates longer ones.
    return f"{name:<30.30} => "
def create_insert_message(sql_query, row_count, execution_time=None):
    """Create a log message describing how many rows a statement touched.

    Extracts the target table and statement kind from the SQL text and
    delegates the actual formatting to create_message.
    """
    if row_count < 0:
        # Negative rowcount: no statement info available.
        return create_message(None, row_count, execution_time)
    # NOTE: if multiple queries, then rowcount only reflects the last
    # inserted/updated statement.
    match = re.search(r'^\s*((?:INSERT )?INTO|CREATE TABLE|DELETE\s+FROM|UPDATE)\s+(.+?)\s',
                      sql_query,
                      re.IGNORECASE | re.MULTILINE
                      )
    if match:
        statement = match.group(1).upper()
        table_into = match.group(2)
    else:
        statement = ''
        table_into = '?'
    # Map the statement kind to the message prefix (same precedence order
    # as the original if/elif chain).
    if statement == 'INTO':
        prefix = 'INTO'
    else:
        prefix = next((word for word in ('DELETE', 'CREATE', 'UPDATE') if word in statement), '')
    return create_message(table_into, row_count, execution_time, prefix)
def create_message(table_into, row_count, execution_time, prefix='INTO'):
    """Format '<label> <rows> [<seconds> s| <sec/row> s/#]' for logging.

    When table_into is falsy the label is 'Nothing inserted'; the per-row
    rate is only appended for positive row counts.
    """
    label = f"{prefix} {table_into}" if table_into else 'Nothing inserted'
    message = f"{label:<40.40} {row_count:>9,} [{execution_time:>8.2f} s"
    if row_count > 0:
        message += f"| {execution_time / row_count:>.1e} s/#]"
    else:
        message += ']'
    return message
| thehyve/ohdsi-etl-caliber | python/util/message_creation.py | message_creation.py | py | 1,690 | python | en | code | 4 | github-code | 6 |
13438082129 | """
Boston house prices dataset
"""
import sklearn.datasets
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
# Load the Boston house-price dataset
# NOTE(review): sklearn.datasets.load_boston was removed in scikit-learn 1.2;
# this script requires scikit-learn < 1.2.
skl_data = sklearn.datasets.load_boston(return_X_y=False)
print(type(skl_data)) # Bunch: a dict-like container
print(skl_data.keys())
print(skl_data.feature_names)
# Separate the data and the target
X = skl_data.data
y = skl_data.target
print('X shape: ', X.shape)
print('y shape: ', y.shape)
print('len(X):', len(X))
print('len(y):', len(y))
features = skl_data.feature_names
# Data exploration -> scatter plot of y against each feature
fig, ax = plt.subplots(3, 5)
# ax: 3x5 2-D array (ndarray) of subplots
print('fig: ', fig)
# print('ax: ', ax)
ax_flat = ax.flatten()
for i in range(len(features)):
    subplot = ax_flat[i]
    subplot.scatter(X[:, i], y)
    subplot.set_title(features[i])
plt.show()
# Split into training / validation sets
np.random.seed(1217)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
print(f'X_train len: {len(X_train)}, X_test len:{len(X_test)}, y_train len:{len(y_train)}, y_test len:{len(y_test)}')
# Linear regression using the training set - simple and multiple regression
# price = b0 + b1 * rm: house price ~ number of rooms (RM, column 5)
X_train_rm = X_train[:, np.newaxis, 5] # np.newaxis keeps the slice 2-D
X_test_rm = X_test[:, np.newaxis, 5] # (column vector, as sklearn expects)
print(f'X_train_rm: {X_train_rm.shape}, X_test_rm: {X_test_rm.shape} ')
lin_reg = LinearRegression() # create the LinearRegression object
lin_reg.fit(X_train_rm, y_train) # fit (train) -> find b0, b1
print(f'intercept: {lin_reg.intercept_}, coefficient: {lin_reg.coef_}')
# Predict on the validation set -> plot
y_pred_rm = lin_reg.predict(X_test_rm)
# Actual values (scatter) vs predictions (line)
plt.scatter(X_test_rm, y_test) # actual y_test
plt.plot(X_test_rm, y_pred_rm, 'r-')
plt.title('Price ~ RM')
plt.xlabel('RM')
plt.ylabel('Price')
plt.show()
# Compute the MSE: Mean Square Error
# = mean of the squared errors
# error = y - y_hat, error**2 = (y-y_hat)**2
# MSE = sum(error**2 = (y-y_hat)**2) / count
mse = mean_squared_error(y_test, y_pred_rm)
# RMSE (square root of the MSE)
rmse = np.sqrt(mse)
print('Price ~ RMSE=', rmse)
# Compute the R2-score
r2_1 = lin_reg.score(X_test_rm, y_test) # score(): computes the R-Square value
print('Price ~ r2_1: ', r2_1)
r2_2 = r2_score(y_test, y_pred_rm) # coefficient of determination
print('Price ~ r2_2: ', r2_2)
# Price ~ LSTAT linear regression: price = b0 + b1 * lstat (LSTAT, column 12)
# b0, b1 ?
X_train_lstat = X_train[:, np.newaxis, 12] # training set
X_test_lstat = X_test[:, np.newaxis, 12] # validation set
lin_reg.fit(X_train_lstat, y_train) # fit / train the model
print(f'intercept:{lin_reg.intercept_}, coefficients: {lin_reg.coef_}')
y_pred_lstat = lin_reg.predict(X_test_lstat) # predict / test
plt.scatter(X_test_lstat, y_test) # scatter plot of the actual values
plt.plot(X_test_lstat, y_pred_lstat, 'r-')
plt.title('Price ~ LSTAT')
plt.xlabel('LSTAT')
plt.ylabel('Price')
plt.show()
mse = mean_squared_error(y_test, y_pred_lstat)
rmse = np.sqrt(mse)
print('Price ~ RMSE=', rmse)
r2_1 = lin_reg.score(X_test_lstat, y_test) # score(): computes the R-Square value
print('Price ~ r2_1: ', r2_1)
r2_2 = r2_score(y_test, y_pred_lstat) # coefficient of determination
print('Price ~ r2_2: ', r2_2)
# Price ~ LSTAT + LSTAT**2 linear regression
# Price = b0 + b1 * lstat + b2 * lstat**2
poly = PolynomialFeatures(degree=2, include_bias=False)
# transformer that appends polynomial terms as extra columns
X_train_lstat_poly = poly.fit_transform(X_train_lstat)
# add the polynomial terms to the validation set as well
X_test_lstat_poly = poly.fit_transform(X_test_lstat)
lin_reg.fit(X_train_lstat_poly, y_train)
print(f'intercept:{lin_reg.intercept_}, coefficient:{lin_reg.coef_}')
y_pred_lstat_poly = lin_reg.predict(X_test_lstat_poly)
plt.scatter(X_test_lstat, y_test) # actual values
xs = np.linspace(X_test_lstat.min(), X_test_lstat.max(), 100).reshape((100, 1))
xs_poly = poly.fit_transform(xs)
ys = lin_reg.predict(xs_poly)
plt.plot(xs, ys, 'r')
# plt.plot(X_test_lstat, y_pred_lstat_poly, 'r') # predictions
plt.title('Price ~ lstat + lstat^2')
plt.xlabel('LSTAT')
plt.ylabel('Price')
plt.show()
mse = mean_squared_error(y_test, y_pred_lstat_poly)
rmse = np.sqrt(mse)
print('Price ~ RMSE=', rmse)
r2_1 = lin_reg.score(X_test_lstat_poly, y_test) # score(): computes the R-Square value
print('Price ~ r2_1: ', r2_1)
r2_2 = r2_score(y_test, y_pred_lstat_poly) # coefficient of determination
print('Price ~ r2_2: ', r2_2)
# Price ~ RM + LSTAT linear regression: price = b0 + b1 * rm + b2 * lstat
X_train_rm_lstat = X_train[:, [5, 12]]
X_test_rm_lstat = X_test[:, [5, 12]]
print(X_train_rm_lstat[:5])
lin_reg.fit(X_train_rm_lstat, y_train) # fit/train
print(f'intercept: {lin_reg.intercept_}, coefficients: {lin_reg.coef_}')
y_pred_rm_lstat = lin_reg.predict(X_test_rm_lstat) # predict/test
print(y_test[:5], y_pred_rm_lstat[:5])
mse = mean_squared_error(y_test, y_pred_rm_lstat)
rmse = np.sqrt(mse)
r2 = r2_score(y_test, y_pred_rm_lstat)
print(f'Price ~ RM + LSTAT: RMSE = {rmse}, R**2 = {r2}')
print('-====================================')
# Price ~ RM + LSTAT + RM**2 + RM * LSTAT + LSTAT**2
# Price = b0 + b1 * rm + b2 * lstat + b3 * rm**2 + b4 * rm * lstat + b5 * lstat **2
# add the polynomial term columns to the training set
X_train_rm_lstat_poly = poly.fit_transform(X_train_rm_lstat)
# add the polynomial term columns to the test set
X_test_rm_lstat_poly = poly.fit_transform(X_test_rm_lstat)
print(X_test_rm_lstat_poly[:2])
lin_reg.fit(X_train_rm_lstat_poly, y_train)
print(f'intercept: {lin_reg.intercept_}, coef: {lin_reg.coef_}')
y_pred_rm_lstat_poly = lin_reg.predict(X_test_rm_lstat_poly)
mse = mean_squared_error(y_test, y_pred_rm_lstat_poly)
rmse = np.sqrt(mse)
r2 = r2_score(y_test, y_pred_rm_lstat_poly)
# NOTE(review): the printed label still says 'RM + LSTAT' although this model
# includes the degree-2 polynomial terms.
print(f'Price ~ RM + LSTAT: RMSE = {rmse}, R**2 = {r2}')
print('y true:', y_test[:5])
print('y pred:', y_pred_rm_lstat_poly[:5])
# Price ~ RM + LSTAT + LSTAT**2
# Price = b0 + b1 * rm + b2 * lstat + b3 * lstat**2
X_train_last = np.c_[X_train_rm, X_train_lstat_poly]
X_test_last = np.c_[X_test_rm, X_test_lstat_poly]
print('X_train_last:', X_train_last[:2], '\n X_test_last: ', X_test_last[:2])
lin_reg.fit(X_train_last, y_train) # fit/train
print(f'Price ~ RM + LSTAT + LSTAT**2: intercept: {lin_reg.intercept_}, coef {lin_reg.coef_}')
y_pred_last = lin_reg.predict(X_test_last) # predict / test
print('y true:', y_test[:5])
print('y predict:', y_pred_last[:5].round(2))
mse = mean_squared_error(y_test, y_pred_last)
rmse = np.sqrt(mse)
r2 = r2_score(y_test, y_pred_last)
# NOTE(review): the printed label still says 'RM + LSTAT' although this is the
# RM + LSTAT + LSTAT**2 model.
print(f'Price ~ RM + LSTAT: RMSE = {rmse}, R**2 = {r2}')
| i-hs/lab-python | scratch13/ex05.py | ex05.py | py | 6,999 | python | en | code | 0 | github-code | 6 |
74048870269 | import pandas as pd
import os
from calendar import monthrange
from datetime import datetime,timedelta
import re
import numpy as np
from models.fed_futures_model.backtestloader import BacktestLoader
from models.fed_futures_model.fff_model import FederalFundsFuture
class Backtest():
    """Backtests the federal-funds-futures model over the FOMC meeting calendar.

    Relies on a ``BacktestLoader`` for futures prices and meeting dates and on
    ``FederalFundsFuture`` for the per-meeting pricing model.
    """

    def __init__(self, path):
        # Loader supplies futures data and the FOMC meeting-date index.
        self.loader = BacktestLoader(path)

    def load_month(self, meeting_date: datetime):
        """Build and initialise a FederalFundsFuture model for the meeting
        falling in *meeting_date*'s month.

        Returns the initialised ``FederalFundsFuture`` instance.
        """
        ff_curr = self.loader.get_curr_data(meeting_date)
        ff_prior = self.loader.ff_month_before(meeting_date)
        ff_after = self.loader.ff_month_after(meeting_date)
        # Meeting "type" 1 means the previous month also hosted a meeting.
        prev_month_date = self.cycle_month(meeting_date, step=-1).strftime("%Y-%m")
        fomc_type = 1 if len(self.loader.fomc_dates.loc[prev_month_date]) > 0 else 2
        # Resolve the exact meeting day inside the month.
        meeting_date = self.loader.fomc_dates.loc[meeting_date.strftime("%Y-%m")].index[0]
        fff = FederalFundsFuture()
        # NOTE(review): meeting_date is passed twice; confirm against
        # FederalFundsFuture.initiate_model's signature.
        fff.initiate_model(meeting_date, ff_prior, ff_curr, ff_after, meeting_date, fomc_type)
        return fff

    def run_month(self, meeting_date: datetime):
        """Run the model for one meeting.

        Returns ``([no_hike_prob, hike_prob], model)``.
        """
        fff = self.load_month(meeting_date)
        no_hike_prob, hike_prob = fff.calculate_hike_prob()
        prob_change = [no_hike_prob, hike_prob]
        return prob_change, fff

    def find_range(self, implied_rate, probs):
        """Place *probs* into 25 bps rate buckets, anchored at the bucket
        containing *implied_rate*.

        Returns a list of 8 bucket probabilities (0-25 bps ... 175-200 bps).
        """
        int_ranges = [0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2]
        values = [0] * len(int_ranges)
        # Bug fix: the original left `level` unbound (UnboundLocalError) when
        # implied_rate >= 1.75; default to the highest bucket that still fits.
        level = len(values) - len(probs)
        for i in range(len(int_ranges) - 1):
            if int_ranges[i] > implied_rate:
                level = i
                break
        # Clamp so the probabilities never spill past the last bucket.
        level = min(level, len(values) - len(probs))
        for offset, prob in enumerate(probs):
            values[level + offset] = prob
        return values

    def predict(self):
        """Run the model for every FOMC meeting from the current month onward.

        Returns ``(bucket_probabilities, predicted_rates, raw_probabilities)``
        as three DataFrames.
        """
        today = datetime.now().strftime("%Y-%m")
        meeting_dates = self.loader.fomc_dates.loc[today:]
        all_predictions = {}
        pred_values = {}
        raw_probs = {}
        for dt in meeting_dates.index:
            print(f"Loading: {dt} FOMC Meeting...")
            dt = pd.to_datetime(dt)
            probs, fff = self.run_month(dt)
            # Roll excess probability mass into higher buckets before binning.
            result = self.carry(probs)
            implied_rate = fff.implied_rate
            v = self.find_range(implied_rate, result)
            all_predictions[dt] = v
            pred_values[dt] = [fff.ffer_end]
            raw_probs[dt] = probs
        final_result = pd.DataFrame.from_dict(all_predictions).T
        final_result.columns = ['0-25 BPS', '25-50 BPS', '50-75 BPS',
                                '75-100 BPS', '100-125 BPS', '125-150 BPS',
                                '150-175 BPS', '175-200 BPS']
        pred_values = pd.DataFrame.from_dict(pred_values).T
        pred_values = pred_values.reset_index()
        pred_values.columns = ['Date', 'Prediction']
        raw_probs = pd.DataFrame.from_dict(raw_probs).T.reset_index()
        raw_probs.columns = ['Date', 'No Hike', 'Hike']
        return final_result, pred_values, raw_probs

    def carry(self, sample, cap=1):
        """Normalise a probability vector: roll mass above *cap* into the next
        slot, clip a negative first entry to zero, and rebalance interior
        entries that saturated at 1.  Returns a plain Python list.
        """
        result = sample.copy()
        # A negative leading probability signals overflow into a third slot.
        if result[0] < 0:
            result.append(0)
        result = np.array([result])
        for c in range(1, result.shape[1]):
            result[:, c] += np.maximum(result[:, c - 1] - cap, 0)
        result[:, :-1] = np.minimum(result[:, :-1], cap)
        if result[:, 0] < 0:
            result[:, 0] = 0
        for i in range(1, result.shape[1] - 1):
            if result[:, i] == 1:
                result[:, i] = 1 - result[:, i + 1]
        return result[0].tolist()

    def cycle_month(self, date: datetime, step):
        """Shift *date* by *step* months' worth of days.

        Uses the length of *date*'s own month, so the result is approximate
        near month boundaries (intentional heuristic kept from the original).
        """
        new_date = date + step * timedelta(days=monthrange(date.year, date.month)[1])
        return new_date
| limjoobin/bt4103-rate-decision-index | rate_decision_index/models/fed_futures_model/backtest.py | backtest.py | py | 3,664 | python | en | code | 0 | github-code | 6 |
71663956668 | # A = X = Rock = 0
# B = Y = Paper = 1
# C = Scissors = 2
# beats[m] is the move that m defeats:
# Rock (0) beats Scissors (2), Paper (1) beats Rock (0),
# Scissors (2) beats Paper (1).
beats = (2, 0, 1)


def score_round(opponent, player):
    """Score one round: shape value (move + 1) plus 3 for a draw, 6 for a win."""
    bonus = 3 if player == opponent else (6 if beats[player] == opponent else 0)
    return player + 1 + bonus
# Pt 1: score each round taking column 2 as our own move (A/B/C vs X/Y/Z).
# score = 0
# with open('input.txt') as input_file:
#     for line in input_file:
#         opponent, player = line.split(' ')
#         opponent = ord(opponent.strip()) - ord('A')
#         player = ord(player.strip()) - ord('X')
#         score += score_round(opponent, player)

# Pt 2: column 2 is the desired outcome (X = lose, Y = draw, Z = win);
# derive the move we must play, then score as before.
score = 0
with open('input.txt') as input_file:
    for line in input_file:
        opponent, win_lose_draw = line.split(' ')
        opponent = ord(opponent.strip()) - ord('A')
        win_lose_draw = ord(win_lose_draw.strip()) - ord('X')
        player = opponent
        if win_lose_draw == 0:
            # Lose (X): play the move the opponent beats.
            player = beats[opponent]
        elif win_lose_draw == 1:
            # Draw (Y): mirror the opponent's move.
            pass
        else:
            # Win (Z): step through the remaining moves until we find the
            # one that beats the opponent.
            for _ in range(2):
                player = (player + 1) % 3
                if beats[player] == opponent:
                    break
        score += score_round(opponent, player)

#print(score)
print(score) | jacobschaer/advent_of_code_2022 | day_2/aoc2.py | aoc2.py | py | 1,129 | python | en | code | 0 | github-code | 6 |
10189584256 | from random import random
def simulatedChampionshipWinner(players):
    """Simulate a 'king of the hill' championship; return the champion's index.

    Player 0 starts as master; challengers queue up and each match is decided
    by skill-weighted chance.  The championship ends when the current master
    has beaten every other player consecutively (see isChampion).

    NOTE(review): assumes players[i].index == i, since match results (indices)
    are compared against the `master` list position — confirm with the Player
    class.
    """
    reset(players)
    # Loop exits only via the `return master` statements below;
    # championshipEnd is never set to True.
    championshipEnd = False
    master = 0
    numPlayers = len(players)
    challengers = players.copy()
    challengers.pop(0)
    while not championshipEnd:
        participant = challengers.pop(0)
        if simulatedMatchWinner(participant, players[master]) == master:
            players[master].addWin()
            challengers.append(participant)
            if isChampion(players[master], numPlayers):
                return master
        else:
            # NOTE(review): a SECOND independent match is simulated here, so a
            # challenger effectively must win two rolls to dethrone the master.
            # Possibly a double-roll artifact — confirm intended semantics.
            if simulatedMatchWinner(participant, players[master]) == participant.index:
                players[master].restartCounter()
                challengers.append(players[master])
                master = participant.index
            else:
                players[master].addWin()
                challengers.append(participant)
                if isChampion(players[master], numPlayers):
                    return master
def simulatedMatchWinner(player, master):
    """Randomly decide one match; the challenger's win chance is its share of
    the combined skill.  Returns the winner's index."""
    win_probability = player.skill / (player.skill + master.skill)
    return player.index if random() < win_probability else master.index
def isChampion(master, numPlayers):
    """A master is champion once it has beaten every other player in a row."""
    return master.winsInARow >= numPlayers - 1
def reset(players):
    """Clear every player's consecutive-win counter."""
    for player in players:
        player.restartCounter()
8224455984 | # MAC0318 Intro to Robotics
# Please fill-in the fields below with every team member info
#
# Name: José Lucas Silva Mayer
# NUSP: 11819208
#
# Name: Willian Wang
# NUSP: 11735380
#
# Any supplemental material for your agent to work (e.g. neural networks, data, etc.) should be
# uploaded elsewhere and listed down below together with a download link.
#
#
#
# ---
#
# Final Project - The Travelling Mailduck Problem
#
# Don't forget to run this file from the Duckievillage root directory path (example):
# cd ~/MAC0318/duckievillage
# conda activate duckietown
# python3 assignments/challenge/challenge.py assignments/challenge/examples/challenge_n
#
# Submission instructions:
# 0. Add your names and USP numbers to the file header above.
# 1. Make sure that any last change hasn't broken your code. If the code crashes without running you'll get a 0.
# 2. Submit this file via e-disciplinas.
import pyglet
from pyglet.window import key
import numpy as np
import math
import random
from duckievillage import create_env, FRONT_VIEW_MODE
import cv2
import tensorflow
class Agent:
    """Duckiebot driving agent: lane following with neural object avoidance.

    Uses three pre-trained Keras models (object detection, dodge manoeuvre,
    lane following) and classic HSV colour segmentation of the lane markings.
    """

    def __init__(self, env):
        self.env = env
        # Wheel radius (m) and half the wheel separation (m).
        self.radius = 0.0318
        self.baseline = env.unwrapped.wheel_dist/2
        # Motor calibration constants — presumably tuned empirically for this
        # simulator setup; TODO confirm provenance.
        self.motor_gain = 0.68*0.0784739898632288
        self.motor_trim = 0.0007500911693361842
        self.initial_pos = env.get_position()
        # load model of object detection (duckie-in-view classifier)
        self.model_od = tensorflow.keras.models.load_model('project/models/od.h5')
        # load model of dodge (steers around a detected duckie)
        self.model_dodge = tensorflow.keras.models.load_model('project/models/ddg.h5')
        # number of control steps a dodge manoeuvre lasts once triggered
        self.dodge_steps = 7
        self.dodge_count = 0
        # load lane following model
        self.model_lf = tensorflow.keras.models.load_model('project/models/lf.h5')
        self.score = 0
        # Forward keyboard events from the simulator window to this agent.
        key_handler = key.KeyStateHandler()
        env.unwrapped.window.push_handlers(key_handler)
        self.key_handler = key_handler
        # HSV colour-segmentation bounds for the two lane markings.
        self.inner_lower = np.array([22, 93, 160])
        self.inner_upper = np.array([45, 255, 255])
        self.outer_lower = np.array([0, 0, 130])
        self.outer_upper = np.array([179, 85, 255])

    def preprocess(self, image):
        """Return (inner_mask, outer_mask): binary 2D masks (values 0/1) for
        the yellow inner line and the white outer line of *image*.

        Side effect: stores the combined-mask overlay in ``self.masked``.
        """
        hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # obtain HSV representation of image
        # filter out dashed yellow "inner" line (normalised to 0/1)
        inner_mask = cv2.inRange(hsv, self.inner_lower, self.inner_upper)//255
        # filter out solid white "outer" line
        outer_mask = cv2.inRange(hsv, self.outer_lower, self.outer_upper)//255
        # Note: it is possible to filter out pixels in the RGB format
        # by replacing `hsv` with `image` in the commands above
        # produces combined mask (kept for the visualisation overlay)
        mask = cv2.bitwise_or(inner_mask, outer_mask)
        self.masked = cv2.bitwise_and(image, image, mask=mask)
        return inner_mask, outer_mask

    def get_pwm_control(self, v: float, w: float)-> (float, float):
        ''' Takes velocity v and angle w and returns left and right power to motors.'''
        # Differential-drive kinematics with per-motor gain/trim calibration.
        V_l = (self.motor_gain - self.motor_trim)*(v-w*self.baseline)/self.radius
        V_r = (self.motor_gain + self.motor_trim)*(v+w*self.baseline)/self.radius
        return V_l, V_r

    def send_commands(self, dt: float):
        ''' Agent control loop: dodge if mid-manoeuvre, otherwise detect
        obstacles and either start a dodge or follow the lane. '''
        # acquire front camera image
        img = self.env.front()
        # run image processing routines
        P, Q = self.preprocess(img) # returns inner and outer mask matrices
        # downscale camera frame and add a batch axis for model input
        img_inference = cv2.resize(img, (80, 60))
        img_inference = np.expand_dims(img_inference, axis=0)
        # if the duckie is in the middle of a dodge, continue it
        if self.dodge_count > 0:
            prediction = self.model_dodge.predict(img_inference, verbose=False)
            # scale raw network outputs to (v, w) — factors presumably tuned
            # for this model; TODO confirm
            v, w = prediction[0][0] * 0.03125, prediction[0][1] * 0.022
            pwm_left, pwm_right = self.get_pwm_control(v, w)
            self.env.step(pwm_left, pwm_right)
            self.dodge_count -= 1
            return
        # predict object detection directives
        prediction = self.model_od.predict(img_inference, verbose=False)
        # 0.7 is the detection confidence threshold
        take_care = prediction[0][0] > 0.7
        if take_care:
            # activate the dodge mode if image has a duckie
            # set the dodge count to the number of steps
            self.dodge_count = self.dodge_steps
            return
        # resize masks P and Q to (60, 80)
        p_resized = cv2.resize(P, (80, 60))
        q_resized = cv2.resize(Q, (80, 60))
        # create a 2-channel image with the masks
        mask = np.zeros((60, 80, 2))
        mask[:, :, 0] = p_resized
        mask[:, :, 1] = q_resized
        # cut off the 30% top pixels (sky/background carries no lane info)
        mask = mask[(3 * mask.shape[0])//10:, :, :]
        masks = np.expand_dims(mask, axis=0)
        # predict lane following directives
        prediction_lf = self.model_lf.predict(masks, verbose=False)
        # 1.25 boosts forward speed over the model's raw output
        v, w = prediction_lf[0][0] * 1.25, prediction_lf[0][1]
        pwm_left, pwm_right = self.get_pwm_control(v, w)
        self.env.step(pwm_left, pwm_right)
        # for visualization
        self.env.render('human')
| josemayer/pato-wheels | project/agent.py | agent.py | py | 5,379 | python | en | code | 0 | github-code | 6 |
32915067412 | from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class Group(models.Model):
    """A community/topic that posts can be published under."""
    # NOTE(review): max_length on a TextField is enforced only in forms,
    # not at the database level — confirm whether CharField was intended.
    title = models.TextField(max_length=200, verbose_name='Название')
    # URL-friendly unique identifier for the group.
    slug = models.SlugField(unique=True, verbose_name='Идентификатор')
    description = models.TextField(verbose_name='Описание')

    class Meta:
        verbose_name = 'Группа'
        verbose_name_plural = 'Группы'

    def __str__(self):
        return self.title
class Post(models.Model):
    """A user-authored publication, optionally attached to a Group."""
    text = models.TextField(verbose_name='Текст')
    pub_date = models.DateTimeField(
        verbose_name='Дата публикации',
        auto_now_add=True,)
    # Deleting the author cascades and removes their posts.
    author = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='posts',
        verbose_name='Автор')
    # Optional illustration stored under MEDIA_ROOT/posts/.
    image = models.ImageField(
        upload_to='posts/',
        null=True, blank=True,
        verbose_name='Изображение')
    # Optional group; deleting the group removes its posts (CASCADE).
    group = models.ForeignKey(
        Group, on_delete=models.CASCADE, related_name='posts',
        blank=True, null=True, verbose_name='Группа'
    )

    class Meta:
        verbose_name = 'Пост'
        verbose_name_plural = 'Посты'

    def __str__(self):
        return self.text
class Comment(models.Model):
    """A user comment attached to a single Post."""
    author = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='comments',
        verbose_name='Автор')
    post = models.ForeignKey(
        Post, on_delete=models.CASCADE,
        related_name='comments',
        verbose_name='Пост')
    text = models.TextField()
    # db_index speeds up ordering/filtering comments by creation time.
    created = models.DateTimeField(
        verbose_name='Дата добавления',
        auto_now_add=True, db_index=True)

    class Meta:
        verbose_name = 'Комментарий'
        verbose_name_plural = 'Комментарии'

    def __str__(self):
        return self.text
class Follow(models.Model):
    """A subscription: *user* follows *following*'s posts."""
    user = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='follows_user',
        verbose_name='Подписавшийся пользователь')
    following = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='follows', blank=False,
        verbose_name='Пользователь, на которого подписаны')

    class Meta:
        verbose_name = 'Подписка'
        verbose_name_plural = 'Подписки'
        # A user may follow a given author at most once.
        unique_together = ["user", "following"]

    def __str__(self):
        return f'{self.user.username} отслеживает {self.following.username}'
| dew-77/api_final_yatube | yatube_api/posts/models.py | models.py | py | 2,618 | python | en | code | 0 | github-code | 6 |
73102772669 | """
Faça um programa que leia seis valores numéricos atribuindo-os à duas variáveis do tipo
lista com três elementos cada. Cada variável irá representar um vetor, informe o produto
escalar e o produto vetorial destes vetores.
"""
# Read three integers into each of the two vectors.
lista3 = []
lista4 = []
for x in range(3):
    lista3.append(int(input("Digite um numero para primeira lista: ")))
for x in range(3):
    lista4.append(int(input("Digite um numero para segunda lista: ")))

# Bug fix: the original ended with two dead-code expressions and never
# computed the products promised by the module docstring.

# Scalar (dot) product: sum of component-wise products.
produto_escalar = sum(a * b for a, b in zip(lista3, lista4))
print("Produto escalar:", produto_escalar)

# Vector (cross) product of the two 3-D vectors (right-hand rule).
produto_vetorial = [
    lista3[1] * lista4[2] - lista3[2] * lista4[1],
    lista3[2] * lista4[0] - lista3[0] * lista4[2],
    lista3[0] * lista4[1] - lista3[1] * lista4[0],
]
print("Produto vetorial:", produto_vetorial)
14965343125 | """
This file defines the Variable, a class used for basic mathematical operations
and gradient calculations.
Authors: MILES MCCAIN and LIV MARTENS
License: GPLv3
"""
import random
import numpy as np
import math
class Variable():
    """A scalar node in an automatic-differentiation computation graph.

    A bare Variable is a leaf evaluated by dictionary lookup; the arithmetic
    operators build composite Variables whose `eval_` and `grad` are closures
    over their operands.  Gradients are returned as numpy arrays whose entries
    are ordered by the hash of each leaf variable (see `order`).
    """

    def __init__(self, eval_=None, grad=None, representation=None, name=None):
        # Random identity so distinct variables hash differently and can be
        # used as keys of the `values` mapping.
        self.identifier = hash(random.random())
        if eval_ is not None:
            self.eval_ = eval_
        if grad is not None:
            self.grad = grad
        self.representation = representation
        self.name = name

    def set_name(self, name):
        """Name the variable for pretty printing.  Doesn't affect `eval_`.

        (Renamed from `name`: the `self.name = name` assignment in __init__
        always shadowed the old method, making it uncallable.)

        Arguments:
            name {string} -- the name of the variable
        """
        self.name = name

    def __repr__(self):
        if self.representation is not None:
            return self.representation()
        if self.name is not None:
            return self.name
        return "<%s>" % str(hash(self))[:3]

    def __hash__(self):
        return self.identifier

    def eval_(self, values):
        """Evaluate the variable with the given values.

        Arguments:
            values {dictionary} -- a dictionary of values, where the
            keys are variable objects and their values are floats

        Returns:
            float -- the value of the evaluated variable
        """
        return values[self]

    def __call__(self, values):
        """Evaluate the variable with the given values.
        This is an alias for `eval_`.

        Arguments:
            values {dictionary} -- a dictionary of values, where the
            keys are variable objects and their values are floats

        Returns:
            float -- the value of the evaluated variable
        """
        return self.eval_(values)

    def ranged_eval(self, values, min=None, max=None, precondition=None):
        """Perform a ranged evaluation of the variable on the given values.

        This method allows a range to be specified as well as a precondition,
        which helps to prevent floating point rounding errors: values exactly
        at an open bound are nudged one ULP inside it.

        Arguments:
            values {dictionary} -- a dictionary of the values (see `eval_`)

        Keyword Arguments:
            min {float} -- the minimum output value (not enforced if None) (default: {None})
            max {float} -- the maximum output value (not enforced if None) (default: {None})
            precondition {function} -- a function that returns true or false on the pre-ranged value (default: {None})

        Raises:
            Exception -- if the precondition fails

        Returns:
            float -- the evaluated value
        """
        value = self.eval_(values)
        if precondition is not None:
            if not precondition(value):
                raise Exception("precondition not met (value=%s)" % value)
        if min is not None and value <= min:
            value = np.nextafter(min, min + 1)
        if max is not None and value >= max:
            value = np.nextafter(max, max - 1)
        return value

    def grad(self, values):
        """Calculate the gradient at any given set of values.

        For a leaf variable this is a one-hot vector at the variable's
        position in the hash-sorted key order.

        Arguments:
            values {dictionary} -- the values of the variable
            (only include the required values!)

        Returns:
            np.array -- the gradient vector
        """
        self_location = self.order(values)
        pre_self = self_location
        post_self = len(values) - 1 - self_location
        gradient_array = [0]*pre_self + [1] + [0]*post_self
        return np.array(gradient_array)

    @staticmethod
    def exp(var):
        """Exponentiate the variable (e ** var).

        Arguments:
            var {Variable, float, int} -- the variable to exponentiate

        Returns:
            Variable -- a variable that has been exponentiated
            (or a float when given a plain number)
        """
        if isinstance(var, Variable):
            return Variable(eval_=lambda values: math.e ** var.eval_(values),
                            grad=lambda values: (
                                math.e ** var.eval_(values))*var.grad(values),
                            representation=lambda: "(e ** %s)" % str(var))
        if isinstance(var, (float, int)):
            return math.e ** var

    @staticmethod
    def log(var):
        """Take the logarithm (base e) of the given variable.

        Arguments:
            var {Variable, float, int} -- the variable to take the logarithm of

        Returns:
            Variable -- a variable that represents the log of the given variable
            (or a float when given a plain number)
        """
        if isinstance(var, Variable):
            # the precondition deals with floating point rounding errors
            # while still surfacing genuinely negative inputs
            return Variable(eval_=lambda values: math.log(var.ranged_eval(values, min=0, precondition=lambda k: k >= 0)),
                            grad=lambda values: (var.ranged_eval(
                                values, min=0, precondition=lambda k: k >= 0) ** -1)*var.grad(values),
                            representation=lambda: "ln(%s)" % str(var))
        if isinstance(var, (float, int)):
            return math.log(var)

    def __add__(self, other):
        """Add two variables together.

        Arguments:
            other {Variable, float, int} -- the variable to add

        Returns:
            Variable -- the added variables
        """
        if isinstance(other, Variable):
            return Variable(eval_=lambda values: self.eval_(values) + other.eval_(values),
                            grad=lambda values: self.grad(
                                values) + other.grad(values),
                            representation=lambda: "(%s + %s)" % (str(self), str(other)))
        if isinstance(other, (float, int)):
            return Variable(eval_=lambda values: self.eval_(values) + other,
                            grad=lambda values: self.grad(values),
                            representation=lambda: "(%s + %s)" % (str(self), str(other)))

    def __radd__(self, other):
        return self + other

    def __sub__(self, other):
        """Subtract a variable from another.

        Arguments:
            other {Variable, float, int} -- the variable to subtract

        Returns:
            Variable -- the result of the subtraction operation
        """
        return self + (other * -1)

    def __rsub__(self, other):
        # other - self == -self + other
        return self * -1 + other

    def __mul__(self, other):
        """Multiply two variables together.

        Arguments:
            other {Variable, float, int} -- the variable to multiply by

        Returns:
            Variable -- the result of the multiplication operation
        """
        if isinstance(other, Variable):
            # product rule: (uv)' = u'v + v'u
            return Variable(eval_=lambda values: self.eval_(values) * other.eval_(values),
                            grad=lambda values: self.grad(
                                values)*other.eval_(values) + other.grad(values)*self.eval_(values),
                            representation=lambda: "(%s * %s)" % (str(self), str(other)))
        if isinstance(other, (float, int)):
            return Variable(eval_=lambda values: self.eval_(values) * other,
                            grad=lambda values: self.grad(values) * other,
                            representation=lambda: "(%s * %s)" % (str(self), str(other)))

    def __rmul__(self, other):
        return self * other

    def __pow__(self, other):
        """Raise a variable to the power of a _constant_.

        Arguments:
            other {float, int} -- the value to raise the variable to

        Returns:
            Variable -- the variable raised to the given power
        """
        if isinstance(other, Variable):
            # logarithmic differentiation? TODO
            return NotImplemented
        if isinstance(other, (float, int)):
            # power rule: (x**n)' = n * x**(n-1) * x'
            return Variable(eval_=lambda values: self.eval_(values) ** other,
                            grad=lambda values: (
                                other)*(self.eval_(values) ** (other - 1))*self.grad(values),
                            representation=lambda: "(%s ** %s)" % (str(self), str(other)))

    # __rpow__ not implemented; we simply don't have the rules for it

    def __truediv__(self, other):
        """Divide a variable by another.

        Arguments:
            other {Variable, float, int} -- the denominator value/variable

        Returns:
            Variable -- a variable of the result of the division operation
        """
        reciprocal = other ** -1
        return self * reciprocal

    def __rtruediv__(self, other):
        """Divide a constant by this variable (other / self).

        Bug fix: previously returned `self * reciprocal`, i.e. self/self.
        """
        reciprocal = self ** -1
        return reciprocal * other

    def order(self, values):
        """Returns the order of the variable in the given list of values,
        as it would be returned by the gradient function, for example.

        Arguments:
            values {dictionary} -- the dictionary of values

        Returns:
            int -- the index of the variable within the dictionary (and in,
            for example, a gradient vector)
        """
        order = sorted([hash(key) for key in values.keys()])
        return order.index(hash(self))
| milesmcc/csc630-machine-learning | compgraphs/variable.py | variable.py | py | 9,156 | python | en | code | 0 | github-code | 6 |
70199866108 | class Node:
    def __init__(self, data, next=None):
        # Payload stored at this node.
        self.data = data
        # Reference to the successor node (None at the tail).
        self.next = next
class LinkedList:
    """Minimal singly linked list of Node objects, keyed by node payloads."""

    def __init__(self):
        self.head = None

    def get_first(self):
        """Return the head node, or None for an empty list."""
        return self.head

    def add_last(self, data):
        """Append a node carrying *data* at the tail."""
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = node

    def add_first(self, data):
        """Push a node carrying *data* onto the front of the list."""
        self.head = Node(data, self.head)

    def get(self, data):
        """Return the first node whose payload equals *data*, else None."""
        cursor = self.head
        while cursor is not None:
            if cursor.data == data:
                return cursor
            cursor = cursor.next
        return None

    def get_last(self):
        """Return the tail node, or None for an empty list."""
        cursor = self.head
        if cursor is None:
            return None
        while cursor.next is not None:
            cursor = cursor.next
        return cursor

    def add(self, prev_data, data):
        """Insert *data* immediately after the first node holding *prev_data*.

        Raises ValueError when no node holds *prev_data*.
        """
        anchor = self.get(prev_data)
        if anchor is None:
            raise ValueError("prev_node is None! Cannot add data to it!")
        anchor.next = Node(data, anchor.next)

    def remove(self, data):
        """Unlink the first node holding *data*; return True if one was found."""
        cursor = self.head
        if cursor is not None and cursor.data == data:
            self.head = cursor.next
            return True
        prev = None
        while cursor is not None and cursor.data != data:
            prev, cursor = cursor, cursor.next
        if cursor is None:
            return False
        prev.next = cursor.next
        return True

    def size(self):
        """Return the number of nodes (O(n) walk)."""
        count = 0
        cursor = self.head
        while cursor is not None:
            count += 1
            cursor = cursor.next
        return count

    def to_array(self):
        """Return the payloads as a Python list, head first."""
        items = []
        cursor = self.head
        while cursor is not None:
            items.append(cursor.data)
            cursor = cursor.next
        return items
if __name__ == "__main__":
    # Demo: build [0..4], exercise insertion, lookup and removal.
    linked_list = LinkedList()
    for value in range(5):
        linked_list.add_last(value)
    print(f"Linked list: {linked_list.to_array()}")
    # insert a node at the front
    linked_list.add_first(55)
    print(f"Linked list: {linked_list.to_array()}")
    # insert a node after the node holding value 3
    linked_list.add(3, 21)
    print(f"Linked list: {linked_list.to_array()}")
    print("-----------------------------")
    # this example was used to show both get_first() and get() methods
    linked_list.remove(55)
    if linked_list.get_first() == linked_list.get(0):
        print("Item 0 is the head of linked list")
    print(f"The length of linked list: {linked_list.size()}")
    print("-----------------------------")
    # drop the tail, then inspect the new tail
    linked_list.remove(linked_list.get_last().data)
    print(f"The data of last node is {linked_list.get_last().data}")
    print(f"The length of linked list: {linked_list.size()}")
    print("-----------------------------")
    # drain the list from the front, then show removal from an empty list fails
    while linked_list.size() != 0:
        linked_list.remove(linked_list.get_first().data)
    if not linked_list.remove(0):
        print("Cannot be removed any item because of empty linked list!")
| hazalonler/data-structure-implementation | src/linked_list_impl.py | linked_list_impl.py | py | 3,202 | python | en | code | 1 | github-code | 6 |
71913648828 | import numpy as np
import f_info
from f_info import*
def refine_cutting_plane(k, current_agent, i, dim, Na):
    """Build agent *i*'s refined cutting-plane data at iteration *k*.

    current_agent holds stacked per-agent memories: 'x_memory' (queries),
    'g_memory' (subgradients) and 'f_memory' (function values); *dim* is the
    per-agent variable dimension and *Na* the number of agents.

    Returns (tilde_gjm_i, tilde_fjm_i, xim): refined subgradients, refined
    function values, and the stacked query points.

    NOTE(review): this function appears to assume x/g are np.matrix objects
    (so `*` and slicing keep 2-D shapes); with plain ndarrays several of the
    assignments below would fail — confirm against the caller.
    """
    m = k-1
    x = current_agent['x_memory']
    g = current_agent['g_memory']
    f = current_agent['f_memory']
    # Agent i's most recent query point.
    current_query = x[i*dim:(i+1)*dim, m-1]
    tilde_gjm_i = np.empty(shape=(0,0))
    tilde_fjm_i = np.empty(shape=(0,0))
    if k>2:
        tilde_fjm_i = np.zeros((Na, m))
        tilde_gjm_i = np.zeros((Na,(k-1)*dim))
        t = 0
        xim = np.empty(shape=(0,0))
        # NOTE(review): the loop runs t = 0..k-1 but tilde_fjm_i has only
        # m = k-1 columns, so t = k-1 indexes out of bounds — confirm whether
        # the bound should be `t < m`.
        while t < k:
            xit = x[i*dim:(i+1)*dim, t]
            xim = np.vstack((xim, xit))
            for j in range(Na):
                # NOTE(review): rows are sliced with i (not j) inside the
                # j-loop; f is offset by j+1 — both look suspicious, confirm.
                xjm = x[i*dim:(i+1)*dim, 0:k-1]
                gjm = g[i*dim:(i+1)*dim, 0:k-1]
                fjm = f[j+1, 0:m]
                if j != i:
                    # Evaluate agent j's cutting planes at xit and keep the max.
                    x_tem = xit - xjm
                    m = k - 1
                    sum_sol = np.zeros((1, m))
                    for p in range(m):
                        sum_sol[0, p] = gjm[:, p].T * x_tem[:, p]
                    f_tem = fjm + sum_sol
                    tilde_fjm_i_elem = np.max(f_tem)
                    f_idx = np.unravel_index(np.argmax(f_tem), f_tem.shape)
                    tilde_fjm_i[j, t] = tilde_fjm_i_elem
                    # Subgradient of the active (maximising) plane.
                    tilde_gjm_i_elem = gjm[:, f_idx]
                    tilde_gjm_i[j, t*dim:(t+1)*dim] = tilde_gjm_i_elem.T
                else:
                    # Agent i's own plane is used as-is.
                    tilde_fjm_i[j, t] = fjm[0, t]
                    tilde_gjm_i[j, t*dim:(t+1)*dim] = gjm[:, t].T
            t += 1
    else:
        # First iterations: linearise every agent's function at current_query.
        for j in range(Na):
            xjm = x[j*dim:(j+1)*dim, 0]
            gjm = g[j*dim:(j+1)*dim, 0]
            tilde_gjm_i = np.vstack((tilde_gjm_i, gjm.T))
            fjm = gjm.T*(current_query-xjm)+f[j, 0]
            tilde_fjm_i = np.vstack((tilde_fjm_i, fjm))
        xim = current_query
    return tilde_gjm_i, tilde_fjm_i, xim
71432253948 | import click
# Top-level click command group for the jaqsmds CLI.
group = click.Group("jaqsmds")


@group.command(help="Run auth server for jaqs.data.DataApi client.")
@click.argument("variables", nargs=-1)
@click.option("-a", "--auth", is_flag=True, default=False)
def server(variables, auth):
    """Start the data service; VARIABLES are KEY=VALUE environment overrides."""
    # Imported lazily so the CLI loads fast and avoids server-side deps.
    from jaqsmds.server.server import start_service
    env = {}
    # Keep only well-formed KEY=VALUE pairs; anything else is ignored.
    for item in variables:
        r = item.split("=")
        if len(r) == 2:
            env[r[0]] = r[1]
    start_service(auth, **env)
def catch_db(string):
    """Parse 'k1=v1&k2=v2' (all spaces ignored) into a dict; falsy input -> {}."""
    if not string:
        return {}
    compact = string.replace(" ", "")
    return {key: value for key, value in (pair.split("=") for pair in compact.split("&"))}
if __name__ == '__main__':
    # Allow running the CLI directly, e.g. `python entry_point.py server ...`.
    group()
3400836706 | # -*- coding: utf-8 -*-
from flask import Flask
from pydoc import locate
class ConstructApp(object):
    """Wrapper that builds a Flask app, applies settings and loads extensions."""

    def __init__(self, settings=None):
        self.extensions = {}
        self.web_app = self.init_web_app()

    def __call__(self, settings, force_init_web_app=False):
        """(Re)configure the app; optionally rebuild the Flask instance first."""
        if force_init_web_app is True:
            self.web_app = self.init_web_app()
        self.set_settings(settings)

    @staticmethod
    def init_web_app():
        # Create the Flask application instance.
        return Flask(__name__, static_url_path='/static',
                     static_folder='static')

    def set_settings(self, settings):
        # Ignore trailing slashes at the end of URLs.
        self.web_app.url_map.strict_slashes = settings.TRAILING_SLASH
        # Pass the remaining settings through to the Flask config.
        self.web_app.config.from_object(settings)

    def init_extensions(self):
        """Instantiate every extension listed in config APP_EXTENSIONS.

        Each entry is a dotted path to a factory taking this ConstructApp and
        exposing `.name`, `.extension` and `.configurate_extension()`.
        """
        extensions = self.web_app.config['APP_EXTENSIONS']
        if not isinstance(extensions, tuple):
            raise TypeError('The extensions must be a tuple')
        for path in extensions:
            ex = locate(path)(self)
            if ex.extension is NotImplemented:
                raise NotImplementedError('The extension is not implemented')
            else:
                # NOTE(review): the duplicate check probes self.web_app, but
                # the attribute is set on self — confirm which was intended.
                if hasattr(self.web_app, ex.name):
                    raise AttributeError(f'The base application already has extension "{ex.name}"')
                setattr(self, ex.name, ex.extension)
                self.extensions[ex.name] = ex
                ex.configurate_extension()


# Module-level singleton used throughout the project.
APP = ConstructApp()
| tigal/mooc | application.py | application.py | py | 1,660 | python | en | code | 0 | github-code | 6 |
39691118341 | #!/usr/bin/env python
import sys
from xml.etree import ElementTree
def run(files):
    """Merge the XML files in *files* into the first file's root and print it.

    Children of every subsequent root are appended to the first root; with an
    empty *files* nothing is printed.
    """
    merged = None
    for path in files:
        root = ElementTree.parse(path).getroot()
        if merged is None:
            merged = root
        else:
            merged.extend(root)
    if merged is not None:
        print(ElementTree.tostring(merged).decode('utf-8'))
if __name__ == "__main__":
    # Merge all XML files given on the command line and print the result.
    run(sys.argv[1:])
| cheqd/cheqd-node | .github/scripts/xml_combine.py | xml_combine.py | py | 412 | python | en | code | 61 | github-code | 6 |
12731825315 | # series into dataframes
import pandas as pd
import numpy as np

# Series from a numpy range: default integer index 0..3.
s = pd.Series(np.arange(4))
print(s)
# Series with an explicit string index.
s= pd.Series([1.0,2.0,3.0],index=['x','y','z'])
print(s)
# Series from a dict: keys become the index.
s= pd.Series({'a':1,'b':2,'c':3,'d':4})
print(s)
# numpy ufuncs apply element-wise and keep the index.
s=pd.Series([1,2,3,4],['t','x','y','z'])
print(np.sqrt(s))
# concat 2 named series column-wise into a DataFrame
names=pd.Series(['Einstein','Marie Curie'],name='name')
categ=pd.Series(['Physics','Chemistry'],name='category')
df = pd.concat([names,categ],axis=1)
print(df.head())
# pd.Panel was removed from pandas; kept commented out for reference.
df1= pd.DataFrame({'foo':[1,2,3],'bar':['x','y','z']})
df2= pd.DataFrame({'dat':[7,9,8,2],'shi':['i','j','k','l']})
#pn = pd.Panel({'item1':df1, 'item2':df2})
#print(pn)
''' The following error outputs:
pandas_test.py:22: FutureWarning: The Panel class is removed from pandas.
Accessing it from the top-level namespace will also be removed in the next
version.
pn = pd.Panel({'item1':df1, 'item2':df2})
Traceback (most recent call last):
File "pandas_test.py", line 22, in <module>
pn = pd.Panel({'item1':df1, 'item2':df2})
TypeError: Panel() takes no arguments
'''
| ndlopez/learn_python | source/pandas_test.py | pandas_test.py | py | 1,058 | python | en | code | 0 | github-code | 6 |
20856359623 | """
Training code for harmonic Residual Networks.
Licensed under the BSD License [see LICENSE for details].
Written by Matej Ulicny, based on pytorch example code:
https://github.com/pytorch/examples/tree/master/imagenet
"""
import argparse
import os
import random
import shutil
import time
import warnings
import csv
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
# All lowercase, callable model constructors exposed by the local `models`
# package (the choices offered for --arch).
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

# Command-line interface: standard ImageNet-training options plus the
# harmonic-network-specific flags (--harm_root, --harm_res_blocks, --pool,
# --levels).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--harm_root', action='store_true',
                    help='whether to use harmonic block instead of root conv layer.')
parser.add_argument('--harm_res_blocks', action='store_true',
                    help='whether to use harmonic blocks instead of residual blocks.')
parser.add_argument('--pool', default='', type=str,
                    help="pooling type after the first layer: 'avg' or 'max', if none"
                         " specified increased stride is used instead of pooling.")
parser.add_argument('--levels', default=[None, None, None, None], nargs='+',
                    help="a list of lambda values used to compress harmonic blocks"
                         " specified for each of the 4 sets of blocks")

# Best top-1 validation accuracy seen so far (updated during training).
best_acc1 = 0
def main():
    """Parse CLI arguments, warn about reproducibility/GPU caveats and start
    the (single-node) training worker."""
    args = parser.parse_args()

    if args.seed is not None:
        # Seed both Python and torch RNGs and force deterministic cuDNN.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    ngpus_per_node = torch.cuda.device_count()
    main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Build the model/optimizer/data loaders and run the train/eval loop.

    Args:
        gpu: GPU id to pin to, or None for DataParallel over all GPUs.
        ngpus_per_node: number of visible GPUs (currently unused here).
        args: parsed CLI namespace; mutated in place (args.gpu, args.levels).
    """
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    # converting level values to int or None (nargs='+' delivers strings)
    for i in range(len(args.levels)):
        if not args.levels[i] is None:
            try:
                args.levels[i] = int(args.levels[i])
            except ValueError:
                args.levels[i] = None

    # create model
    print("=> creating model '{}'".format(args.arch))
    model = models.__dict__[args.arch](pretrained=args.pretrained, harm_root=args.harm_root, harm_res_blocks=args.harm_res_blocks, pool=args.pool, levels=args.levels)

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                best_acc1 = best_acc1.to(args.gpu)
            # Partial restore: only parameters whose names match the current
            # model are loaded (tolerates architecture tweaks).
            state_dict = model.state_dict()
            #loaded_dict = {'module.'+k: v for k, v in checkpoint['state_dict'].items() if 'module.'+k in state_dict}
            loaded_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in state_dict}
            state_dict.update(loaded_dict)
            model.load_state_dict(state_dict)
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code: standard ImageNet folder layout with train/ and val/
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        # Evaluation-only mode: no training loop, no checkpointing.
        validate(val_loader, model, criterion, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc1,
            'optimizer' : optimizer.state_dict(),
        }, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch and append top-1/top-5 averages to a CSV log."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

        # compute output
        output = model(input)
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
    # Persist epoch-level averages for offline plotting.
    save_score([top1.avg.cpu().data.numpy(), top5.avg.cpu().data.numpy()], args.arch+'_train.csv')
def validate(val_loader, model, criterion, args):
    """Evaluate on the validation set; returns the average top-1 accuracy."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    # no_grad: inference only, no autograd bookkeeping
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))

        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    # Persist validation averages for offline plotting.
    save_score([top1.avg.cpu().data.numpy(), top5.avg.cpu().data.numpy()], args.arch+'_val.csv')
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; mirror it to model_best.pth.tar on a new best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
def save_score(score, filename):
    """Append *score* (a list of metric values) as one CSV row to *filename*.

    The file is opened with newline='' as required by the csv module;
    without it the writer emits an extra blank line per row on Windows.
    """
    with open(filename, 'a', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(score)
class AverageMeter(object):
    """Tracks the most recent value plus a running sum/count and their mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    decayed = args.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, classes) logits or probabilities.
        target: (batch,) integer class labels.
        topk: tuple of k values; returns one percentage tensor per k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # reshape (not view): correct[:k] is non-contiguous for k > 1,
            # which makes .view() raise on recent PyTorch versions.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
if __name__ == '__main__':
    # Entry point: parse CLI arguments and start training/evaluation.
    main()
| matej-ulicny/harmonic-networks | imagenet/main.py | main.py | py | 13,809 | python | en | code | 55 | github-code | 6 |
5593654816 | import telegram
import google
import logging
import base64
import io
from requests_html import HTMLSession
from google.cloud import firestore
from bs4 import BeautifulSoup
from PIL import Image
from time import sleep
from Spider import get_all_course
from UESTC_Login import _login, get_captcha
def __Bot_token():
    """Read the Telegram bot token from token.txt and return it as a string."""
    # 'with' guarantees the handle is closed even if read() raises,
    # unlike the previous open()/close() sequence.
    with open("token.txt", "r") as f:
        return f.read()
def course_print(mycourse, update):  # build a readable per-course message and send it to the user
    """Format each course (name, teacher, room, weeks, weekday, class slots)
    and send it to the chat that triggered *update*.

    NOTE(review): assumes each course is (info, time) where info[3] is a
    21-element week bitmap and time is a list of (weekday, slot) pairs —
    confirm against Spider.get_all_course.
    """
    chat_id = update.message.chat_id
    # Weekday index -> English name ("Wednsday"/"Thirsday" are misspelled in
    # the user-facing output; fix separately, not in this doc pass).
    week={
        "0":"Monday",
        "1":"Tuesday",
        "2":"Wednsday",
        "3":"Thirsday",
        "4":"Friday",
        "5":"Saturday",
        "6":"Sunday",
    }
    for course in mycourse:
        info = course[0]
        time = course[1]
        out = "{}\n{} {}\nweek:".format(info[0],info[1],info[2])
        i = 0
        # Append every week number whose bit is set in the week bitmap.
        while(i <= 20):
            if(info[3][i] == 1):
                out = out + " {}".format(i)
            i += 1
        out = out + '\n' + week[str(time[0][0])] + " "
        out = out + "class no."
        for classes in time:
            out = out + " {}".format(classes[1]+1)
        #print(out)
        _bot_send_text(chat_id, out)
    """
    print(info)
    #out = out + str(info) + "\n"
    out = out + str(info[0]) + '\n' + str(info[1]) + str(info[2]) +'\n' + str(info[3]) + "\n"
    for t in time:
        out = out + str(t) + "\n"
    out = out + "\n"
    """
    _bot_send_text(chat_id, "Demo version. To be continued....")
    #bot.send_message(chat_id=update.message.chat_id, text=out)
def _bot_send_text(chat_id, text):
    """Send *text* to Telegram chat *chat_id* via the module-level bot."""
    global bot
    bot.send_message(chat_id=chat_id, text=text)
def _firestore_update(chat_id, dict):
    """Merge *dict* into this chat's document in the uestc_calendar_bot collection."""
    global db
    document = db.collection(u'uestc_calendar_bot').document(str(chat_id))
    document.set(dict, merge=True)
def _firestore_read(chat_id):
    """Return this chat's stored Firestore document as a plain dict."""
    global db
    snapshot = db.collection(u'uestc_calendar_bot').document(str(chat_id)).get()
    return snapshot.to_dict()
def _Process_Start(update):
    # show the welcome screen and reset this chat's login state
    """Reset the conversation (status=0) and print the welcome/help text."""
    chat_id = update.message.chat_id
    dicts = {
        'user_id': chat_id,
        'status': 0
    }
    _firestore_update(chat_id, dicts)
    _bot_send_text(chat_id,
                   text=""" Welcome to YouESTC alarm clock!
    This bot is used to query your timetable and alarm you before class.
    Commands:
    /login : to login into uestc""")
def _Process_Login(update):
    """Begin the login flow: mark status=1 and ask for the student number."""
    chat_id = update.message.chat_id
    _firestore_update(chat_id, {'status': 1})
    _bot_send_text(chat_id, "please input your UESTC student number:")
def _Process_Account(update):
    """Store the student number just typed (login step 1) and ask for the password."""
    chat_id = update.message.chat_id
    _firestore_update(chat_id, {
        'status': 2,
        'account': update.message.text,
    })
    _bot_send_text(chat_id, "please input your password:")
def _Process_Password(update):
    # handle the password the user just typed (login step 2)
    """Persist the (base64-obfuscated) password, fetch the captcha image from
    the UESTC portal and send it to the user; advances status to 3.
    """
    chat_id = update.message.chat_id
    doc = _firestore_read(chat_id)
    account = doc['account']
    passwd = update.message.text
    # base64 here is obfuscation, not encryption — the password is recoverable.
    dicts = {'passwd': base64.b64encode(passwd.encode('utf-8'))}
    _firestore_update(chat_id, dicts)
    bot.send_message(chat_id=update.message.chat_id, text="please input your captcha below:")
    bot.send_message(chat_id=update.message.chat_id, text="Pulling captcha photo...")
    form, img, new_session = get_captcha(account, passwd) # request the captcha image
    #f = open("captcha.png", "wb")
    #f.write(img)
    #f.close()
    # NOTE(review): encode-then-decode is a round trip back to the same bytes;
    # it also assumes get_captcha returns the image as a str — confirm, since
    # .encode() would raise on a bytes payload.
    img_b64encode = base64.b64encode(img.encode('utf-8')) # base64 encode
    img_b64decode = base64.b64decode(img_b64encode) # base64 decode
    image = io.BytesIO(img_b64decode)
    #f = open("captcha.png", "rb")
    bot.send_photo(chat_id=chat_id, photo=image)
    # send the captcha picture to the user
    dicts = {
        'form': form,
        'cookies': new_session.cookies.get_dict(),
        'status': 3
    }
    _firestore_update(chat_id, dicts)
def _Process_Captcha(update):
    # handle the captcha the user just typed (login step 3)
    """Attempt the portal login with the stored form/cookies and this captcha;
    on success pull and print the timetable, then reset status to 0.
    """
    chat_id = update.message.chat_id
    _bot_send_text(chat_id, "Attempting to login...")
    doc = _firestore_read(chat_id)
    cookies = doc['cookies']
    form = doc['form']
    captcha = update.message.text
    # _login result codes: 0 success, 1 bad password, 2 bad captcha,
    # anything else -> bad student number (per the branches below).
    new_session, res = _login(form, captcha, cookies)
    if(res == 0):
        _bot_send_text(chat_id, "Login success! Pulling data...")
        mycourse = get_all_course(new_session)
        course_print(mycourse, update)
    elif(res == 1):
        _bot_send_text(chat_id, "Password wrong!")
    elif(res == 2):
        _bot_send_text(chat_id, "Captcha wrong!")
    else:
        _bot_send_text(chat_id, "Student number wrong!")
    # Whatever the outcome, drop back to the start state.
    dicts = {'status': 0}
    _firestore_update(chat_id, dicts)
def Text_Process(update):
    """Route a plain-text message according to the login status stored for this chat."""
    doc_ref = db.collection(u'uestc_calendar_bot').document(str(update.message.chat_id))
    try:
        # No prior record for this chat -> jump straight to the start menu.
        doc = doc_ref.get().to_dict()
    except google.cloud.exceptions.NotFound:
        _Process_Start(update)
        return
    handlers = {
        0: _Process_Start,
        1: _Process_Account,
        2: _Process_Password,
        3: _Process_Captcha,
    }
    handler = handlers.get(doc['status'])
    if handler is not None:
        handler(update)
    _bot_send_text(update.message.chat_id, "收到啦!")
def Command_Process(update):  # dispatch slash-commands
    """Dispatch a known /command; unknown commands show the start menu and
    anything else is treated as free-form text."""
    command = update.message.text
    command_list = {
        '/start': _Process_Start,
        '/login': _Process_Login,
    }
    if command in command_list:
        command_list[command](update)
    elif command[0] == '/':
        # Unrecognised command -> show the start menu.
        _Process_Start(update)
    else:
        # Not a command at all -> hand off to the text state machine.
        Text_Process(update)
if(__name__ == "__main__"):
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',level=logging.INFO)
    # configure logging
    global bot
    global db
    # declare the shared objects every handler above uses
    # (module-level `global` statements are no-ops; kept for intent)
    token = __Bot_token()
    bot = telegram.Bot(token = token)#bot
    print(bot.get_me())
    # sign in to Telegram
    db = firestore.Client()
    # sign in to Google Firestore
    # Naive long-poll loop: process each update, then acknowledge it by
    # advancing the offset so it is not delivered again.
    while(1):
        updates = bot.get_updates()
        if(updates != []):
            for update in updates:
                Command_Process(update)
                bot.get_updates(limit = 1, offset = update.update_id+1)
                print(update.message.text, " ", update.message.chat_id)
        else:
            sleep(0.01)
#$env:GOOGLE_APPLICATION_CREDENTIALS="G:\github\telebot\key\My First Project-2035ff2d3024.json" | mrh929/uestc_calendar_bot | calendar/main.py | main.py | py | 6,925 | python | en | code | 0 | github-code | 6 |
72784467067 | # https://www.codewars.com/kata/630647be37f67000363dff04
def draw(deck):
    """Return the order in which cards leave *deck*.

    Rule: repeatedly deal the top card, then move the next card to the
    bottom.  The input list is consumed in place; the returned list holds
    every card in draw order.
    """
    # NOTE: the previous revision called an undefined print_deck() helper
    # (debug leftovers) which raised NameError at runtime; removed.
    drawn_cards = []
    while len(deck) > 1:
        drawn_cards.append(deck.pop(0))    # deal the top card
        if deck:
            deck.append(deck.pop(0))       # slide the next card underneath
    return drawn_cards + deck
| blzzua/codewars | 7-kyu/playing_cards_draw_order_–_part_1.py | playing_cards_draw_order_–_part_1.py | py | 357 | python | en | code | 0 | github-code | 6 |
37009441089 | # coding=utf-8
import pymysql
from com.petstore.dao.base_dao import BaseDao
"""订单明细管理DAO"""
class OrderDetailDao(BaseDao):
    """DAO for managing order-detail rows (orderdetails table)."""

    def __init__(self):
        super().__init__()

    def create(self, orderdetail):
        """Create an order-detail record and insert it into the database.

        Args:
            orderdetail: sequence of (orderid, productid, quantity, unitcost)
                bound to the parameterized INSERT below.
        """
        try:
            with self.conn.cursor() as cursor:
                # Parameterized SQL: values are bound by the driver, not
                # string-formatted, so no injection risk here.
                sql = 'insert into orderdetails (orderid, productid,quantity,unitcost) ' \
                      'values (%s,%s,%s,%s)'
                affectedcount = cursor.execute(sql, orderdetail)
                print('成功插入{0}条数据'.format(affectedcount))
                # commit the transaction
                self.conn.commit()
        except pymysql.DatabaseError as e:
            # roll back the transaction on any database error
            self.conn.rollback()
            print(e)
        finally:
            # Always release the connection (provided by BaseDao).
            self.close()
| wanglun0318/petStore | com/petstore/dao/order_detail_dao.py | order_detail_dao.py | py | 862 | python | en | code | 0 | github-code | 6 |
74309701307 | from turtle import Turtle
class Scoreboard(Turtle):
    """Heads-up display: current score plus a high score persisted on disk.

    The high score lives in highscore.txt (a single integer); the file must
    already exist when the scoreboard is created.
    """

    def __init__(self):
        super().__init__()
        self.score = 0
        self.color("white")
        self.penup()
        self.hideturtle()
        self.highscore = 0
        self.update_score()

    def update_score(self):
        """Redraw the current score and the high score read from disk."""
        self.goto(x=-50, y=320)
        self.write(f"Score = {self.score}", align="center",
                   font=("Arial", 10, "normal"))
        self.goto(x=+100, y=320)
        # Open read-only: this method never writes the file ("r+" previously
        # requested write access it did not need).
        with open("highscore.txt", "r") as file:
            myscore = file.read()
            self.highscore = int(myscore)
        self.write(f"High Score = {self.highscore}", align="center",
                   font=("Arial", 10, "normal"))

    def increasescore(self):
        """Add one point and refresh the display."""
        self.clear()
        self.score += 1
        self.update_score()

    def calchighscore(self):
        """Persist a new high score if this run beat it, then redraw."""
        if self.score > self.highscore:
            self.highscore = self.score
            with open("highscore.txt", "w") as file:
                file.write(f"{self.highscore}")
        self.clear()
        self.update_score()

    def gameOver(self):
        """Show the end-of-game banner in the centre of the screen."""
        self.goto(0, 0)
        self.write("Game Over", align="center",
                   font=("Arial", 20, "normal"))
| shuklaritvik06/PythonProjects | Day - 24/scoreboard.py | scoreboard.py | py | 1,202 | python | en | code | 0 | github-code | 6 |
9622430105 | #!/usr/bin/env python3
import base64
import c3, c5
from itertools import combinations
def beautify(candidates: list):
    """Return one printable 'Keysize ... Hamming Distance' line per candidate."""
    return ''.join(
        'Keysize: {}\tHamming Distance: {}\n'.format(cand['keysize'], cand['normalized_distance'])
        for cand in candidates
    )
def hamming_distance(str1: str, str2: str) -> int:
    '''
    Count of differing bits between two hex-encoded byte strings.
    returns (int) distance
    '''
    left = bytes.fromhex(str1)
    right = bytes.fromhex(str2)
    return sum(bin(a ^ b).count("1") for a, b in zip(left, right))
def generate_keysize_candidates(data: str) -> list:
    '''
    Score every keysize from 2 to 40 by the average pairwise Hamming
    distance of the first four ciphertext blocks of that size, normalized
    by the keysize (so longer keys are not penalized).

    Returns ALL candidates as dicts {'keysize', 'normalized_distance'},
    sorted ascending — best candidate first; callers slice the top few.
    '''
    distance_candidates = []
    ciphertext_bytes = base64.b64decode(data)
    for keysize in range(2, 41):
        # Only the first four blocks are compared (enough signal, cheap).
        blocks = [ciphertext_bytes[i:i+keysize] for i in range(0, len(ciphertext_bytes), keysize)][:4]
        distance = 0
        block_combinations = tuple(combinations(blocks, 2))
        for (a,b) in block_combinations:
            distance += hamming_distance(a.hex(), b.hex())
        distance /= len(block_combinations)
        normalized_distance = distance / keysize
        distance_candidate = {
            'keysize': keysize,
            'normalized_distance': normalized_distance,
        }
        distance_candidates.append(distance_candidate)
    return sorted(distance_candidates, key=lambda c: c['normalized_distance'])
def generate_blocks(filepath: str, keysize: int) -> list:
    '''
    Partitions the base64 encoded file into blocks of the given keysize.
    The final partial block, if any, is right-padded with NUL bytes to a
    full keysize.

    returns list of byte blocks (list)
    '''
    blocks = []
    with open(filepath, 'r') as file:
        b64_ciphertext = file.read()
    b64_ciphertext_bytes = bytes(b64_ciphertext, 'utf-8')
    ciphertext_bytes = base64.b64decode(b64_ciphertext_bytes)
    # Peel off full keysize-sized blocks from the front.
    while len(ciphertext_bytes) // keysize > 1:
        block = ciphertext_bytes[:keysize]
        ciphertext_bytes = ciphertext_bytes[keysize:]
        blocks.append(block)
    if len(ciphertext_bytes) > 0:
        # Zero-pad the leftover bytes up to a full block.
        padding = keysize - len(ciphertext_bytes)
        final_block = bytes(ciphertext_bytes[:] + (b'\x00'*padding))
        blocks.append(final_block)
    return blocks
def transpose_blocks(blocks: list, keysize: int) -> list:
    '''
    Regroup ciphertext blocks column-wise: output block i collects byte i
    of every input block (so each output block was XORed with the same
    single key byte).

    returns a list of bytearrays
    '''
    transposed = [bytearray() for _ in range(keysize)]
    for block in blocks:
        for position in range(keysize):
            transposed[position].append(block[position])
    return transposed
def solve_repeating_xor(filepath: str, keysize: int) -> str:
    '''
    This will generate the best candidate for the key used
    to encrypt the ciphertext using a repeating XOR: the ciphertext is cut
    into keysize blocks, transposed, and each column is solved as a
    single-byte XOR via c3.singlebyte_xor_solve.

    returns str (one recovered key character per column)
    '''
    ciphertext_blocks = generate_blocks(filepath, keysize)
    transposed_blocks = transpose_blocks(ciphertext_blocks, keysize)
    key = ''
    for b in transposed_blocks:
        candidates = c3.singlebyte_xor_solve(b.hex())
        key += chr(candidates['byte'])
    return key
if __name__ == "__main__":
from pathlib import Path
path = str(Path(__file__).parent.absolute())
file = open(path + '/' + 'c6_input.txt', 'r')
txt = file.read()
file.close()
candidates = generate_keysize_candidates(txt)[:1]
keys = []
for candidate in candidates:
tmp_key = solve_repeating_xor(path + '/c6_input.txt', candidate['keysize'])
keys.append(tmp_key)
for key in keys:
print('\033[35mUsing Key:\033[39m', key)
print('\033[35mKeysize:\033[39m', len(key))
print('\033[35mResulting XOR\'d text:\033[39m', c5.repeating_xor(base64.b64decode(txt).decode(), key).decode())
| oatovar/Cryptopals-Solutions | c06.py | c06.py | py | 4,159 | python | en | code | 0 | github-code | 6 |
def pairs(s):
    """Sum of pairwise distances between all '1' bits of bit string *s*.

    Single pass: when a '1' is found, its distance to every earlier '1' is
    the previous one's total plus (gap * number of earlier ones).
    """
    total = 0
    ones_seen = 0          # how many '1' bits encountered so far
    prev_contribution = 0  # distance sum from the previous '1' to all earlier ones
    gap = 0                # positions since the previous '1' (inclusive of current)
    for ch in s:
        gap += 1
        if ch == "1":
            contribution = gap * ones_seen + prev_contribution
            total += contribution
            prev_contribution = contribution
            ones_seen += 1
            gap = 0
    return total
if __name__ == "__main__":
print(pairs("100101")) # 10
print(pairs("101")) # 2
print(pairs("100100111001")) # 71
| Noppacase22/DSA-2022 | bitpairs.py | bitpairs.py | py | 431 | python | fi | code | 0 | github-code | 6 |
34348826764 | from datetime import datetime
from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
# Validator error messages shared by the model fields below.
YEAR_VALIDATION_ERROR = 'Нельзя добавить произведение из будущего'
SCORE_VALIDATION_ERROR = 'Оценка должна быть от 1 до 10'
class User(AbstractUser):
    """Custom user with a role (user/moderator/admin), a bio and a unique e-mail."""

    ADMIN = 'admin'
    MODERATOR = 'moderator'
    USER = 'user'
    ROLE = (
        (ADMIN, 'admin'),
        (MODERATOR, 'moderator'),
        (USER, 'user'),
    )
    bio = models.TextField(
        blank=True,
        verbose_name='О себе'
    )
    email = models.EmailField(
        unique=True,
        verbose_name='Адрес электронной почты'
    )
    role = models.CharField(
        max_length=15,
        choices=ROLE,
        default=USER,
    )

    class Meta:
        ordering = ('-pk',)
        verbose_name = 'пользователь'
        verbose_name_plural = 'пользователи'

    @property
    def is_moderator(self):
        # True only for the explicit moderator role.
        return self.role == self.MODERATOR

    @property
    def is_admin(self):
        # Admin role OR Django's staff/superuser flags all count as admin.
        return (self.role == self.ADMIN or self.is_staff
                or self.is_superuser)

    def __str__(self):
        return self.username
class Category(models.Model):
    """Category of a title (film, book, ...); *slug* is the API identifier."""

    name = models.CharField(
        max_length=200,
        verbose_name='Название категории',
    )
    slug = models.SlugField(
        unique=True,
        verbose_name='Уникальный идентификатор категории',
    )

    class Meta:
        ordering = ('-pk',)
        # Fixed user-facing typo: was 'категирия'.
        verbose_name = 'категория'
        verbose_name_plural = 'категории'

    def __str__(self):
        return self.name
class Genre(models.Model):
    """Genre of a title; *slug* is the API identifier."""

    name = models.CharField(
        max_length=200,
        verbose_name='Название жанра',
    )
    slug = models.SlugField(
        unique=True,
        verbose_name='Уникальный идентификатор жанра',
    )

    class Meta:
        ordering = ('-pk',)
        verbose_name = 'жанр'
        verbose_name_plural = 'жанры'

    def __str__(self):
        return self.name
class Title(models.Model):
    """A work (film/book/...) with a category and many genres via GenreTitle."""

    name = models.CharField(
        max_length=200,
        verbose_name='Название произведения',
    )
    year = models.PositiveSmallIntegerField(
        verbose_name='Год создания',
        # NOTE(review): the upper bound is evaluated once at import time, so a
        # long-running process won't see the year roll over — confirm acceptable.
        validators=[
            MaxValueValidator(
                datetime.now().year,
                message=YEAR_VALIDATION_ERROR
            )
        ]
    )
    category = models.ForeignKey(
        Category,
        # Keep titles when their category is deleted.
        on_delete=models.SET_NULL,
        related_name='titles',
        blank=True,
        null=True,
        verbose_name='Категория',
    )
    description = models.TextField(
        verbose_name='Описание',
        blank=True,
        null=True,
    )
    genre = models.ManyToManyField(
        Genre,
        through='GenreTitle',
        through_fields=['title', 'genre']
    )

    class Meta:
        ordering = ('-pk', 'name',)
        verbose_name = 'произведение'
        verbose_name_plural = 'произведения'

    def __str__(self):
        return (f'{self.name} '
                f'({self.category})')
class GenreTitle(models.Model):
    """Through table binding a Title to one of its Genres."""

    title = models.ForeignKey(
        Title,
        on_delete=models.CASCADE,
        verbose_name='Произведение',
    )
    genre = models.ForeignKey(
        Genre,
        on_delete=models.CASCADE,
        blank=True,
        null=True,
        verbose_name='Жанр',
    )

    class Meta:
        ordering = ('-pk',)
        verbose_name = 'Привязка жанров'
        verbose_name_plural = 'Привязки жанров'

    def __str__(self):
        return (f'({self.title}->{self.genre})')
class Review(models.Model):
    """User review of a title with a 1..10 score; one review per author+title."""

    title = models.ForeignKey(
        Title,
        blank=True,
        on_delete=models.CASCADE,
        null=False,
        related_name='reviews',
        verbose_name='Произведение'
    )
    text = models.TextField(
        verbose_name='Текст',
        help_text='Заполните поле.',
    )
    author = models.ForeignKey(
        User,
        blank=True,
        verbose_name='Автор',
        on_delete=models.CASCADE,
        null=False,
        related_name='reviews',
    )
    score = models.PositiveSmallIntegerField(
        help_text='Введите от 1 до 10',
        default=10,
        verbose_name='Оценка',
        validators=(MinValueValidator(1, message=SCORE_VALIDATION_ERROR),
                    MaxValueValidator(10, message=SCORE_VALIDATION_ERROR))
    )
    pub_date = models.DateTimeField(
        verbose_name='Дата публикации',
        auto_now_add=True,
        db_index=True,
    )

    def __str__(self):
        return self.text[:15]

    class Meta:
        ordering = ('-pub_date',)
        verbose_name_plural = 'Отзывы'
        # Each author may review a given title at most once.
        constraints = [
            models.UniqueConstraint(fields=['author', 'title'],
                                    name='title_review')
        ]
class Comment(models.Model):
    """Comment left by a user on a review."""

    text = models.TextField(
        null=False,
        verbose_name='Текст',
        help_text='Заполните поле.',
    )
    author = models.ForeignKey(
        User,
        blank=False,
        verbose_name='Автор',
        on_delete=models.CASCADE,
        null=False,
        related_name='comments',
    )
    pub_date = models.DateTimeField(
        verbose_name='Дата публикации',
        auto_now_add=True,
        db_index=True,
    )
    review = models.ForeignKey(
        Review,
        blank=False,
        verbose_name='Отзыв',
        on_delete=models.CASCADE,
        null=False,
        related_name='comments',
    )

    class Meta:
        ordering = ('-pub_date',)
        verbose_name_plural = 'Коментарии'
| RomanK74/api_yamdb | api/models.py | models.py | py | 6,054 | python | en | code | 0 | github-code | 6 |
34670378486 | import sys
from math import log
from copy import deepcopy
from typing import Dict, List
from lib.graph import Graph, read_input_csv
class TraceNode:
    """Node of a reconstructed attack trace: a node id plus chosen predecessors."""

    def __init__(self, id):
        self.id = id
        self.preds = []
def shortest_trace(graph, node):
    """
    Compute the shortest attack trace to the specified attack goal node.

    Recursive rules:
      * primitive fact -> depth 0, leaf trace node;
      * OR node  -> keep only the predecessor with the minimum depth;
      * AND node -> keep every predecessor, depth is the maximum of them.

    Args:
        graph (): a Graph object
        node (): a specified attack goal node
    Returns:
        a tuple of (the minimum depth, the actual trace)
    """
    res_node = TraceNode(node)
    if graph.nodes[node].node_type == 'primitive':
        return 0, res_node

    # If the current node is an OR node, then take the minimum of predecessors
    if graph.nodes[node].node_type == 'OR':
        pred_list = graph.nodes[node].preds
        (min_depth, min_pred_node) = shortest_trace(graph, pred_list[0])
        res_node.preds.append(min_pred_node)
        for i in range(1, len(pred_list)):
            (cur_depth, cur_pred_node) = shortest_trace(graph, pred_list[i])
            if cur_depth < min_depth:
                min_depth = cur_depth
                min_pred_node = cur_pred_node
                # Replace the previously chosen predecessor with the better one.
                res_node.preds.pop()
                res_node.preds.append(min_pred_node)
        return min_depth + 1, res_node

    # If the current node is an AND node, then take the maximum of predecessors
    if graph.nodes[node].node_type == 'AND':
        pred_list = graph.nodes[node].preds
        (max_depth, max_pred_node) = shortest_trace(graph, pred_list[0])
        res_node.preds.append(max_pred_node)
        for i in range(1, len(pred_list)):
            (cur_depth, cur_pred_node) = shortest_trace(graph, pred_list[i])
            # AND requires all predecessors, so every trace branch is kept.
            res_node.preds.append(cur_pred_node)
            if cur_depth > max_depth:
                max_depth = cur_depth
        return max_depth + 1, res_node
def blast_radius(graph):
    """
    Compute the blast radius of each vulnerability in the attack graph.

    Propagates per-node "vulnerability footprints" (which vulnerabilities a
    derivation depends on) forward through the graph with a work queue, then
    lets determine_br() pick the OR nodes reachable via exactly one
    vulnerability.

    Args:
        graph (): a Graph object representing the attack graph
    Returns:
        a dictionary from `vulnerability node id` to `the list of derivation nodes`
    """
    # Lightweight record for one vulnerability fact node.
    class Vul:
        def __init__(self, node_id, desc):
            self.node_id = node_id
            self.desc = desc

    vul_list: List[Vul] = []
    for node in graph.nodes:
        if graph.nodes[node].node_type == 'primitive' and 'vulExists(' in graph.nodes[node].desc:
            vul_list.append(Vul(node, graph.nodes[node].desc))

    queue = []
    node_vul_evidences: Dict[int, List[Dict[int, int]]] = {}
    # Initialize node_vul_evidences for all of the primitive fact nodes
    for node in graph.nodes:
        # Start each node with one all-zero footprint over every vulnerability.
        node_vul_evidences[node] = [dict(zip(range(len(vul_list)), [0] * len(vul_list)))]
        if graph.nodes[node].node_type == 'primitive' and 'vulExists(' in graph.nodes[node].desc:
            for i in range(len(vul_list)):
                if graph.nodes[node].desc == vul_list[i].desc:
                    node_vul_evidences[node][0][i] += 1
            for child in graph.nodes[node].succ:
                if child not in queue:
                    queue.append(child)

    # Iteratively update the `node_vul_evidences` for nodes in the `queue`
    while len(queue) != 0:
        cur_node = queue.pop(0)
        # Fold all predecessors' evidences with the AND/OR merge rules.
        cur_vul_evidence = deepcopy(node_vul_evidences[graph.nodes[cur_node].preds[0]])
        for i in range(1, len(graph.nodes[cur_node].preds)):
            if graph.nodes[cur_node].node_type == 'AND':
                cur_vul_evidence = merge_ve_and(cur_vul_evidence, node_vul_evidences[graph.nodes[cur_node].preds[i]], vul_list)
            elif graph.nodes[cur_node].node_type == 'OR':
                cur_vul_evidence = merge_ve_or(cur_vul_evidence, node_vul_evidences[graph.nodes[cur_node].preds[i]])
        node_vul_evidences[cur_node] = cur_vul_evidence
        for child in graph.nodes[cur_node].succ:
            if child not in queue:
                queue.append(child)

    return determine_br(graph, node_vul_evidences, vul_list)
def merge_ve_or(vul_evidence1, vul_evidence2):
    """
    Merge vulnerability evidences for two parent nodes of an OR child.

    The result starts from a deep copy of *vul_evidence1* and appends every
    footprint of *vul_evidence2* that is not already present in
    *vul_evidence1* (set-union semantics over footprints).
    """
    merged = deepcopy(vul_evidence1)
    merged.extend(fp for fp in vul_evidence2 if fp not in vul_evidence1)
    return merged
def merge_ve_and(vul_evidence1, vul_evidence2, vul_list):
    """
    Merge vulnerability evidences for two parent nodes of an AND child.

    Every pair of footprints (one per parent) is combined by taking the
    per-vulnerability maximum; duplicate combined footprints appear once.
    *vul_list* only supplies the number of vulnerabilities.
    """
    merged_evidence = []
    indices = range(len(vul_list))
    for fp1 in vul_evidence1:
        for fp2 in vul_evidence2:
            combined = {i: max(fp1[i], fp2[i]) for i in indices}
            if combined not in merged_evidence:
                merged_evidence.append(combined)
    return merged_evidence
def determine_br(graph, node_vul_evidences, vul_list):
    """
    Determine the blast radius for each vulnerability in the attack graph.

    An OR (derivation) node belongs to vulnerability V's blast radius when
    at least one of its footprints depends on exactly one vulnerability and
    that vulnerability is V.

    Args:
        node_vul_evidences (): vulnerability evidence for all of the nodes in the attack graph
            Example vul_evidences = {4: [{1: 0, 2: 1, 3: 0}, {1: 0, 2: 0, 3: 1}]} means node 4 has two
            vulnerability footprints, the first one being {1: 0, 2: 1, 3: 0} and the second one being {1: 0, 2: 0, 3: 1}
        vul_list (): the list of all of the vulnerabilities in the given attack graph
    Returns:
        blast radius for each vulnerability in the attack graph
    Example:
        >>> br = {3: [1, 5, 43, 49], 12: [], 17: [15], 21: [15], 26: [24, 36, 39, 41], 29: [], 32: [30, 33, 46, 51]}
        >>> br[3]
        [1, 5, 43, 49]
        means the blast radius of `vulnerability node 3` contains derivation node 1, 5, 43, 49
    """
    br = {}
    for i in range(len(vul_list)):
        br[vul_list[i].node_id] = []
    for node in node_vul_evidences:
        if graph.nodes[node].node_type == 'OR':
            for foot_print in node_vul_evidences[node]:
                # A footprint summing to 1 depends on exactly one vulnerability.
                vul_count = sum(foot_print.values())
                if vul_count == 1:
                    key = find_key_from_dict(foot_print)
                    br[vul_list[key].node_id].append(node)
    return br
def find_key_from_dict(d):
    """
    Return the first key of *d* whose value equals 1.

    Returns None when no value is 1 (relied upon by callers only when a
    value of 1 is known to exist).
    """
    for key, value in d.items():
        if value == 1:
            return key
    return None
| pmlab-ucd/IOTA | python/graph_analyzer.py | graph_analyzer.py | py | 7,605 | python | en | code | 1 | github-code | 6 |
72416891708 | import os
import subprocess
from abc import ABC, abstractmethod
from datatig.models.git_commit import GitCommitModel
class RepositoryAccess(ABC):
    """Abstract read-only interface over a repository's file tree."""

    @abstractmethod
    def list_files_in_directory(self, directory_name: str) -> list:
        # Concrete backends return dicts with 'name' and 'path_relative_to_dir'.
        return []

    @abstractmethod
    def get_contents_of_file(self, file_name: str) -> str:
        # file_name is relative to the repository root.
        return ""

    @abstractmethod
    def has_file(self, file_name: str) -> bool:
        # True when file_name (relative to the repository root) exists.
        return False
class RepositoryAccessLocalFiles(RepositoryAccess):
    """Repository access backed by a plain directory on disk (working tree)."""

    def __init__(self, source_dir: str):
        self._source_dir = source_dir

    def has_file(self, file_name: str) -> bool:
        """True if *file_name* (path relative to the repository root) exists."""
        root = os.path.abspath(self._source_dir)
        for dirpath, _dirnames, filenames in os.walk(self._source_dir):
            for entry in filenames:
                absolute = os.path.abspath(os.path.join(dirpath, entry))
                # Compare on the path relative to the repository root.
                if absolute[len(root) + 1:] == file_name:
                    return True
        return False

    def list_files_in_directory(self, directory_name: str) -> list:
        """List every file under *directory_name* with its relative path."""
        root = os.path.abspath(os.path.join(self._source_dir, directory_name))
        found = []
        for dirpath, _dirnames, filenames in os.walk(root):
            for entry in filenames:
                absolute = os.path.abspath(os.path.join(dirpath, entry))
                found.append({
                    "name": entry,
                    "path_relative_to_dir": absolute[len(root) + 1:],
                })
        return found

    def get_contents_of_file(self, file_name: str) -> str:
        """Return the full text of *file_name* relative to the repository root."""
        with open(os.path.join(self._source_dir, file_name)) as fp:
            return fp.read()
class RepositoryAccessLocalGit(RepositoryAccess):
    """Repository access backed by a local git checkout, reading a given ref
    (default HEAD) via the `git` CLI rather than the working tree.
    """

    def __init__(self, source_dir: str):
        self._source_dir = source_dir
        self._ref: str = "HEAD"

    def set_ref(self, ref: str) -> None:
        # Falsy ref falls back to HEAD.
        self._ref = ref if ref else "HEAD"

    def has_file(self, file_name: str) -> bool:
        """True if *file_name* exists in the tree at the configured ref."""
        process = subprocess.Popen(
            ["git", "ls-tree", "-r", self._ref],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self._source_dir,
        )
        stdout, stderr = process.communicate()
        for line in stdout.decode("utf-8").strip().split("\n"):
            # ls-tree lines are "<mode> <type> <hash>\t<path>".
            path_relative_to_repo = line.split("\t")[-1]
            if path_relative_to_repo == file_name:
                return True
        return False

    def list_files_in_directory(self, directory_name: str) -> list:
        """List files under *directory_name* at the configured ref.

        NOTE(review): the startswith() filter is a plain prefix match, so
        directory "foo" also matches "foobar/x" — confirm callers always pass
        unambiguous directory names.
        """
        out = []
        process = subprocess.Popen(
            ["git", "ls-tree", "-r", self._ref],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self._source_dir,
        )
        stdout, stderr = process.communicate()
        for line in stdout.decode("utf-8").strip().split("\n"):
            path_relative_to_repo = line.split("\t")[-1]
            if path_relative_to_repo.startswith(directory_name):
                out.append(
                    {
                        "name": os.path.basename(path_relative_to_repo),
                        "path_relative_to_dir": path_relative_to_repo[
                            len(directory_name) + 1 :
                        ],
                    }
                )
        return out

    def get_contents_of_file(self, file_name: str) -> str:
        """Return the file's contents at the configured ref via `git show`."""
        process = subprocess.Popen(
            ["git", "show", self._ref + ":" + file_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self._source_dir,
        )
        stdout, stderr = process.communicate()
        return stdout.decode("utf-8").strip()

    def get_current_commit(self) -> GitCommitModel:
        """Resolve the configured ref to a commit hash wrapped in GitCommitModel."""
        process = subprocess.Popen(
            ["git", "rev-parse", self._ref],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self._source_dir,
        )
        stdout, stderr = process.communicate()
        output = stdout.decode("utf-8").strip()
        # Only record the symbolic ref when it differs from the raw hash.
        refs = [self._ref] if self._ref != output else []
        return GitCommitModel(output, refs)

    def list_branches(self) -> list:
        """Return local branch names, stripping `git branch`'s '*' marker."""
        out = []
        process = subprocess.Popen(
            ["git", "branch"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self._source_dir,
        )
        stdout, stderr = process.communicate()
        for line in stdout.decode("utf-8").strip().split("\n"):
            line = line.strip()
            if line.startswith("* "):
                line = line[2:]
            if line:
                out.append(line)
        return out
| DataTig/DataTig | datatig/repository_access.py | repository_access.py | py | 4,800 | python | en | code | 4 | github-code | 6 |
import argparse
import atexit
import json
import subprocess
import textwrap
from dataclasses import dataclass, field

import yaml
@dataclass
class Config:
    """Run configuration: how many docker nodes to spawn and where the
    generated heiko YAML config is written.

    The previous hand-written ``__init__`` silently overrode the one
    generated by ``@dataclass``, and ``place_holder_commands`` was never
    declared as a field (so it was invisible to ``repr``/``eq``).
    Positional construction ``Config(n, c)`` is unchanged.
    """
    num_nodes: int
    config_path: str
    # Commands used for the generated placeholder job.
    place_holder_commands: list = field(
        default_factory=lambda: ["cd ~/Downloads", "touch sample.txt"])
def make_parser():
    """Build the command-line parser for heiko-docker-test."""
    description = textwrap.dedent(
        """
        heiko-docker-test allows you test your heiko config
        locally with just docker.\n
        It takes 2 arguments, number of nodes (containers) and
        config_path (the path where the config is written to).\n
        Using the provided args, it generates a config file which
        connects to the containers. The config can then be further
        modified to provide the required jobs to be run.\n
        After modifying the config, deploy heiko to test.
        """
    )
    arg_parser = argparse.ArgumentParser(
        prog="heiko-docker-test",
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Both options are mandatory; values are kept as strings.
    arg_parser.add_argument(
        "-n", "--number",
        help="Number of nodes",
        required=True,
        action="store",
        dest="number_of_nodes",
    )
    arg_parser.add_argument(
        "-c", "--config_path",
        help="Path where the config should be generated",
        required=True,
        action="store",
        dest="config",
    )
    return arg_parser
def genYAML(path, nodes, command):
    """Write the heiko config (nodes plus a single job) to *path* as YAML
    and echo the same YAML to stdout.

    Parameters
    ----------
    path: str
        Destination file for the generated YAML.
    nodes: list
        Node dicts (name/username/password/host).
    command: list
        Shell commands for the generated job.
    """
    config = {"nodes": nodes, "jobs": [{"name": "job_1", "commands": command}]}
    # Serialize once and reuse the text. The original opened the file
    # without a context manager (leaked on exception) and dumped twice.
    text = yaml.dump(config)
    with open(path, "w") as stream:
        stream.write(text)
    print(text)
# Parse CLI options and build the run configuration.
parser = make_parser()
args = parser.parse_args()
n = int(args.number_of_nodes)
c = args.config
config = Config(n, c)
name = "heiko_node"
nodes = list()
print("Starting containers ..........")
for i in range(config.num_nodes):
    nodes.append(dict())
    node_name = name + str(i)
    # spawn containers (detached; image "heiko-node" must exist locally)
    p = subprocess.Popen(
        ["docker", "run", "-it", "-d", "--name", node_name, "heiko-node", "/bin/bash"]
    )
    p.wait()
    # Credentials baked into the heiko-node image.
    nodes[i]["name"] = node_name
    nodes[i]["username"] = "root"
    nodes[i]["password"] = "yabe"
# gets networks
print("Network extraction")
# Inspect the default bridge network to resolve each container's IP.
out = subprocess.check_output(["docker", "network", "inspect", "bridge"])
network = json.loads(out)
for i in range(config.num_nodes):
    node_name = name + str(i)
    # Full (untruncated) container id is the key into the network's
    # Containers mapping.
    cid = subprocess.check_output(
        ["docker", "ps", "-a", "-q", "--no-trunc", "--filter", f"name={node_name}"]
    )
    # print(cid)
    cid = cid.decode().strip()
    # IPv4Address is CIDR-formatted ("172.17.0.2/16"); keep the address part.
    nodes[i]["host"] = network[0]["Containers"][cid]["IPv4Address"].split("/")[0]
print()
print("YAML CONFIG")
genYAML(config.config_path, nodes, config.place_holder_commands)
def cleanup():
    # Stop and remove every container we started; registered with atexit
    # so it also runs on Ctrl-C-free interpreter shutdown.
    print("Stopping containers .........")
    for i in range(config.num_nodes):
        node_name = name + str(i)
        p = subprocess.Popen(["docker", "stop", node_name])
        p.wait()
    print("Removing containeres ............")
    for i in range(config.num_nodes):
        node_name = name + str(i)
        p = subprocess.Popen(["docker", "rm", node_name])
        p.wait()
atexit.register(cleanup)
# Block until the user is done testing; cleanup() fires on exit.
input("Press enter to stop workers")
| pesos/heiko | docker-networks.py | docker-networks.py | py | 3,338 | python | en | code | 13 | github-code | 6 |
28666209791 | import unittest
from name_function import get_formatted_name2
class NameTestCase(unittest.TestCase):
    """Unit tests for name_function.py."""

    def test_first_last_name(self):
        """Do names like 'janis joplin' work?"""
        # get_formatted_name2 is expected to title-case and join the
        # two name parts, so lowercase input must come back capitalized.
        result = get_formatted_name2('janis', 'joplin')
        self.assertEqual(result, 'Janis Joplin')
if __name__ == '__main__':
unittest.main() | jerenteria/python_tests | test_name_function.py | test_name_function.py | py | 763 | python | en | code | 0 | github-code | 6 |
8263385619 | from pymongo import *
# Connect to a local MongoDB instance; use database "tmp", collection "stu2".
client = MongoClient("localhost", 27017)
db = client.tmp
collection = db.stu2
# Insert data
# collection.insert({"name":"f", "gender":"f", "age":25})
# Update operation
# collection.update({"name":"f"}, {"$set":{"name":"g"}})
# Delete data
# collection.delete_one({"name":"g"})
# Query data
# cursor = collection.find()
# for s in cursor:
#     print(s["name"])
cursor = collection.find({"age": {"$gt": 15}}).sort("age", -1).skip(1).limit(2)
for s in cursor:
print(s["name"], s["age"]) | luk0926/python | mongo/PyMongo.py | PyMongo.py | py | 517 | python | en | code | 0 | github-code | 6 |
74434743869 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from test_generator.models import *
# Create your views here.
def home(request):
    """Landing page: report whether the requester is logged in."""
    logged_in = request.user.is_authenticated
    status = ("You're currently logged in." if logged_in
              else "You're not currently logged in.")
    return render(request, "tester_services/home.html", {'status': status})
# may be deleted later (def testujeme)
def testujeme(request):
    # NOTE(review): inserts a Themes row on every call and returns None,
    # which is not a valid Django view response -- looks like scratch
    # code; confirm before keeping.
    thema = Themes.objects.create(theme_name="Biology")
@login_required
def my_tests(request):
    """Render the list of tests owned by the signed-in user."""
    owned = GTest.objects.filter(user_id=request.user.id)
    return render(request, 'tester_services/my_tests.html',
                  {'my_tests': owned})
| alenamedzova/final_project | tester_services/views.py | views.py | py | 767 | python | en | code | 0 | github-code | 6 |
3151231607 | import sys
import form
from PyQt4 import QtCore, QtGui
import letters
import pygame
class ConnectorToMainWindow(QtGui.QMainWindow):
    """Main window of a letter-learning app: shows a letter, plays its
    audio, and checks the typed answer against the expected letter."""
    def __init__(self, parent = None):
        # NOTE(review): `parent` is accepted but not forwarded to the
        # superclass constructor -- confirm whether that is intentional.
        super(ConnectorToMainWindow, self).__init__()
        self.expected_letter = ''
        self.timer = QtCore.QTimer()
        self.ui = form.Ui_MainWindow()
        self.ui.setupUi(self)
        self.my_letters = letters.Letters()
        # Old-style PyQt4 signal wiring for the two buttons.
        QtCore.QObject.connect(self.ui.restart_button, QtCore.SIGNAL(form._fromUtf8("clicked()")),
                               self.on_restartbutton_click)
        QtCore.QObject.connect(self.ui.audio_button, QtCore.SIGNAL(form._fromUtf8("clicked()")),
                               self.on_audiobutton_click)
        QtCore.QMetaObject.connectSlotsByName(self)
        # Start the first round immediately.
        self.on_restartbutton_click()
    def on_restartbutton_click(self) :
        """Reset letter pool and UI, then show a fresh letter."""
        self.my_letters.re_init()
        self.ui.answer_window.clear()
        self.ui.img_display.clear()
        self.generate_a_letter()
    def on_audiobutton_click(self) :
        """Replay the audio of the currently expected letter."""
        self.play_audio("./audio/letters/{}.mp3".format(self.expected_letter))
    def generate_a_letter(self) :
        """Pick a new letter, play its audio, and display it."""
        self.ui.img_display.clear()
        letter = self.my_letters.pick_a_letter()
        self.play_audio("./audio/letters/{}.mp3".format(letter))
        self.expected_letter = letter
        # Show upper- and lowercase forms side by side.
        self.ui.question_window.setText('{0} {1}'.format(letter.upper(), letter))
        self.ui.answer_window.setFocus()
    def verify_result(self) :
        """Compare the typed answer (case-insensitive) with the target."""
        if str(self.ui.answer_window.toPlainText()).lower() == self.expected_letter :
            self.display_image(QtGui.QPixmap("./imgs/happy.png"))
            self.ui.answer_window.clear()
            # Leave the happy image up for 2 s before the next letter.
            QtCore.QTimer.singleShot(2000, self.generate_a_letter)
        else :
            self.display_image(QtGui.QPixmap("./imgs/try_again.png"))
            self.ui.answer_window.clear()
            self.retry()
    def retry(self) :
        """Re-show and replay the same letter after a wrong answer."""
        self.ui.question_window.setText('{0} {1}'.format(self.expected_letter.upper(),
                                                         self.expected_letter))
        self.on_audiobutton_click()
        self.ui.answer_window.setFocus()
    def display_image(self, img) :
        self.ui.img_display.setPixmap(img)
        self.ui.img_display.show()
    def play_audio(self, path) :
        # Mixer is (re)initialized on every play; 8 kHz mono matches the
        # bundled letter recordings.
        pygame.mixer.init(frequency = 8000, channels = 1)
        pygame.mixer.music.load(path)
        pygame.mixer.music.play()
    def keyPressEvent(self, e):
        """Uppercase typed characters, handle backspace, submit on Enter."""
        self.ui.img_display.clear()
        if e.key() != QtCore.Qt.Key_Return and e.key() != QtCore.Qt.Key_Enter:
            if e.key() != QtCore.Qt.Key_Backspace:
                self.ui.answer_window.insertPlainText(str(e.text()).upper())
            else:
                # Manual backspace: drop the last character and re-insert.
                mstr = str(self.ui.answer_window.toPlainText())[:-1]
                self.ui.answer_window.clear()
                self.ui.answer_window.insertPlainText(mstr)
        else:
            self.verify_result()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
myapp = ConnectorToMainWindow()
myapp.show()
sys.exit(app.exec_()) | Tal-Levy/homeSchooling | first_steps.py | first_steps.py | py | 3,110 | python | en | code | 0 | github-code | 6 |
1005006973 | import socket
import json
class pyWave:
    """Minimal client for the ThinkGear Connector socket service.

    Holds the last attention value read in ``val`` so callers keep
    getting the previous reading when a packet cannot be parsed.
    """
    configStr = "{ 'enableRawOutput': 'enableRawOutput', 'format': 'Json'}"
    configByte = configStr.encode()
    val = 0

    def __init__(self, _host, _port):
        self.host = _host
        self.port = _port

    def connect(self):
        """Open a TCP connection, send the JSON config, return the socket."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        sock.send(self.configByte)
        return sock

    def readData(self, _client):
        """Read one packet and return the eSense attention value.

        On any error (bad JSON, missing 'eSense' key, short read) the
        previously stored value is returned unchanged.
        """
        try:
            packet = json.loads(_client.recv(1024))
            self.val = packet["eSense"]["attention"]
        except Exception:
            # Keep the last good reading.
            pass
        return self.val
# testing code: connect to a local ThinkGear Connector and print the
# attention value forever (Ctrl-C to stop).
if __name__ == '__main__':
    pywave = pyWave("localhost", 13854)
    client = pywave.connect()
    print("Waiting for data")
    while True:
        val = pywave.readData(client)
        print(val)
| kittom/Mind-Control-Car | BrainWaveReader/pywave.py | pywave.py | py | 1,713 | python | en | code | 0 | github-code | 6 |
38008765446 | """
# Analysis utilities
This script belongs to the following manuscript:
- Mathôt, Berberyan, Büchel, Ruuskanen, Vilotjević, & Kruijne (in prep.)
*Causal effects of pupil size on visual ERPs*
This module contains various constants and functions that are used in the main
analysis scripts.
"""
import random
import sys
import multiprocessing as mp
import mne; mne.set_log_level(False)
import eeg_eyetracking_parser as eet
from eeg_eyetracking_parser import braindecode_utils as bdu, \
_eeg_preprocessing as epp
import numpy as np
import time_series_test as tst
from datamatrix import DataMatrix, convert as cnv, operations as ops, \
functional as fnc, SeriesColumn, io, MultiDimensionalColumn
from mne.time_frequency import tfr_morlet
import matplotlib as mpl
from matplotlib import pyplot as plt
from scipy.stats import mode
import logging; logging.basicConfig(level=logging.INFO, force=True)
FIXATION_TRIGGER = 1
CUE_TRIGGER = 2
INTERVAL_TRIGGER = 3
TARGET_TRIGGER = 4
RESPONSE_TRIGGER = 5
N_CHANNELS = 26
# Occipital
LEFT_OCCIPITAL = 'O1',
RIGHT_OCCIPITAL = 'O2',
MIDLINE_OCCIPITAL = 'Oz',
# Parietal
LEFT_PARIETAL = 'P3', 'P7', 'CP1'
RIGHT_PARIETAL = 'P4', 'P8', 'CP2'
MIDLINE_PARIETAL = 'Pz', 'POz'
# Central
LEFT_CENTRAL = 'T7', 'C3'
RIGHT_CENTRAL = 'T8', 'C4'
MIDLINE_CENTRAL = 'Cz',
# Frontal
LEFT_FRONTAL = 'FC1', 'F3', 'F7', 'FP1'
RIGHT_FRONTAL = 'FC2', 'F4', 'F8', 'FP2'
MIDLINE_FRONTAL = 'Fz', 'FPz'
# Only CP1 and CP2, which were the best channels
LEFT_CP = 'CP1',
RIGHT_CP = 'CP2',
MIDLINE_CP = tuple()
# Pz, POz, Oz
LEFT_OPM = 'Pz',
RIGHT_OPM = 'POz',
MIDLINE_OPM = 'Oz',
# Select a channel group for further processing. The main analyses focus on the
# the parietal group.
CHANNEL_GROUPS = 'parietal', 'occipital', 'frontal', 'central', 'CP', \
'occipital-parietal-midline'
# Allow the channel group to be specified on the command line
for arg in sys.argv:
if arg in CHANNEL_GROUPS:
CHANNEL_GROUP = arg
break
else:
CHANNEL_GROUP = 'parietal'
if CHANNEL_GROUP == 'parietal':
LEFT_CHANNELS = LEFT_PARIETAL
RIGHT_CHANNELS = RIGHT_PARIETAL
MIDLINE_CHANNELS = MIDLINE_PARIETAL
elif CHANNEL_GROUP == 'occipital':
LEFT_CHANNELS = LEFT_OCCIPITAL
RIGHT_CHANNELS = RIGHT_OCCIPITAL
MIDLINE_CHANNELS = MIDLINE_OCCIPITAL
elif CHANNEL_GROUP == 'frontal':
LEFT_CHANNELS = LEFT_FRONTAL
RIGHT_CHANNELS = RIGHT_FRONTAL
MIDLINE_CHANNELS = MIDLINE_FRONTAL
elif CHANNEL_GROUP == 'central':
LEFT_CHANNELS = LEFT_CENTRAL
RIGHT_CHANNELS = RIGHT_CENTRAL
MIDLINE_CHANNELS = MIDLINE_CENTRAL
elif CHANNEL_GROUP == 'CP':
LEFT_CHANNELS = LEFT_CP
RIGHT_CHANNELS = RIGHT_CP
MIDLINE_CHANNELS = MIDLINE_CP
elif CHANNEL_GROUP == 'occipital-parietal-midline':
LEFT_CHANNELS = LEFT_OPM
RIGHT_CHANNELS = RIGHT_OPM
MIDLINE_CHANNELS = MIDLINE_OPM
else:
raise ValueError(f'Invalid channel group: {CHANNEL_GROUP}')
ALL_CHANNELS = LEFT_CHANNELS + RIGHT_CHANNELS + MIDLINE_CHANNELS
FACTORS = ['inducer', 'bin_pupil', 'intensity', 'valid']
LABELS = ['00:blue:0:100:no',
'01:blue:0:100:yes',
'02:blue:0:255:no',
'03:blue:0:255:yes',
'04:blue:1:100:no',
'05:blue:1:100:yes',
'06:blue:1:255:no',
'07:blue:1:255:yes',
'08:red:0:100:no',
'09:red:0:100:yes',
'10:red:0:255:no',
'11:red:0:255:yes',
'12:red:1:100:no',
'13:red:1:100:yes',
'14:red:1:255:no',
'15:red:1:255:yes']
ALPHA = .05
N_CONDITIONS = 16 # 4 factors with 2 levels each
FULL_FREQS = np.arange(4, 30, 1)
NOTCH_FREQS = np.exp(np.linspace(np.log(4), np.log(30), 15))
DELTA_FREQS = np.arange(.5, 4, .5)
THETA_FREQS = np.arange(4, 8, .5)
ALPHA_FREQS = np.arange(8, 12.5, .5)
BETA_FREQS = np.arange(13, 30, .5)
PERTURB_TIMES = [(-.1, .47),
(.18, .74)]
SUBJECTS = list(range(1, 34))
SUBJECTS.remove(7) # technical error
SUBJECTS.remove(5) # negative inducer effect
SUBJECTS.remove(18) # negative inducer effect
DATA_FOLDER = 'data'
EPOCHS_KWARGS = dict(tmin=-.1, tmax=.75, picks='eeg',
preload=True, reject_by_annotation=False,
baseline=None)
# Plotting colors
RED = 'red'
BLUE = 'blue'
FACTOR_COLORS = {
'inducer': '#B71C1C',
'bin_pupil': '#4A148C',
'intensity': '#263238',
'valid': '#1B5E20'
}
# TFR plotting parameters
Y_FREQS = np.array([0, 4, 9, 25])
VMIN = -.2
VMAX = .2
CMAP = 'coolwarm'
# Plotting style
plt.style.use('default')
mpl.rcParams['font.family'] = 'Roboto Condensed'
# DATA_CHECKPOINT = 'checkpoints/18012023.dm'
DATA_CHECKPOINT = f'checkpoints/19072023-{CHANNEL_GROUP}.dm'
def read_subject(subject_nr):
    """Load one participant via eeg_eyetracking_parser with the
    project-wide saccade-annotation settings.

    Parameters
    ----------
    subject_nr: int

    Returns
    -------
    tuple
        A (raw, events, metadata) tuple
    """
    return eet.read_subject(
        subject_nr=subject_nr,
        saccade_annotation='BADS_SACCADE',
        min_sacc_size=128)
def get_tgt_epoch(raw, events, metadata, channels=None, tmin=-.1, tmax=.5,
                  baseline=(None, 0)):
    """A simple wrapper function that uses eet.autoreject_epochs() to get
    an Epochs object around the target onset.

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame
    channels: list or None, optional
        A list of channel indices or None to select all channels
    tmin: float, optional
    tmax: float, optional
    baseline: tuple, optional
        Baseline window; (None, 0) baselines on the pre-target interval.

    Returns
    -------
    Epochs
    """
    # autoreject repairs/rejects bad epochs; n_jobs=8 parallelizes fitting.
    return eet.autoreject_epochs(
        raw, eet.epoch_trigger(events, TARGET_TRIGGER), tmin=tmin, tmax=tmax,
        metadata=metadata, picks=channels, baseline=baseline,
        ar_kwargs=dict(n_jobs=8))
def get_fix_epoch(raw, events, metadata, channels=None):
    """A simple wrapper function that uses eet.autoreject_epochs() to get
    an Epochs object around the fixation onset.

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame
    channels: list or None, optional
        A list of channel indices or None to select all channels

    Returns
    -------
    Epochs
    """
    # Long window (-0.5 to 2.5 s) around fixation; no baseline argument,
    # so the eet default applies.
    return eet.autoreject_epochs(
        raw, eet.epoch_trigger(events, FIXATION_TRIGGER), tmin=-.5, tmax=2.5,
        metadata=metadata, picks=channels, ar_kwargs=dict(n_jobs=8))
def get_morlet(epochs, freqs, crop=(0, 2), decim=8, n_cycles=2):
    """A simple wrapper function that uses tfr_morlet() to extract
    time-frequency data.

    Parameters
    ----------
    epochs: Epochs
    freqs: array
        An array of frequencies
    crop: tuple, optional
        A time window to crop after extracting the time-frequency data to
        reduce edge artifacts.
    decim: int, optional
        Downsampling factor to reduce memory consumption
    n_cycles: int, optional
        The number of cycles of the morlet wavelet

    Returns
    -------
    EpochsTFR
    """
    # average=False keeps single-trial power; picks covers all channels.
    morlet = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, n_jobs=-1,
                        return_itc=False, use_fft=True, average=False,
                        decim=decim,
                        picks=np.arange(len(epochs.info['ch_names'])))
    # Crop after the transform so wavelet edge artifacts fall outside.
    morlet.crop(*crop)
    return morlet
def z_by_freq(col):
    """Performs z-scoring across trials, channels, and time points but
    separately for each frequency.

    Parameters
    ----------
    col: MultiDimensionalColumn

    Returns
    -------
    MultiDimensionalColumn
    """
    # col[:] makes a copy so the input column is left untouched.
    zcol = col[:]
    # Axis 2 of the underlying array indexes frequency (per the contract
    # above); nan-aware mean/std skip rejected cells.
    # NOTE(review): relies on the private datamatrix attribute ``_seq``.
    for i in range(zcol.shape[2]):
        zcol._seq[:, :, i] = (
            (zcol._seq[:, :, i] - np.nanmean(zcol._seq[:, :, i]))
            / np.nanstd(zcol._seq[:, :, i])
        )
    return zcol
def subject_data(subject_nr):
    """Performs preprocessing for a single participant. This involves basic
    EEG preprocessing and subsequent epoching and extraction of TFR data. The
    result is a single DataMatrix that contains all information for final
    analysis.

    Parameters
    ----------
    subject_nr: int

    Returns
    -------
    DataMatrix
    """
    print(f'Processing subject {subject_nr}')
    raw, events, metadata = read_subject(subject_nr)
    # Convert pupil area (arbitrary units) to diameter in mm in place.
    raw['PupilSize'] = area_to_mm(raw['PupilSize'][0])
    dm = cnv.from_pandas(metadata)
    print('- eeg')
    tgt_epoch = get_tgt_epoch(raw, events, metadata)
    dm.tgt_erp = cnv.from_mne_epochs(tgt_epoch)
    # For TFR, epoch without baseline and with a longer window (tmax=1),
    # then crop to 0-.5 s to avoid wavelet edge artifacts.
    tgt_tfr = get_morlet(
        get_tgt_epoch(raw, events, metadata, baseline=None, tmax=1),
        FULL_FREQS, crop=(0, .5), decim=4)
    dm.tgt_tfr = cnv.from_mne_tfr(tgt_tfr)
    dm.tgt_tfr = z_by_freq(dm.tgt_tfr)
    fix_epoch = get_fix_epoch(raw, events, metadata)
    fix_tfr = get_morlet(fix_epoch, FULL_FREQS)
    dm.fix_erp = cnv.from_mne_epochs(fix_epoch)
    dm.fix_tfr = cnv.from_mne_tfr(fix_tfr)
    dm.fix_tfr = z_by_freq(dm.fix_tfr)
    print('- pupils')
    pupil_fix = eet.PupilEpochs(
        raw, eet.epoch_trigger(events, FIXATION_TRIGGER), tmin=0, tmax=2,
        metadata=metadata, baseline=None)
    pupil_target = eet.PupilEpochs(
        raw, eet.epoch_trigger(events, TARGET_TRIGGER), tmin=-.05, tmax=2,
        metadata=metadata)
    # Free the raw recording before building the remaining columns.
    del raw
    dm.pupil_fix = cnv.from_mne_epochs(pupil_fix, ch_avg=True)
    dm.pupil_target = cnv.from_mne_epochs(pupil_target, ch_avg=True)
    return dm
@fnc.memoize(persistent=True, key='merged-data')
def get_merged_data():
    """Merges data for all participants into a single DataMatrix. Uses
    multiprocessing for performance.

    Returns
    -------
    DataMatrix
    """
    # Persistently memoized: after the first run the merged result is
    # loaded from disk instead of being recomputed.
    return fnc.stack_multiprocess(subject_data, SUBJECTS, processes=10)
def add_bin_pupil(raw, events, metadata):
    """Adds bin pupil to the metadata. This is a patch to allow decoding to
    take bin pupil as a decoding factor into account.

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame

    Returns
    -------
    tuple
        A (raw, events, metadata) tuple where bin_pupil has been added as a
        column to metadata.
    """
    # This adds the bin_pupil pseudo-factor to the data. This requires that
    # this has been generated already by `analyze.py`.
    dm = io.readtxt('output/bin-pupil.csv')
    dm = dm.subject_nr == metadata.subject_nr[0]
    # Rows before index 16 are skipped -- presumably practice trials;
    # TODO confirm against the experiment script.
    metadata.loc[16:, 'bin_pupil'] = dm.bin_pupil
    # A balanced but random dummy factor (192 + 192 trials) used as a
    # chance-level control for decoding.
    dummy_factor = 192 * [0] + 192 * [1]
    random.shuffle(dummy_factor)
    metadata.loc[16:, 'dummy_factor'] = dummy_factor
    return raw, events, metadata
def decode_subject(subject_nr):
    """A wrapper function around bdu.decode_subject() that performs overall
    decoding for one subject.

    Parameters
    ----------
    subject_nr: int

    Returns
    -------
    DataMatrix
        See bdu.decode_subject()
    """
    read_subject_kwargs = dict(subject_nr=subject_nr,
                               saccade_annotation='BADS_SACCADE',
                               min_sacc_size=128)
    # patch_data_func injects the bin_pupil pseudo-factor so all four
    # FACTORS are available for decoding.
    return bdu.decode_subject(
        read_subject_kwargs=read_subject_kwargs, factors=FACTORS,
        epochs_kwargs=EPOCHS_KWARGS, trigger=TARGET_TRIGGER, window_stride=1,
        window_size=200, n_fold=4, epochs=4, patch_data_func=add_bin_pupil)
def crossdecode_subject(subject_nr, from_factor, to_factor):
    """A wrapper function around bdu.decode_subject() that performs
    cross-decoding for one subject: train on one factor, test on another.

    Parameters
    ----------
    subject_nr: int
    from_factor: str
        The factor to train on
    to_factor: str
        The factor to test on

    Returns
    -------
    DataMatrix
        See bdu.decode_subject()
    """
    read_subject_kwargs = dict(subject_nr=subject_nr,
                               saccade_annotation='BADS_SACCADE',
                               min_sacc_size=128)
    decode_kwargs = dict(
        read_subject_kwargs=read_subject_kwargs, factors=from_factor,
        crossdecode_factors=to_factor, epochs_kwargs=EPOCHS_KWARGS,
        trigger=TARGET_TRIGGER, window_stride=1, window_size=200, n_fold=4,
        epochs=4)
    # bin_pupil is a pseudo-factor that only exists after patching the
    # metadata, so the patch function is required whenever it is involved.
    # (The original duplicated the entire call for this one kwarg.)
    if 'bin_pupil' in (from_factor, to_factor):
        decode_kwargs['patch_data_func'] = add_bin_pupil
    return bdu.decode_subject(**decode_kwargs)
@fnc.memoize(persistent=True)
def blocked_decode_subject(subject_nr, factor, query1, query2):
    """Decodes a factor for a single subject, using two different queries to
    separate the training and testing data.

    Parameters
    ----------
    subject_nr: int
    factor: str
    query1: str
        A pandas-style query to select the training set
    query2: str
        A pandas-style query to select the testing set

    Returns
    -------
    float
        Decoding accuracy
    """
    read_subject_kwargs = dict(subject_nr=subject_nr,
                               saccade_annotation='BADS_SACCADE',
                               min_sacc_size=128)
    train_data, train_labels, train_metadata = bdu.read_decode_dataset(
        read_subject_kwargs, factor, EPOCHS_KWARGS, TARGET_TRIGGER, query1)
    test_data, test_labels, test_metadata = bdu.read_decode_dataset(
        read_subject_kwargs, factor, EPOCHS_KWARGS, TARGET_TRIGGER, query2)
    clf = bdu.train(train_data, test_data)
    y_pred = clf.predict(test_data)
    # Reshape window-level predictions to (trials, windows) and take a
    # per-trial majority vote over windows.
    resized_pred = y_pred.copy()
    resized_pred.resize(
        (len(test_data.datasets), len(test_data.datasets[0])))
    resized_pred = mode(resized_pred, axis=1)[0].flatten()
    # One true label per trial (all windows of a trial share a label).
    y_true = [d.y[0] for d in test_data.datasets]
    return np.mean([p == t for p, t in zip(resized_pred, y_true)])
def statsplot(rm):
    """A simple wrapper function that plots statistical values as a function of
    time and factors.

    Parameters
    ----------
    rm: DataMatrix
        A DataMatrix with statistical results as returned by time_series_test
        functions.
    """
    # Work on a copy so the caller's result matrix is not modified.
    rm = rm[:]
    rm.sign = SeriesColumn(depth=rm.p.depth)
    colors = ['red', 'green', 'blue', 'orange']
    # Skip the first row (presumably the intercept -- TODO confirm) and
    # draw each effect at its own y level.
    for y, row in enumerate(rm[1:]):
        # Stricter alpha levels are drawn with thicker lines, overplotting
        # the more lenient ones.
        for linewidth, alpha in [(1, .05), (2, .01), (4, .005), (8, .001)]:
            row.sign[row.p >= alpha] = np.nan
            row.sign[row.p < alpha] = y
            plt.plot(row.sign, '-', label=f'{row.effect}, p < {alpha}',
                     linewidth=linewidth, color=colors[y])
    plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
def select_ica(raw, events, metadata, exclude_component=0):
    """A helper function that excludes (rather than selects, as the name
    suggests) an independent component from the signal.

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame
    exclude_component: int, optional
        The index of the excluded independent component

    Returns
    -------
    tuple
        A (raw, events, metadata) tuple where the independent component has
        been excluded from the `raw` object.
    """
    # Channel weights of the excluded component are exposed through this
    # module-level global for ica_perturbation_decode() to pick up.
    global weights_dict
    raw, events, metadata = add_bin_pupil(raw, events, metadata)
    print(f'running ica to exclude component {exclude_component}')
    # Persistently memoized so the (slow) ICA fit runs once per subject.
    @fnc.memoize(persistent=True)
    def run_ica(raw):
        return epp.run_ica(raw)
    # run_ica.clear()
    raw.info['bads'] = []
    ica = run_ica(raw)
    print('applying ica')
    ica.apply(raw, exclude=[exclude_component])
    # Back-project the component onto channel space to get per-channel
    # loadings of the excluded component.
    weights = np.dot(ica.mixing_matrix_[:, exclude_component].T,
                     ica.pca_components_[:ica.n_components_])
    weights_dict = {ch_name: weight
                    for ch_name, weight in zip(ica.ch_names, weights)}
    print(f'weights: {weights_dict} (len={len(weights_dict)})')
    return raw, events, metadata
@fnc.memoize(persistent=True)
def ica_perturbation_decode(subject_nr, factor):
    """Performs the ICA perturbation analysis.

    Parameters
    ----------
    subject_nr: int
    factor: str

    Returns
    -------
    tuple
        The first element of the tuple is a DataMatrix with the regular
        decoding results. The second element is a dict with independent
        component indices as keys and (dm, weights_dict) tuples as values.
        Here, dm is the DataMatrix with the decoding results after excluding
        the independent component, and weights_dict is a mapping with channel
        names as keys and weights (loading of the channel of the independent
        component) as values.
    """
    read_subject_kwargs = dict(subject_nr=subject_nr,
                               saccade_annotation='BADS_SACCADE',
                               min_sacc_size=128)
    # Baseline: decoding on the full, unperturbed data.
    fdm = bdu.decode_subject(
        read_subject_kwargs=read_subject_kwargs, factors=factor,
        epochs_kwargs=EPOCHS_KWARGS, trigger=TARGET_TRIGGER, window_stride=1,
        window_size=200, n_fold=4, epochs=4, patch_data_func=add_bin_pupil)
    print(f'full-data accuracy: {fdm.braindecode_correct.mean}')
    perturbation_results = {}
    for exclude_component in range(N_CHANNELS):
        # Clear the memoization cache, otherwise the unperturbed result
        # would be returned again.
        bdu.decode_subject.clear()
        dm = bdu.decode_subject(
            read_subject_kwargs=read_subject_kwargs, factors=factor,
            epochs_kwargs=EPOCHS_KWARGS, trigger=TARGET_TRIGGER,
            window_stride=1, window_size=200, n_fold=4, epochs=4,
            patch_data_func=lambda raw, events, metadata: select_ica(
                raw, events, metadata, exclude_component))
        # weights_dict is set as a global side effect of select_ica().
        perturbation_results[exclude_component] = dm, weights_dict
        print(f'perturbation accuracy({exclude_component}): '
              f'{dm.braindecode_correct.mean}')
    return fdm, perturbation_results
def notch_filter(raw, events, metadata, freq):
    """A helper function that excludes a frequency from the signal using a
    notch filter.

    Parameters
    ----------
    raw: Raw
    events: tuple
    metadata: DataFrame
    freq: float
        The frequency to remove.

    Returns
    -------
    tuple
        A (raw, events, metadata) tuple where the frequency has been removed
        from the `raw` object using a notch filter.
    """
    # NOTE(review): this global is declared but never assigned here --
    # probably copied from select_ica(); confirm it can be removed.
    global weights_dict
    raw, events, metadata = add_bin_pupil(raw, events, metadata)
    # NOTE(review): np.exp(np.log(x)) is just x, so this is freq / 4 --
    # i.e. the notch width scales linearly with frequency.
    width = np.exp(np.log(freq / 4))
    print(f'notch-filtering frequency band: {freq:.2f} / {width:.2f}')
    raw.notch_filter(freq, notch_widths=width, trans_bandwidth=width)
    return raw, events, metadata
@fnc.memoize(persistent=True)
def freq_perturbation_decode(subject_nr, factor):
    """Performs the frequency perturbation analysis.

    Parameters
    ----------
    subject_nr: int
    factor: str

    Returns
    -------
    tuple
        The first element of the tuple is a DataMatrix with the regular
        decoding results. The second element is a dict with frequencies
        as keys and the DataMatrix objects with the decoding results after
        excluding the frequencies as values.
    """
    read_subject_kwargs = dict(subject_nr=subject_nr,
                               saccade_annotation='BADS_SACCADE',
                               min_sacc_size=128)
    # Baseline: decoding on the unfiltered data.
    fdm = bdu.decode_subject(
        read_subject_kwargs=read_subject_kwargs, factors=factor,
        epochs_kwargs=EPOCHS_KWARGS, trigger=TARGET_TRIGGER, window_stride=1,
        window_size=200, n_fold=4, epochs=4, patch_data_func=add_bin_pupil)
    print(f'full-data accuracy: {fdm.braindecode_correct.mean}')
    perturbation_results = {}
    for freq in NOTCH_FREQS:
        # Clear the memoization cache so each notch-filtered variant is
        # actually recomputed.
        bdu.decode_subject.clear()
        dm = bdu.decode_subject(
            read_subject_kwargs=read_subject_kwargs, factors=factor,
            epochs_kwargs=EPOCHS_KWARGS, trigger=TARGET_TRIGGER,
            window_stride=1, window_size=200, n_fold=4, epochs=4,
            patch_data_func=lambda raw, events, metadata: notch_filter(
                raw, events, metadata, freq))
        perturbation_results[freq] = dm
        print(f'perturbation accuracy({freq}): {dm.braindecode_correct.mean}')
    return fdm, perturbation_results
def area_to_mm(au):
    """Convert pupil area in arbitrary units to diameter in millimeters.

    The calibration coefficients are specific to the recording set-up.

    Parameters
    ----------
    au: float

    Returns
    -------
    float
    """
    diameter = 0.1275 * au ** .5 - 0.9904
    return diameter
def pupil_plot(dm, dv='pupil_target', **kwargs):
    """A simple wrapper function that plots pupil size over time.

    Parameters
    ----------
    dm: DataMatrix
    dv: str, optional
        Dependent variable: 'pupil_target' (baseline-corrected) or another
        pupil trace column such as 'pupil_fix'.
    **kwargs: dict, optional
        Passed on to tst.plot().
    """
    tst.plot(dm, dv=dv, legend_kwargs={'loc': 'lower left'},
             **kwargs)
    # Relabel sample indices as milliseconds -- assumes 250 samples map
    # onto 1000 ms (250 Hz); TODO confirm against the recording rate.
    x = np.linspace(12, 262, 6)
    t = [f'{int(s)}' for s in np.linspace(0, 1000, 6)]
    plt.xticks(x, t)
    plt.xlabel('Time (ms)')
    if dv == 'pupil_target':
        # Baseline-corrected traces: mark zero and use a tight y range.
        plt.axhline(0, linestyle=':', color='black')
        plt.ylim(-.6, .2)
    else:
        plt.ylim(2, 8)
    plt.xlim(0, 250)
    plt.ylabel('Baseline-corrected pupil size (mm)')
def erp_plot(dm, dv='lat_erp', ylim=None, **kwargs):
    """A simple wrapper function that plots ERPs.

    Parameters
    ----------
    dm: DataMatrix
    dv: str, optional
        Dependent variable (ERP trace column).
    ylim: float or None, optional
        Optional (min, max) y-axis limits.
    **kwargs: dict, optional
        Passed on to tst.plot().
    """
    tst.plot(dm, dv=dv, **kwargs)
    # Sample 25 corresponds to stimulus onset (0 ms); ticks every 100 ms.
    plt.xticks(np.arange(25, 150, 25), np.arange(0, 500, 100))
    plt.axvline(25, color='black', linestyle=':')
    plt.axhline(0, color='black', linestyle=':')
    plt.xlabel('Time (ms)')
    if ylim:
        plt.ylim(*ylim)
def tfr_plot(dm, dv):
    """A simple wrapper function that creates a four-panel TFR contrast plot
    (one panel per factor, each showing the difference between its levels).

    Parameters
    ----------
    dm: DataMatrix
    dv: str, optional
        Name of the TFR column to plot (e.g. 'tgt_tfr').
    """
    plt.figure(figsize=(12, 4))
    plt.subplots_adjust(wspace=0)
    plt.subplot(141)
    # NOTE(review): panels a/b average via `[...]` while c/d use `.mean`;
    # presumably equivalent in datamatrix -- confirm.
    tfr_red = (dm.inducer == 'red')[dv][...]
    tfr_blue = (dm.inducer == 'blue')[dv][...]
    plt.title('a) Induced Pupil Size (Large - Small)')
    plt.imshow(tfr_red - tfr_blue, aspect='auto', vmin=VMIN, vmax=VMAX,
               cmap=CMAP, interpolation='bicubic')
    plt.yticks(Y_FREQS, FULL_FREQS[Y_FREQS])
    plt.xticks(np.arange(0, 31, 6.25), np.arange(0, 499, 100))
    plt.xlabel('Time (ms)')
    plt.ylabel('Frequency (Hz)')
    plt.subplot(142)
    tfr_large = (dm.bin_pupil == 1)[dv][...]
    tfr_small = (dm.bin_pupil == 0)[dv][...]
    plt.title('b) Spontaneous Pupil Size (Large - Small)')
    plt.imshow(tfr_large - tfr_small, aspect='auto', vmin=VMIN, vmax=VMAX,
               cmap=CMAP, interpolation='bicubic')
    # Only the leftmost panel keeps its frequency axis.
    plt.gca().get_yaxis().set_visible(False)
    plt.xticks(np.arange(0, 31, 6.25), np.arange(0, 499, 100))
    plt.xlabel('Time (ms)')
    plt.subplot(143)
    tfr_bright = (dm.intensity == 255)[dv].mean
    tfr_dim = (dm.intensity == 100)[dv].mean
    plt.title('c) Stimulus Intensity (Bright - Dim)')
    plt.imshow(tfr_bright - tfr_dim, aspect='auto', vmin=VMIN, vmax=VMAX,
               cmap=CMAP, interpolation='bicubic')
    plt.gca().get_yaxis().set_visible(False)
    plt.xticks(np.arange(0, 31, 6.25), np.arange(0, 499, 100))
    plt.xlabel('Time (ms)')
    plt.subplot(144)
    tfr_attended = (dm.valid == 'yes')[dv].mean
    tfr_unattended = (dm.valid == 'no')[dv].mean
    plt.title('d) Covert Visual Attention (Attended - Unattended)')
    plt.imshow(tfr_attended - tfr_unattended, aspect='auto', vmin=VMIN,
               vmax=VMAX, cmap=CMAP, interpolation='bicubic')
    plt.gca().get_yaxis().set_visible(False)
    plt.xticks(np.arange(0, 31, 6.25), np.arange(0, 499, 100))
    plt.xlabel('Time (ms)')
| smathot/causal-pupil | analysis_utils.py | analysis_utils.py | py | 23,689 | python | en | code | 2 | github-code | 6 |
40615421063 | import os
import time
from collections import Counter
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import core.leading_tree as lt
import core.lmca as lm
from core.delala_select import DeLaLA_select
from utils import common
def load_parameters(param_path):
    """Load per-subtree hyper-parameters from a CSV file.

    Columns are: dc (float), lt_num (int), length_scale (float).

    Returns
    -------
    tuple of arrays
        (dc, lt_num, length_scale)
    """
    table = common.load_csv(param_path)
    return (table[:, 0].astype(float),
            table[:, 1].astype(int),
            table[:, 2].astype(float))
def removeMinorData(X, Labels, k):
    """
    Drop every sample whose class has too few members to supply neighbors.

    A class with at most ``k`` samples cannot provide enough neighbors for the
    downstream prediction step, so all of its samples are removed.  Note that
    callers in this project pass *index arrays* as ``X`` (see the layer-0 call
    ``removeMinorData(lt1.AL[i], y[lt1.AL[i]], 2)``), so the returned
    ``Remove_index`` then holds the removed universe indices.

    :param X: array of samples (rows), shape (n_samples, ...) or (n_samples,)
    :param Labels: class label per sample, shape (n_samples,)
    :param k: classes with a sample count <= k are removed
    :return: (X_kept, Labels_kept, removed) where ``removed`` is the sub-array
             of dropped rows of ``X``, or -1 when no class was small enough
    """
    labels_list = list(Labels)
    # Single-pass frequency table instead of list(Labels).count(l) per class,
    # which made the original quadratic in the number of samples.
    counts = Counter(labels_list)
    rmInds = np.zeros(0, dtype=int)
    for label in set(labels_list):
        if counts[label] <= k:
            # Ascending indices of every sample in the undersized class.
            rmInds = np.append(rmInds, np.flatnonzero(np.asarray(Labels) == label))
    if len(rmInds) > 0:
        Remove_index = X[rmInds]
        X = np.delete(X, rmInds, axis=0)
        Labels = np.delete(Labels, rmInds)
        return X, Labels, Remove_index
    return X, Labels, -1
# Cursor into the hyper-parameter table below; advanced by one each time
# PredictLabel fits an LMCA model.  NOTE(review): this relies on the row order
# of parameters.csv matching the order in which subtrees are visited — confirm
# when editing that file.
order = 0
param_path = os.path.join("data", "parameters.csv")
# Column arrays of per-fit hyper-parameters: leading-tree dc / lt_num and the
# LMCA kernel length_scale.
dc_all, lt_num_all, length_scale_all = load_parameters(param_path)
# Universe indices of every labeled sample selected for training so far.
SelectedInds_all = np.zeros(0, dtype=int)
def PredictLabel(train_AL, label_num, layer, dimension):
    """Label the samples of one subtree (universe indices ``train_AL``).

    Behaviour depends on how many distinct classes the subtree holds:
    * 0 classes: nothing to do (falls through, implicitly returns None);
    * 1 class: every sample inherits the label of the subtree root;
    * 2-3 classes, or already at layer 3: select exemplars with DeLaLA on a
      local leading tree, fit an LMCA metric on them, then 1-NN-classify the
      remaining samples in the reduced space; returns 0;
    * otherwise: returns 1 to tell the caller to partition the subtree again.

    Side effects: writes predictions into the global ``y_predict_all``,
    appends the chosen exemplars to ``SelectedInds_all`` and advances the
    global hyper-parameter cursor ``order``.
    """
    global order, SelectedInds_all
    if label_num == 0:
        pass
    elif label_num == 1:  # predict that this subtree sample has the same label as its root node
        # NOTE(review): y_test and y_predict are computed here but never used;
        # only the direct write into y_predict_all matters.
        y_test = np.delete(y[train_AL], 0)  # test set labels
        y_predict = np.zeros(len(y_test), dtype=int) + y[train_AL[0]]
        y_predict_all[train_AL] = y[train_AL[0]]
        # The root itself counts as a selected (labeled) exemplar.
        SelectedInds_all = np.append(SelectedInds_all, train_AL[0])
        return 0
    elif 2 <= label_num <= 3 or layer == 3:  # Prediction with LMCA
        # Sub-matrix of the global distance matrix restricted to this subtree.
        D_A = D[train_AL]
        D_A = D_A[:, train_AL]
        LT = lt.LeadingTree(X_train=X[train_AL], dc=dc_all[order], lt_num=lt_num_all[order], D=D_A)
        LT.fit()
        # gamma = density * delta ranks how "central" each sample is.
        LTgammaPara = LT.density * LT.delta
        selectedInds = DeLaLA_select(LTgammaPara, LT.density, LT.layer, y[train_AL], 2, label_num * 2, 0.5)
        selectedInds_universe = train_AL[selectedInds]
        X_train = X[selectedInds_universe]
        y_train = y[selectedInds_universe]
        lmca = lm.LMCA(dimension=dimension, init_method="kpca", verbose=True, max_iter=100, stepsize=1.E-2,
                       nn_active=False, length_scale=length_scale_all[order], k=1)
        lmca.fit(X_train, y_train)
        # Consume one row of the hyper-parameter table per LMCA fit.
        order += 1
        X_test = np.delete(X[train_AL], selectedInds, axis=0)  # After removing the training set, the test set samples are obtained
        y_test = np.delete(y[train_AL], selectedInds, axis=0)  # test set labels
        y_predict = np.zeros(len(y_test), dtype=int) - 1  # predict labels
        MatDist = common.euclidian_dist_square(X_train, X_test)  # The kernel matrix corresponding to the training set and the test set
        test_bnd_K = np.exp(-1 * lmca.length_scale * MatDist).T
        B = test_bnd_K.dot(lmca.Omega)  # Test set after dimensionality reduction
        A = lmca.K.dot(lmca.Omega)  # The training set after dimensionality reduction
        # Find the training sample with the closest Euclidean distance for each test sample, and predict that both have the same label.
        D_temp = common.euclidian_dist(B, A)
        Pa = np.zeros(len(y_predict), dtype=int)  # Pa[i] represents the index of the training sample closest to the test sample i
        for j in range(len(y_predict)):
            index1 = np.argmin(D_temp[j])
            Pa[j] = index1
        y_predict = y_train[Pa]
        index_predict = np.delete(train_AL, selectedInds, axis=0)
        y_predict_all[index_predict] = y_predict
        SelectedInds_all = np.append(SelectedInds_all, selectedInds_universe)
        return 0
    else:
        return 1
if __name__ == "__main__":
    dataset_path = os.path.join("data", "letter.csv")
    X, y = common.load_data(dataset_path, label_index=0, map_label=False)
    t1 = time.time()
    # Min-max scale so leading-tree distances weigh every feature equally.
    scalar = MinMaxScaler()
    X = scalar.fit_transform(X)
    # Full pairwise distance matrix; subtrees reuse it via fancy indexing.
    D = common.euclidian_dist(X, X)
    # Universe indices removed by removeMinorData (it is fed index arrays, so
    # its "removed rows" are indices); -1 sentinels are filtered out below.
    remove_index_all = np.zeros(0, dtype=int)
    # -1 marks "not predicted yet".
    y_predict_all = np.zeros(len(y), dtype=int) - 1
    # Per-layer settings: [dc, lt_num, layer id, LMCA target dimension].
    dc_lt_num_arr = [[0.12, 45, 1, 2], [0.19, 10, 2, 2], [0.12, 5, 3, 8]]
    def recursive_partitioning(_X, _D, _layer=0, _train_AL=None):
        """Build a leading tree at this layer and label/split each subtree."""
        global remove_index_all
        lt1 = lt.LeadingTree(X_train=_X, dc=dc_lt_num_arr[_layer][0],
                             lt_num=dc_lt_num_arr[_layer][1], D=_D)  # Construct the leading tree for this layer's data
        lt1.fit()
        for i in range(dc_lt_num_arr[_layer][1]):
            # At layer 0 lt1.AL[i] already holds universe indices; deeper
            # layers must map the local indices back through _train_AL.
            if _layer == 0:
                _train_AL2, _y_AL, _remove_index = removeMinorData(lt1.AL[i], y[lt1.AL[i]], 2)
            else:
                _train_AL2, _y_AL, _remove_index = removeMinorData(_train_AL[lt1.AL[i]], y[_train_AL[lt1.AL[i]]], 2)
            _label_num = len(np.unique(y[_train_AL2]))
            remove_index_all = np.append(remove_index_all, _remove_index)
            _a = PredictLabel(_train_AL2, _label_num, dc_lt_num_arr[_layer][2], dc_lt_num_arr[_layer][3])
            if _a == 1:
                # Too many classes in this subtree: recurse one layer deeper.
                print(f"The {_layer}th layer case3: The number of subtree {i} categories is {_label_num}, which needs to be divided again.")
                _D_2 = D[_train_AL2]
                _D_2 = _D_2[:, _train_AL2]
                recursive_partitioning(X[_train_AL2], _D_2, _layer + 1, _train_AL=_train_AL2)
        return lt1
    lt0 = recursive_partitioning(X, D)
    # removeMinorData returns -1 when a subtree had nothing to drop; strip
    # those sentinels before using the remaining entries as indices.
    index_None = np.zeros(0, dtype=int)
    for i in range(len(remove_index_all)):
        if remove_index_all[i] == -1:
            index_None = np.append(index_None, i)
    remove_index_all = np.delete(remove_index_all, index_None)
    # NOTE(review): y_remove_Select appears unused below — confirm before removing.
    y_remove_Select = np.delete(y_predict_all, np.append(SelectedInds_all, remove_index_all))
    # Predict each removed sample with the label of its nearest selected exemplar.
    D = common.euclidian_dist(X[remove_index_all], X[SelectedInds_all])
    Pa = np.zeros(len(remove_index_all), dtype=int)
    for i in range(len(remove_index_all)):
        index1 = np.argmin(D[i])
        Pa[i] = index1
    y_predict_all[remove_index_all] = y[SelectedInds_all][Pa]
    # Zero differences count the correct predictions.
    arr1 = y[remove_index_all] - y_predict_all[remove_index_all]
    count0 = Counter(arr1)[0]
    print(f'The accuracy of the remove sample prediction is {count0 / len(arr1)}, {count0}/{len(arr1)}')
    # Subtree accuracy
    for i in range(45):
        temp = np.setdiff1d(lt0.AL[i], SelectedInds_all)
        arr2 = y[temp] - y_predict_all[temp]
        count1 = Counter(arr2)[0]
        print(f'The accuracy of subtree {i} is {count1 / len(temp)}, {count1}/{len(temp)}')
    # Overall accuracy
    y_predict_all = np.delete(y_predict_all, SelectedInds_all)
    y_test_all = np.delete(y, SelectedInds_all)
    arr = y_test_all - y_predict_all
    count = Counter(arr)[0]
    t2 = time.time()
    print(f'A total of {len(SelectedInds_all)} points are selected, with an accuracy of {count / len(y_test_all)}, {count}/{len(y_test_all)}')
    print(f'Takes {t2 - t1} seconds')
| alanxuji/DeLaLA | DeLaLA/DeLaLA-Letter.py | DeLaLA-Letter.py | py | 7,574 | python | en | code | 6 | github-code | 6 |
14769244814 | from defines import NUMBER_OF_RESULTS_FOR_QUERY
from logging_messages import log_debug_message
from memories_querying import (
retrieve_description_from_scored_results_entry,
search_memories,
)
from vector_storage import process_raw_data
from wrappers import validate_agent_type
def generate_summary_description_segment(
    agent, current_timestamp, query, prompt, memories_raw_data, index
):
    """Generates a segment of a character summary.

    Queries the agent's memories, appends each retrieved memory description
    as a "- " bullet to the prompt, and asks the agent's AI model for a
    response.

    Args:
        agent (Agent): the agent to whom the summary description corresponds
        current_timestamp (datetime): the current timestamp
        query (str): the query that will be made to the agent's memories
        prompt (str): the header for the prompt
        memories_raw_data (dict): the raw data of the agent's memories
        index (AnnoyIndex): the index of the Annoy library

    Returns:
        str: the response from the AI model
    """
    scored_results = search_memories(
        agent,
        current_timestamp,
        process_raw_data(query),
        NUMBER_OF_RESULTS_FOR_QUERY,
        memories_raw_data,
        index,
    )

    # One bullet per retrieved memory (iteration yields the vector ids;
    # the original enumerate() index was never used).
    prompt += "".join(
        "- " + retrieve_description_from_scored_results_entry(vector_id, memories_raw_data)
        for vector_id in scored_results
    )

    log_debug_message(prompt)

    return agent.get_request_response_function()(prompt)
@validate_agent_type
def request_character_summary(agent, current_timestamp, memories_raw_data, index):
    """Produces the agent's character summary through the AI model.

    Builds four summary segments — core characteristics, current daily
    occupation, feeling about recent progress in life, and innate traits —
    each backed by a memory retrieval, then stitches them together.

    Args:
        agent (Agent): the agent to whom the memories belong
        current_timestamp (datetime): the current timestamp
        memories_raw_data (dict): the raw data of the agent's memories
        index (AnnoyIndex): the index of the Annoy library

    Returns:
        str: the generated summary description for the agent involved
    """
    # Segment 1: retrieval on the query "[name]'s core characteristics".
    prompt = f"How would one describe {agent.name}'s core characteristics given the following statements? "
    prompt += f"Start the sentence by saying either '{agent.name} is' or '{agent.name} has':\n"
    core_characteristics = generate_summary_description_segment(
        agent,
        current_timestamp,
        f"{agent.name}'s core characteristics",
        prompt,
        memories_raw_data,
        index,
    )
    # Segment 2: current daily occupation.
    prompt = f"How would one describe {agent.name}'s current daily occupation given the following statements? "
    prompt += f"Start the sentence by saying either '{agent.name} is' or '{agent.name} has':\n"
    current_daily_occupation = generate_summary_description_segment(
        agent,
        current_timestamp,
        f"{agent.name}'s current daily occupation",
        prompt,
        memories_raw_data,
        index,
    )
    # Segment 3: feeling about recent progress in life.
    prompt = f"How would one describe {agent.name}'s feeling about his recent progress in life given the "
    prompt += f"following statements? Start the sentence by saying either '{agent.name} is' or '{agent.name} has':\n"
    recent_progress_in_life = generate_summary_description_segment(
        agent,
        current_timestamp,
        f"{agent.name}'s feeling about his recent progress in life",
        prompt,
        memories_raw_data,
        index,
    )
    # Segment 4: innate traits, requested as a bare list of adjectives.
    innate_traits = generate_summary_description_segment(
        agent,
        current_timestamp,
        f"{agent.name}'s innate traits",
        f"How would one describe {agent.name}'s innate traits given the following statements? Write it solely as a series of adjectives:\n",
        memories_raw_data,
        index,
    )
    # Unload the Annoy index as soon as querying is done to free its memory;
    # keep it closed whenever it is not in use.
    index.unload()
    summary_description = f"Name: {agent.name} (age: {agent.age})\n"
    summary_description += f"Innate traits: {innate_traits}\n"
    summary_description += (
        f"{core_characteristics}\n{current_daily_occupation}\n{recent_progress_in_life}"
    )
    log_debug_message(
        f"Function {request_character_summary.__name__}:\n{summary_description}"
    )
    return summary_description
| joeloverbeck/intelligent_agents_simulations | character_summaries.py | character_summaries.py | py | 4,287 | python | en | code | 0 | github-code | 6 |
35863181048 | import os
from trainingData import get_filenames
def stats_main():
    """Write dataList.csv: one "sample, file, supop" row per (sample, file) pair."""
    samples, all_files = get_filenames()
    # "with" guarantees the handle is closed (and the file flushed) even if a
    # lookup below raises; the original leaked the handle on any exception.
    with open("dataList.csv", "w") as f:
        f.write("sample, file, supop\n")
        for sample, files in zip(samples, all_files):
            for file in files:
                f.write(writeData(sample, file))
def writeData(sample, file):
    """Build one CSV row ("sample, file, supop") for dataList.csv."""
    supop = get_y_sample(sample)
    return sample + ", " + file + ", " + supop + "\n"
def get_y_sample(sample):
    """Return the superpopulation group of a sample.

    Scans ./samples_population.csv (columns: sample, gender, population,
    superpopulation) for the given sample name.

    Returns:
        str: the superpopulation code, or "unknown" if the sample is absent.
    """
    # "with" closes the file on every path; the original leaked the handle
    # whenever the row unpacking raised.
    with open("./samples_population.csv") as f:
        f.readline()  # skip the header row
        for row in f:
            if not row.strip():
                continue  # tolerate blank lines instead of crashing on unpack
            samp, gen, pop, supop = row.split(',')
            if samp == sample:
                return supop.strip()
    # Replaces the original "try: sp / except: return 'unknown'" probe, which
    # abused NameError to detect a missing match.
    return "unknown"
# Script entry point: regenerate dataList.csv whenever this module runs.
stats_main()
| ryngrg/DNA_classifier | dataList.py | dataList.py | py | 984 | python | en | code | 0 | github-code | 6 |
14186262586 | import json
from sksurv.functions import StepFunction
from sksurv.linear_model import CoxPHSurvivalAnalysis
from sksurv.metrics import concordance_index_censored
from sksurv.nonparametric import nelson_aalen_estimator, kaplan_meier_estimator
from core.cox_wrapper import CoxFairBaseline
from core.drawing import draw_points_tsne
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sksurv.ensemble import RandomSurvivalForest
from exp_config import CONFIG, RES_DIR
from core.cox_generator import CoxGenerator
from survshap import SurvivalModelExplainer, ModelSurvSHAP
from survbex.estimators import BeranModel
from survbex.explainers import SurvBexExplainer
########################################################################################################################
# ------------------------------------------------ PREPARE DATA --------------------------------------------------------
########################################################################################################################
def get_cox_data(coefs: np.ndarray):
    """Sample one synthetic Cox cluster and split it 70/30 into train/test.

    Returns ([x_train, y_train], [x_test, y_test]) where the feature matrices
    are DataFrames with columns f1..fN matching the coefficient vector.
    """
    generator = CoxGenerator(coefs=coefs)
    split = train_test_split(
        *generator.generate_data(size=CONFIG['TRAIN_SIZE'], censored_part=0.2),
        train_size=0.7
    )
    x_train, x_test, y_train, y_test = split
    feature_names = [f'f{i + 1}' for i in range(len(coefs))]
    x_train = pd.DataFrame(x_train, columns=feature_names)
    x_test = pd.DataFrame(x_test, columns=feature_names)
    return [x_train, y_train], [x_test, y_test]
# np.random.seed(42)
# train, test = get_veterans_data()
# One synthetic Cox dataset per configured coefficient vector (one "cluster").
cox_clusters = [get_cox_data(coefs=cox_coefs) for cox_coefs in CONFIG['COX_COEFS_CLS']]
# Shift each cluster's features by a cluster-dependent offset so the clusters
# are separated in feature space (train and test receive the same shift).
cox_clusters = [
    (
        [cox_cluster[0][0] + 2.0 / len(cox_clusters) * cl_i, cox_cluster[0][1]],
        [cox_cluster[1][0] + 2.0 / len(cox_clusters) * cl_i, cox_cluster[1][1]]
        # [cox_cluster[0][0] + 1. * cl_i, cox_cluster[0][1]],
        # [cox_cluster[1][0] + 1. * cl_i, cox_cluster[1][1]]
    )
    for cl_i, cox_cluster in enumerate(cox_clusters)
]
# Concatenate all clusters into one train set and one test set.
all_train = [
    pd.concat([cox_cluster[0][0] for cox_cluster in cox_clusters]),
    np.hstack([cox_cluster[0][1] for cox_cluster in cox_clusters])
]
all_test = [
    pd.concat([cox_cluster[1][0] for cox_cluster in cox_clusters]),
    np.hstack([cox_cluster[1][1] for cox_cluster in cox_clusters])
]

# Use SurvLimeExplainer class to find the feature importance
training_events = np.array([event for event, _ in all_train[1]])
training_times = np.array([time for _, time in all_train[1]])
training_features = all_train[0]

test_events = np.array([event for event, _ in all_test[1]])
test_times = np.array([time for _, time in all_test[1]])
test_features = all_test[0]

with open(f'{RES_DIR}/dataset.json', 'w+') as fp:
    json.dump(fp=fp, obj=dict(
        # orient='records' (list of row dicts).  The previous orient='raw'
        # only worked through pandas' pre-2.0 abbreviation matching ('r...' ->
        # 'records') and is rejected by pandas >= 2.0.
        training_features=training_features.to_dict(orient='records'),
        training_events=training_events.tolist(),
        training_times=training_times.tolist(),
        test_features=test_features.to_dict(orient='records'),
        test_events=test_events.tolist(),
        test_times=test_times.tolist()
    ))
########################################################################################################################
# ------------------------------------------------ BUILD BBOX ----------------------------------------------------------
########################################################################################################################
# Fit the black-box survival model selected by CONFIG['BBOX'] and expose three
# uniform callables: pred_surv_fn, pred_hazard_fn and pred_risk_fn.
if CONFIG['BBOX'] == 'rf':
    model = RandomSurvivalForest(n_estimators=100, max_samples=min(500, len(all_train[0])), max_depth=8)
    model.fit(all_train[0], all_train[1])
    pred_surv_fn = model.predict_survival_function
    pred_hazard_fn = model.predict_cumulative_hazard_function
    pred_risk_fn = model.predict
elif CONFIG['BBOX'] == 'beran':
    # The Beran black box is fitted with the single ground-truth coefficient
    # vector, hence the requirement of exactly one configured cluster.
    assert len(CONFIG['COX_COEFS_CLS']) == 1
    model = BeranModel(kernel_width=250, kernel_name='gaussian')
    model.fit(X=all_train[0].to_numpy(), b=CONFIG['COX_COEFS_CLS'][0],
              y_events=training_events, y_event_times=training_times)
    def surv_np_to_step_surv(surv_arr: np.ndarray):
        # Wrap each row of survival probabilities into a StepFunction over the
        # model's unique event times, matching the sksurv interface.
        return np.array([StepFunction(x=model.unique_times_, y=sample) for sample in surv_arr])
    pred_surv_fn = lambda X: surv_np_to_step_surv(model.predict_survival_torch_optimized(X))
    # NOTE(review): here pred_hazard_fn returns a raw array, unlike the step
    # functions returned in the 'rf' branch — confirm downstream handles both.
    pred_hazard_fn = lambda X: -np.log(model.predict_survival_torch_optimized(X))
    pred_risk_fn = lambda X: np.sum(pred_hazard_fn(X), axis=1)
elif 'cox' in CONFIG['BBOX']:
    model = CoxPHSurvivalAnalysis(alpha=1)
    model.fit(all_train[0], all_train[1])
    pred_surv_fn = model.predict_survival_function
    pred_hazard_fn = model.predict_cumulative_hazard_function
    pred_risk_fn = model.predict
    # 'cox_na' / 'cox_km' swap in an explicit baseline estimator
    # (Nelson-Aalen or Kaplan-Meier) and renormalized coefficients.
    if CONFIG['BBOX'] in ['cox_na', 'cox_km']:
        if CONFIG['BBOX'] == 'cox_na':
            cox_fair_baseline = CoxFairBaseline(
                training_events=training_events,
                training_times=training_times,
                baseline_estimator_f=nelson_aalen_estimator
            )
        elif CONFIG['BBOX'] == 'cox_km':
            cox_fair_baseline = CoxFairBaseline(
                training_events=training_events,
                training_times=training_times,
                baseline_estimator_f=kaplan_meier_estimator
            )
        else:
            raise Exception(f'Undefined cox model = {CONFIG["BBOX"]}')
        # L1-normalize coefficients so explanations are on a comparable scale.
        model.coef_ /= np.abs(model.coef_).sum()
        pred_surv_fn = lambda X: cox_fair_baseline.predict_survival_function(X, cox_coefs=model.coef_)
        pred_hazard_fn = lambda X: cox_fair_baseline.predict_cum_hazard_from_surv_np(X, cox_coefs=model.coef_)
        pred_risk_fn = lambda X: np.dot(X, model.coef_)
    elif CONFIG['BBOX'] != 'cox':
        raise Exception(f'Undefined cox model = {CONFIG["BBOX"]}')
else:
    raise Exception(f"Undefined bbox = {CONFIG['BBOX']}")
# Sanity check: concordance index of the fitted black box on train and test.
cindex_train = concordance_index_censored(
    event_indicator=training_events, event_time=training_times, estimate=pred_risk_fn(training_features))[0]
print(f'cindex train = {cindex_train}')
cindex_test = concordance_index_censored(
    event_indicator=test_events, event_time=test_times, estimate=pred_risk_fn(test_features))[0]
print(f'cindex test = {cindex_test}')
########################################################################################################################
# ------------------------------------------------ SELECT POINTS TO EXPLAIN --------------------------------------------
########################################################################################################################
# draw_comparison(ex_i=random.randint(0, len(test)))
# For each cluster, build a probe point: the cluster's train centroid shifted
# by DATA_POINT_DEV test-set standard deviations.
cluster_centroids = [
    cox_cluster[0][0].mean() + all_test[0].std() * CONFIG['DATA_POINT_DEV']
    for cox_cluster in cox_clusters
]
# Squared Euclidean distance of every test sample to each probe point.
cl_distances = [
    [sum((cl_centroid - fs) ** 2) for fs in all_test[0].to_numpy()]
    for cl_centroid in cluster_centroids
]
# One explained instance per cluster: the test sample closest to the probe.
exp_test_ids = [np.argmin(distances) for distances in cl_distances]
draw_points_tsne(
    pt_groups=[
        *[cox_cluster[0][0].to_numpy() for cox_cluster in cox_clusters],
        *list(all_test[0].to_numpy()[exp_test_ids])
    ],
    names=[
        *[f'cl{i}' for i, _ in enumerate(cox_clusters)],
        *[f'ex for cl {i}' for i, _ in enumerate(exp_test_ids)]
    ],
    colors=[None] * len(cox_clusters) * 2,
    path=f'{RES_DIR}/clusters.png'
    # path=f'clusters.png'
)
# Persist the true (event, time) labels of the explained instances.
with open(RES_DIR.joinpath("y_true.json"), 'w+') as fp:
    json.dump(
        fp=fp,
        obj=[
            dict(event=bool(all_test[1][ex_i][0]), event_time=all_test[1][ex_i][1])
            for ex_i in exp_test_ids
        ]
    )
########################################################################################################################
# ------------------------------------------------ SurvSHAP ------------------------------------------------------------
########################################################################################################################
# SurvSHAP(t) explanations for the selected test instances; the lambda adapts
# our uniform pred_surv_fn to the (model, X) signature SurvSHAP expects.
surv_shap = SurvivalModelExplainer(model, all_test[0].iloc[exp_test_ids], all_test[1][exp_test_ids],
                                   predict_survival_function=lambda model, X: pred_surv_fn(X))
exp_survshap = ModelSurvSHAP(random_state=42)
exp_survshap.fit(surv_shap)
# Keep only the importance value (index 1) of each simplified-result row,
# giving one importance vector per explained instance.
shap_explanations = np.array(
    [
        [
            imp[1]
            for imp in pt_exp.simplified_result.values
        ]
        for pt_exp in exp_survshap.individual_explanations
    ]
)
with open(RES_DIR.joinpath("explanation_shap.json"), 'w+') as fp:
    json.dump(fp=fp, obj=shap_explanations.tolist())
########################################################################################################################
# ------------------------------------------------ SurvLIME ------------------------------------------------------------
########################################################################################################################
# Shared explainer used for both the SurvLIME-style (convex) and SurvBeX
# (gradient) explanations below.
explainer = SurvBexExplainer(
    training_features=training_features,
    training_events=list(training_events),
    training_times=list(training_times),
    model_output_times=model.event_times_,
    kernel_width=CONFIG['KERNEL_WIDTH']
)
# SurvLIME: convex-optimized Cox surrogate per explained instance.
cox_explanations = np.array(
    [
        explainer.explain_instance(
            data_row=all_test[0].iloc[ex_i],
            predict_fn=pred_surv_fn,
            num_samples=CONFIG['NEIGH_SIZE'],
            type_fn='survival',
            optimizer='convex'
        )
        for ex_i in exp_test_ids
    ]
)
with open(RES_DIR.joinpath("explanation_cox.json"), 'w+') as fp:
    json.dump(fp=fp, obj=cox_explanations.tolist())
########################################################################################################################
# ------------------------------------------------ SurvBeX -------------------------------------------------------------
########################################################################################################################
# SurvBeX: gradient-optimized Beran surrogate per explained instance; each
# run also dumps its optimization trace to a per-cluster CSV.
beran_explanations = []
for cl_i, ex_i in enumerate(exp_test_ids):
    beran_explanations.append(
        explainer.explain_instance(
            data_row=all_test[0].iloc[ex_i],
            predict_fn=pred_surv_fn,
            num_samples=CONFIG['NEIGH_SIZE'],
            num_val_samples=CONFIG['NEIGH_VAL_SIZE'],
            type_fn='survival',
            optimizer='gradient',
            grid_info_file=f"{RES_DIR}/optimization_cl={cl_i}.csv",
            max_iter=CONFIG['MAX_ITER']
        )
    )
with open(RES_DIR.joinpath("explanation_beran.json"), 'w+') as fp:
    json.dump(
        fp=fp,
        obj=np.array(beran_explanations).tolist()
    )
| DanilaEremenko/SurvBeX | main_run_synth_data_explainers.py | main_run_synth_data_explainers.py | py | 10,714 | python | en | code | 0 | github-code | 6 |
32108433366 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from temba_client.v2 import TembaClient
from django.conf import settings
from django.db import migrations
from ureport.utils import datetime_to_json_date, json_date_to_datetime
logger = logging.getLogger(__name__)
class Migration(migrations.Migration):
    # Defined inside the class body (no @staticmethod) and referenced below as
    # a class attribute in RunPython — the conventional shape for old-style
    # Django data migrations.
    def populate_poll_poll_date(apps, schema_editor):
        """Backfill Poll.poll_date from the RapidPro flow's created_on.

        For every org, fetch its flows over the RapidPro API and set each
        poll's poll_date to the matching flow's creation date; polls whose
        flow is unknown fall back to their own created_on.
        """
        # Use the historical models, as required inside migrations.
        Poll = apps.get_model("polls", "Poll")
        Org = apps.get_model("orgs", "Org")
        agent = getattr(settings, "SITE_API_USER_AGENT", None)
        host = settings.SITE_API_HOST
        for org in Org.objects.all():
            temba_client = TembaClient(host, org.api_token, user_agent=agent)
            api_flows = temba_client.get_flows()
            # Map flow uuid -> JSON-serialized creation date.
            flows_date = dict()
            for flow in api_flows:
                flows_date[flow.uuid] = datetime_to_json_date(flow.created_on)
            for poll in Poll.objects.filter(org=org):
                json_date = flows_date.get(poll.flow_uuid, None)
                if json_date:
                    date = json_date_to_datetime(json_date)
                else:
                    # Fallback: no matching flow, keep the poll's own timestamp.
                    logger.info("using created_on for flow_date on poll with id %s" % poll.pk)
                    date = poll.created_on
                poll.poll_date = date
                poll.save()
    dependencies = [("polls", "0022_poll_flow_date")]
    operations = [migrations.RunPython(populate_poll_poll_date)]
| rapidpro/ureport | ureport/polls/migrations/0023_populate_flow_date.py | 0023_populate_flow_date.py | py | 1,499 | python | en | code | 23 | github-code | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.