content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
import os
import hy
from models import test
#from pyswip.prolog import Prolog
class Answers:
    """Score a 24-item emotional-intelligence test and write a report file.

    ``_list_answer`` must hold the answers with the answer to question i at
    index i (index 0 is unused).  Questions 1-8 measure perception, 9-16
    comprehension and 17-24 regulation.
    """

    def __init__(self):
        self._list_answer = []
        self._limit_questions = 0
        self._root = ''
        # Report-fragment files.  NOTE: change these paths if necessary.
        self.__my_file = ''
        self.__pas_path = '/models/pas.txt'
        self.__esquizoide_path = '/models/esquizoide.txt'
        self.__emocional_path = '/models/emocional.txt'
        self.__sociopata_path = '/models/sociopata.txt'
        self.__toc_path = '/models/toc.txt'
        self.__estable_path = '/models/estable.txt'
        self.__default_path = '/models/default.txt'
        # Path where the resulting report is written.
        self.__last_path = '/models/result.txt'
        # Raw totals for perception, comprehension and regulation.
        self._total_percepcion = 0
        self._total_comprension = 0
        self._total_regulacion = 0
        # First-phase results: qualitative level of each subscale.
        self.__nivel_percepcion = ''
        self.__nivel_comprension = ''
        self.__nivel_regulacion = ''
        # Second-phase result: the possible disorder.
        self.__your_desorder = ''
        # Report text fragments produced in the second phase.
        self.__text_percepcion = ''
        self.__text_comprension = ''
        self.__text_regulacion = ''
        self.__text_estado = ''
        self.__estado = ''

    def _solve_total_percepcion(self):
        # Perception subscale: questions 1-8.
        self._total_percepcion = sum(int(self._list_answer[i]) for i in range(1, 9))

    def _solve_total_comprension(self):
        # Comprehension subscale: questions 9-16.
        self._total_comprension = sum(int(self._list_answer[i]) for i in range(9, 17))

    def _solve_total_regulacion(self):
        # Regulation subscale: questions 17-24.
        # BUGFIX: the original range started at 16, double-counting question 16
        # (which already belongs to the comprehension subscale).
        self._total_regulacion = sum(int(self._list_answer[i]) for i in range(17, 25))

    def _result_percepcion(self):
        # Rule-based check of the perception level; the rule engine's globals
        # must be created first.
        test.crear_globales()
        self.__nivel_percepcion = test.percepcion(self._total_percepcion)

    def _result_comprension(self):
        # Rule-based check of the comprehension level.
        self.__nivel_comprension = test.comprension(self._total_comprension)

    def _result_regulacion(self):
        # Rule-based check of the regulation level.
        self.__nivel_regulacion = test.regulacion(self._total_regulacion)

    def _get_type_desorder(self):
        # Combine the three subscale levels into a possible-disorder verdict.
        self.__your_desorder = test.verifica_primero(
            self.__nivel_percepcion, self.__nivel_comprension, self.__nivel_regulacion)

    def impresion_final(self):
        # Print a summary to stdout.  Single-argument print(...) calls are
        # valid in both Python 2 and 3 (the original used py2-only statements).
        # User-facing messages intentionally kept in Spanish.
        print('\nnivel de tu percepcion emocional ...')
        print(self.__nivel_percepcion)
        print('\nnivel de tu comprension emocional ...')
        print(self.__nivel_comprension)
        print('\nnivel de tu regulacion emocional ...')
        print(self.__nivel_regulacion)
        print('\nposible tendencia de transtorno ...')
        print(self.__your_desorder)

    def __create_file(self):
        # Write the final report and open it in the 'mousepad' editor.
        # The 'with' block guarantees the file is closed before the editor runs.
        with open(self.__last_path, 'w') as report:
            report.write('\t\t\t<<< resultado de test emocional >>>\n\n')
            report.write(self.__text_percepcion)
            report.write(self.__text_comprension)
            report.write(self.__text_regulacion + '\n')
            report.write(self.__text_estado)
        os.system('mousepad ' + self.__last_path)

    def __generate_text(self, path):
        # Return the whole content of the text file at ``path``.
        with open(path, 'r') as source:
            return source.read()

    def _go_results(self):
        """Translate the computed levels and verdict into report text."""
        percepcion = self.__nivel_percepcion
        comprension = self.__nivel_comprension
        regulacion = self.__nivel_regulacion
        estado = self.__your_desorder
        # Report text per perception level (unknown level leaves text empty,
        # matching the original if/elif chains).
        percepcion_texts = {
            'poca_percepcion': '\nSu nivel de percepcion de sentimientos:\n\tpoca, requiere mejorar percepcion.',
            'adecuada_percepcion': '\n\nSu nivel de percepcion de sentimientos:\n\tadecuada.',
            'demasiada_percepcion': '\n\nSu nivel de percepcion de sentimientos:\n\tdemasiada, relaje su percepcion.',
        }
        comprension_texts = {
            'poca_comprension': '\n\nNivel comprension de edos emocionales:\n\tpoca, debe mejorar su comprension.',
            'adecuada_comprension': '\n\nNivel comprension de edos emocionales:\n\tadecuada, puede mejorar su comprension.',
            'excelente_comprension': '\n\nNivel comprension de edos emocionales:\n\texcelente.',
        }
        regulacion_texts = {
            'poca_regulacion': '\n\nNivel regulacion de edos emocionales:\n\tpoca, debe mejorar su regulacion.',
            'adecuada_regulacion': '\n\nNivel regulacion de edos emocionales:\n\tadecuada, puede mejorar su regulacion.',
            'excelente_regulacion': '\n\nNivel regulacion de edos emocionales:\n\texcelente.',
        }
        self.__text_percepcion = percepcion_texts.get(percepcion, '')
        self.__text_comprension = comprension_texts.get(comprension, '')
        self.__text_regulacion = regulacion_texts.get(regulacion, '')
        # Verdict -> description-file path; unknown verdicts use the default.
        # NOTE(review): 'ezquizoide_positivo' (sic) must match the rule
        # engine's output string exactly — do not "fix" the spelling here.
        estado_paths = {
            'pas_positivo': self.__pas_path,
            'ezquizoide_positivo': self.__esquizoide_path,
            'emocional_positivo': self.__emocional_path,
            'toc_positivo': self.__toc_path,
            'sociopata_positivo': self.__sociopata_path,
            'estable_positivo': self.__estable_path,
        }
        self.__text_estado = self.__generate_text(estado_paths.get(estado, self.__default_path))

    def _first_move(self):
        """Run the whole pipeline: totals, levels, verdict, report."""
        # Methods in execution order: first the raw subscale totals...
        self._solve_total_percepcion()
        self._solve_total_comprension()
        self._solve_total_regulacion()
        # ...then the qualitative level of each aspect...
        self._result_percepcion()
        self._result_comprension()
        self._result_regulacion()
        # ...then the second phase: the possible-disorder verdict.
        self._get_type_desorder()
        self.impresion_final()
        self._go_results()
        self.__create_file()
class RunAnswer:
    """Entry point that feeds a list of answers into the scoring machinery."""

    @staticmethod
    def _construir(list_answers):
        # Score the answers and produce the final report.
        # Single-argument print(...) is valid in both Python 2 and 3;
        # the original used py2-only print statements.
        print('ok, pasando a lo ultimo')
        print(list_answers)
        machine_answer = Answers()
        machine_answer._list_answer = list_answers
        machine_answer._first_move()
from django.apps import AppConfig


class SdConfig(AppConfig):
    """Django application configuration for the ``sd`` app."""
    name = 'sd'
|
# SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
from simpa import Tags
import simpa as sp
import numpy as np
import matplotlib.pyplot as plt
from utils.save_directory import get_save_path
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Simulated volume extents (mm) and voxel spacing (mm).
VOLUME_WIDTH_HEIGHT_DIM_IN_MM = 50
VOLUME_PLANAR_DIM_IN_MM = 50
SPACING = 0.5
# Fixed seed so the generated phantom is reproducible run to run.
RANDOM_SEED = 2736587
path_manager = sp.PathManager()
SAVE_PATH = get_save_path("Tissue_Generation", "Phantom")
VOLUME_NAME = "PhantomScan" + str(RANDOM_SEED)
# HDF5 file the simulation pipeline writes its output to.
file_path = SAVE_PATH + "/" + VOLUME_NAME + ".hdf5"
def create_example_tissue():
    """
    This is a very simple example script of how to create a tissue definition.
    It contains a muscular background, an epidermis layer on top of the muscles
    and a blood vessel.
    """
    half_width = VOLUME_WIDTH_HEIGHT_DIM_IN_MM / 2

    def tube(priority, radius, molecules, dx=0, dz=0):
        # Circular tube spanning the full planar (y) dimension, centred at
        # (half_width + dx, half_width + dz) in the x/z plane.
        tube_settings = sp.Settings()
        tube_settings[Tags.PRIORITY] = priority
        tube_settings[Tags.STRUCTURE_START_MM] = [half_width + dx, 0, half_width + dz]
        tube_settings[Tags.STRUCTURE_END_MM] = [half_width + dx,
                                                VOLUME_PLANAR_DIM_IN_MM,
                                                half_width + dz]
        tube_settings[Tags.STRUCTURE_RADIUS_MM] = radius
        tube_settings[Tags.MOLECULE_COMPOSITION] = molecules
        tube_settings[Tags.CONSIDER_PARTIAL_VOLUME] = False
        tube_settings[Tags.STRUCTURE_TYPE] = Tags.CIRCULAR_TUBULAR_STRUCTURE
        return tube_settings

    # Ultrasound-gel background filling the whole volume.
    background_dictionary = sp.Settings()
    background_dictionary[Tags.MOLECULE_COMPOSITION] = sp.TISSUE_LIBRARY.ultrasound_gel()
    background_dictionary[Tags.STRUCTURE_TYPE] = Tags.BACKGROUND

    # The two blood inclusions sit 7 mm from the phantom centre at +/-10 deg.
    x_shift = 7 * np.sin(np.deg2rad(10))
    z_shift = 7 * np.cos(np.deg2rad(10))

    tissue_dict = sp.Settings()
    tissue_dict[Tags.BACKGROUND] = background_dictionary
    tissue_dict["phantom"] = tube(3, 14, sp.TISSUE_LIBRARY.soft_tissue())
    tissue_dict["inclusion_1"] = tube(5, 2, sp.TISSUE_LIBRARY.blood(), x_shift, z_shift)
    tissue_dict["inclusion_2"] = tube(5, 2, sp.TISSUE_LIBRARY.blood(), -x_shift, -z_shift)
    return tissue_dict
# Seed the numpy random configuration prior to creating the global_settings file in
# order to ensure that the same volume
# is generated with the same random seed every time.
np.random.seed(RANDOM_SEED)
settings = {
    # These parameters set the general properties of the simulated volume
    Tags.RANDOM_SEED: RANDOM_SEED,
    Tags.VOLUME_NAME: VOLUME_NAME,
    Tags.SIMULATION_PATH: SAVE_PATH,
    Tags.SPACING_MM: SPACING,
    Tags.WAVELENGTHS: [700],
    Tags.DIM_VOLUME_Z_MM: VOLUME_WIDTH_HEIGHT_DIM_IN_MM,
    Tags.DIM_VOLUME_X_MM: VOLUME_WIDTH_HEIGHT_DIM_IN_MM,
    Tags.DIM_VOLUME_Y_MM: VOLUME_PLANAR_DIM_IN_MM,
    Tags.VOLUME_CREATOR: Tags.VOLUME_CREATOR_VERSATILE
}
settings = sp.Settings(settings)
settings.set_volume_creation_settings({
    Tags.STRUCTURES: create_example_tissue(),
    Tags.SIMULATE_DEFORMED_LAYERS: False
})
# The RSOM Explorer P50 device defines the acquisition geometry.
device = sp.RSOMExplorerP50()
SIMUATION_PIPELINE = [
    sp.ModelBasedVolumeCreationAdapter(settings)
]
import time
# Time the simulation itself and persist the wall-clock run time.
start_time = time.time()
sp.simulate(SIMUATION_PIPELINE, settings, device)
end_time = time.time() - start_time
with open(os.path.join(SAVE_PATH, "run_time.txt"), "w+") as out_file:
    out_file.write("{:.2f} s".format(end_time))
wavelength = settings[Tags.WAVELENGTHS][0]
# Load the segmentation volume produced by the pipeline for 3D rendering.
segmentation_mask = sp.load_data_field(file_path=file_path,
                                       wavelength=wavelength,
                                       data_field=Tags.DATA_FIELD_SEGMENTATION)
fontsize = 13
# Voxel rendering: blood inclusions in red, surrounding tissue in yellow.
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, projection='3d')
ax.voxels(segmentation_mask == sp.SegmentationClasses.BLOOD, shade=True, facecolors="red", alpha=0.55)
ax.voxels(segmentation_mask == sp.SegmentationClasses.MUSCLE, shade=True, facecolors="yellow", alpha=0.15)
ax.set_aspect('auto')
# ax.set_xticks(np.linspace(0, settings[Tags.DIM_VOLUME_X_MM]/settings[Tags.SPACING_MM], 6))
# ax.set_yticks(np.linspace(0, settings[Tags.DIM_VOLUME_Y_MM]/settings[Tags.SPACING_MM], 6))
# ax.set_zticks(np.linspace(0, settings[Tags.DIM_VOLUME_Z_MM]/settings[Tags.SPACING_MM], 6))
# ax.set_xticklabels(np.linspace(0, settings[Tags.DIM_VOLUME_X_MM], 6, dtype=int), fontsize=fontsize)
# ax.set_yticklabels(np.linspace(0, settings[Tags.DIM_VOLUME_X_MM], 6, dtype=int), fontsize=fontsize)
# ax.set_zticklabels(np.linspace(0, settings[Tags.DIM_VOLUME_X_MM], 6, dtype=int), fontsize=fontsize)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
# Invert the z axis so depth increases downwards in the rendering.
ax.set_zlim(int(settings[Tags.DIM_VOLUME_X_MM]/settings[Tags.SPACING_MM]), 0)
# ax.set_zlabel("Depth [mm]", fontsize=fontsize)
# ax.set_xlabel("x width [mm]", fontsize=fontsize)
# ax.set_ylabel("y width [mm]", fontsize=fontsize)
ax.view_init(elev=10., azim=-45)
plt.savefig(os.path.join(SAVE_PATH, "phantom.svg"), dpi=300)
plt.close()
|
# Refer Readme file for setup instruction
from selenium import webdriver
from selenium.webdriver.support.select import Select
import pandas as pd
auton = webdriver.ChromeOptions()
# Run Chrome without a visible browser window.
auton.add_argument("headless")
############### Change chromedriver.exe path here #############
driver = webdriver.Chrome(options=auton, executable_path="C:/Users/Abhishek/Desktop/chromedriver.exe")
#################################################################
# NOTE(review): this flag is declared global in getSGPA but never
# read or written afterwards — looks like a leftover.
flag = 1
def getSGPA(roll, str_ing, sems):
    """Fetch one student's result row from the currently loaded result portal.

    Relies on the module-level ``driver`` already pointing at the search page.

    :param roll: roll number typed into the search box.
    :param str_ing: exact visible text of the result-session drop-down entry.
    :param sems: semester label, used only in the failure message.
    :return: [sgpa_odd, sgpa_even, total_marks, name]; an SGPA missing on the
             page is -1, and ["NA", "NA", "NA", "NA"] when the whole result
             page is unavailable.
    """
    try:
        driver.find_element_by_id('txtrollno').clear()
        driver.find_element_by_id('txtrollno').send_keys(roll)
        driver.find_element_by_id('btnSearch').click()
        sl = Select(driver.find_element_by_id('ddlResult'))
        sl.select_by_visible_text(str_ing)
        driver.find_element_by_id('btnGo').click()
        # Only the label matching the selected semester's parity exists, so
        # the missing one is recorded as -1 (turned into NaN downstream).
        try:
            obtmrk = driver.find_element_by_id('lblsgpaodddisp').text
        except Exception:  # narrowed from a bare except
            obtmrk = -1
        try:
            obtmark2 = driver.find_element_by_id('lblsgpaevendisp').text
        except Exception:  # narrowed from a bare except
            obtmark2 = -1
        obtmark3 = driver.find_element_by_id('lbltotlmarksDisp').text
        name = driver.find_element_by_id('lblname').text
        return [obtmrk, obtmark2, obtmark3, name]
    except Exception:  # narrowed from a bare except
        print("RESULT OF", roll, "FOR SEM:", end='')
        print(sems, 'not available')
        # BUGFIX: padded to four entries so indexing is uniform with the
        # success path (the caller tests element 1 before touching element 3).
        return ["NA", "NA", "NA", "NA"]
# --- Branch selection: ask until a valid code (1-6) is entered. ---
while(True):
    print("ENTER YOUR BRANCH CODE- ")
    print("CE:1 CS:2 EE:3 EL:4 ME:5 IT:6")
    branch_code = int(input())
    if branch_code < 1 or branch_code > 6:
        print("WRONG CHOICE ENTERED! TRY AGAIN")
    else:
        break
branch_to_code={1:"CE",2:"CS",3:"EE",4:"EL",5:"ME",6:"IT"}
####### BATCH SELECTION############
# Accepts batches like "2018-22" (the difference of the 2-digit years must be 4).
while (True):
    print("ENTER YOUR BATCH( Ex: 2018-22 )")
    year = input()
    start_year = int(year[2:4])
    end_year = int(year[5:])
    if end_year-start_year != 4:
        print('WRONG VALUE ENTERED! TRY AGAIN')
    else:
        break
######## SEMESTER SELECTION #########
while(True):
    print("ENTER SEMESTER(should be a single digit number)-")
    sem=int(input())
    if sem < 1 or sem > 8:
        print("WRONG CHOICE ENTERED! TRY AGAIN")
    else:
        break
# Semester number -> the pair label used by the portal's drop-down.
semester={1:"1-2",2:"1-2",3:"3-4",4:"3-4",5:"5-6",6:"5-6",7:"7-8",8:"7-8"}
########DON'T TOUCH THIS ###############################
# Academic-year string ("2018-19" style) for the selected semester pair.
if sem == 1 or sem == 2:
    s= str(2000+ start_year) + "-" + str(start_year+1)
elif sem==3 or sem==4 :
    s = str(2000 + start_year+1) + "-" + str(start_year + 2)
elif sem==5 or sem==6:
    s = str(2000 + start_year+2) + "-" + str(start_year + 3)
elif sem== 7 or sem == 8:
    s = str(2000 + start_year+3) + "-" + str(start_year + 4)
###########################################################
driver.get('https://govexams.com/knit/searchresult.aspx')
names = []
SGPA_odd = []
SGPA_even = []
CGPA = []
roll = []
roll_list = []
######### ROLL NO LIST GENERATION ###########
# Regular rolls: year*1000 + branch*100 + serial.
# NOTE(review): range() excludes its end value, so regular serials run 1-71
# and lateral serials 1-7 — confirm whether 72 and 8 should be included.
regular_start = start_year*1000 + branch_code * 100 + 1
regular_end = start_year*1000 + branch_code * 100 + 72
lateral_start = (start_year+1)*10000 + (start_year%10)*1000 + branch_code * 100 + 1
lateral_end = (start_year+1)*10000 + (start_year%10)*1000 + branch_code * 100 + 8
regular = [y for y in range(regular_start, regular_end)]
lateral = [x for x in range(lateral_start, lateral_end)]
regular.extend(lateral)
roll_list.extend(regular)
################################################
str_ing='REGULAR (' + s + ') Semester ' + semester[sem]
print("Fetching Result...")
# Fetch each roll number; rows whose even-SGPA slot is "NA" are skipped.
for i in roll_list:
    temp = getSGPA(i,str_ing,semester[sem])
    if (temp[1] != "NA"):
        names.append(temp[3])
        SGPA_odd.append(temp[0])
        SGPA_even.append(temp[1])
        CGPA.append(temp[2])
        roll.append(i)
    # driver.execute_script("window.scrollTo(0,200)")
    # if(input("To see next press Enter : ")==""):
    # driver.back()
    # print("Currently Displaying :",i+1)
    # continue
    driver.back()
# Assemble the table, turn -1 markers into NaN, drop all-NaN columns (e.g. the
# SGPA column of the off-parity semester), sort by CGPA and save as CSV.
dct={"Roll No.": pd.Series(roll).astype(int),"Name":pd.Series(names),"SGPA_odd":pd.Series(SGPA_odd),"SGPA_even":pd.Series(SGPA_even),"CGPA":pd.Series(CGPA)}
df = pd.DataFrame(dct)
nan_value = float("NaN")
df.replace(-1, nan_value, inplace=True)
df.dropna(how='all', axis=1, inplace=True)
file_name = 'Result_' + str(branch_to_code[branch_code]) + "_" + year + "_"+"Sem."+semester[sem]+ '.csv'
rslt_df = df.sort_values(by = 'CGPA', ascending = False)
rslt_df.to_csv(file_name, index=False)
print("DONE")
|
# Authors: Stephane Gaiffas <stephane.gaiffas@gmail.com>
# License: BSD 3 clause
def run_playground_decision():
    """Launch the streamlit classification playground shipped with onelearn."""
    import os
    import onelearn

    # The example script lives alongside the installed package.
    script = onelearn.__file__.replace(
        "/onelearn/__init__.py", "/examples/playground_classification.py"
    )
    command = "streamlit run %s" % script
    os.system(command)
def run_playground_tree():
    """Launch the streamlit tree playground shipped with onelearn."""
    import os
    import onelearn

    # The example script lives alongside the installed package.
    script = onelearn.__file__.replace(
        "/onelearn/__init__.py", "/examples/playground_tree.py"
    )
    command = "streamlit run %s" % script
    os.system(command)
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse
from django.template import loader
from .models import Question
def index(request):
    """Render the five most recently published questions."""
    questions = Question.objects.order_by('-pub_date')[:5]
    template = loader.get_template('calculator/index.html')
    context = {'latest_question_list': questions}
    return HttpResponse(template.render(context, request))
def detail(request, question_id):
    """Render the detail page for one question; 404 if it does not exist."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'calculator/detail.html', {'question': question})
def results(request, question_id):
    """Placeholder results view that just echoes the question id."""
    template = "Results of question %s"
    return HttpResponse(template % question_id)
def calculate(request):
    """Compute the assessment score (1-p) * u * (1-v) from POSTed fields.

    Form fields '2', '3' and '4' hold p, u and v respectively.  A missing
    field makes ``float(None)`` raise TypeError; a present-but-empty or
    non-numeric field makes ``float('')`` raise ValueError.  Both cases are
    reported back to the user instead of crashing with a server error.
    """
    try:
        v = float(request.POST.get('4'))
        u = float(request.POST.get('3'))
        p = float(request.POST.get('2'))
    except (TypeError, ValueError):
        # BUGFIX: ValueError (empty/non-numeric field) previously escaped.
        return HttpResponse("You have to fill out all of the assessment! <a href='../'>Go back!</a>")
    result = (1-p) * u * (1-v)
    return render(request, 'calculator/result.html', {'result' : result})
|
import streamlit as st
import altair as alt
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import pandasdmx as pdmx
import pandas_datareader.data as web
# Plot settings
plt.style.use(
    "https://github.com/aeturrell/coding-for-economists/raw/main/plot_style.txt"
)
# Page settings
st.set_page_config(
    page_title="An example streamlit app",
    # NOTE(review): the icon string below looks mis-encoded (mojibake) —
    # confirm the intended emoji and re-save the file as UTF-8.
    page_icon="๐ฉโ๐ป",
    layout="wide",
    initial_sidebar_state="expanded",
)
# ----------------------------------------------------------------
# This section is just data retrieval and not directly about the dashboard
# ----------------------------------------------------------------
# settings to retrieve OECD data
# Maps OECD QNA subject codes to human-readable industry names.
industry_dict = {
    "B1GVA": "Agriculture, forestry, and fishing.",
    "B1GVB_E": "Industry, including energy",
    "B1GVF": "Construction",
    "B1GVG_I": "Distrib. trade",
    "B1GVJ": "Information and communication",
    "B1GVK": "Financial and insurance",
    "B1GVL": "Real estate",
    "B1GVM_N": "Prof. services",
    "B1GVO_Q": "Public Admin",
    "B1GVR_U": "Other services",
}
codes = list(industry_dict.keys())
industry_names = list(industry_dict.values())
# Functions
@st.cache
def get_oecd_data():
    """Grabs OECD data in tidy format.

    Downloads UK quarterly national accounts output by industry (QNA dataset)
    and derives, per quarter: growth within each industry, the fastest and
    slowest growing industry, total output and each industry's share of it.

    :return: OECD output data by time-sector for UK
    :rtype: pandas dataframe
    """
    # Tell pdmx we want OECD data
    oecd = pdmx.Request("OECD")
    # Set out everything about the request in the format specified by the OECD API
    data = oecd.data(
        resource_id="QNA",
        key="GBR." + "+".join(codes) + ".LNBQRSA.Q/all?startTime=2015",
    ).to_pandas()
    df_oecd = pd.DataFrame(data).reset_index()
    # some clean up operations
    # Quarter labels like "2015-Q2" are mapped to the last day of the
    # quarter's final month (quarter number * 3).
    df_oecd["datetime"] = (
        pd.to_datetime(
            df_oecd["TIME_PERIOD"].apply(
                lambda x: str(x[:4]) + "-" + str(int(x[-1]) * 3)
            ),
            format="%Y-%m",
        )
        + pd.offsets.MonthEnd()
    )
    df_oecd["value"] = df_oecd["value"]
    df_oecd["industry"] = df_oecd["SUBJECT"].map(industry_dict)
    df_oecd = df_oecd.sort_values(["industry", "datetime"])
    # find the greatest growing sector within each time-sector cell
    # first compute growth within industry
    df_oecd["growth"] = df_oecd.groupby(["industry"])["value"].transform(
        lambda x: x.pct_change()
    )
    # now max growth within each time-period
    df_oecd["max"] = df_oecd.groupby("datetime")["growth"].transform(lambda x: x.max())
    # Row label of the max-growth row per quarter, used to look up the
    # industry name just below.
    df_oecd["argmax"] = (
        df_oecd.groupby(["datetime"])["growth"]
        .transform(lambda x: x.idxmax())
        .astype("Int32")
    )
    df_oecd["max_ind"] = pd.NA
    # NOTE(review): fillna(0) substitutes row label 0 when a quarter has no
    # growth data (e.g. the first quarter) — confirm this fallback is intended.
    df_oecd.loc[:, "max_ind"] = df_oecd.loc[
        df_oecd["argmax"].fillna(0), "industry"
    ].values
    # and min
    df_oecd["min"] = df_oecd.groupby("datetime")["growth"].transform(lambda x: x.min())
    df_oecd["argmin"] = (
        df_oecd.groupby(["datetime"])["growth"]
        .transform(lambda x: x.idxmin())
        .astype("Int32")
    )
    df_oecd["min_ind"] = pd.NA
    df_oecd.loc[:, "min_ind"] = df_oecd.loc[
        df_oecd["argmin"].fillna(0), "industry"
    ].values
    # compute a total
    df_oecd["GDP"] = df_oecd.groupby(["datetime", "LOCATION"])["value"].transform(
        lambda x: x.sum()
    )
    # then shares as a pct
    df_oecd["Percent"] = round(100 * df_oecd["value"] / df_oecd["GDP"], 2)
    return df_oecd
@st.cache
def fred_data_uk_wages():
    """Fetch the long-run UK average weekly earnings series from FRED.

    :return: wages in dataframe
    :rtype: pandas dataframe
    """
    # Series AWEPPUKQ covers 1919 through 2016.
    window_start = datetime.datetime(1919, 1, 1)
    window_end = datetime.datetime(2016, 1, 1)
    return web.DataReader("AWEPPUKQ", "fred", window_start, window_end)
# ----------------------------------------------------------------
# Dashboard
# ----------------------------------------------------------------
st.title("An example dashboard")
# Here's some text...
intro_text = "This is a short example dashboard demonstrating some of the basic functionality of streamlit."
# ...but it only appears in the script when we call `st.write`
st.write(intro_text)
# Here's some markdown
st.markdown("### This is a markdown subtitle")
# BUGFIX: corrected the user-facing typo "inlcuding" -> "including".
st.markdown(
    "Regular markdown syntax, including [links](https://aeturrell.github.io/coding-for-economists), will work. You can use `st.latex` to embed latex equations. Here's the result of using that command:"
)
# Here's an example of a latex equation:
st.latex(r"Y = \beta_0 + \beta_1X + \varepsilon")
st.markdown("### Data: in two columns using `st.columns`")
c1, c2 = st.columns((1, 1))
c1.write("#### OECD Data")
text = "We're going to download data from the OECD API for the UK by *industry*. We can wrap up the data retrieval and cleaning in a function and cache it using `st.cache`; very helpful if you're using an API."
c1.write(text)
df = get_oecd_data()
c1.write("Here are the first few lines of the OECD data:")
c1.write(df.head())
c2.write("#### FRED Data")
c2.write(
    "We're also going to download data from FRED. This will cover historical wages and, later, we'll give the option to plot it on both a log and a linear scale."
)
# Get FRED data on UK wages
fred_uk_awe = fred_data_uk_wages()
c2.write("FRED UK wage data first few lines:")
c2.write(fred_uk_awe.head())
st.markdown("### Metrics")
st.write(
    "Next, we'll use the columns functionality to show which sectors are the biggest movers on the quarter making use of OECD data and industry categories."
)
# want to compute these using the last timestep available in data
# subset to most recent data
subdf = df.loc[df["datetime"] == df["datetime"].max(), :]
rise_ind, rise_growth = subdf["max_ind"].iloc[0], subdf["max"].iloc[0] * 100
# NOTE(review): rise_share and fall_share are computed but never displayed.
rise_share = subdf.loc[subdf["industry"] == rise_ind, "Percent"]
fall_ind, fall_growth = subdf["min_ind"].iloc[0], subdf["min"].iloc[0] * 100
fall_share = subdf.loc[subdf["industry"] == fall_ind, "Percent"]
st.write(
    f"#### For the quarter ending {df['datetime'].max():%Y-%m-%d}, the biggest moves were:"
)
col1, col2 = st.columns(2)
col1.metric("Biggest rise:", f"{rise_ind}", f"{rise_growth:.2f} %")
col2.metric("Biggest fall:", f"{fall_ind}", f"{fall_growth:.2f} %")
st.markdown("### Charts and Interactivity")
st.write(
    "This section will show how to make charts using **altair** and **matplotlib**, two different plotting libraries. In both cases, we have added interactive elements so that the user can change the charts and have them update instantly."
)
st.markdown("#### Altair")
st.write(
    "We're going to plot data on UK output by sector sourced from the OECD. Note that altair charts have some interactivity built in (try zooming in or out, or hovering your mouse)."
)
# Let's give users a multi-select box to choose industries
chosen_industries = st.multiselect(
    "Choose which industries to display", industry_names, default=industry_names
)
# Filter OECD data just to chosen industries
subdf = df[df["industry"].isin(chosen_industries)]
# Bar chart of quarterly output, coloured by industry.
graph = (
    alt.Chart(subdf)
    .mark_bar(size=20)
    .encode(
        alt.Y("value:Q", scale=alt.Scale(domain=(0, 600e3))),
        x="datetime:T",
        color="industry",
        tooltip=["industry", subdf.columns[-1]],
    )
    .properties(
        title="UK output by sector (chained volume, seasonally adjusted; mn GBP)"
    )
    .interactive()
)
st.altair_chart(graph, use_container_width=True)
st.write("Try changing the multi-select box above to see how the chart changes.")
st.markdown("#### Matplotlib")
# Interactive to choose log or linear scale
st.write(
    "This next chart is plotted by **matplotlib**, and doesn't come with any built-in interactivity. However, we have added interactivity using streamlit; the checkbox allows you to show the data on a log or linear scale."
)
logscale = st.checkbox("Log scale?", False)
# Plot chart
fig, ax = plt.subplots(figsize=(6, 3), dpi=150)
ax.plot(fred_uk_awe)
# Lower limit of 1 so the log-scale option never tries to show zero.
ax.set_ylim(1, None)
ax.set_ylabel("Seasonally adjusted GBP (nominal)")
scale_txt = "linear scale"
if logscale:
    scale_txt = "log scale"
    ax.set_yscale("log")
ax.set_title(f"Average weekly earnings per person in the UK ({scale_txt})", loc="left")
st.pyplot(fig)
|
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import roslaunch
import rosnode
import random
import re
import os
from time import sleep
from fmp_slam_eval.net_utils import next_free_port
class ROSLauncher(object):
    """
    Wrapper class for the ROS Launcher API.

    Configures the ROS Master URI (protocol/host/port) via environment
    variables, starts a launch file, and can monitor a set of nodes so the
    whole launch can be torn down when they die.
    """

    def __init__(self, package, launch_file, wait_for_master=False, log_path=None, monitored_nodes=None,
                 protocol=None, host=None, port=None):
        """
        Constructor for a ROS Launcher object.

        :param package: (str) Name of the package where the .launch file is located.
        :param launch_file: (str) Path to the xml .launch file.
        :param wait_for_master: (bool)[Default: False] Waits for a ROS Master to exist if True.
                                If False, then, if no Master exists, it starts one.
        :param log_path: (str)[Default: None] Path where the log files for the launched nodes will be saved.
                         If None, then they will be saved to the default $ROS_LOG_DIR path.
        :param monitored_nodes: (dict)[Default: None] Kill the launch process when the nodes listed here shutdown.
                                E.g.: {"any": ["kill", "if", "any", "of", "these/nodes", "die"],
                                       "all": ["kill", "if", "all", "of", "these/nodes", "die"]}
        :param protocol: (str)[Default: None] Protocol to start the launcher with. By default it is "http".
        :param host: (str)[Default: None] Host where the launch file will be executed. By default it is "localhost".
        :param port: (int)[Default: None] TCP port where the ROS Master will start. By default 11311.
                     The string "auto" picks a random free port instead.
        """
        self._package = package
        self._launch_file = launch_file
        if log_path:
            os.environ["ROS_LOG_DIR"] = log_path
        # ROS Master URI defaults; only exported when the caller overrides any part.
        self._protocol = "http"
        self._host = "localhost"
        self._port = 11311
        host_changed = False
        if protocol is not None:
            if isinstance(protocol, str):
                self._protocol = protocol
                host_changed = True
            else:
                raise TypeError("Invalid type for a protocol ({}: {}). Only str supported.".format(
                    type(protocol), protocol))
        if host is not None:
            if isinstance(host, str):
                self._host = host
                host_changed = True
            else:
                raise TypeError("Invalid type for a hostname ({}: {}). Only str supported.".format(
                    type(host), host))
        if port is not None:
            if isinstance(port, int):
                # Only non-privileged TCP ports are accepted.
                if 1024 <= port <= 65535:
                    self._port = port
                    host_changed = True
                else:
                    raise ValueError("Invalid Port number ({}).".format(port))
            elif isinstance(port, str):
                if port.lower() == "auto":
                    # Pick a random starting point and probe upward for a free port.
                    random.seed()
                    port = random.randint(1024, 65535)
                    self._port = next_free_port(host=self._host, port=port)
                    host_changed = True
                else:
                    raise ValueError("Invalid option '{}'".format(port))
            else:
                raise TypeError("Invalid type for port ({}: {}). Only int and str supported.".format(type(port), port))
        if host_changed:
            os.environ["ROS_MASTER_URI"] = "{}://{}:{}".format(self._protocol, self._host, self._port)
            os.environ["ROS_HOSTNAME"] = self._host
        self._monitored_nodes = None
        if monitored_nodes is not None:
            if isinstance(monitored_nodes, dict):
                self._monitored_nodes = monitored_nodes
            else:
                raise TypeError("Monitored nodes must be a dictionary of lists.")
        self._uuid = roslaunch.rlutil.get_or_generate_uuid(None, wait_for_master)
        self._launch_obj = None

    def start(self, args_dict=None):
        """
        Start the ROS Launch process and all of the launch file nodes.

        :param args_dict: (dict)[Default: None] Dictionary of arguments for the launch file.
        :return: None
        """
        args_list = []
        if args_dict is not None:
            # roslaunch expects "key:=value" strings for launch arguments.
            args_list = ["{}:={}".format(k, v) for k, v in args_dict.items()]
        self._launch_obj = roslaunch.parent.ROSLaunchParent(self._uuid, [(self._launch_file, args_list)])
        self._launch_obj.start()

    def stop(self):
        """
        Shutdown the launch process and all the child nodes and subprocesses with it.

        :return: None
        """
        if self._launch_obj is not None:
            try:
                # Best effort: purge dead nodes from the master's registry first.
                rosnode.rosnode_cleanup()
            except (rosnode.ROSNodeIOException, rosnode.ROSNodeException) as e:
                print(e)
            self._launch_obj.shutdown()

    def _monitored_nodes_are_dead(self):
        """
        Check if the nodes configured to be monitored are still running.

        :return: (bool) True if the configured nodes are dead. False otherwise.
        """
        # Nothing to monitor (None or empty dict) means "never report dead".
        if not self._monitored_nodes:
            return False
        # Dead process names look like "<node_name>-<number>"; strip the suffix.
        dead_nodes = [re.search("([a-zA-Z_/0-9]*)-[0-9]*", n.name).group(1) for n in self._launch_obj.pm.dead_list]
        nodes_dead = False
        if "any" in self._monitored_nodes:
            # IDIOM: direct membership generator (original wrapped it in a
            # redundant (True for ... if ...) generator).
            nodes_dead = nodes_dead or any(n in dead_nodes for n in self._monitored_nodes['any'])
        if "all" in self._monitored_nodes:
            nodes_dead = nodes_dead or set(self._monitored_nodes['all']).issubset(dead_nodes)
        return nodes_dead

    def is_running(self):
        """
        Return whether the launcher is still running, or if it should/could be shutdown.

        :return: (Bool) False if either the Process Manager is shutdown, the Server is shutdown, or the monitored nodes
                 are dead. True otherwise.
        """
        active_nodes = self._launch_obj.pm.get_active_names()
        pm_is_shutdown = self._launch_obj.pm.is_shutdown
        server_is_shutdown = self._launch_obj.server.is_shutdown
        nodes_dead = self._monitored_nodes_are_dead()
        # NOTE(review): the <= 2 threshold presumably accounts for
        # ever-present housekeeping processes — confirm against the launch setup.
        return not (pm_is_shutdown or server_is_shutdown or len(active_nodes) <= 2 or nodes_dead)

    def spin(self):
        """
        Similar to the ros.spin() method. It just blocks the execution until the is_running method returns false.

        :return: None
        """
        while self.is_running():
            sleep(0.1)
        self.stop()
|
"stats template"

# Code-generation template for bottlechest's `stats` function: the strings
# below are expanded (NAME/NDIM/DTYPE/AXIS/INDEX placeholders substituted)
# into Cython source code.
# NOTE(review): the indentation inside the template strings appears to have
# been lost in this copy of the file — restore it before regenerating code.

# For computation of weighted variance, see
# http://en.wikipedia.org/wiki/Weighted_sample_variance#Weighted_sample_variance
from copy import deepcopy
import bottlechest as bn

__all__ = ["stats"]

# Dtypes supported by the installed bottlechest build, split by kind.
FLOAT_DTYPES = [x for x in bn.dtypes if 'float' in x]
INT_DTYPES = [x for x in bn.dtypes if 'int' in x]

# Float dtypes (not axis=None) ----------------------------------------------
floats = {}
floats['dtypes'] = FLOAT_DTYPES
floats['axisNone'] = True
floats['force_output_dtype'] = 'bool'
floats['reuse_non_nan_func'] = False
# Function header shared by every generated variant; takes the data array,
# optional weights and a variance-computation flag.
floats['top'] = """
@cython.boundscheck(False)
@cython.wraparound(False)
def NAME_NDIMd_DTYPE_axisAXIS(np.ndarray[np.DTYPE_t, ndim=NDIM] a,
np.ndarray[np.float_t, ndim=1] w = None,
int compute_variance = False):
'''Compute min, max, mean and #nans.
'''
"""
loop = {}
# 1-D body: one pass for min/max/mean and the NaN count, plus an optional
# second pass for (weighted) variance.
loop[1] = """\
cdef:
np.DTYPE_t ai
np.float_t wt
np.DTYPE_t a_min = MAXDTYPE
np.DTYPE_t a_max = MINDTYPE
np.float_t mean = 0
np.float_t var = 0
np.float_t non_nans = 0
np.float_t nans = 0
np.float_t tot_wt2, d
if w is None:
for iINDEX0 in range(nINDEX0):
ai = a[iINDEX0]
if ai != ai:
nans += 1
continue
if ai < a_min:
a_min = ai
if ai > a_max:
a_max = ai
mean += ai
non_nans = nINDEX0 - nans
if non_nans != 0:
mean /= non_nans
if compute_variance and non_nans >= 2:
for iINDEX0 in range(nINDEX0):
ai = a[iINDEX0]
if ai == ai:
var += (ai - mean) ** 2
var /= non_nans - 1
else:
if len(w) != n0:
raise ValueError("invalid length of the weight vector ({} != {})".
format(len(w), n0))
for iINDEX0 in range(nINDEX0):
ai = a[iINDEX0]
wt = w[iINDEX0]
if ai != ai:
nans += wt
continue
else:
non_nans += wt
if ai < a_min:
a_min = ai
if ai > a_max:
a_max = ai
mean += wt * ai
if non_nans != 0:
mean /= non_nans
if compute_variance:
tot_wt2 = 0
for iINDEX0 in range(nINDEX0):
ai = a[iINDEX0]
if ai == ai:
wt = w[iINDEX0]
tot_wt2 += wt ** 2
var += wt * (ai - mean) ** 2
d = non_nans ** 2 - tot_wt2
if d > 1e-6:
var *= non_nans / d
return a_min, a_max, mean, var, nans, non_nans
"""
loop[2] = """\
cdef:
np.npy_intp *dims = [nINDEX1, 6]
np.ndarray[np.float64_t, ndim=2] y = PyArray_ZEROS(2, dims, NPY_float64, 0)
np.DTYPE_t ai
np.float64_t mean
np.float_t wt
np.float_t var
np.float_t tot_wt
np.ndarray[np.float64_t, ndim=1] tot_wt2 = PyArray_ZEROS(1, dims, NPY_float64, 0)
np.float_t d
for iINDEX1 in range(nINDEX1):
y[iINDEX1, 0] = MAXfloat64
y[iINDEX1, 1] = MINfloat64
if w is None:
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(nINDEX1):
ai = a[INDEXALL]
if ai != ai:
y[iINDEX1, 4] += 1
continue
if ai < y[iINDEX1, 0]:
y[iINDEX1, 0] = ai
if ai > y[iINDEX1, 1]:
y[iINDEX1, 1] = ai
y[iINDEX1, 2] += ai
for iINDEX1 in range(nINDEX1):
y[iINDEX1, 5] = nINDEX0 - y[iINDEX1, 4]
if y[iINDEX1, 5] > 0:
y[iINDEX1, 2] /= y[iINDEX1, 5]
if compute_variance:
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(nINDEX1):
ai = a[INDEXALL]
if ai == ai:
y[iINDEX1, 3] += (ai - y[iINDEX1, 2]) ** 2
for iINDEX1 in range(nINDEX1):
if y[iINDEX1, 5] >= 2:
y[iINDEX1, 3] /= y[iINDEX1, 5] - 1
else:
for iINDEX0 in range(nINDEX0):
wt = w[iINDEX0]
for iINDEX1 in range(nINDEX1):
ai = a[INDEXALL]
if ai != ai:
y[iINDEX1, 4] += wt
continue
y[iINDEX1, 5] += wt
if ai < y[iINDEX1, 0]:
y[iINDEX1, 0] = ai
if ai > y[iINDEX1, 1]:
y[iINDEX1, 1] = ai
y[iINDEX1, 2] += wt * ai
for iINDEX1 in range(nINDEX1):
if y[iINDEX1, 5] > 0:
y[iINDEX1, 2] /= y[iINDEX1, 5]
if compute_variance:
for iINDEX0 in range(nINDEX0):
wt = w[iINDEX0]
for iINDEX1 in range(nINDEX1):
if y[iINDEX1, 5] >= 2:
ai = a[INDEXALL]
if ai == ai:
tot_wt2[iINDEX1] += wt ** 2
y[iINDEX1, 3] += wt * (ai - y[iINDEX1, 2]) ** 2
for iINDEX1 in range(nINDEX1):
tot_wt = y[iINDEX1, 5]
d = tot_wt ** 2 - tot_wt2[iINDEX1]
if d > 1e-6:
y[iINDEX1, 3] *= tot_wt / d
return y
"""
sparse = """
@cython.boundscheck(False)
@cython.wraparound(False)
def SPARSE(object a,
np.ndarray[np.float_t, ndim=1] w = None,
int compute_variance = False):
'''Compute min, max, #nans, mean and variance.
'''
cdef:
Py_ssize_t n_rows = a.shape[0]
Py_ssize_t n_cols = a.shape[1]
if w is not None and len(w) != n_rows:
raise ValueError("invalid length of the weight vector")
cdef:
np.ndarray[np.DTYPE_t, ndim=1] data = a.data
np.ndarray[int, ndim=1] indices = a.indices
np.ndarray[int, ndim=1] indptr = a.indptr
np.npy_intp *dims = [n_cols, 6]
np.ndarray[np.float64_t, ndim=2] y = PyArray_ZEROS(2, dims, NPY_float64, 0)
np.ndarray[np.float64_t, ndim=1] tot_wt2 = PyArray_ZEROS(1, dims, NPY_float64, 0)
int ri, ci
np.float_t wt
np.float_t tot_w = 0
np.float_t d
np.DTYPE_t ai
for ci in range(n_cols):
y[ci, 0] = MAXfloat64
y[ci, 1] = MINfloat64
if w is None:
tot_w = n_rows
else:
for ri in range(n_rows):
tot_w += w[ri]
if tot_w == 0:
return y
for ri in range(a.shape[0]):
wt = 1 if w is None else w[ri]
for i in range(indptr[ri], indptr[ri + 1]):
ai = data[i]
if ai != ai:
continue
ci = indices[i]
y[ci, 5] += wt
if ai < y[ci, 0]:
y[ci, 0] = ai
if ai > y[ci, 1]:
y[ci, 1] = ai
y[ci, 2] += wt * ai
for ci in range(n_cols):
y[ci, 4] = tot_w - y[ci, 5]
if y[ci, 5] != 0:
y[ci, 2] /= y[ci, 5]
if compute_variance:
for ri in range(a.shape[0]):
wt = 1 if w is None else w[ri]
for i in range(indptr[ri], indptr[ri + 1]):
ai = data[i]
if ai == ai:
ci = indices[i]
y[ci, 3] += wt * (ai - y[ci, 2]) ** 2
tot_wt2[ci] += wt ** 2
for ci in range(n_cols):
d = y[ci, 5] ** 2 - tot_wt2[ci]
if d > 1e-6:
y[ci, 3] *= y[ci, 5] / d
return y
"""
floats['loop'] = loop
floats['sparse'] = sparse
# Int dtypes (not axis=None) ------------------------------------------------
ints = deepcopy(floats)
ints['dtypes'] = INT_DTYPES
loop = {}
loop[1] = """\
cdef:
np.DTYPE_t ai
np.float_t wt
np.DTYPE_t a_min = MAXDTYPE
np.DTYPE_t a_max = MINDTYPE
np.float64_t mean = 0
np.float64_t var = 0
np.float_t tot_w, tot_w2, d
if n0 == 0:
return (a_min, a_max, 0, 0, 0, 0)
if w is None:
tot_w = nINDEX0
for iINDEX0 in range(nINDEX0):
ai = a[INDEXALL]
if ai < a_min:
a_min = ai
if ai > a_max:
a_max = ai
mean += ai
mean /= n0
if compute_variance and n0 >= 2:
for iINDEX0 in range(nINDEX0):
var += (a[INDEXALL] - mean) ** 2
var /= n0 - 1
else:
tot_w = 0
if len(w) != n0:
raise ValueError("invalid length of the weight vector")
for iINDEX0 in range(nINDEX0):
ai = a[INDEXALL]
wt = w[iINDEX0]
tot_w += wt
if ai < a_min:
a_min = ai
if ai > a_max:
a_max = ai
mean += wt * ai
if tot_w != 0:
mean /= tot_w
if compute_variance:
tot_w2 = 0
for iINDEX0 in range(nINDEX0):
tot_w2 += w[iINDEX0] ** 2
var += w[iINDEX0] * (a[INDEXALL] - mean) ** 2
d = tot_w ** 2 - tot_w2
if d > 1e-6:
var *= tot_w / d
return a_min, a_max, mean, var, 0, tot_w
"""
loop[2] = """\
cdef:
np.npy_intp *dims = [n1, 6]
np.ndarray[np.float64_t, ndim=2] y = PyArray_ZEROS(2, dims, NPY_float64, 0)
np.DTYPE_t ai
np.float64_t mean
np.float_t wt
np.float_t tot_w = 0, tot_w2 = 0
if w is None:
tot_w = nINDEX0
else:
for iINDEX0 in range(nINDEX0):
tot_w += w[iINDEX0]
for iINDEX1 in range(nINDEX1):
y[iINDEX1, 0] = MAXfloat64
y[iINDEX1, 1] = MINfloat64
y[iINDEX1, 5] = tot_w
if tot_w == 0:
return y
if w is None:
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(nINDEX1):
ai = a[INDEXALL]
if ai < y[iINDEX1, 0]:
y[iINDEX1, 0] = ai
if ai > y[iINDEX1, 1]:
y[iINDEX1, 1] = ai
y[iINDEX1, 2] += ai
for iINDEX1 in range(nINDEX1):
y[iINDEX1, 2] /= nINDEX0
mean = y[iINDEX1, 2]
if compute_variance and nINDEX0 >= 2:
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(nINDEX1):
y[iINDEX1, 3] += (a[INDEXALL] - y[iINDEX1, 2]) ** 2
for iINDEX1 in range(nINDEX1):
y[iINDEX1, 3] /= nINDEX0 - 1
else:
for iINDEX0 in range(nINDEX0):
wt = w[iINDEX0]
for iINDEX1 in range(nINDEX1):
ai = a[INDEXALL]
if ai < y[iINDEX1, 0]:
y[iINDEX1, 0] = ai
if ai > y[iINDEX1, 1]:
y[iINDEX1, 1] = ai
y[iINDEX1, 2] += wt * ai
for iINDEX1 in range(nINDEX1):
y[iINDEX1, 2] /= tot_w
mean = y[iINDEX1, 2]
if compute_variance:
for iINDEX0 in range(nINDEX0):
wt = w[iINDEX0]
tot_w2 += wt ** 2
for iINDEX1 in range(nINDEX1):
y[iINDEX1, 3] += wt * (a[INDEXALL] - y[iINDEX1, 2]) ** 2
d = tot_w ** 2 - tot_w2
if d > 1e-6:
for iINDEX1 in range(nINDEX1):
y[iINDEX1, 3] *= tot_w / d
return y
"""
sparse = """
@cython.boundscheck(False)
@cython.wraparound(False)
def SPARSE(object a,
np.ndarray[np.float_t, ndim=1] w = None,
int compute_variance = False):
'''Compute min, max, #nans, mean and variance.
'''
cdef:
Py_ssize_t n_rows = a.shape[0]
Py_ssize_t n_cols = a.shape[1]
if w is not None and len(w) != n_rows:
raise ValueError("invalid length of the weight vector")
cdef:
np.ndarray[np.DTYPE_t, ndim=1] data = a.data
np.ndarray[int, ndim=1] indices = a.indices
np.ndarray[int, ndim=1] indptr = a.indptr
np.npy_intp *dims = [n_cols, 6]
np.ndarray[np.float64_t, ndim=2] y = PyArray_ZEROS(2, dims, NPY_float64, 0)
np.ndarray[np.float64_t, ndim=1] tot_w2 = PyArray_ZEROS(1, dims, NPY_float64, 0)
np.float_t wt
np.float_t tot_w = 0
int ri, ci
np.DTYPE_t ai
for ci in range(n_cols):
y[ci, 0] = MAXfloat64
y[ci, 1] = MINfloat64
if w is None:
tot_w = n_rows
else:
for ri in range(n_rows):
tot_w += w[ri]
if tot_w == 0:
return y
for ri in range(a.shape[0]):
wt = 1 if w is None else w[ri]
for i in range(indptr[ri], indptr[ri + 1]):
ci = indices[i]
ai = data[i]
if ai < y[ci, 0]:
y[ci, 0] = ai
if ai > y[ci, 1]:
y[ci, 1] = ai
y[ci, 5] += wt
y[ci, 2] += wt * ai
for ci in range(n_cols):
y[ci, 4] = tot_w - y[ci, 5]
if y[ci, 5] != 0:
y[ci, 2] /= y[ci, 5]
if compute_variance:
for ri in range(a.shape[0]):
wt = 1 if w is None else w[ri]
for i in range(indptr[ri], indptr[ri + 1]):
ci = indices[i]
tot_w2[ci] += wt ** 2
y[ci, 3] += wt * (data[i] - y[ci, 2]) ** 2
for ci in range(n_cols):
d = y[ci, 5] ** 2 - tot_w2[ci]
if d > 1e-6:
y[ci, 3] *= y[ci, 5] / d
return y
"""
ints['loop'] = loop
ints['sparse'] = sparse
# Slow, unaccelerated ndim/dtype --------------------------------------------
slow = {}
slow['name'] = "stats"
slow['signature'] = "arr, weights, compute_variance"
slow['func'] = "bn.slow.stats(arr, weights=None, compute_variance=False)"
# Template ------------------------------------------------------------------
stats = {}
stats['name'] = 'stats'
stats['is_reducing_function'] = False
stats['cdef_output'] = False
stats['slow'] = slow
stats['sparse'] = {}
stats['templates'] = {}
stats['templates']['float_None'] = floats
stats['templates']['int_None'] = ints
stats['pyx_file'] = 'func/%sbit/stats.pyx'
stats['main'] = '''"stats auto-generated from template"
def stats(arr, weights=None, compute_variance=False):
"""
Compute min, max, #nans, mean and variance.
Result is a tuple (min, max, mean, variance, #nans, #non-nans) or an
array of shape (len(arr), 6).
The mean and the number of nans and non-nans are weighted.
Computation of variance requires an additional pass and is not enabled
by default. Zeros are filled in instead of variance.
Parameters
----------
x : array_like, 1 or 2 dimensions
Input array.
weights : array_like, optional
Weights, array of the same length as `x`.
compute_variance : bool, optional
If set to True, the function also computes variance.
Returns
-------
out : a 6-element tuple or an array of shape (len(x), 6)
Computed (min, max, mean, 0, #nans and #non-nans)
Raises
------
ValueError
If the length of the weight vector does not match the length of the
array
"""
func, a, weights = stats_selector(arr, weights)
return func(a, weights, compute_variance)
def stats_selector(arr, weights):
cdef int dtype
cdef tuple key
if sp.issparse(arr):
a = arr
dtype = PyArray_TYPE(arr.data)
ndim = 0
key = (0, dtype, None)
else:
if type(arr) is np.ndarray:
a = arr
else:
a = np.array(arr, copy=False)
dtype = PyArray_TYPE(arr)
ndim = PyArray_NDIM(a)
key = (ndim, dtype, None)
if weights is not None and (
type(weights) is not np.ndarray or
weights.dtype is not np.float):
weights = np.array(weights, copy=False, dtype=np.float)
try:
func = stats_dict[key]
return func, a, weights
except KeyError:
pass
try:
func = stats_slow_dict[None]
except KeyError:
tup = (str(ndim), str(a.dtype))
raise TypeError("Unsupported ndim/dtype (%s/%s)." % tup)
return func, a, weights
'''
|
from __future__ import absolute_import
from jhu_primitives.monomial.monomial import MonomialPrimitive
__all__ = ['MonomialPrimitive', "AdjacencySpectralEmbedding"]
from .ase import AdjacencySpectralEmbedding
"""
from .lse import LaplacianSpectralEmbedding
from .dimselect import DimensionSelection
from .gclust import GaussianClustering
from .nonpar import NonParametricClustering
from .numclust import NumberOfClusters
from .oocase import OutOfCoreAdjacencySpectralEmbedding
from .ptr import PassToRanks
from .sgc import SpectralGraphClustering
from .sgm import SeededGraphMatching
from .vnsgm import VertexNominationSeededGraphMatching
__all__ = ['AdjacencySpectralEmbedding', 'LaplacianSpectralEmbedding',
'DimensionSelection', 'GaussianClustering', 'NonParametricClustering',
'NumberOfClusters', 'OutOfCoreAdjacencySpectralEmbedding', 'PassToRanks',
'SpectralGraphClustering', 'SeededGraphMatching',
'VertexNominationSeededGraphMatching']
"""
|
from django.urls import path

from nameservice import views

# URL routes for the nameservice app: a home page, CRUD views for SkyNS
# records (keyed by primary key), and a user-profile update view.
urlpatterns = [
    path("", views.Home.as_view(), name="home"),
    path("create", views.SkyNSCreateView.as_view(), name="nsCreate"),
    path("list", views.SkyNSListView.as_view(), name="nsList"),
    path("update/<int:pk>", views.SkyNSUpdateView.as_view(), name="nsUpdate"),
    path("detail/<int:pk>", views.SkyNSDetailView.as_view(), name="nsDetail"),
    path("delete/<int:pk>", views.SkyNSDeleteView.as_view(), name="nsDelete"),
    path("<int:pk>/profile", views.UserPortalUpdateView.as_view(), name="profile"),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pandas import DataFrame
from Octopus.dataframe.core.abstractDataFrame import AbstractDataFrame
__all__ = ['PandasDataFrame']
class PandasDataFrame(AbstractDataFrame):
    """Pandas-backed implementation of :class:`AbstractDataFrame`.

    Wraps a :class:`pandas.DataFrame` (stored on ``self.dataframe``) and
    exposes groupby aggregations driven by a groupby-settings object.
    """

    def __init__(self, data=None, index=None, columns=None, dtype=None,
                 copy=False):
        """Wrap an existing DataFrame, or build one from raw data.

        Parameters mirror the :class:`pandas.DataFrame` constructor.
        """
        # Reuse an existing DataFrame directly to avoid an extra copy.
        if type(data) is DataFrame:
            self.dataframe = data
        else:
            self.dataframe = DataFrame(data=data, index=index, columns=columns,
                                       dtype=dtype, copy=copy)

    def _grouped(self, groupby_obj):
        """Build the pandas GroupBy shared by every aggregation below.

        `groupby_obj` carries the groupby settings as attributes
        (by/axis/level/as_index/sort/group_keys/squeeze).
        """
        # NOTE(review): the ``squeeze`` keyword was removed from
        # DataFrame.groupby in pandas 2.0 -- confirm the pinned pandas version.
        return self.dataframe.groupby(by=groupby_obj.by, axis=groupby_obj.axis,
                                      level=groupby_obj.level,
                                      as_index=groupby_obj.as_index,
                                      sort=groupby_obj.sort,
                                      group_keys=groupby_obj.group_keys,
                                      squeeze=groupby_obj.squeeze)

    def groupbymin(self, groupby_obj):
        """Group rows per `groupby_obj` and take the per-group minimum."""
        return self._grouped(groupby_obj).min()

    def groupbymax(self, groupby_obj):
        """Group rows per `groupby_obj` and take the per-group maximum."""
        return self._grouped(groupby_obj).max()

    def groupbymean(self, groupby_obj):
        """Group rows per `groupby_obj` and take the per-group mean."""
        return self._grouped(groupby_obj).mean()

    def groupbysum(self, groupby_obj):
        """Group rows per `groupby_obj` and take the per-group sum."""
        return self._grouped(groupby_obj).sum()

    def groupbycount(self, groupby_obj):
        """Group rows per `groupby_obj` and count non-null values per group."""
        return self._grouped(groupby_obj).count()
def read_csv_pandas(filepath_or_buffer,
                    sep=',',
                    delimiter=None,
                    header='infer',
                    names=None,
                    index_col=None,
                    usecols=None,
                    squeeze=False):
    """Read a CSV file into a :class:`PandasDataFrame`.

    Parameters mirror :func:`pandas.read_csv`; `squeeze` collapses a
    single-column result to a Series before wrapping.

    :return: a :class:`PandasDataFrame` wrapping the parsed data.
    """
    import pandas as pd
    frame = pd.read_csv(filepath_or_buffer=filepath_or_buffer,
                        sep=sep,
                        delimiter=delimiter,
                        header=header,
                        names=names,
                        index_col=index_col,
                        usecols=usecols)
    # pandas removed read_csv's ``squeeze`` keyword in 2.0; applying
    # DataFrame.squeeze("columns") afterwards is the documented equivalent
    # and keeps this signature backward-compatible.
    if squeeze:
        frame = frame.squeeze("columns")
    return PandasDataFrame(data=frame)
# -*- coding: utf-8 -*-
"""
This module is experimental and subject to change.
"""
from simmate.database.base_data_types import (
table_column,
Structure,
Forces,
Thermodynamics,
Calculation,
)
from typing import List
from pymatgen.io.vasp.outputs import Vasprun
class DynamicsRun(Structure, Calculation):
    """
    Holds results from a dynamics simulation -- often referred to as a molecular
    dynamics run.

    In addition to the attributes listed, you can also access all ionic steps
    of the run via the `structures` attribute. This attribute gives a list of
    `DynamicsIonicSteps`.
    """

    class Meta:
        # Abstract: concrete tables are generated via create_subclasses().
        abstract = True
        app_label = "workflows"

    temperature_start = table_column.IntegerField(blank=True, null=True)
    """
    The starting temperature of the simulation in Kelvin.
    """

    temperature_end = table_column.IntegerField(blank=True, null=True)
    """
    The ending temperature of the simulation in Kelvin.
    """

    time_step = table_column.FloatField(blank=True, null=True)
    """
    The time in picoseconds for each step in the simulation.
    """

    nsteps = table_column.IntegerField(blank=True, null=True)
    """
    The total number of steps in the simulation. Note, this is the maximum cutoff
    steps set. In some cases, there may be fewer steps when the simulation is
    stopped early.
    """

    @classmethod
    def create_subclasses(cls, name: str, module: str, **extra_columns):
        """
        Dynamically creates a subclass of DynamicsRun as well as a separate
        DynamicsIonicStep table for it. These tables are linked together.

        Example use:

        ``` python
        from simmate.database.base_data_types import DynamicsRun

        ExampleDynamicsRun, ExampleDynamicsIonicStep = DynamicsRun.create_subclasses(
            "Example",
            module=__name__,
        )
        ```

        Parameters
        ----------
        - `name` :
            The prefix name of the subclasses that are output. "DynamicsRun" and
            "DynamicsIonicStep" will be attached to the end of this prefix.
        - `module` :
            name of the module this subclass should be associated with. Typically,
            you should pass __name__ to this.
        - `**extra_columns` :
            Additional columns to add to the table. The keyword will be the
            column name and the value should match django options
            (e.g. table_column.FloatField())

        Returns
        -------
        - `NewDynamicsRunClass` :
            A subclass of DynamicsRun.
        - `NewDynamicsIonicStepClass`:
            A subclass of DynamicsIonicStep.
        """
        # For convenience, we add columns that point to the start and end structures
        NewDynamicsRunClass = cls.create_subclass(
            f"{name}DynamicsRun",
            module=module,
            **extra_columns,
        )
        NewDynamicDynamicsIonicStepClass = (
            DynamicsIonicStep.create_subclass_from_dynamics_run(
                name,
                NewDynamicsRunClass,
                module=module,
                **extra_columns,
            )
        )
        # we now have a new child class and avoided writing some boilerplate code!
        return NewDynamicsRunClass, NewDynamicDynamicsIonicStepClass

    def update_from_vasp_run(
        self,
        vasprun: Vasprun,
        corrections: List,
        directory: str,
    ):
        """
        Given a Vasprun object from a finished dynamics run, this will update the
        DynamicsRun table entry and the corresponding DynamicsIonicStep entries.

        Parameters
        ----------
        vasprun :
            The final Vasprun object from the dynamics run outputs.
        corrections :
            List of errors and corrections applied to during the relaxation.
        directory :
            name of the directory that relaxation was ran in. This is only used
            to reference the archive file if it's ever needed again.
        """
        # The data is actually easier to access as a dictionary and everything
        # we need is stored under the "output" key
        data = vasprun.as_dict()["output"]
        # The only other data we need to grab is the list of structures. We can
        # pull the structure for each ionic step from the vasprun class directly.
        structures = vasprun.structures
        # Now let's iterate through the ionic steps and save these to the database.
        for number, (structure, ionic_step) in enumerate(
            zip(structures, data["ionic_steps"])
        ):
            # first pull all the data together and save it to the database. We
            # are saving this to an DynamicsIonicStep datatable. To access this
            # model, we need to use "structures.model".
            structure = self.structures.model.from_toolkit(
                number=number,
                structure=structure,
                energy=ionic_step["e_wo_entrp"],
                site_forces=ionic_step["forces"],
                lattice_stress=ionic_step["stress"],
                temperature=self._get_temperature_at_step(number),
                dynamics_run=self,  # this links the structure to this dynamics run
            )
            structure.save()
        # lastly, we also want to save the corrections made and directory it ran in
        self.corrections = corrections
        self.directory = directory
        # Now we have the relaxation data all loaded and can save it to the database
        self.save()

    def _get_temperature_at_step(self, step_number: int):
        # Expected temperature at a step, assuming a linear ramp from
        # temperature_start to temperature_end.
        return step_number * self._get_temperature_step_size() + self.temperature_start

    def _get_temperature_step_size(self):
        # Per-step temperature increment of the linear ramp.
        # NOTE(review): raises ZeroDivisionError when nsteps is 0 and TypeError
        # when any of the fields is unset (null) -- confirm callers populate them.
        return (self.temperature_end - self.temperature_start) / self.nsteps
class DynamicsIonicStep(Structure, Thermodynamics, Forces):
    """
    Holds information for a single ionic step of a `DynamicsRun`.

    Each entry will map to a `DynamicsRun`, so you should typically access this
    data through that class. The exception to this is when you want all ionic
    steps across many relaxations for a machine learning input.
    """

    class Meta:
        # Abstract: concrete tables are generated via
        # create_subclass_from_dynamics_run().
        abstract = True
        app_label = "workflows"

    # Columns to include when serializing the "base" information of an entry.
    base_info = (
        ["number"] + Structure.base_info + Thermodynamics.base_info + Forces.base_info
    )

    number = table_column.IntegerField()
    """
    This is ionic step number for the given relaxation. This starts counting from 0.
    """

    temperature = table_column.FloatField(blank=True, null=True)
    """
    Expected temperature based on the temperature_start/end and nsteps. Note
    that in-practice some steps may not be at equilibrium temperature. This could
    be the 1st 100 steps of a run or alternatively when the thermostat component
    is off-temperature.
    """

    # TODO: Additional options from Vasprun.as_dict to consider adding
    #   e_0_energy
    #   e_fr_energy
    #   kinetic
    #   lattice kinetic
    #   nosekinetic
    #   nosepot

    @classmethod
    def create_subclass_from_dynamics_run(
        cls,
        name: str,
        dynamics_run: DynamicsRun,
        module: str,
        **extra_columns,
    ):
        """
        Dynamically creates a subclass of DynamicsIonicStep and links it to the
        DynamicsRun table.

        This method should NOT be called directly because it is instead used by
        `DynamicsRun.create_subclasses`.

        Parameters
        ----------
        - `name` :
            Name of the subclass that is output.
        - `dynamics_run` :
            DynamicsRun table that these ionic steps should be associated with.
        - `module` :
            name of the module this subclass should be associated with. Typically,
            you should pass __name__ to this.
        - `**extra_columns` :
            Additional columns to add to the table. The keyword will be the
            column name and the value should match django options
            (e.g. table_column.FloatField())

        Returns
        -------
        - `NewClass` :
            A subclass of DynamicsIonicStep.
        """
        # All structures in this table come from dynamics run calculations, where
        # there can be many structures (one for each ionic step) linked to a
        # single run. This means the start structure, end structure, and
        # those structure in-between are stored together here.
        # Therefore, there's just a simple column stating which relaxation it
        # belongs to.
        NewClass = cls.create_subclass(
            f"{name}DynamicsIonicStep",
            dynamics_run=table_column.ForeignKey(
                dynamics_run,
                on_delete=table_column.CASCADE,
                related_name="structures",
            ),
            module=module,
            **extra_columns,
        )
        # we now have a new child class and avoided writing some boilerplate code!
        return NewClass
|
"""Functions for training and running group classification."""
import math
import os
import time
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.utils.extmath import softmax
from scipy.special import expit
from sklearn.metrics import f1_score, fbeta_score, classification_report, confusion_matrix, average_precision_score, roc_auc_score
import sklearn
import torch
import torchvision
import tqdm
import pdb
import hha
def run(num_epochs=100,
file_list='FileList_hha_firstEncountersValTest.csv',
modelname="r2plus1d_18",
tasks="Group",
frames=32,
period=2,
pretrained=True,
output=None,
device=None,
n_train_patients=None,
num_workers=5,
batch_size=20,
seed=0,
lr_step_period=15,
run_test=False,
binary=True,
nodes=1,
bias=None,
weighted=False,
oversample=False,
optimizer=None,
rank_auprc=False,
singleframe=False,
singleframe_ed=False,
segmentation_mask=False,
segmentation_mask_invert=False,
downsample=None,
segmentation=False,
segmentation_outline=False,
segmentation_params=None,
loss_funct=None
):
"""Trains/tests classification model.
Args:
num_epochs (int, optional): Number of epochs during training
Defaults to 45.
modelname (str, optional): Name of model. One of ``mc3_18'',
``r2plus1d_18'', or ``r3d_18''
(options are torchvision.models.video.<modelname>)
Defaults to ``r2plus1d_18''.
tasks (str, optional): Name of task to predict. Options are the headers
of FileList.csv.
Defaults to ``group''.
pretrained (bool, optional): Whether to use pretrained weights for model
Defaults to True.
output (str or None, optional): Name of directory to place outputs
Defaults to None (replaced by output/video/<modelname>_<pretrained/random>/).
device (str or None, optional): Name of device to run on. See
https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.device
for options. If ``None'', defaults to ``cuda'' if available, and ``cpu'' otherwise.
Defaults to ``None''.
n_train_patients (str or None, optional): Number of training patients. Used to ablations
on number of training patients. If ``None'', all patients used.
Defaults to ``None''.
num_workers (int, optional): how many subprocesses to use for data
loading. If 0, the data will be loaded in the main process.
Defaults to 5.
binary (bool, required): Whether to train binary classification
Defaults to True.
batch_size (int, optional): how many samples per batch to load
Defaults to 20.
seed (int, optional): Seed for random number generator.
Defaults to 0.
lr_step_period (int or None, optional): Period of learning rate decay
(learning rate is decayed by a multiplicative factor of 0.1)
If ``None'', learning rate is not decayed.
Defaults to 15.
run_test (bool, optional): Whether or not to run on test.
Defaults to False.
nodes (int, required): numbers of nodes, representing number of classes,
Defaults to 1, for binary case.
bias (float, optional): Add bias to final layer of model, default: 0.0
weighted (bool, optional): Decides whether or not to weigh classes during training, default: False
optimizer (str, optional): What optimizer to use, default: False
singleframe
singleframe_ed=False,
segmentation_mask=False,
segmentation_mask_invert=False,
downsample=None
"""
## Seed RNGs
np.random.seed(seed)
torch.manual_seed(seed)
## Setting default output directory
print(output)
if output is not None:
output = os.path.join(output, "video", "{}_{}_{}_{}_{}_{}_{}_{}".format(modelname,
frames,
period,
"pretrained" if pretrained else "random",
"weighted" if weighted else "nonweighted",
"oversampled" if oversample else "nonoversampled",
"bias" if bias else "nobias",
"SGD" if optimizer == 'SGD' else "adam",
))
else:
output = os.path.join('output', "video", "{}_{}_{}_{}_{}_{}_{}_{}".format(modelname,
frames,
period,
"pretrained" if pretrained else "random",
"weighted" if weighted else "nonweighted",
"oversampled" if oversample else "nonoversampled",
"bias" if bias else "nobias",
"SGD" if optimizer == 'SGD' else "adam",
))
# Augmentation studies
if singleframe:
output += "_singleframeRandom"
if singleframe_ed:
output += "_singleframeEndDiastolic"
if segmentation_mask:
output += "_segmentationmask"
if segmentation_mask_invert:
output += "_segmentationmaskInvert"
if downsample:
output += "_downsample" + str(downsample)
if segmentation:
output += "_segmentation"
if segmentation_outline:
output += "_segmentationOutline"
if segmentation_params is not None:
output += "segmentationParams"
### Making directory is does not exist
os.makedirs(output, exist_ok=True)
## Setting device for computations
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
## Setting up model
model = torchvision.models.video.__dict__[modelname](pretrained=pretrained)
## Adding last layer of nodes
node = nodes
model.fc = torch.nn.Linear(model.fc.in_features, node)
## Initializing well:atural log(pos/neg) for final bias term #natural log(pos/total) for final bias term
if bias:
if nodes == 1:
bias_terms = [-0.48] #bias_wt #[-0.48]
model.fc.bias.data = torch.tensor(bias_terms)
## TODO: Add an option for normal bias setting etc
if nodes == 3:
bias_terms = [0.0, -0.48, -3.92]
model.fc.bias.data = torch.tensor(bias_terms)
if not bias:
bias_terms = [0.0] * nodes
model.fc.bias.data = torch.tensor(bias_terms)
#pdb.set_trace()
## Implementing data parallelism at the module level.
if device.type == "cuda":
model = torch.nn.DataParallel(model)
model.to(device)
# Set up optimizer: Default sgd
optim = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-3)
if lr_step_period is None:
lr_step_period = math.inf
scheduler = torch.optim.lr_scheduler.StepLR(optim, lr_step_period)
if optimizer == 'adam':
learning_rate = 1e-4
optim = torch.optim.Adam(model.parameters(), lr=learning_rate)
print(optimizer)
## Computing mean and std
print(file_list)
mean, std = hha.utils.get_mean_and_std(hha.datasets.Echo(split="train", file_list=file_list))
kwargs = {"target_type": tasks,
"mean": mean,
"std": std,
"length": frames,
"period": period,
"file_list":file_list,
"singleframe":singleframe,
"singleframe_ed": singleframe_ed,
"segmentation_mask":segmentation_mask,
"segmentation_mask_invert": segmentation_mask_invert,
"downsample": downsample,
"segmentation_outline":segmentation_outline
}
#if segmentation_params is not None:
# kwargs['segmentation_params']={"mask": True, "mitral": False, "expand": 15, "rect":True, "reverse":True}
## Setting up datasets and dataloaders
train_dataset = hha.datasets.Echo(split="train", **kwargs, pad=12)
if singleframe:
## Testing for a truly single frame video
sfv = train_dataset.__getitem__(0)
assert np.array_equal(sfv[0][:,np.random.choice(sfv[0].shape[1], 1),:,:], sfv[0][:,np.random.choice(sfv[0].shape[1], 1),:,:])
if n_train_patients is not None and len(train_dataset) > n_train_patients:
# Subsample patients (used for ablation experiment)
indices = np.random.choice(len(train_dataset), n_train_patients, replace=False)
train_dataset = torch.utils.data.Subset(train_dataset, indices)
train_dataloader = torch.utils.data.DataLoader(train_dataset
, batch_size=batch_size
, num_workers=num_workers
, shuffle=True
, pin_memory=(device.type == "cuda")
, drop_last=True)
val_dataloader = torch.utils.data.DataLoader(hha.datasets.Echo(split="validate", **kwargs)
, batch_size=batch_size
, num_workers=num_workers
, shuffle=True
, pin_memory=(device.type == "cuda"))
dataloaders = {'train': train_dataloader, 'validate': val_dataloader}
if oversample and not weighted:
#############
# Oversample the minority classes
outcome = train_dataset.outcome
targets = [j[1] for j in outcome ]
class_count = np.unique(targets, return_counts=True)[1]
print(class_count)
weight = 1. / class_count
samples_weight = torch.from_numpy(np.array([weight[int(float(t))] for t in targets]))
sampler = torch.utils.data.sampler.WeightedRandomSampler(samples_weight, len(samples_weight)) #len(samples_weight))
weighted_loader = torch.utils.data.DataLoader(train_dataset
, batch_size=batch_size
, num_workers=num_workers
, shuffle=False
, pin_memory=(device.type == "cuda")
, drop_last=True
, sampler=sampler)
dataloaders = {'train': weighted_loader, 'validate': val_dataloader}
#############
# Run training and testing loops
with open(os.path.join(output, "log.csv"), "a") as f:
epoch_resume = 0
bestLoss = float("inf")
bestauPRC = float(0.)
try:
# Attempt to load checkpoint
checkpoint = torch.load(os.path.join(output, "checkpoint.pt"))
model.load_state_dict(checkpoint['state_dict'])
optim.load_state_dict(checkpoint['opt_dict'])
scheduler.load_state_dict(checkpoint['scheduler_dict'])
epoch_resume = checkpoint["epoch"] + 1
bestLoss = checkpoint["best_loss"]
f.write("Resuming from epoch {}\n".format(epoch_resume))
except FileNotFoundError:
f.write("Starting run from scratch\n")
for epoch in range(epoch_resume, num_epochs):
print("Epoch #{}".format(epoch), flush=True)
for phase in ['train', 'validate']:
start_time = time.time()
for i in range(torch.cuda.device_count()):
torch.cuda.reset_max_memory_allocated(i)
torch.cuda.reset_max_memory_cached(i)
## Running current epoch
loss, yhat, y, epoch_metrics, __ = hha.utils.video_dev.run_epoch(model
, dataloaders[phase]
, phase == "train"
, optim
, device
, binary=binary
, weighted=weighted
, loss_funct=loss_funct)
## Writing to file
if binary:
threshold = 0.5
yhat = expit(yhat)
metrics_predictions_ndx = 1
predictions = epoch_metrics[:, metrics_predictions_ndx]
calculated_metrics = pd.DataFrame(log_epoch_metrics(epoch_metrics))
print(roc_auc_score(y, yhat, average='weighted'))
print(average_precision_score(y, yhat, average='weighted'))
auprc = average_precision_score(y, yhat, average='weighted')
f.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(epoch
, phase
, loss
, calculated_metrics['0.0']['loss']
, calculated_metrics['1.0']['loss']
, f1_score(y, predictions, average='weighted')
, calculated_metrics['0.0']['f1-score']
, calculated_metrics['1.0']['f1-score']
, roc_auc_score(y, yhat, average='weighted')
, average_precision_score(y, yhat, average='weighted')
, time.time() - start_time
, y.size
, sum(torch.cuda.max_memory_allocated() for i in range(torch.cuda.device_count()))
, sum(torch.cuda.max_memory_cached() for i in range(torch.cuda.device_count()))
, batch_size))
else:
yhat = softmax(yhat)
metrics_predictions_ndx = 1
predictions = epoch_metrics[:, metrics_predictions_ndx]
y_encode = np.eye(np.int(y.max()+1))[y.astype(int)]
calculated_metrics = pd.DataFrame(log_epoch_metrics(epoch_metrics))
print(roc_auc_score(y_encode, yhat, average='weighted'))
print(average_precision_score(y_encode, yhat , average='weighted'))
auprc = average_precision_score(y_encode, yhat, average='weighted')
per_class_loss = calculated_metrics[[str(j) for j in np.arange(0, nodes).astype(float)]].loc['loss'].values.tolist()
per_class_f1score = calculated_metrics[[str(j) for j in np.arange(0, nodes).astype(float)]].loc['f1-score'].values.tolist()
line_out = [epoch, phase, loss] + per_class_loss + [f1_score(y, predictions, average='weighted')] + per_class_f1score + [roc_auc_score(y_encode, yhat, average='weighted')] + [average_precision_score(y_encode, yhat, average='weighted')] + [time.time() - start_time] + [y.size] + [sum(torch.cuda.max_memory_allocated() for i in range(torch.cuda.device_count()))] + [sum(torch.cuda.max_memory_cached() for i in range(torch.cuda.device_count()))] + [batch_size]
f.write(",".join(str(np.round(x,4)) if isinstance(x, np.float32) else str(x) for x in line_out) + '\n')
f.flush()
scheduler.step()
# Save checkpoint
save = {
'epoch': epoch,
'state_dict': model.state_dict(),
'period': period,
'frames': frames,
'best_loss': bestLoss,
'loss': loss,
'auprc': auprc,
'opt_dict': optim.state_dict(),
'scheduler_dict': scheduler.state_dict(),
}
torch.save(save, os.path.join(output, "checkpoint.pt"))
if loss < bestLoss:
torch.save(save, os.path.join(output, "best.pt"))
bestLoss = loss
if auprc > bestauPRC:
torch.save(save, os.path.join(output, "best_auprc.pt"))
bestauPRC = auprc
if rank_auprc:
# Loading best weights for highest auPRC
checkpoint = torch.load(os.path.join(output, "best_auprc.pt"), map_location=device)
print(os.path.join(output, "best_auprc.pt"))
model.load_state_dict(checkpoint['state_dict'])
optim.load_state_dict(checkpoint['opt_dict'])
scheduler.load_state_dict(checkpoint['scheduler_dict'])
for state in optim.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device)
f.write("Best auPRC {} from epoch {}\n".format(checkpoint["auprc"], checkpoint["epoch"]))
f.flush()
else:
# Loading best weights according to lowest loss
checkpoint = torch.load(os.path.join(output, "best.pt"))
print(os.path.join(output, "best.pt"))
model.load_state_dict(checkpoint['state_dict'])
f.write("Best validation loss {} from epoch {}\n".format(checkpoint["loss"], checkpoint["epoch"]))
f.flush()
if run_test:
for split in ["validate", "test"]:
# Performance without test-time augmentation
print("Running on ....", split)
dataloader = torch.utils.data.DataLoader(hha.datasets.Echo(split=split, **kwargs) #**kwargs_split)
,batch_size=batch_size
, num_workers=num_workers
, shuffle=True
, pin_memory=(device.type == "cuda"))
loss, yhat, y, epoch_metrics, fnames = hha.utils.video_dev.run_epoch(model,
dataloader,
False,
None,
device,
binary=binary,
weighted=weighted,
loss_funct=loss_funct)
# Write full performance to file
pred_out = os.path.join(output, "{}_predictions.csv".format(split))
if rank_auprc:
pred_out = os.path.join(output, "{}_predictions_auprc.csv".format(split))
boot_out = os.path.join(output, "{}_bootstrap.csv".format(split))
if rank_auprc:
boot_out = os.path.join(output, "{}_bootstrap_auprc.csv".format(split))
if binary:
yhat = expit(yhat)
with open(pred_out, "w") as g:
g.write("{},{},{}\n".format('filename', 'true_class', 'prob_class'))
for (filename, true, pred) in zip(fnames, y, yhat):
g.write("{},{},{:.4f}\n".format(filename, true, pred[0]))
g.flush()
threshold = 0.5
predictions = np.zeros(yhat.shape, dtype=int)
predictions[yhat < threshold] = 0
predictions[yhat >= threshold] = 1
print(classification_report(y, predictions)) #, target_names=target_names))
print(pd.DataFrame(confusion_matrix(y, predictions)))
with open(boot_out, "w") as g:
g.write("Split, metric, average, min, max \n")
g.write("{}, AUC, {:.3f}, {:.3f}, {:.3f}\n".format(split, *hha.utils.bootstrap(y, yhat, roc_auc_score) ))
g.write("{}, AP, {:.3f}, {:.3f}, {:.3f}\n".format(split, *hha.utils.bootstrap(y, yhat, average_precision_score) ))
g.write("{}, F1, {:.3f}, {:.3f}, {:.3f}\n".format(split, *hha.utils.bootstrap(y, predictions, f1_score) ))
g.flush()
else:
yhat = softmax(yhat)
with open(pred_out, "w") as g:
headers = ['filename', 'true_class'] + ['prob_' + str(i) for i in np.arange(0, nodes) ] + ['\n']
g.write(",".join(x for x in headers))
for (filename, true, pred) in zip(fnames, y, yhat):
line_out = [filename, true] + [i for i in pred]
g.write(",".join(str(np.round(x,4)) if isinstance(x, np.float32) else x for x in line_out) + '\n' )
g.flush()
pred = np.argmax(yhat, axis=1)
print(f1_score(y, pred, average=None))
print(classification_report(y, pred)) #, target_names=target_names))
print(pd.DataFrame(confusion_matrix(y, pred)))
y_encode = np.eye(np.int(y.max()+1))[y.astype(int)]
pred_encode = np.eye(np.int(y.max()+1))[pred.astype(int)]
with open(boot_out, "w") as g:
g.write("Split, group ,metric, average, min, max \n")
for node in range(0, nodes):
g.write("{}, {}, AUC, {:.3f}, {:.3f} , {:.3f}\n".format(split, node, *hha.utils.bootstrap(y_encode[:,node], pred_encode[:,node], roc_auc_score) ))
g.write("{}, {}, AP, {:.3f}, {:.3f} , {:.3f}\n".format(split, node, *hha.utils.bootstrap(y_encode[:,node], pred_encode[:,node], average_precision_score) ))
g.write("{}, {}, F1, {:.3f}, {:.3f} , {:.3f}\n".format(split, node, *hha.utils.bootstrap(y_encode[:,node], pred_encode[:,node], f1_score) ))
g.flush()
def run_epoch(model, dataloader, train, optim, device, save_all=False, block_size=None, binary=True, weighted=False, loss_funct=None):
    """Run one epoch of training/evaluation for classification.

    Args:
        model (torch.nn.Module): Model to train/evaluate.
        dataloader (torch.utils.data.DataLoader): Dataloader for dataset.
            Each batch yields (X, outcome, fname).
        train (bool): Whether or not to train model.
        optim (torch.optim.Optimizer): Optimizer (only used when train=True).
        device (torch.device): Device to run on.
        save_all (bool, optional): If True, return predictions for all
            test-time augmentations separately. If False, return only
            the mean prediction.
            Defaults to False.
        block_size (int or None, optional): Maximum number of augmentations
            to run on at the same time. Use to limit the amount of memory
            used. If None, always run on all augmentations simultaneously.
            Default is None.
        binary (bool, optional): If True, treat the task as single-logit
            binary classification (BCE-with-logits); otherwise multiclass
            (cross-entropy). Default is True.
        weighted (bool, optional): Accepted for interface compatibility with
            callers; not referenced inside this function.
        loss_funct (str or None, optional): If 'focal', use the focal loss
            from hha.losses instead of the default criterion.

    Returns:
        tuple: (total / n, yhat, y, epoch_metrics, fnames) where total / n is
        the mean per-sample loss, yhat are raw model outputs (logits),
        y the true labels, epoch_metrics an (N, 3) array of
        [true_label, prediction, per_sample_loss], and fnames the file names.
    """
    ## Setting self.training = True,
    ## beware that some layers have different behavior during train/and evaluation
    ## (like BatchNorm, Dropout) so setting it matters
    model.train(train)
    total = 0  # running sum of loss, weighted by batch size
    n = 0  # number of videos processed
    yhat = []  # raw model outputs (logits), one array per batch
    y = []  # true outcomes, one array per batch
    sample_loss = []  # unreduced per-sample losses, one (B, 1) array per batch
    fnames = []  # file names per batch, used to trace predictions back to inputs
    with torch.set_grad_enabled(train):  # True:training, False:inference
        with tqdm.tqdm(total=len(dataloader)) as pbar:
            for (X, outcome, fname) in dataloader:
                y.append(outcome.numpy())
                X = X.to(device)
                outcome = outcome.to(device)
                fnames.append(fname)
                # A 6-D batch means multiple clips per video
                # (batch, n_clips, c, f, h, w): flatten clips into the batch
                # dimension, then average the per-clip outputs below.
                average = (len(X.shape) == 6)
                if average:
                    batch, n_clips, c, f, h, w = X.shape
                    X = X.view(-1, c, f, h, w)
                if block_size is None:
                    outputs = model(X)
                else:
                    # Run the (augmented) batch through the model in chunks of
                    # block_size to bound peak memory usage.
                    outputs = torch.cat([model(X[j:(j + block_size), ...]) for j in range(0, X.shape[0], block_size)])
                if save_all:
                    # Keep every per-clip prediction separately.
                    yhat.append(outputs.to("cpu").detach().numpy())
                if average:
                    # Collapse per-clip outputs back to one prediction per video.
                    outputs = outputs.view(batch, n_clips, -1).mean(1)
                if not save_all:
                    yhat.append(outputs.to("cpu").detach().numpy())
                if binary:
                    # Loss (reduced, used for backprop)
                    criterion = torch.nn.BCEWithLogitsLoss()
                    if loss_funct == 'focal':
                        criterion = hha.losses.FocalLoss(hha.losses.BINARY_MODE)
                    loss = criterion(outputs.view(-1), outcome)
                    # Track per sample loss (reduction='none') for the
                    # per-class metrics computed by log_epoch_metrics.
                    criterion_manual = torch.nn.BCEWithLogitsLoss(reduction='none')
                    if loss_funct == 'focal':
                        criterion_manual = hha.losses.FocalLoss(hha.losses.BINARY_MODE, reduction='none')
                    loss_manual = criterion_manual(outputs.view(-1), outcome)
                    sample_loss.append(np.expand_dims(loss_manual.to("cpu").detach().numpy(), axis=1))
                else:
                    ## Loss (reduced, used for backprop)
                    criterion = torch.nn.CrossEntropyLoss()
                    if loss_funct == 'focal':
                        # NOTE(review): this multiclass branch also passes
                        # BINARY_MODE to FocalLoss — confirm that is intended.
                        criterion = hha.losses.FocalLoss(hha.losses.BINARY_MODE)
                    loss = criterion(outputs, outcome.long())
                    # Track per sample loss
                    criterion_vec = torch.nn.CrossEntropyLoss(reduction='none')
                    if loss_funct == 'focal':
                        criterion_vec = hha.losses.FocalLoss(hha.losses.BINARY_MODE, reduction='none')
                    loss_vec = criterion_vec(outputs, outcome.long())
                    sample_loss.append(np.expand_dims(loss_vec.to("cpu").detach().numpy(), axis=1))
                ## statistics: accumulate loss weighted by batch size so
                ## total / n is a true per-sample mean.
                total += loss.item() * X.size(0)
                n += X.size(0)
                pbar.set_postfix_str("{:.2f} ({:.2f}) ".format(total / n, loss.item()))
                pbar.update()
                ## Backward pass and parameter update (training only).
                if train:
                    optim.zero_grad()
                    loss.backward()
                    optim.step()
    if not save_all:
        yhat = np.concatenate(yhat)
    y = np.concatenate(y)
    fnames = np.concatenate(fnames)
    # Flatten the list of per-batch (B, 1) loss arrays into one column vector.
    flat_loss = [item for sublist in sample_loss for item in sublist]
    y_true = np.expand_dims(y, axis=1)
    per_sampleLoss = np.expand_dims(np.array(flat_loss).flatten(), axis=1)
    if binary:
        # Sigmoid on the single logit, then a 0.5 decision threshold.
        yhat_nn_function = expit(yhat)
        threshold = 0.5
        predictions = np.zeros(yhat_nn_function.shape, dtype=int)
        predictions[yhat_nn_function < threshold] = 0
        predictions[yhat_nn_function >= threshold] = 1
    else:
        # Softmax over class logits; predict the argmax class.
        yhat_nn_function = softmax(yhat)
        predictions = np.expand_dims(np.argmax(yhat_nn_function, axis=1), axis=1)
    # Columns: [true_label, prediction, per_sample_loss].
    epoch_metrics = np.concatenate([y_true, predictions, per_sampleLoss], axis=1)
    return total / n, yhat, y, epoch_metrics, fnames
def log_epoch_metrics(metrics):
    """Compute and print per-class metrics from per-sample epoch results.

    Args:
        metrics (np.ndarray): (N, 3) array whose columns are
            [true_label, prediction, per_sample_loss], as produced by
            run_epoch.

    Returns:
        dict: sklearn classification_report dict (keys are class labels as
        strings plus 'accuracy', 'macro avg', 'weighted avg'), augmented with
        'loss/all' (mean loss over all samples) and, per class label,
        'loss' (mean loss of that class) and 'correct' (percent accuracy
        within that class). A confusion matrix and all entries are printed.
    """
    metrics_label_ndx = 0
    metrics_pred_ndx = 1
    metrics_loss_ndx = 2
    metrics_dict = classification_report(metrics[:, metrics_label_ndx], metrics[:, metrics_pred_ndx], output_dict=True)
    metrics_dict['loss/all'] = metrics[:, metrics_loss_ndx].mean()
    # The last four keys are summary entries ('accuracy', 'macro avg',
    # 'weighted avg', 'loss/all'), not class labels — skip them.
    for v in [*metrics_dict][:-4]:
        # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        class_index = metrics[:, metrics_label_ndx] == float(v)
        metrics_dict[v]['loss'] = metrics[class_index, metrics_loss_ndx].mean()
        # Percent of samples of this class that were predicted correctly.
        metrics_dict[v]['correct'] = np.sum(metrics[class_index, metrics_label_ndx] == metrics[class_index, metrics_pred_ndx]) / np.float32(np.sum(class_index)) * 100
    print(confusion_matrix(metrics[:, metrics_label_ndx], metrics[:, metrics_pred_ndx]))
    for key in metrics_dict:
        print(key, '->', metrics_dict[key])
    return metrics_dict
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""State machine modelling, copied from TaskFlow project.
This work will be turned into a library.
See https://github.com/harlowja/automaton
This is being used in the implementation of:
http://specs.openstack.org/openstack/ironic-specs/specs/kilo/new-ironic-state-machine.html
"""
from collections import OrderedDict # noqa
import six
from ironic.common import exception as excp
from ironic.common.i18n import _
class _Jump(object):
"""A FSM transition tracks this data while jumping."""
def __init__(self, name, on_enter, on_exit):
self.name = name
self.on_enter = on_enter
self.on_exit = on_exit
class FSM(object):
    """A finite state machine.

    This class models a state machine, and expects an outside caller to
    manually trigger the state changes one at a time by invoking process_event
    """

    def __init__(self, start_state=None):
        # Mapping of state name -> {event name -> _Jump} (allowed transitions).
        self._transitions = {}
        # Mapping of state name -> metadata dict (terminal/stable/target,
        # callbacks); OrderedDict so iteration follows insertion order.
        self._states = OrderedDict()
        self._start_state = start_state
        # Target state of the current state, if any (see add_state's 'target').
        self._target_state = None
        # Note that _current is a _Jump instance
        self._current = None

    @property
    def start_state(self):
        # State used by initialize() when no explicit state is given.
        return self._start_state

    @property
    def current_state(self):
        # Name of the current state, or None if the machine is uninitialized.
        if self._current is not None:
            return self._current.name
        return None

    @property
    def target_state(self):
        # The target associated with the current state, or None.
        return self._target_state

    @property
    def terminated(self):
        """Returns whether the state machine is in a terminal state."""
        if self._current is None:
            return False
        return self._states[self._current.name]['terminal']

    def add_state(self, state, on_enter=None, on_exit=None,
                  target=None, terminal=None, stable=False):
        """Adds a given state to the state machine.

        The on_enter and on_exit callbacks, if provided will be expected to
        take two positional parameters, these being the state being exited (for
        on_exit) or the state being entered (for on_enter) and a second
        parameter which is the event that is being processed that caused the
        state transition.

        :param state: Name of the state to add; must not already exist.
        :param terminal: If truthy, no events can be processed from this state.
        :param stable: Use this to specify that this state is a stable/passive
                       state. A state must have been previously defined as
                       'stable' before it can be used as a 'target'
        :param target: The target state for 'state' to go to. Before a state
                       can be used as a target it must have been previously
                       added and specified as 'stable'
        :raises excp.Duplicate: if the state already exists.
        :raises ValueError: if a callback is not callable.
        :raises excp.InvalidState: if 'target' is undefined or not stable.
        """
        if state in self._states:
            raise excp.Duplicate(_("State '%s' already defined") % state)
        if on_enter is not None:
            if not six.callable(on_enter):
                raise ValueError(_("On enter callback must be callable"))
        if on_exit is not None:
            if not six.callable(on_exit):
                raise ValueError(_("On exit callback must be callable"))
        if target is not None and target not in self._states:
            raise excp.InvalidState(_("Target state '%s' does not exist")
                                    % target)
        if target is not None and not self._states[target]['stable']:
            raise excp.InvalidState(
                _("Target state '%s' is not a 'stable' state") % target)
        self._states[state] = {
            'terminal': bool(terminal),
            'reactions': {},
            'on_enter': on_enter,
            'on_exit': on_exit,
            'target': target,
            'stable': stable,
        }
        # Start with an empty (but ordered) transition table for this state.
        self._transitions[state] = OrderedDict()

    def add_transition(self, start, end, event):
        """Adds an allowed transition from start -> end for the given event.

        Both 'start' and 'end' must already have been added via add_state;
        the _Jump carries the end state's on_enter and the start state's
        on_exit callbacks.
        """
        if start not in self._states:
            raise excp.NotFound(
                _("Can not add a transition on event '%(event)s' that "
                  "starts in a undefined state '%(state)s'")
                % {'event': event, 'state': start})
        if end not in self._states:
            raise excp.NotFound(
                _("Can not add a transition on event '%(event)s' that "
                  "ends in a undefined state '%(state)s'")
                % {'event': event, 'state': end})
        self._transitions[start][event] = _Jump(end,
                                                self._states[end]['on_enter'],
                                                self._states[start]['on_exit'])

    def process_event(self, event):
        """Trigger a state change in response to the provided event.

        Fires the current state's on_exit callback, then the new state's
        on_enter callback, then updates _current and _target_state.

        :raises excp.InvalidState: if uninitialized, in a terminal state, or
            if no transition is defined for 'event' from the current state.
        """
        current = self._current
        if current is None:
            raise excp.InvalidState(_("Can only process events after"
                                      " being initialized (not before)"))
        if self._states[current.name]['terminal']:
            raise excp.InvalidState(
                _("Can not transition from terminal "
                  "state '%(state)s' on event '%(event)s'")
                % {'state': current.name, 'event': event})
        if event not in self._transitions[current.name]:
            raise excp.InvalidState(
                _("Can not transition from state '%(state)s' on "
                  "event '%(event)s' (no defined transition)")
                % {'state': current.name, 'event': event})
        replacement = self._transitions[current.name][event]
        if current.on_exit is not None:
            current.on_exit(current.name, event)
        if replacement.on_enter is not None:
            replacement.on_enter(replacement.name, event)
        self._current = replacement
        # clear _target if we've reached it
        if (self._target_state is not None and
                self._target_state == replacement.name):
            self._target_state = None
        # if new state has a different target, update the target
        if self._states[replacement.name]['target'] is not None:
            self._target_state = self._states[replacement.name]['target']

    def is_valid_event(self, event):
        """Check whether the event is actionable in the current state."""
        current = self._current
        if current is None:
            return False
        if self._states[current.name]['terminal']:
            return False
        if event not in self._transitions[current.name]:
            return False
        return True

    def initialize(self, state=None):
        """Sets up the state machine.

        sets the current state to the specified state, or start_state
        if no state was specified..

        :raises excp.NotFound: if the state is undefined.
        :raises excp.InvalidState: if the state is terminal.
        """
        if state is None:
            state = self._start_state
        if state not in self._states:
            raise excp.NotFound(_("Can not start from an undefined"
                                  " state '%s'") % (state))
        if self._states[state]['terminal']:
            raise excp.InvalidState(_("Can not start from a terminal"
                                      " state '%s'") % (state))
        # The initial _Jump has no callbacks: nothing is being exited/entered.
        self._current = _Jump(state, None, None)
        self._target_state = self._states[state]['target']

    def copy(self, shallow=False):
        """Copies the current state machine (shallow or deep).

        NOTE(harlowja): the copy will be left in an *uninitialized* state.

        NOTE(harlowja): when a shallow copy is requested the copy will share
                        the same transition table and state table as the
                        source; this can be advantageous if you have a machine
                        and transitions + states that is defined somewhere
                        and want to use copies to run with (the copies have
                        the current state that is different between machines).
        """
        c = FSM(self.start_state)
        if not shallow:
            for state, data in six.iteritems(self._states):
                copied_data = data.copy()
                copied_data['reactions'] = copied_data['reactions'].copy()
                c._states[state] = copied_data
            for state, data in six.iteritems(self._transitions):
                c._transitions[state] = data.copy()
        else:
            # Shared tables: mutations on either machine affect both.
            c._transitions = self._transitions
            c._states = self._states
        return c

    def __contains__(self, state):
        """Returns if this state exists in the machines known states."""
        return state in self._states

    @property
    def states(self):
        """Returns a list of the state names."""
        return list(six.iterkeys(self._states))

    def __iter__(self):
        """Iterates over (start, event, end) transition tuples."""
        for state in six.iterkeys(self._states):
            for event, target in six.iteritems(self._transitions[state]):
                yield (state, event, target.name)

    @property
    def events(self):
        """Returns how many events exist."""
        c = 0
        for state in six.iterkeys(self._states):
            c += len(self._transitions[state])
        return c
|
# Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=logging-format-interpolation
import logging
import datetime
from kubernetes import client
_RAISE_EXCEPTION_KEY = 'raise_exception'
class FakeK8sClient(object):
    """A fake k8s client for development.

    With this client we can decouple the dependency of k8s cluster: write
    operations only log their arguments, and read operations return canned
    objects/dicts shaped like real Kubernetes API responses.
    """

    def close(self):
        # Nothing to release in the fake client.
        pass

    def create_or_update_secret(self,
                                data,
                                metadata,
                                secret_type,
                                name,
                                namespace='default'):
        # User may pass two type of data:
        # 1. dictionary
        # 2. K8s Object
        # They are both accepted by real K8s client,
        # but K8s Object is not iterable.
        if isinstance(data, dict) and _RAISE_EXCEPTION_KEY in data:
            # Allows tests to simulate a server-side failure on demand.
            raise RuntimeError('[500] Fake exception for save_secret')
        # Otherwise succeeds
        logging.info('======================')
        logging.info('Saved a secret with: data: {}, '
                     'metadata: {}, type: {}'.format(data, metadata,
                                                     secret_type))

    def delete_secret(self, name, namespace='default'):
        logging.info('======================')
        logging.info('Deleted a secret with: name: {}'.format(name))

    def get_secret(self, name, namespace='default'):
        # Canned V1Secret echoing back the requested name/namespace.
        return client.V1Secret(api_version='v1',
                               data={'test': 'test'},
                               kind='Secret',
                               metadata={
                                   'name': name,
                                   'namespace': namespace
                               },
                               type='Opaque')

    def create_or_update_service(self,
                                 metadata,
                                 spec,
                                 name,
                                 namespace='default'):
        logging.info('======================')
        logging.info('Saved a service with: spec: {}, metadata: {}'.format(
            spec, metadata))

    def delete_service(self, name, namespace='default'):
        logging.info('======================')
        logging.info('Deleted a service with: name: {}'.format(name))

    def get_service(self, name, namespace='default'):
        # Canned V1Service with a fixed nginx selector.
        return client.V1Service(
            api_version='v1',
            kind='Service',
            metadata=client.V1ObjectMeta(name=name, namespace=namespace),
            spec=client.V1ServiceSpec(selector={'app': 'nginx'}))

    def create_or_update_ingress(self,
                                 metadata,
                                 spec,
                                 name,
                                 namespace='default'):
        logging.info('======================')
        logging.info('Saved a ingress with: spec: {}, metadata: {}'.format(
            spec, metadata))

    def delete_ingress(self, name, namespace='default'):
        logging.info('======================')
        logging.info('Deleted a ingress with: name: {}'.format(name))

    def get_ingress(self, name, namespace='default'):
        # Canned ingress using the (pre-GA) networking.k8s.io/v1beta1 API.
        return client.NetworkingV1beta1Ingress(
            api_version='networking.k8s.io/v1beta1',
            kind='Ingress',
            metadata=client.V1ObjectMeta(name=name, namespace=namespace),
            spec=client.NetworkingV1beta1IngressSpec())

    def create_or_update_deployment(self,
                                    metadata,
                                    spec,
                                    name,
                                    namespace='default'):
        logging.info('======================')
        logging.info('Saved a deployment with: spec: {}, metadata: {}'.format(
            spec, metadata))

    def delete_deployment(self, name, namespace='default'):
        logging.info('======================')
        logging.info('Deleted a deployment with: name: {}'.format(name))

    def get_deployment(self, name, namespace='default'):
        # Canned single-container deployment for the fedlearner operator.
        return client.V1Deployment(
            api_version='apps/v1',
            kind='Deployment',
            metadata=client.V1ObjectMeta(name=name, namespace=namespace),
            spec=client.V1DeploymentSpec(
                selector={'matchLabels': {
                    'app': 'fedlearner-operator'
                }},
                template=client.V1PodTemplateSpec(spec=client.V1PodSpec(
                    containers=[
                        client.V1Container(name='fedlearner-operator',
                                           args=['test'])
                    ]))))

    def delete_flapp(self, flapp_name):
        # No-op in the fake client.
        pass

    def create_flapp(self, flapp_yaml):
        # No-op in the fake client.
        pass

    def get_flapp(self, flapp_name):
        """Return a canned FLApp status dict plus a fake pod listing derived
        from flapp_name."""
        pods = {
            'pods': {
                'metadata': {
                    'selfLink': '/api/v1/namespaces/default/pods',
                    'resourceVersion': '780480990'
                }
            },
            'items': [{
                'metadata': {
                    'name': '{}-0'.format(flapp_name)
                }
            }, {
                'metadata': {
                    'name': '{}-1'.format(flapp_name)
                }
            }]
        }
        flapp = {
            'kind': 'FLAPP',
            'metadata': {
                'name': flapp_name,
                # NOTE(review): 'namesapce' looks like a misspelling of
                # 'namespace' — confirm whether any consumer relies on this
                # exact key before changing it.
                'namesapce': 'default'
            },
            'status': {
                'appState': 'FLStateRunning',
                'flReplicaStatus': {
                    'Master': {
                        'active': {
                            'laomiao-raw-data-1223-v1-follower'
                            '-master-0-717b53c4-'
                            'fef7-4d65-a309-63cf62494286': {}
                        }
                    },
                    'Worker': {
                        'active': {
                            'laomiao-raw-data-1223-v1-follower'
                            '-worker-0-61e49961-'
                            'e6dd-4015-a246-b6d25e69a61c': {},
                            'laomiao-raw-data-1223-v1-follower'
                            '-worker-1-accef16a-'
                            '317f-440f-8f3f-7dd5b3552d25': {}
                        }
                    }
                }
            }
        }
        return {'flapp': flapp, 'pods': pods}

    def get_webshell_session(self,
                             flapp_name,
                             container_name: str,
                             namespace='default'):
        # Fixed fake session id.
        return {'id': 1}

    def get_sparkapplication(self,
                             name: str,
                             namespace: str = 'default') -> dict:
        """Return a canned COMPLETED SparkApplication resource."""
        logging.info('======================')
        logging.info(
            f'get spark application, name: {name}, namespace: {namespace}')
        return {
            'apiVersion': 'sparkoperator.k8s.io/v1beta2',
            'kind': 'SparkApplication',
            'metadata': {
                'creationTimestamp': '2021-04-15T10:43:15Z',
                'generation': 1,
                'name': name,
                'namespace': namespace,
            },
            'status': {
                'applicationState': {
                    'state': 'COMPLETED'
                },
            }
        }

    def create_sparkapplication(self,
                                json_object: dict,
                                namespace: str = 'default') -> dict:
        """Log the request and return a canned created-application resource
        (fixed name/namespace, independent of json_object)."""
        logging.info('======================')
        logging.info(f'create spark application, namespace: {namespace}, '
                     f'json: {json_object}')
        return {
            'apiVersion': 'sparkoperator.k8s.io/v1beta2',
            'kind': 'SparkApplication',
            'metadata': {
                'creationTimestamp': '2021-04-15T10:43:15Z',
                'generation': 1,
                'name': 'fl-transformer-yaml',
                'namespace': 'fedlearner',
                'resourceVersion': '348817823',
            },
            'spec': {
                'arguments': [
                    'hdfs://user/feature/data.csv',
                    'hdfs://user/feature/data_tfrecords/'
                ],
            }
        }

    def delete_sparkapplication(self,
                                name: str,
                                namespace: str = 'default') -> dict:
        """Log the request and return a canned Success status object."""
        logging.info('======================')
        logging.info(
            f'delete spark application, name: {name}, namespace: {namespace}')
        return {
            'kind': 'Status',
            'apiVersion': 'v1',
            'metadata': {},
            'status': 'Success',
            'details': {
                'name': name,
                'group': 'sparkoperator.k8s.io',
                'kind': 'sparkapplications',
                'uid': '790603b6-9dd6-11eb-9282-b8599fb51ea8'
            }
        }

    def get_pod_log(self, name: str, namespace: str, tail_lines: int):
        # One fake log line: the current timestamp (tail_lines is ignored).
        return [str(datetime.datetime.now())]

    def get_pods(self, namespace, label_selector):
        # Fixed fake pod name list (arguments are ignored).
        return ['fake_fedlearner_web_console_v2']
|
def minion_game(string):
    """Play the Minion Game on *string* and print the result.

    Kevin scores one point per substring starting with a vowel (AEIOU),
    Stuart one point per substring starting with a consonant; a substring
    starting at index i contributes len(string) - i points. Prints
    "Kevin <score>", "Stuart <score>", or "Draw".
    """
    total_length = len(string)
    # Kevin's score: substrings beginning at each vowel position.
    kevin_score = sum(
        total_length - idx
        for idx, ch in enumerate(string)
        if ch in "AEIOU"
    )
    # Every start position contributes (n - i) points to exactly one player,
    # so Stuart gets the remainder of the full triangular total n*(n+1)/2.
    stuart_score = total_length * (total_length + 1) // 2 - kevin_score
    if kevin_score > stuart_score:
        print("Kevin", kevin_score)
    elif stuart_score > kevin_score:
        print("Stuart", stuart_score)
    else:
        print("Draw")
|
from django import test
from django.urls import reverse
from django.test import client
from indicators.models import LevelTierTemplate
from factories.workflow_models import (
RFProgramFactory,
TolaUserFactory,
grant_program_access,
)
class TestSaveCustomTemplateView(test.TestCase):
    """Tests for the save_custom_template endpoint: permissions, whitespace
    trimming on save, and rejection of illegal characters in tier names."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # One program with a high-access user shared by all tests.
        cls.program = RFProgramFactory()
        cls.tola_user = TolaUserFactory()
        grant_program_access(cls.tola_user, cls.program, cls.tola_user.country, 'high')
        cls.client = client.Client()

    def test_view_respects_permissions(self):
        """A user with no access to the program should get a 403."""
        outsider = TolaUserFactory()
        outsider_client = client.Client()
        outsider_client.force_login(outsider)
        response = outsider_client.post(
            reverse('save_custom_template'), {'program_id': self.program.id, 'tiers': ['this', 'that']})
        self.assertEqual(response.status_code, 403)

    def test_template_saved(self):
        """Tier names are saved with surrounding whitespace trimmed."""
        self.client.force_login(self.tola_user.user)
        self.client.post(
            reverse('save_custom_template'),
            {'program_id': self.program.id, 'tiers': ['this ', ' tha t ']},
            content_type="application/json")
        # Note that the extra white space on either side of the tier should be
        # trimmed before saving, while interior whitespace is preserved.
        saved_names = LevelTierTemplate.objects.get(program=self.program).names
        self.assertEqual(saved_names, ['this', 'tha t'])

    def test_illegal_chars(self):
        """Tier names containing a comma or a colon are rejected with a 400."""
        self.client.force_login(self.tola_user.user)
        # Test comma
        comma_response = self.client.post(
            reverse('save_custom_template'),
            {'program_id': self.program.id, 'tiers': ['this, ', ' that']},
            content_type="application/json")
        self.assertEqual(comma_response.status_code, 400)
        # Test colon
        colon_response = self.client.post(
            reverse('save_custom_template'),
            {'program_id': self.program.id, 'tiers': ['this: ', ' that']},
            content_type="application/json")
        self.assertEqual(colon_response.status_code, 400)
|
from django.db import models
from apps.usuario.models import EstadoModel
from django.urls import reverse
from apps.usuario.templatetags.utils import PROCESO
class Ubicacion(EstadoModel):
    """Geographic location of a lot: free-text latitude/longitude plus an
    optional description."""
    # Coordinates are stored as CharFields, not numeric types; no format
    # validation is applied at the model level.
    latitudubicacion = models.CharField(max_length=30, blank=True, null=True,
                                        verbose_name = 'Latitud',
                                        help_text = 'Ingrese la Latitud')
    longitudubicacion = models.CharField(max_length=30, blank=True, null=True,
                                         verbose_name = 'Longitud',
                                         help_text = 'Ingrese la Longitud')
    descripcionubicacion = models.CharField(max_length=50, blank=True, null=True,
                                            verbose_name = 'Descripcion de la Ubicacion',
                                            help_text = 'Ingrese Descripcion de la Ubicacion')

    def __str__(self):
        # "lat lon"; renders "None None" when both fields are null.
        return '%s %s' % (self.latitudubicacion,self.longitudubicacion)

    class Meta:
        verbose_name_plural = "Ubicaciones"
class Medida(EstadoModel):
    """Dimensions of a lot: length, width, and total surface area (integers)."""
    largomedida = models.IntegerField(blank=True, null=True,verbose_name='Medida Largo del Lote')
    anchomedida = models.IntegerField(blank=True, null=True,verbose_name='Medida Ancho del Lote')
    # NOTE(review): superficietotal is not computed from largo/ancho anywhere
    # in this model — presumably set by callers; confirm.
    superficietotal = models.IntegerField(blank=True, null=True)

    def get_absolute_url(self):
        """URL of this instance's detail view."""
        return reverse('terreno:detalle_medida', kwargs={'pk': self.pk})

    def __str__(self):
        # "largo ancho"; renders "None None" when both fields are null.
        return '%s %s' % (self.largomedida, self.anchomedida)

    def __repr__(self):
        # Bug fix: __repr__ must return a str; the original returned
        # self.largomedida directly (an int or None), so repr() raised
        # TypeError. Format it as a string instead.
        return '%s(largomedida=%r, anchomedida=%r)' % (
            type(self).__name__, self.largomedida, self.anchomedida)

    class Meta:
        verbose_name_plural = "Medidas"
        ordering = ['creacion']
class Distrito(EstadoModel):
    """Administrative district identified by a number, a name, and an
    abbreviation (sigla)."""
    numerodistrito = models.IntegerField(blank=True, null=True,
                                         verbose_name = 'Numero del Distrito',
                                         help_text = 'Ingrese el Numero del Distrito')
    nombredistrito = models.CharField(max_length=50, blank=True, null=True,
                                      verbose_name = 'Nombre Distrito',
                                      help_text = 'Ingrese Nombre del Distrito')
    sigladistrito = models.CharField(max_length=10, blank=True, null=True,
                                     verbose_name = 'Sigla del Distrito',
                                     help_text = 'Ingrese la Sigla del Distrito')

    def __str__(self):
        # "numero nombre sigla"; null fields render as "None".
        return '%s %s %s' % (self.numerodistrito,self.nombredistrito,self.sigladistrito)

    class Meta:
        verbose_name_plural = "Distritos"
class Manzano(EstadoModel):
    """A city block ("manzano") belonging to a Distrito."""
    distritos = models.ForeignKey(Distrito, null=True, blank=False, on_delete=models.CASCADE)
    codigomanzano = models.CharField(max_length=80,null=True, blank=False,
                                     verbose_name='Codigo Manzano',
                                     help_text='Ingresa Codigo Manzano')
    numeromanzano = models.IntegerField(blank=True, null=True,
                                        verbose_name='Numero del Manzano',
                                        help_text='Ingrese el Numero del Manzano')
    siglamanzano = models.CharField(max_length=10, blank=True, null=True, default='',
                                    verbose_name='Sigla del Manzano',
                                    help_text='Ingrese la Sigla del Manzano')
    # Workflow state, restricted to the shared PROCESO choices.
    procesomanzano = models.CharField(max_length=20,choices=PROCESO,
                                      verbose_name='Proceso',
                                      help_text = 'Ingrese Proceso Manzano')
    #lotes = models.ManyToManyField(Lote, blank=True)
    def __str__(self):
        return '%s %s' % (self.numeromanzano, self.codigomanzano)
    # NOTE(review): __unicode__ is a Python 2 relic and is never called under
    # Python 3 — confirm nothing depends on it before removing.
    def __unicode__(self):
        return (self.codigomanzano)
    class Meta:
        verbose_name_plural = "Manzanos"
        ordering = ['creacion']
class Lote(EstadoModel):
    """A lot inside a Manzano, with optional measurements and location."""
    manzanos = models.ForeignKey(Manzano, null=False, blank=False, on_delete=models.CASCADE)
    codigolote = models.CharField(max_length=80,null=True, blank=False,
                                  verbose_name='Codigo Lote',
                                  help_text='Ingresa Codigo Lote')
    numerolote = models.IntegerField(blank=True, null=True,
                                     verbose_name='Numero del Lote',
                                     help_text='Ingrese el Numero del Lote')
    siglalote = models.CharField(max_length=10, blank=True, null=True, default='',
                                 verbose_name='Sigla del Lote',
                                 help_text='Ingrese la Sigla del Lote')
    # Workflow state, restricted to the shared PROCESO choices.
    procesolote = models.CharField(max_length=20,choices=PROCESO,
                                   verbose_name='Proceso',
                                   help_text = 'Ingrese Proceso Lote')
    medidas = models.ForeignKey(Medida, null=True, blank=True, on_delete=models.CASCADE)
    ubicaciones = models.ForeignKey(Ubicacion, null=True, blank=True, on_delete=models.CASCADE)
    def __str__(self):
        # "lote-code manzano-code" — shows the parent block for context.
        return '%s %s' % (self.codigolote, self.manzanos.codigomanzano)
    # NOTE(review): __unicode__ is a Python 2 relic and is never called under
    # Python 3 — confirm nothing depends on it before removing.
    def __unicode__(self):
        return (self.codigolote)
    class Meta:
        verbose_name_plural = "Lotes"
        ordering = ['creacion']
|
"""
===================================
Merging two instances in the design
===================================
This example demonstrate how to merge two instance in the design to create a new
merged definition
.. hdl-diagram:: ../../../examples/basic/_initial_design_merge.v
:type: netlistsvg
:align: center
:module: top
**Output1** Merged design Instance
.. hdl-diagram:: ../../../examples/basic/_merged_design.v
:type: netlistsvg
:align: center
:module: top
"""
from os import path
import spydrnet as sdn
import spydrnet_physical as sdnphy
import logging
logger = logging.getLogger('spydrnet_logs')
sdn.enable_file_logging(LOG_LEVEL='INFO')
netlist = sdnphy.load_netlist_by_name('nested_hierarchy')
sdn.compose(netlist, '_initial_design_merge.v', skip_constraints=True)
netlist = sdnphy.load_netlist_by_name('nested_hierarchy')
top = netlist.top_instance.reference
inst1 = next(top.get_instances("inst_1_0"))
inst2 = next(top.get_instances("inst_1_1"))
top.merge_instance([inst1, inst2],
new_definition_name="merged_module",
new_instance_name="merged_module_instance_0")
top.create_unconn_wires()
sdn.compose(netlist, '_merged_design.v', skip_constraints=True)
|
from typing import TYPE_CHECKING, Callable, Tuple
from .exceptions import get_status_code_error
from .specs.openapi.checks import content_type_conformance, response_schema_conformance, status_code_conformance
from .utils import GenericResponse
if TYPE_CHECKING:
from .models import Case
def not_a_server_error(response: GenericResponse, case: "Case") -> None:
    """Fail when the API responded with a 5xx (server-side) status code."""
    if response.status_code < 500:
        return
    error_cls = get_status_code_error(response.status_code)
    raise error_cls(f"Received a response with 5xx status code: {response.status_code}")
# Checks that run on every call unless explicitly disabled.
DEFAULT_CHECKS = (not_a_server_error,)
# Conformance checks that callers must opt into.
OPTIONAL_CHECKS = (status_code_conformance, content_type_conformance, response_schema_conformance)
# Every known check, default ones first.
ALL_CHECKS: Tuple[Callable[[GenericResponse, "Case"], None], ...] = DEFAULT_CHECKS + OPTIONAL_CHECKS
|
from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
class AppTestNumSupport(BaseNumpyAppTest):
    """App-level tests for micronumpy's zeros/empty/where/dot/choose/put helpers.

    NOTE(review): these run inside PyPy's app-test harness — ``raises`` is
    injected into the namespace (string arguments are evaluated app-level),
    and py2-isms such as ``xrange`` and ``exc.value[0]`` are intentional.
    """
    def test_zeros(self):
        from numpy import zeros
        a = zeros(3)
        assert len(a) == 3
        assert a[0] == a[1] == a[2] == 0
    def test_empty(self):
        """empty() must return uninitialized memory at least once in 1000 tries."""
        from numpy import empty
        import gc
        # Write garbage, free, and collect each round so a recycled buffer has
        # a chance to contain non-zero leftovers.
        for i in range(1000):
            a = empty(3)
            assert len(a) == 3
            if not (a[0] == a[1] == a[2] == 0):
                break # done
            a[0] = 1.23
            a[1] = 4.56
            a[2] = 7.89
            del a
            gc.collect()
        else:
            raise AssertionError(
                "empty() returned a zeroed out array every time")
    def test_where(self):
        from numpy import where, ones, zeros, array
        a = [1, 2, 3, 0, -3]
        a = where(array(a) > 0, ones(5), zeros(5))
        assert (a == [1, 1, 1, 0, 0]).all()
    def test_where_differing_dtypes(self):
        # int and float branches must coexist in one where() call.
        from numpy import array, ones, zeros, where
        a = [1, 2, 3, 0, -3]
        a = where(array(a) > 0, ones(5, dtype=int), zeros(5, dtype=float))
        assert (a == [1, 1, 1, 0, 0]).all()
    def test_where_broadcast(self):
        from numpy import array, where
        a = where(array([[1, 2, 3], [4, 5, 6]]) > 3, [1, 1, 1], 2)
        assert (a == [[2, 2, 2], [1, 1, 1]]).all()
        # Scalar condition broadcasts over the branch arrays.
        a = where(True, [1, 1, 1], 2)
        assert (a == [1, 1, 1]).all()
    def test_where_errors(self):
        from numpy import where, array
        # where() requires either 1 or 3 arguments; 2 is invalid.
        raises(ValueError, "where([1, 2, 3], [3, 4, 5])")
        raises(ValueError, "where([1, 2, 3], [3, 4, 5], [6, 7])")
        assert where(True, 1, 2) == array(1)
        assert where(False, 1, 2) == array(2)
        assert (where(True, [1, 2, 3], 2) == [1, 2, 3]).all()
        assert (where(False, 1, [1, 2, 3]) == [1, 2, 3]).all()
        assert (where([1, 2, 3], True, False) == [True, True, True]).all()
    #def test_where_1_arg(self):
    #    xxx
    def test_where_invalidates(self):
        # The result must be a copy: mutating the input afterwards must not
        # change the already-computed output.
        from numpy import where, ones, zeros, array
        a = array([1, 2, 3, 0, -3])
        b = where(a > 0, ones(5), zeros(5))
        a[0] = 0
        assert (b == [1, 1, 1, 0, 0]).all()
    def test_dot_basic(self):
        """dot() for 1-D, 2-D, scalar and N-D operands."""
        from numpy import array, dot, arange
        a = array(range(5))
        assert dot(a, a) == 30.0
        a = array(range(5))
        assert a.dot(range(5)) == 30
        assert dot(range(5), range(5)) == 30
        assert (dot(5, [1, 2, 3]) == [5, 10, 15]).all()
        a = arange(12).reshape(3, 4)
        b = arange(12).reshape(4, 3)
        c = a.dot(b)
        assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all()
        c = a.dot(b.astype(float))
        assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all()
        c = a.astype(float).dot(b)
        assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all()
        a = arange(24).reshape(2, 3, 4)
        # Misaligned inner dimensions must raise.
        raises(ValueError, "a.dot(a)")
        b = a[0, :, :].T
        #Superfluous shape test makes the intention of the test clearer
        assert a.shape == (2, 3, 4)
        assert b.shape == (4, 3)
        c = dot(a, b)
        assert (c == [[[14, 38, 62], [38, 126, 214], [62, 214, 366]],
                      [[86, 302, 518], [110, 390, 670], [134, 478, 822]]]).all()
        c = dot(a, b[:, 2])
        assert (c == [[62, 214, 366], [518, 670, 822]]).all()
        a = arange(3*2*6).reshape((3,2,6))
        b = arange(3*2*6)[::-1].reshape((2,6,3))
        assert dot(a, b)[2,0,1,2] == 1140
        assert (dot([[1,2],[3,4]],[5,6]) == [17, 39]).all()
    def test_dot_constant(self):
        from numpy import array, dot
        a = array(range(5))
        # dot with a scalar behaves like elementwise scaling.
        b = a.dot(2.5)
        for i in xrange(5):
            assert b[i] == 2.5 * a[i]
        c = dot(4, 3.0)
        assert c == 12.0
        c = array(3.0).dot(array(4))
        assert c == 12.0
    def test_dot_out(self):
        from numpy import arange, dot
        a = arange(12).reshape(3, 4)
        b = arange(12).reshape(4, 3)
        out = arange(9).reshape(3, 3)
        c = dot(a, b, out=out)
        assert (c == out).all()
        assert (c == [[42, 48, 54], [114, 136, 158], [186, 224, 262]]).all()
        # Wrong-dtype output buffer must be rejected.
        out = arange(9, dtype=float).reshape(3, 3)
        exc = raises(ValueError, dot, a, b, out)
        assert exc.value[0] == ('output array is not acceptable (must have the '
                                'right type, nr dimensions, and be a C-Array)')
    def test_choose_basic(self):
        from numpy import array
        a, b, c = array([1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])
        r = array([2, 1, 0]).choose([a, b, c])
        assert (r == [7, 5, 3]).all()
    def test_choose_broadcast(self):
        # Choices may be plain lists or scalars, not only arrays.
        from numpy import array
        a, b, c = array([1, 2, 3]), [4, 5, 6], 13
        r = array([2, 1, 0]).choose([a, b, c])
        assert (r == [13, 5, 3]).all()
    def test_choose_out(self):
        from numpy import array
        a, b, c = array([1, 2, 3]), [4, 5, 6], 13
        r = array([2, 1, 0]).choose([a, b, c], out=None)
        assert (r == [13, 5, 3]).all()
        assert (a == [1, 2, 3]).all()
        # out= may alias one of the choices; it is written in place.
        r = array([2, 1, 0]).choose([a, b, c], out=a)
        assert (r == [13, 5, 3]).all()
        assert (a == [13, 5, 3]).all()
    def test_choose_modes(self):
        from numpy import array
        a, b, c = array([1, 2, 3]), [4, 5, 6], 13
        # Default mode raises on out-of-range indices.
        raises(ValueError, "array([3, 1, 0]).choose([a, b, c])")
        raises(ValueError, "array([3, 1, 0]).choose([a, b, c], mode='raises')")
        raises(ValueError, "array([3, 1, 0]).choose([])")
        raises(ValueError, "array([-1, -2, -3]).choose([a, b, c])")
        r = array([4, 1, 0]).choose([a, b, c], mode='clip')
        assert (r == [13, 5, 3]).all()
        r = array([4, 1, 0]).choose([a, b, c], mode='wrap')
        assert (r == [4, 5, 3]).all()
    def test_choose_dtype(self):
        # Result dtype is the common type of all choices.
        from numpy import array
        a, b, c = array([1.2, 2, 3]), [4, 5, 6], 13
        r = array([2, 1, 0]).choose([a, b, c])
        assert r.dtype == float
    def test_choose_dtype_out(self):
        # An explicit out buffer pins the result dtype.
        from numpy import array
        a, b, c = array([1, 2, 3]), [4, 5, 6], 13
        x = array([0, 0, 0], dtype='i2')
        r = array([2, 1, 0]).choose([a, b, c], out=x)
        assert r.dtype == 'i2'
    def test_put_basic(self):
        from numpy import arange, array
        a = arange(5)
        a.put([0, 2], [-44, -55])
        assert (a == array([-44, 1, -55, 3, 4])).all()
        a = arange(5)
        # A scalar value is broadcast over all target indices.
        a.put([3, 4], 9)
        assert (a == array([0, 1, 2, 9, 9])).all()
        a = arange(5)
        # Extra values beyond the number of indices are ignored.
        a.put(1, [7, 8])
        assert (a == array([0, 7, 2, 3, 4])).all()
    def test_put_modes(self):
        """Out-of-range index handling: clip, wrap, raise — with lax mode parsing."""
        from numpy import array, arange
        a = arange(5)
        a.put(22, -5, mode='clip')
        assert (a == array([0, 1, 2, 3, -5])).all()
        a = arange(5)
        a.put(22, -5, mode='wrap')
        assert (a == array([0, 1, -5, 3, 4])).all()
        raises(IndexError, "arange(5).put(22, -5, mode='raise')")
        raises(IndexError, "arange(5).put(22, -5, mode=2)") # raise
        # Mode strings are matched loosely (first letter / numeric code).
        a.put(22, -10, mode='wrongmode_starts_with_w_so_wrap')
        assert (a == array([0, 1, -10, 3, 4])).all()
        a.put(22, -15, mode='cccccccc')
        assert (a == array([0, 1, -10, 3, -15])).all()
        a.put(23, -1, mode=1)  # wrap
        assert (a == array([0, 1, -10, -1, -15])).all()
        raises(TypeError, "arange(5).put(22, -5, mode='zzzz')") # unrecognized mode
    def test_result_type(self):
        import numpy as np
        exc = raises(ValueError, np.result_type)
        assert str(exc.value) == "at least one array or dtype is required"
        exc = raises(TypeError, np.result_type, a=2)
        assert str(exc.value) == "result_type() takes no keyword arguments"
        assert np.result_type(True) is np.dtype('bool')
        assert np.result_type(1) is np.dtype('int')
        assert np.result_type(1.) is np.dtype('float64')
        assert np.result_type(1+2j) is np.dtype('complex128')
        assert np.result_type(1, 1.) is np.dtype('float64')
        assert np.result_type(np.array([1, 2])) is np.dtype('int')
        assert np.result_type(np.array([1, 2]), 1, 1+2j) is np.dtype('complex128')
        assert np.result_type(np.array([1, 2]), 1, 'float64') is np.dtype('float64')
        assert np.result_type(np.array([1, 2]), 1, None) is np.dtype('float64')
|
import pytest
from pyloan.loan import Loan
# Parametrized loan fixtures: (name, revision, list-of-phase-dicts).
# Each phase carries annualRate (%), insurance, periods (months), startDate,
# and optionally principal.  NOTE(review): the second phase of "loan2" has no
# "principal" — presumably it continues the outstanding balance; confirm in
# Loan's phase handling.
testdata = [
    (
        "loan1",
        "r0",
        [
            {
                "annualRate": 2.115,
                "insurance": 1.2,
                "periods": 120,
                "principal": 10000,
                "startDate": "2010-09-15",
            }
        ],
    ),
    (
        "loan2",
        "r0",
        [
            {
                "annualRate": 2.115,
                "insurance": 1.2,
                "periods": 240,
                "principal": 100000,
                "startDate": "2010-09-15",
            },
            {
                "annualRate": 1.115,
                "insurance": 1.2,
                "periods": 120,
                "startDate": "2013-09-15",
            },
        ],
    ),
]
@pytest.mark.parametrize("name,revision,phases", testdata)
def test_loan(name, revision, phases):
    """A freshly built Loan mirrors its inputs and passes its sanity checks."""
    loan = Loan(name, revision, phases)
    # Constructor arguments are stored verbatim on the instance.
    for attr, expected in (("name", name), ("revision", revision), ("phases", phases)):
        assert getattr(loan, attr) == expected
    # No repayment state exists before the plan is computed.
    assert loan.repayments == []
    assert loan.early_repayments == []
    assert loan.summary == {}
    loan.compute_repayment_plan()
    assert loan.sanity_checks() == True
|
"""Tests for sqlalchemy orm session."""
from unittest import TestCase
import astroid
from sqlalchemy.orm.scoping import scoped_session
class PluginTest(TestCase):
    """Checks that astroid can introspect sqlalchemy's scoped_session class."""
    def setUp(self):
        """Build the astroid AST node for scoped_session once per test."""
        self.node = astroid.MANAGER.ast_from_class(
            scoped_session, scoped_session.__module__
        )
    def test_node(self):
        """The node exists and exposes query/add as function definitions."""
        self.assertIsNotNone(self.node)
        for attribute in ("query", "add"):
            self.assertIsInstance(self.node.getattr(attribute)[0], astroid.FunctionDef)
|
import requests
from .objects import TYPES_TO_COLLECTION_NAMES, Container, Sample
from .utils import url_path_join
class Client:
    """
    This connects to an amostra HTTP server for sample management.
    For each collection, we have a traitlets-based object to represent
    documents from that collection and automatically sync any changes back to
    the server to verify that they are valid and, if so, persist them in the
    database.
    """
    def __init__(self, url):
        """
        Connect to an amostra HTTP server.
        Parameters
        ----------
        url: string
        """
        self._session = requests.Session()
        self._url = url
        self._samples = CollectionAccessor(self, Sample)
        self._containers = CollectionAccessor(self, Container)
    def _make_url(self, *pieces):
        """Join *pieces* onto the server's base URL."""
        return url_path_join(self._url, *pieces)
    @property
    def samples(self):
        """
        Accessor for creating and searching Samples
        """
        return self._samples
    @property
    def containers(self):
        """
        Accessor for creating and searching Containers
        """
        return self._containers
    def _new_document(self, obj_type, args, kwargs):
        """
        Insert a new document with a new uuid.
        """
        # Make a new object (e.g. Sample)
        obj = obj_type(self, *args, **kwargs)
        # Find the associated MongoDB collection.
        collection_name = TYPES_TO_COLLECTION_NAMES[obj_type]
        # Insert the new object.
        response = self._session.post(
            self._make_url(collection_name, 'new'),
            json={'parameters': obj.to_dict()})
        response.raise_for_status()
        # Let the server set the uuid.
        uuid = response.json()['uuid']
        obj.set_trait('uuid', uuid)
        obj.set_trait('revision', 0)  # paranoia
        # Observe any updates to the object and sync them to MongoDB.
        obj.observe(self._update)
        return obj
    def _update(self, change):
        """
        Sync a change to an object, observed via traitlets, to MongoDB.
        """
        # The 'revision' trait is a read-only trait, so if it is being changed
        # it is being changed by us, and we don't need to process it.
        # Short-circuit here to avoid an infinite recursion.
        if change['name'] == 'revision':
            return
        change = change.copy()
        owner = change.pop('owner')  # pop because it is not serializable
        collection_name = TYPES_TO_COLLECTION_NAMES[type(owner)]
        response = self._session.put(
            self._make_url(collection_name, owner.uuid),
            json={'change': change})
        response.raise_for_status()
    def _revisions(self, obj):
        """
        Access all revisions to an object with the most recent first.
        """
        type_ = type(obj)
        collection_name = TYPES_TO_COLLECTION_NAMES[type_]
        response = self._session.get(
            self._make_url(collection_name, obj.uuid, 'revisions'))
        response.raise_for_status()
        for document in response.json()['revisions']:
            # BUG FIX: Client has no `_document_to_obj` method, so consuming
            # this generator raised AttributeError. Build objects the same way
            # CollectionAccessor.find does: via the type's from_document.
            yield type_.from_document(self, document)
class CollectionAccessor:
    """Helper bound to a Client that creates and queries one collection."""
    def __init__(self, client, obj_type):
        self._client = client
        self._obj_type = obj_type
        self._collection_name = TYPES_TO_COLLECTION_NAMES[self._obj_type]
    def new(self, *args, **kwargs):
        """Create a new document of this accessor's type on the server."""
        return self._client._new_document(self._obj_type, args, kwargs)
    def find(self, filter=None):
        """Yield objects matching *filter* (all documents when omitted)."""
        query = {} if filter is None else filter
        response = self._client._session.post(
            self._client._make_url(self._collection_name),
            json={'filter': query})
        response.raise_for_status()
        for document in response.json()['results']:
            yield self._obj_type.from_document(self._client, document)
    def find_one(self, filter):
        """Return the first object matching *filter*, or None if none match."""
        # TODO Improve the performance once pagination support is added.
        for obj in self.find(filter):
            return obj
        return None
|
import unittest
import os
import json
import datetime
from processes.insert_movies import Main
from processes.postgres import Postgres
# Database connection settings come from the environment; if any variable is
# missing, fall back to the project's GLOBALS module, and abort the process
# entirely when neither source is available.
try:
    DB_SERVER = os.environ['DB_SERVER']
    DB_PORT = os.environ['DB_PORT']
    DB_DATABASE = os.environ['DB_DATABASE']
    DB_USER = os.environ['DB_USER']
    DB_PASSWORD = os.environ['DB_PASSWORD']
except KeyError:
    try:
        from processes.GLOBALS import DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD
    except ImportError:
        print("No parameters provided")
        exit()
# Shared movie fixture payload, loaded once at import time (relative path —
# tests must run from this directory).
with open('test_data.json') as data_file:
    data = json.load(data_file)
class TestInsertMovies(unittest.TestCase):
    """Integration test: Main.run(data) must replace any stale rows for the
    movie's imdb_id across every kino table with fresh data."""
    @classmethod
    def setUpClass(cls):
        # One inserter and one live Postgres connection shared by the test.
        cls.main = Main()
        cls.pg = Postgres(DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD)
    def test_insert_movies(self):
        """Seed stale rows, run the insert, then verify replace-semantics."""
        # Insert 'old/incorrect' information into kino tables, to check that inserting into movies,
        # removes all previously stored data
        # movies
        self.pg.pg_cur.execute("insert into movies values"
                               " ('tt2562232', 'invalid', 0, 'N/A', '2000-01-01', 'invalid', 'invalid')")
        # movies2companies
        self.pg.pg_cur.execute("insert into companies values (0, 'invalid')")
        self.pg.pg_cur.execute("insert into movies2companies values"
                               " ('tt2562232', 0, 'invalid')")
        # movies2genres
        self.pg.pg_cur.execute("insert into genres values ('invalid')")
        self.pg.pg_cur.execute("insert into movies2genres values"
                               " ('tt2562232', 'invalid')")
        # movies2keywords
        self.pg.pg_cur.execute("insert into movies2keywords values"
                               " ('tt2562232', 'invalid')")
        # movies2persons
        self.pg.pg_cur.execute("insert into persons (person_id, fullname) values (0, 'invalid')")
        self.pg.pg_cur.execute("insert into movies2persons values"
                               " ('tt2562232', 0, 'invalid')")
        # movies2ratings
        self.pg.pg_cur.execute("insert into movies2ratings values"
                               " ('tt2562232', 'invalid', 0)")
        # movies2streams
        self.pg.pg_cur.execute("insert into movies2streams values"
                               " ('tt2562232', 'invalid', 'invalid', '$', 0.00, 'hd', 'rental')")
        # movies2trailer
        self.pg.pg_cur.execute("insert into movies2trailers values "
                               " ( 'tt2562232', 'invalid', 'invalid', 'invalid', 'invalid', 'in'"
                               " , 0, 0, 0, 0, 0, null)")
        # errored
        self.pg.pg_cur.execute("insert into errored values ( 'tt2562232', 'invalid')")
        self.pg.pg_conn.commit()
        # Run the insert
        self.main.run(data)
        # Check that correct information has been populated in movies
        self.pg.pg_cur.execute('select imdb_id, title, runtime, rated, released, orig_language, plot from kino.movies')
        result = self.pg.pg_cur.fetchall()
        self.assertEqual(result, [('tt2562232', 'Birdman', 119, 'R', datetime.date(2014, 8, 27), 'English',
                                   'A fading actor best known for his portrayal of a popular superhero attempts to '
                                   'mount a comeback by appearing in a Broadway play. As opening night approaches, '
                                   'his attempts to become more altruistic, rebuild his career, and reconnect with '
                                   'friends and family prove more difficult than expected.')])
        self.pg.pg_cur.execute('select language from kino.languages')
        result = self.pg.pg_cur.fetchall()
        self.assertEqual(result, [('English',)])
        # Check that all other information for this imdb_id has been removed from the
        # kino tables
        self.pg.pg_cur.execute('select count(*) from kino.movies2companies')
        self.assertEqual([(0,)], self.pg.pg_cur.fetchall())
        self.pg.pg_cur.execute('select count(*) from kino.movies2genres')
        self.assertEqual([(0,)], self.pg.pg_cur.fetchall())
        self.pg.pg_cur.execute('select count(*) from kino.movies2numbers')
        self.assertEqual([(0,)], self.pg.pg_cur.fetchall())
        self.pg.pg_cur.execute('select count(*) from kino.movies2persons')
        self.assertEqual([(0,)], self.pg.pg_cur.fetchall())
        self.pg.pg_cur.execute('select count(*) from kino.movies2ratings')
        self.assertEqual([(0,)], self.pg.pg_cur.fetchall())
        self.pg.pg_cur.execute('select count(*) from kino.movies2streams')
        self.assertEqual([(0,)], self.pg.pg_cur.fetchall())
        self.pg.pg_cur.execute('select count(*) from kino.movies2trailers')
        self.assertEqual([(0,)], self.pg.pg_cur.fetchall())
        self.pg.pg_cur.execute('select count(*) from kino.errored')
        self.assertEqual([(0,)], self.pg.pg_cur.fetchall())
    @classmethod
    def tearDownClass(cls):
        """Wipe every kino table touched by the test so runs are repeatable."""
        cls.pg = Postgres(DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD)
        cls.pg.pg_cur.execute('delete from kino.languages')
        cls.pg.pg_cur.execute('delete from kino.movies')
        cls.pg.pg_cur.execute('delete from kino.movies2companies')
        cls.pg.pg_cur.execute('delete from kino.companies')
        cls.pg.pg_cur.execute('delete from kino.genres')
        cls.pg.pg_cur.execute('delete from kino.movies2genres')
        cls.pg.pg_cur.execute('delete from kino.movies2numbers')
        cls.pg.pg_cur.execute('delete from kino.movies2persons')
        cls.pg.pg_cur.execute('delete from kino.persons')
        cls.pg.pg_cur.execute('delete from kino.movies2ratings')
        cls.pg.pg_cur.execute('delete from kino.movies2streams')
        cls.pg.pg_cur.execute('delete from kino.movies2trailers')
        cls.pg.pg_cur.execute('delete from kino.errored')
        cls.pg.pg_conn.commit()
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
|
import asyncio
class OnHold:
    """Holds a posted gif message until the mentioned user confirms by reacting.

    NOTE(review): the reaction literals below ("๐") look mojibake-encoded —
    presumably they were originally an emoji; confirm the source encoding.
    """
    def __init__(self, bot, gif, mention):
        # bot: the Discord client; gif: the message carrying the gif;
        # mention: the member whose reaction counts as confirmation.
        self.bot = bot
        self.gif = gif
        self.mention = mention
    async def create_hold(self):
        """Add the confirmation reaction, then wait for the user to mirror it."""
        await self.gif.add_reaction("๐")
        def check(react, user):
            # Only the mentioned user, reacting with the same emoji, on this
            # exact message, counts as a confirmation.
            return (user == self.mention
                    and str(react.emoji) == '๐'
                    and react.message.id == self.gif.id
                    )
        return await self._timeout_hold(check)
    async def _timeout_hold(self, check):
        """Wait up to 10 minutes for the reaction; return True on confirmation."""
        try:
            reaction, _ = await self.bot.wait_for('reaction_add', timeout=600, check=check)
        except asyncio.TimeoutError:
            # NOTE(review): discord.py >= 1.0's Message.remove_reaction takes
            # (emoji, member); this passes only the emoji and may raise
            # TypeError on timeout — confirm the library version in use.
            await self.gif.remove_reaction('๐')
            confirm = False
        else:
            confirm = True
        return confirm
|
input = """
% Most of this example is due to Francesco Scarcello.
% It shows the necessity to check whether a partial interpretation, which is
% about to be totalised, still contains "not false" (or "must be true")
% atoms. If it does, this represents an inconsistency.
a :- not g, not h.
b :- not g, not h.
c :- not g.
c :- a,b.
h :- a,b.
g :- a,b.
a :- c, not b.
b :- c, not a.
d v e.
f :- d.
h v g.
e :- not a, not b,d.
c v h :- g, not f.
"""
output = """
{a, c, d, f, h}
{a, c, e, g}
{a, c, e, h}
{b, c, d, f, h}
{b, c, e, g}
{b, c, e, h}
"""
|
##ppg
import numpy as np
import torch
import torch.nn as nn
from torch import distributions as td
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from collections import deque, namedtuple
import gym
import time
import scipy.signal
from core_lstm import *
from env import *
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
#device = torch.device('cuda')
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# One auxiliary-phase record: rollout tensors plus the stored LSTM state.
B = namedtuple('B',['obs', 'ret', 'act', 'adv', 'logp_old','logp', 'hidden_h', 'hidden_c'])
# Stack a list of tensors, move them to the compute device, detach from autograd.
to_torch_tensor = lambda t: torch.stack(t).to(device).detach()
class ExperienceDataset(Dataset):
    """Indexable view over parallel sequences of rollout data.

    ``data`` is a tuple/list of equal-length sequences; item *i* is the tuple
    of the i-th entry taken from every sequence.
    """
    def __init__(self, data):
        super().__init__()
        self.data = data
    def __len__(self):
        # The sequences are parallel, so the first one fixes the length.
        return len(self.data[0])
    def __getitem__(self, ind):
        return tuple(sequence[ind] for sequence in self.data)
def create_shuffled_dataloader(data, batch_size):
    """Wrap *data* in an ExperienceDataset and return a shuffling DataLoader."""
    dataset = ExperienceDataset(data)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)
def combined_shape(length, shape=None):
    """Prepend a batch dimension of *length* to *shape*.

    Returns (length,) when shape is None, (length, shape) for a scalar shape,
    and (length, *shape) for a tuple/list shape.
    """
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
def discount_cumsum(x, discount):
    """Discounted cumulative sum along axis 0 (rllab trick via lfilter).

    input:
        vector x = [x0, x1, x2]
    output:
        [x0 + discount * x1 + discount^2 * x2,
         x1 + discount * x2,
         x2]
    """
    reversed_x = x[::-1]
    # IIR filter y[n] = x[n] + discount * y[n-1] applied to the reversed signal.
    filtered = scipy.signal.lfilter([1], [1, float(-discount)], reversed_x, axis=0)
    return filtered[::-1]
class PPGBuffer:
    """Fixed-size rollout buffer for PPG with LSTM hidden-state storage.

    Stores one epoch of agent-environment interaction and computes GAE-Lambda
    advantages and rewards-to-go when trajectories finish.
    """
    def __init__(self, obs_dim, act_dim, hidden_dim, size, gamma=0.99, lam=0.95, beta_s=0.01):
        # Per-timestep storage; hidden_h/hidden_c hold the LSTM state that
        # produced each action (hidden_dim is the LSTM width — 512 in ppg()).
        self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)
        self.val_buf = np.zeros(size, dtype=np.float32)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        self.hidden_h_buf = np.zeros(combined_shape(size, hidden_dim), dtype=np.float32)
        self.hidden_c_buf = np.zeros(combined_shape(size, hidden_dim), dtype=np.float32)
        print(self.hidden_h_buf.shape)
        self.gamma, self.lam, self.beta_s = gamma, lam, beta_s
        # ptr: next write slot; path_start_idx: start of current trajectory.
        self.ptr, self.path_start_idx, self.max_size = 0, 0, size
    def store(self, obs, act, rew, val, logp, hidden_h, hidden_c):
        """
        Append one timestep of agent-environment interaction to the buffer.
        """
        assert self.ptr < self.max_size     # buffer has to have room so you can store
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.val_buf[self.ptr] = val
        self.logp_buf[self.ptr] = logp
        self.hidden_h_buf[self.ptr] = hidden_h
        self.hidden_c_buf[self.ptr] = hidden_c
        self.ptr += 1
    def finish_path(self, last_val=0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.
        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """
        path_slice = slice(self.path_start_idx, self.ptr)
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)
        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = discount_cumsum(deltas, self.gamma * self.lam)
        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = discount_cumsum(rews, self.gamma)[:-1]
        self.path_start_idx = self.ptr
    def get(self):
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.
        """
        assert self.ptr == self.max_size    # buffer has to be full before you can get
        self.ptr, self.path_start_idx = 0, 0
        # the next two lines implement the advantage normalization trick
        # (mean/std are computed across MPI processes).
        adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
        self.adv_buf = (self.adv_buf - adv_mean) / adv_std
        data = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf, rew=self.rew_buf, val=self.val_buf,
                    adv=self.adv_buf, logp=self.logp_buf, hidden_h=self.hidden_h_buf, hidden_c=self.hidden_c_buf)
        #data.to(device)
        # Everything is returned as float32 tensors already moved to `device`.
        return {k: torch.as_tensor(v, dtype=torch.float32).to(device) for k,v in data.items()}
def ppg(env_fn, actor=nn.Module, critic=nn.Module, ac_kwargs=dict(), seed=0, epochs_aux=6, beta_clone=1,
steps_per_epoch=4000, epochs=50, gamma=0.999, beta_s=0.01, clip_ratio=0.2, minibatch_size=16,
lr=5e-4, lr_aux=5e-4, train_pi_iters=1, train_v_iters=1, n_pi=32, lam=0.95,
max_ep_len=1000, logger_kwargs=dict(), save_freq=10, pretrain=None,):
# Special function to avoid certain slowdowns from PyTorch + MPI combo.
setup_pytorch_for_mpi()
# Set up logger and save configuration
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
# Random seed
seed += 10000 * proc_id()
torch.manual_seed(seed)
np.random.seed(seed)
# Instantiate environment
env = env_fn()
obs_dim = env.observation_space.shape
act_dim = env.action_space.n
#act_dim = env.action_space.shape #ๅไปฃ็
# Create actor-critic module
if pretrain != None:
ac_pi = torch.load(pretrain)
else:
ac_pi = actor(obs_dim[0], act_dim, hidden_sizes=[64, 64], activation=nn.Tanh, pretrain=pretrain) # env.observation_space, env.action_space, nn.ReLU)
ac_v = critic(obs_dim[0], hidden_sizes=[64, 64], activation=nn.Tanh) # env.observation_space, nn.ReLU)
#device = torch.device('cuda')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ac_pi.to(device)
ac_v.to(device)
#ac_pi = nn.DataParallel(ac_pi)
#ac_v = nn.DataParallel(ac_v)
# Sync params across processes
# sync_params(ac_pi)
# sync_params(ac_v)
# Count variables
def count_vars(module):
return sum([np.prod(p.shape) for p in module.parameters()])
var_counts = tuple(count_vars(module) for module in [ac_pi, ac_v])
logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n'%var_counts)
# Set up experience buffer
local_steps_per_epoch = int(steps_per_epoch / num_procs())
buf = PPGBuffer(obs_dim, env.action_space.shape, 512, local_steps_per_epoch, gamma, lam, beta_s)
def compute_loss_pi(data):
obs, act, adv, logp_old , hidden_h, hidden_c = data['obs'], data['act'], data['adv'], data['logp'], data['hidden_h'], data['hidden_c']
pi, logp ,_ = ac_pi(obs, act, (hidden_h, hidden_c))
ratio = torch.exp(logp - logp_old)
clip_adv = torch.clamp(ratio, 1-clip_ratio, 1+clip_ratio) * adv
loss_pi = -(torch.min(ratio * adv, clip_adv)+beta_s * pi.entropy()).mean()
return loss_pi
def compute_loss_v(data):
obs, ret ,hidden_h, hidden_c = data['obs'], data['ret'], data['hidden_h'], data['hidden_c']
loss_v = (0.5*(ac_v(obs, (hidden_h, hidden_c)) - ret)**2).mean()
return loss_v
def compute_loss_aux(obs, ret ,hidden_h, hidden_c):
loss_aux = (0.5*(ac_v(obs, (hidden_h, hidden_c)) - ret)**2).mean()
return loss_aux
def compute_loss_joint(obs, act, adv, logp_old , hidden_h, hidden_c):
pi, logp ,_ = ac_pi(obs, act, (hidden_h, hidden_c))
loss_aux = compute_loss_aux(obs, ret ,hidden_h, hidden_c)
loss_kl = td.kl_divergence(logp_old, logp).mean()
policy_loss = aux_loss + beta_clone*loss_kl
return joint_loss
# Set up optimizers for policy and value function
pi_optimizer = Adam(ac_pi.parameters(), lr=lr)
vf_optimizer = Adam(ac_v.parameters(), lr=lr)
joint_optimizer = Adam(ac_pi.parameters(), lr=lr_aux)
aux_optimizer = Adam(ac_v.parameters(), lr=lr_aux)
# Set up model saving
logger.setup_pytorch_saver(ac_pi)
def update():
pi_l_old = compute_loss_pi(data)
pi_l_old = pi_l_old.item()
v_l_old = compute_loss_v(data).item()
for i in range(train_pi_iters):
pi_optimizer.zero_grad()
loss_pi = compute_loss_pi(data)
loss_pi.backward()
mpi_avg_grads(ac_pi)
pi_optimizer.step()
for i in range(train_v_iters):
vf_optimizer.zero_grad()
loss_v = compute_loss_v(data)
loss_v.backward()
mpi_avg_grads(ac_v)
vf_optimizer.step()
'''
obs = to_torch_tensor(obs)
ret = to_torch_tensor(ret)
act = to_torch_tensor(act)
adv = to_torch_tensor(adv)
logp_old = to_torch_tensor(logp_old)
logp = to_torch_tensor(logp)
hidden_h = to_torch_tensor(hidden_h)
hidden_c = to_torch_tensor(hidden_c)
'''
'''
'''
logger.store(LossPi=pi_l_old, LossV=v_l_old,
DeltaLossPi=(loss_pi.item() - pi_l_old),
DeltaLossV=(loss_v.item() - v_l_old))
def updateaux():
for i in range(epochs_aux):
obss = []
rets = []
acts = []
advs = []
logp_olds = []
logps = []
hidden_hs = []
hidden_cs = []
for obs, ret, act, adv, logp_old , hidden_h, hidden_c in aux_memories:
obss.append(obs)
rets.append(ret)
acts.append(act)
advs.append(adv)
logp_olds.append(logp_old)
logps.append(logp)
hidden_hs.append(hidden_h)
hidden_cs.append(hidden_c)
obss = torch.cat(obss)
rets = torch.cat(rets)
acts = torch.cat(acts)
advs = torch.cat(advs)
logp_olds = torch.cat(logp_olds)
logps = torch.cat(logps)
hidden_hs = torch.cat(hidden_hs)
hidden_cs = torch.cat(hidden_cs)
dl = create_shuffled_dataloader([obss,rets,acts,advs,logp_olds,logps,hidden_hs,hidden_cs],batch_size=minibatch_size)
for obs, ret, act, adv, logp_old , hidden_h, hidden_c in dl:
joint_optimizer.zero_grad()
loss_joint = compute_loss_joint(obs, act, adv, logp_old , hidden_h, hidden_c)
loss_joint.backward()
mpi_avg_grads(ac_pi)
joint_optimizer.step()
aux_optimizer.zero_grad()
loss_aux = compute_loss_aux(obs, ret ,hidden_h, hidden_c)
loss_aux.backward()
mpi_avg_grads(ac_v)
aux_optimizer.step()
# Prepare for interaction with environment
start_time = time.time()
o, ep_ret, ep_len = env.reset(), 0, 0
aux_memories = deque([])
# Main loop: collect experience in env and update/log each epoch
for epoch in range(epochs):
hidden = (torch.zeros((1, 512), dtype=torch.float).to(device), torch.zeros((1, 512), dtype=torch.float).to(device))
for i in range(n_pi):
for t in range(local_steps_per_epoch):
# a, v, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))
with torch.no_grad():
rr = torch.from_numpy(o.copy()).float().to(device)#.unsqueeze(0)
pi, _, hidden_ = ac_pi(rr, None, hidden)
a = pi.sample()
# logp_a = self.pi._log_prob_from_distribution(pi, a)
logp = pi.log_prob(a)#.sum(axis=-1)
v = ac_v(torch.as_tensor(o, dtype=torch.float32).to(device), hidden)
next_o, r, d, _ = env.step(a.cpu().numpy().item())
ep_ret += r
ep_len += 1
# save and log
#print(hidden[0].shape)
buf.store(o, a.cpu().numpy(), r, v.cpu().numpy(), logp.cpu().numpy(), hidden[0].cpu().numpy(), hidden[1].cpu().numpy())
logger.store(VVals=v.cpu().numpy())
# Update obs (critical!)
o = next_o
hidden = hidden_
timeout = ep_len == max_ep_len
terminal = d #or timeout
epoch_ended = t==local_steps_per_epoch-1
if terminal or epoch_ended:
if epoch_ended and not(terminal):
print('Warning: trajectory cut off by n_pi at %d steps.'%ep_len, flush=True)
# if trajectory didn't reach terminal state, bootstrap value target
if epoch_ended:
print('epoch_end')
# _, v, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
with torch.no_grad():
v =ac_v(torch.from_numpy(o).float().to(device), hidden).cpu().numpy()
else:
print('epret :',ep_ret)
v = 0
hidden= (torch.zeros((1, 512), dtype=torch.float).to(device), torch.zeros((1, 512), dtype=torch.float).to(device))
buf.finish_path(v)
if terminal:
# only save EpRet / EpLen if trajectory finished
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, ep_ret, ep_len = env.reset(), 0, 0
# Save model
if (i % save_freq == 0) or (i == i):
logger.save_state({'env': env}, None)
data = buf.get()
obs, ret, act, adv, logp_old , hidden_h, hidden_c = data['obs'], data['ret'], data['act'], data['adv'], data['logp'], data['hidden_h'], data['hidden_c']
pi, logp ,_ = ac_pi(obs, act, (hidden_h, hidden_c))
aux_memory = B(obs, ret, act, adv, logp_old, logp, hidden_h, hidden_c)
aux_memories.append(aux_memory)
# Perform PPG update!
update()
updateaux()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossV', average_only=True)
logger.log_tabular('DeltaLossPi', average_only=True)
logger.log_tabular('DeltaLossV', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
if __name__ == '__main__':
    import argparse

    # Command-line entry point for launching PPG training.
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='LunarLander-v2')  # 'HalfCheetah-v2'
    parser.add_argument('--world', type=str, default='1')
    parser.add_argument('--stage', type=str, default='1')
    parser.add_argument('--actiontype', type=str, default='complex')
    parser.add_argument('--hid', type=int, default=64)
    parser.add_argument('--l', type=int, default=2)
    parser.add_argument('--gamma', type=float, default=0.999)
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--cpu', type=int, default=1)
    parser.add_argument('--steps', type=int, default=4000)
    parser.add_argument('--epochs', type=int, default=350)
    # parser.add_argument('--pretrain', type=str, default='pretrain/ppg_lstm/pyt_save/model.pt')
    parser.add_argument('--pretrain', type=str, default='spinningup/data/race/race_s0/pyt_save/model.pt')
    parser.add_argument('--exp_name', type=str, default='race')
    cli = parser.parse_args()

    # import os
    # os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    mpi_fork(cli.cpu)  # run parallel code with mpi

    from spinup.utils.run_utils import setup_logger_kwargs
    logger_kwargs = setup_logger_kwargs(cli.exp_name, cli.seed)

    def env_fn():
        # Factory for the (world, stage) training environment.
        return create_train_env(cli.world, cli.stage, cli.actiontype)

    ppg(env_fn, actor=userActor, critic=userCritic,
        ac_kwargs=dict(hidden_sizes=[cli.hid] * cli.l), gamma=cli.gamma,
        seed=cli.seed, steps_per_epoch=cli.steps, epochs=cli.epochs, logger_kwargs=logger_kwargs,
        minibatch_size=16, clip_ratio=0.2, lr=0.0005, lr_aux=0.0005, beta_clone=1,
        train_pi_iters=1, train_v_iters=1, epochs_aux=6, beta_s=0.01, n_pi=32, pretrain=None)  # args.pretrain
|
import json
import pickle as pkl
import pathlib

# Convert pickled pair datasets ("with_actions.pkl") into one jsonlines file
# per dataset directory.
#paths = ["/srv/local1/estengel/blocks_data_for_release/good_robot_sim/stacks/",
#         "/srv/local1/estengel/blocks_data_for_release/good_robot_sim/rows/",
#         "/srv/local1/estengel/blocks_data_for_release/good_robot_real/rows/1/",
#         "/srv/local1/estengel/blocks_data_for_release/good_robot_real/stacks/1/",
#         "/srv/local1/estengel/blocks_data_for_release/good_robot_real/stacks/2/"]
paths = ["/srv/local1/estengel/blocks_data_for_release/good_robot_real/rows/1/",
         "/srv/local1/estengel/blocks_data_for_release/good_robot_real/stacks/1/",
         "/srv/local1/estengel/blocks_data_for_release/good_robot_real/stacks/2/"]

for p in paths:
    p = pathlib.Path(p)
    print(p)
    read_path = p.joinpath("with_actions.pkl")
    # Context manager ensures the pickle handle is closed promptly
    # (the original `pkl.load(open(...))` left it open until GC).
    with open(read_path, 'rb') as pkl_file:
        data = pkl.load(pkl_file)
    lines = [pair.to_jsonline() for pair in data]
    out_path = p.joinpath("pairs.jsonlines")
    with open(out_path, "w") as f1:
        for line in lines:
            f1.write(line.strip() + "\n")
    print(f"wrote to {out_path}")
|
import logging
import random
import re
import tempfile
import uuid
from enum import Enum, auto
from importlib import import_module
from pathlib import Path
from typing import List, Callable, Any, Dict, Union, Optional
import attr
from configuror import Config
from fake_useragent import UserAgent, FakeUserAgentError
from .message_pack import datetime_encoder, datetime_decoder
logger = logging.getLogger('scalpel')
def check_value_greater_or_equal_than_0(_, attribute: 'attr.Attribute', value: int) -> None:
    """attrs validator: reject negative values (zero is accepted).

    Raises ValueError when *value* is below 0.
    """
    if value >= 0:
        return
    # NOTE: the message says "positive" but 0 is accepted, as the validator name states.
    message = f'{attribute.name} must be a positive integer'
    logger.exception(message)
    raise ValueError(message)
def check_max_delay_greater_or_equal_than_min_delay(instance: 'Configuration', attribute: 'attr.Attribute',
                                                    value: int) -> None:
    """attrs validator: ensure max_request_delay is not below min_request_delay."""
    if value >= instance.min_request_delay:
        return
    message = f'{attribute.name} must be greater or equal than min_request_delay'
    logger.exception(message)
    raise ValueError(message)
def check_file_presence(_, attribute: 'attr.Attribute', filename: str) -> None:
    """attrs validator: ensure *filename* points to an existing path.

    Raises FileNotFoundError otherwise. The original error message was a
    broken f-string with no placeholder ('File (unknown) does not exist');
    it now reports the offending filename.
    """
    path = Path(filename)
    if not path.exists():
        message = f'File {filename} does not exist'
        logger.exception(f'attribute {attribute.name} does not have a valid path: {message}')
        raise FileNotFoundError(message)
def check_driver_presence(config: 'Configuration', attribute: 'attr.Attribute', filename: str) -> None:
    """attrs validator for the selenium driver path.

    The bare driver names are accepted as-is (expected to be resolvable via
    PATH); any other value must be an existing file.
    """
    known_drivers = ('chromedriver', 'geckodriver')
    if filename not in known_drivers:
        check_file_presence(config, attribute, filename)
def validate_robots_folder(_, attribute: 'attr.Attribute', path: Path) -> None:
    """attrs validator: ensure *path* is an existing, readable and writable folder.

    A dummy file is written and read back to probe permissions; it is now
    always removed afterwards (the original leaked it if the read failed).
    Raises FileNotFoundError when the folder is missing and PermissionError
    when it cannot be written to or read from.
    """
    if not path.exists():
        message = f'{attribute.name} does not exist'
        logger.exception(message)
        raise FileNotFoundError(message)
    dummy_file = path / 'dummy_file'
    try:
        dummy_file.write_text('hello')
    except PermissionError:
        logger.exception(f'Cannot write file in {path}')
        raise
    try:
        dummy_file.read_text()
    except PermissionError:
        logger.exception(f'Cannot read file in {path}')
        raise
    finally:
        # Remove the probe file even when the read above failed.
        dummy_file.unlink()
def check_file_can_be_created(_, _attribute: 'attr.Attribute', value: str) -> None:
    """attrs validator: probe that a file can be created at path *value*.

    `None` is accepted as "no file wanted". The probe file is removed so a
    later failure of another attribute's validation leaves nothing behind.
    """
    if value is None:
        return
    probe = Path(value)
    # touch helps to see if a file can be created with the given path
    probe.touch()
    # we don't want to have a created file if other attributes validation failed
    probe.unlink()
# I could just use return type "Any" but I want to insist on the fact that the function must
# first return a boolean and in the other cases, the value given at input
def bool_converter(value: Any) -> Union[bool, Any]:
    """Convert common string spellings of booleans to bool.

    Non-string inputs pass through untouched; strings that spell neither
    True nor False raise ValueError.
    """
    if not isinstance(value, str):
        logger.debug('%s is not a string, returned it as it is', value)
        return value
    lowered = value.lower()
    if lowered in ('1', 'true', 'yes', 'y'):
        logger.debug('converts %s to True', value)
        return True
    if lowered in ('0', 'false', 'no', 'n'):
        logger.debug('converts %s to False', value)
        return False
    message = f'{value} does not represent a boolean'
    logger.exception(message)
    raise ValueError(message)
def get_callable_from_string(callable_string: str) -> Callable:
    """Resolve a dotted path like 'package.module.func' to the object it names."""
    module_path, _, attribute_name = callable_string.rpartition('.')
    module = import_module(module_path)
    return getattr(module, attribute_name)
# The same logic as the bool converter applies to the type of return
def callable_list_converter(value: Any) -> Union[List[Callable], Any]:
    """Convert a separator-delimited string, or a list of dotted names, to callables.

    Inputs that are neither a string nor a list of strings are returned
    unchanged.
    """
    if isinstance(value, list):
        if not all(isinstance(item, str) for item in value):
            logger.debug('not all items in the list are a string, returned it as it: %s', value)
            return value
        dotted_names = value
    elif isinstance(value, str):
        # split on comma / semicolon / colon (with optional spaces) or whitespace
        dotted_names = re.split(r',\s*|;\s*|:\s*|\s+', value)
    else:
        logger.debug('%s is not a string or a list of strings, returned it as it is', value)
        return value
    resolved = [get_callable_from_string(name) for name in dotted_names]
    logger.debug('returning callables: %s', resolved)
    return resolved
def msgpack_converter(value: Any) -> Union[Callable, Any]:
    """Resolve a dotted-path string to a callable; pass non-strings through."""
    if isinstance(value, str):
        return get_callable_from_string(value)
    logger.debug(f'{value} is not a string, returning it as it')
    return value
def str_converter(value: Any) -> Optional[str]:
    """Stringify *value*: None stays None, Path becomes its absolute path."""
    if value is None:
        return None
    if isinstance(value, Path):
        return str(value.absolute())
    return str(value)
class Browser(Enum):
    """An enum with different browser values.

    Used by the selenium spider configuration to select the webdriver
    executable (geckodriver for FIREFOX, chromedriver for CHROME — see
    `Configuration._get_driver_executable_path`).
    """
    FIREFOX = auto()
    CHROME = auto()
def browser_converter(value: Any) -> Any:
    """Convert a case-insensitive browser name string to a `Browser` member.

    Non-string values, and strings matching no member, are returned
    unchanged (validation happens elsewhere).
    """
    if not isinstance(value, str):
        return value
    name = value.upper()
    for member in (Browser.FIREFOX, Browser.CHROME):
        if name == member.name:
            return member
    return value
# Reusable attrs validator lists shared by the Configuration attributes below.
positive_int_validators = [attr.validators.instance_of(int), check_value_greater_or_equal_than_0]
max_delay_validators = [*positive_int_validators, check_max_delay_greater_or_equal_than_min_delay]
positive_float_validators = [attr.validators.instance_of(float), check_value_greater_or_equal_than_0]
# response_middlewares / item_processors must be a list or tuple of callables
middleware_validator = attr.validators.deep_iterable(
    member_validator=attr.validators.is_callable(),
    iterable_validator=attr.validators.instance_of((list, tuple))
)
backup_filename_validators = [attr.validators.instance_of(str), check_file_can_be_created]
# selenium_driver_log_file may be None, hence the optional() wrapper
selenium_path_validators = [attr.validators.optional(attr.validators.instance_of(str)), check_file_can_be_created]
@attr.s(frozen=True)
class Configuration:
    """
    Configure variables for your spider.

    **Parameters:**

    * **min_request_delay:** The minimum delay to wait between two http requests. Defaults to 0s.
    * **max_request_delay:** The maximum delay to wait between two http requests. Defaults to 0s.
    * **fetch_timeout:** The timeout to fetch http resources using the inner
    [httpx](https://www.python-httpx.org/) client. Defaults to 5s.
    * **selenium_find_timeout:** The timeout for selenium driver to find an element in a page. Defaults to 10s.
    * **selenium_driver_log_file:** The file where the browser logs debug messages. Defaults to *driver.log*.
    If you do not want to create one, just pass `None`.
    * **selenium_browser:** The browser to use with the selenium spider. You can use the `Browser` enum to specify the
    value. Possible values are `Browser.FIREFOX` and `Browser.CHROME`. Defaults to `Browser.FIREFOX`.
    * **selenium_driver_executable_path:** The path to the browser driver. Defaults to *geckodriver* if
    `Browser.FIREFOX` is selected as *selenium_browser*, otherwise defaults to *chromedriver*.
    * **user_agent:** The user agent to fake. Mainly useful for the static spider. Defaults to a random value provided
    by [fake-useragent](https://pypi.org/project/fake-useragent/) and if it does not work, falls back to
    *Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36*
    * **follow_robots_txt:** Decide whether or not the spider should follow robots.txt rules on the website you are
    scraping. Defaults to `False`.
    * **robots_cache_folder:** A folder to cache content of different website robots.txt files to avoid retrieving
    them each time you want to analyze an html page. Defaults to the system temporary directory.
    * **backup_filename:** The filename where scraped items will be written. If you don't want one, simply pass `None`.
    Defaults to *backup-{uuid}.mp* where uuid is a `uuid.uuid4` string value. Note that values inserted in this file
    are streamed using `msgpack`. Look at the documentation to see how to use it.
    * **response_middlewares:** A list of callables that will be called with the callable that fetches the http
    resource. This parameter is only useful for the **static spider**. Defaults to an empty list.
    * **item_processors:** A list of callables that will be called with a scraped item. Defaults to an empty list.
    * **msgpack_encoder:** A callable that will be called when `msgpack` serializes an item.
    Defaults to `scalpel.datetime_encoder`.
    * **msgpack_decoder:** A callable that will be called when `msgpack` deserializes an item.
    Defaults to `scalpel.datetime_decoder`.

    Usage:

    ```
    from scalpel import Configuration, Browser

    config = Configuration(
        min_request_delay=1, max_request_delay=3, follow_robots_txt=True, selenium_browser=Browser.CHROME
    )
    ```
    """
    min_request_delay: int = attr.ib(default=0, converter=int, validator=positive_int_validators)
    max_request_delay: int = attr.ib(default=0, converter=int, validator=max_delay_validators)
    fetch_timeout: float = attr.ib(default=5.0, converter=float, validator=positive_float_validators)
    selenium_find_timeout: float = attr.ib(default=10.0, converter=float, validator=positive_float_validators)
    selenium_driver_log_file: Optional[str] = attr.ib(
        converter=str_converter, default='driver.log', validator=selenium_path_validators
    )
    selenium_browser: Browser = attr.ib(
        default=Browser.FIREFOX, converter=browser_converter, validator=attr.validators.in_(Browser)
    )
    # the default of this attribute depends on selenium_browser, so the declaration
    # order is important here (attrs computes defaults in declaration order)
    selenium_driver_executable_path: str = attr.ib(
        converter=str_converter, validator=[attr.validators.instance_of(str), check_driver_presence]
    )
    user_agent: str = attr.ib(validator=attr.validators.instance_of(str))
    follow_robots_txt: bool = attr.ib(
        default=False, converter=bool_converter, validator=attr.validators.instance_of(bool)
    )
    robots_cache_folder: Path = attr.ib(converter=Path, validator=validate_robots_folder)
    backup_filename: str = attr.ib(validator=backup_filename_validators)
    response_middlewares: List[Callable] = attr.ib(
        repr=False, converter=callable_list_converter, factory=list, validator=middleware_validator
    )
    item_processors: List[Callable] = attr.ib(
        repr=False, converter=callable_list_converter, factory=list, validator=middleware_validator
    )
    msgpack_encoder: Callable = attr.ib(
        repr=False, converter=msgpack_converter, default=datetime_encoder, validator=attr.validators.is_callable()
    )
    msgpack_decoder: Callable = attr.ib(
        repr=False, converter=msgpack_converter, default=datetime_decoder, validator=attr.validators.is_callable()
    )

    @user_agent.default
    def _get_default_user_agent(self) -> str:
        """Return a random user agent string, or a fixed Chrome UA on failure."""
        try:
            ua = UserAgent()
            user_agent = ua.random
            logger.debug('returning a random user agent: %s', user_agent)
            return user_agent
        except FakeUserAgentError:
            # for the fallback, I use a recent version found on http://useragentstring.com/
            # not sure if this is the best strategy but we will stick with it for now
            fallback = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
                       'Chrome/41.0.2225.0 Safari/537.36'
            logger.debug('returning fallback value for user agent: %s', fallback)
            return fallback

    @robots_cache_folder.default
    def _get_robots_cache_folder(self) -> Path:
        """Create and return a temporary folder used to cache robots.txt content."""
        temp_dir = Path(tempfile.mkdtemp(prefix='robots_'))
        logger.debug('returning default created temporary directory: %s', temp_dir)
        return temp_dir

    @backup_filename.default
    def _get_backup_filename(self) -> str:
        """Return a unique default backup filename (backup-<uuid4>.mp)."""
        name = f'backup-{uuid.uuid4()}.mp'
        logger.debug('returning computed backup filename: %s', name)
        return name

    @selenium_driver_executable_path.default
    def _get_driver_executable_path(self) -> str:
        """Pick the driver executable matching the selected selenium browser."""
        executable = 'geckodriver' if self.selenium_browser is Browser.FIREFOX else 'chromedriver'
        # I don't use self.selenium_browser.name attribute because some tests fail here when testing browser attribute
        # with a string
        logger.debug(
            'returning default executable path to %s since browser selected is %s', executable,
            self.selenium_browser
        )
        return executable

    @property
    def request_delay(self) -> int:
        """
        A read-only property which is a random value between `min_request_delay` and `max_request_delay`
        (both sides included) and used to wait between two http requests.
        """
        # for bandit, using random module to generate pseudo-random values is not a good
        # idea for cryptography / security purposes, but since we are not in this case, we just
        # ignore this warning.
        # More about the warning: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b311-random
        delay = random.randint(self.min_request_delay, self.max_request_delay)  # nosec
        logger.debug('returning computed request delay: %s s', delay)
        return delay

    @staticmethod
    def _get_dict_with_lower_keys(data: Dict[str, Any]) -> Dict[str, Any]:
        """Return a copy of *data* with every top-level key lower-cased."""
        data = {key.lower(): value for key, value in data.items()}
        logger.debug('returning dict with lower keys: %s', data)
        return data

    @classmethod
    def _scalpel_attributes(cls, data: Dict[str, Any]) -> Dict[str, Any]:
        """Extract, from the 'scalpel' namespace of *data*, the keys matching this class' attributes."""
        data_key = 'scalpel'
        attributes = {}
        if data_key not in data:
            logger.debug('no namespace "scalpel" in %s, returning empty attributes', data)
            return attributes
        data = cls._get_dict_with_lower_keys(data[data_key])
        for attribute in attr.fields(cls):
            if attribute.name != '_config' and attribute.name in data:
                attributes[attribute.name] = data[attribute.name]
        logger.debug('returning scalpel attributes: %s', attributes)
        return attributes

    @staticmethod
    def _check_file(config_file: Union[Path, str], file_type: str) -> None:
        """Validate that *config_file* is a Path/str pointing to an existing file; raise otherwise."""
        if not isinstance(config_file, (Path, str)):
            error_message = f'{file_type} file must be of type Path or str but you provided {type(config_file)}'
            logger.exception(error_message)
            raise TypeError(error_message)
        config_file = Path(config_file)
        if not config_file.is_file():
            error_message = f'file {config_file} does not exist'
            logger.exception(error_message)
            raise FileNotFoundError(error_message)

    @classmethod
    def load_from_yaml(cls, yaml_file: Union[Path, str]) -> 'Configuration':
        """
        Loads configuration from a yaml file.

        **Returns:** `Configuration`

        Usage:

        ```yaml
        # conf.yaml
        scalpel:
          fetch_timeout: 4.0
          user_agent: Mozilla/5.0
          follow_robots_txt: true
        ```

        ```
        from scalpel import Configuration

        conf = Configuration.load_from_yaml('conf.yaml')
        conf.fetch_timeout  # equals to 4.0
        ```
        """
        cls._check_file(yaml_file, 'yaml')
        configuror = Config(mapping_files={'yaml': [f'{yaml_file}']})
        logger.debug('loading configuration from yaml file: %s', f'{yaml_file}')
        return cls(**cls._scalpel_attributes(configuror))

    @classmethod
    def load_from_toml(cls, toml_file: Union[Path, str]) -> 'Configuration':
        """
        Loads configuration from a toml file.

        **Returns:** `Configuration`

        Usage:

        ```toml
        # conf.toml
        [scalpel]
        user_agent = "Mozilla/5.0"
        fetch_timeout = 4.0
        follow_robots_txt = true
        ```

        ```
        from scalpel import Configuration

        conf = Configuration.load_from_toml('conf.toml')
        conf.fetch_timeout  # equals to 4.0
        ```
        """
        cls._check_file(toml_file, 'toml')
        configuror = Config(mapping_files={'toml': [f'{toml_file}']})
        logger.debug('loading configuration from toml file: %s', f'{toml_file}')
        return cls(**cls._scalpel_attributes(configuror))

    @classmethod
    def load_from_dotenv(cls, env_file: Union[Path, str]) -> 'Configuration':
        """
        Loads configuration from a .env file.

        **Returns:** `Configuration`

        Usage:

        ```bash
        # .env
        SCALPEL_USER_AGENT = Mozilla/5.0
        SCALPEL_FETCH_TIMEOUT = 4.0
        SCALPEL_FOLLOW_ROBOTS_TXT = yes
        ```

        ```
        from scalpel import Configuration

        conf = Configuration.load_from_dotenv('.env')
        conf.follow_robots_txt  # equals to True
        ```
        """
        cls._check_file(env_file, 'env')
        configuror = Config(mapping_files={'env': [f'{env_file}']})
        data = configuror.get_dict_from_namespace('SCALPEL_')
        data = {'scalpel': data}  # little trick to search attributes using _scalpel_attributes class method
        logger.debug('loading configuration from .env file: %s', f'{env_file}')
        return cls(**cls._scalpel_attributes(data))
|
import urllib.request

# Download daily historical quotes for a basket of tickers from the Yahoo
# iChart CSV endpoint and keep the adjusted-close column per ticker.
# NOTE(review): ichart.finance.yahoo.com was decommissioned; this only works
# against a mirror of that API.
tickers = ['YHOO', 'AVP', 'BIIB', 'BP', 'CL', 'CVX',
           'DNA', 'EXPE', 'GOOG', 'PG', 'XOM', 'AMGN']
shortest = 300  # length of the shortest price series seen so far
prices = {}
dates = None
for t in tickers:
    url = 'http://ichart.finance.yahoo.com/table.csv?' + \
          's=%s&d=11&e=26&f=2006&g=d&a=3&b=12&c=1996' % t + \
          '&ignore=.csv'
    print(url)
    # Python 3 fixes: `import urllib` alone does not expose urllib.request,
    # and urlopen yields bytes -- decode before splitting on a str comma.
    rows = [line.decode('utf-8') for line in urllib.request.urlopen(url).readlines()]
    prices[t] = [float(r.split(',')[5]) for r in rows[1:] if r.strip() != '']
    if len(prices[t]) < shortest:
        shortest = len(prices[t])
    if not dates:
        # header row (rows[0]) is skipped; first data column is the date
        dates = [r.split(',')[0] for r in rows[1:] if r.strip() != '']
|
import gym
import robo_gym
import math
import numpy as np
import pytest
# UR robot models under test: ur5 runs on every commit, the other models
# only in the nightly pipeline.
ur_models = [pytest.param('ur3', marks=pytest.mark.nightly),
             pytest.param('ur3e', marks=pytest.mark.nightly),
             pytest.param('ur5', marks=pytest.mark.commit),
             pytest.param('ur5e', marks=pytest.mark.nightly),
             pytest.param('ur10', marks=pytest.mark.nightly),
             pytest.param('ur10e', marks=pytest.mark.nightly),
             pytest.param('ur16e', marks=pytest.mark.nightly),
             ]
@pytest.fixture(scope='module', params=ur_models)
def env(request):
    """Module-scoped simulated UR environment, parametrized per robot model."""
    sim_env = gym.make('EndEffectorPositioningURSim-v0', ip='robot-servers', ur_model=request.param)
    sim_env.request_param = request.param
    yield sim_env
    # teardown: shut the simulation down once the module's tests are done
    sim_env.kill_sim()
@pytest.mark.commit
def test_initialization(env):
    """Smoke-test reset/step and observation-space membership."""
    assert env.ur.model == env.request_param
    env.reset()
    done = False
    env.step([0, 0, 0, 0, 0])
    for _ in range(10):
        if done:
            continue
        observation, _, done, _ = env.step(env.action_space.sample())
        assert env.observation_space.contains(observation)
@pytest.mark.nightly
@pytest.mark.flaky(reruns=3)
def test_self_collision(env):
    """Driving into a self-colliding joint configuration must end with 'collision'."""
    collision_joint_config = {
        'ur3': [0.0, 0.0, -3.14, -1.77, 1.0],
        'ur3e': [0.0, -1.88, 2.8, -0.75, -1.88],
        'ur5': [0.0, -1.26, -3.14, 0.0, 0.0],
        'ur5e': [0.0, -0.50, -3.14, 3.14, 0.0],
        'ur10': [0.0, -1.5, 3.14, 0.0, 0.0],
        'ur10e': [0.0, -0.15, -2.83, -2.51, 1.63],
        'ur16e': [0.0, -1.15, 2.9, -0.19, 0.42],
    }
    env.reset()
    action = env.ur.normalize_joint_values(collision_joint_config[env.ur.model])
    done = False
    while not done:
        _, _, done, info = env.step(action)
    assert info['final_status'] == 'collision'
@pytest.mark.nightly
@pytest.mark.flaky(reruns=3)
def test_collision_with_ground(env):
    """Driving the arm into the ground must end the episode with 'collision'."""
    collision_joint_config = {
        'ur3': [0.0, 2.64, -1.95, -2.98, 0.41],
        'ur3e': [1.13, 1.88, -2.19, -3.43, 2.43],
        'ur5': [0.0, 1.0, 1.8, 0.0, 0.0],
        'ur5e': [0.0, 3.52, -2.58, 0.0, 0.0],
        'ur10': [0.0, 1.0, 1.15, 0.0, 0.0],
        'ur10e': [-2.14, -0.13, 0.63, -1.13, 1.63],
        'ur16e': [0.0, -0.15, 1.32, 0.0, 1.63],
    }
    env.reset()
    action = env.ur.normalize_joint_values(collision_joint_config[env.ur.model])
    done = False
    while not done:
        _, _, done, info = env.step(action)
    assert info['final_status'] == 'collision'
@pytest.mark.nightly
def test_reset_joint_positions(env):
    """reset(joint_positions=...) must bring the arm close to the requested pose."""
    requested = [0.2, -2.5, 1.1, -2.0, -1.2, 1.2]
    state = env.reset(joint_positions=requested)
    expected = env.ur.normalize_joint_values(requested)
    assert np.isclose(expected, state[3:9], atol=0.1).all()
@pytest.mark.commit
def test_object_coordinates(env):
    """Check that the target object's polar coordinates in the observation match
    a known end-effector/target geometry for each robot model."""
    params = {
        #? robot up-right, target_coord_in_ee_frame 0.0, -0.3, 0.2, coordinates of target calculated using official dimensions from DH parameters.
        #? first value is d4+d6
        #? second value is: d1+a2+a3+d5
        'ur3': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.194 +0.2), (0.692 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur3e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.223 +0.2), (0.694 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur5': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.191 +0.2), (1.001 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur5e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.233 +0.2), (1.079 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur10': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.256 +0.2), (1.428 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur10e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.291 +0.2), (1.485 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur16e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.291 +0.2), (1.139 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}}
    }
    state = env.reset(joint_positions=params[env.ur.model]['joint_positions'], ee_target_pose=params[env.ur.model]['object_coords'])
    # state[0] is r, state[1:3] are (theta, phi) -- angles get a looser tolerance
    assert np.isclose([params[env.ur.model]['polar_coords']['r']], state[0], atol=0.05).all()
    assert np.isclose([params[env.ur.model]['polar_coords']['theta'], params[env.ur.model]['polar_coords']['phi']], state[1:3], atol=0.2).all()
# Parametrization for test_fixed_joints: (env name, six fix_* flags, model).
test_ur_fixed_joints = [
    ('EndEffectorPositioningURSim-v0', True, False, False, False, False, False, 'ur3'),  # fixed shoulder_pan
    ('EndEffectorPositioningURSim-v0', False, True, False, False, False, False, 'ur3e'),  # fixed shoulder_lift
    ('EndEffectorPositioningURSim-v0', False, False, False, False, False, True, 'ur5'),  # fixed wrist_3
    ('EndEffectorPositioningURSim-v0', True, False, True, False, False, False, 'ur5e'),  # fixed Base and Elbow
    ('EndEffectorPositioningURSim-v0', False, False, True, False, False, False, 'ur10'),  # fixed elbow
    ('EndEffectorPositioningURSim-v0', False, False, False, True, False, False, 'ur10e'),  # fixed wrist_1
    ('EndEffectorPositioningURSim-v0', False, False, False, False, True, False, 'ur16e'),  # fixed wrist_2
]
@pytest.mark.nightly
@pytest.mark.parametrize('env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model', test_ur_fixed_joints)
@pytest.mark.flaky(reruns=3)
def test_fixed_joints(env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model):
    """Joints flagged as fixed must not move over 20 steps of a random action."""
    env = gym.make(env_name, ip='robot-servers', fix_base=fix_base, fix_shoulder=fix_shoulder, fix_elbow=fix_elbow,
                   fix_wrist_1=fix_wrist_1, fix_wrist_2=fix_wrist_2, fix_wrist_3=fix_wrist_3, ur_model=ur_model)
    state = env.reset()
    initial_joint_positions = state[3:9]
    # Repeat the same random action 20 times
    action = env.action_space.sample()
    for _ in range(20):
        state, _, _, _ = env.step(action)
    joint_positions = state[3:9]
    # Flags are ordered base, shoulder, elbow, wrist_1, wrist_2, wrist_3 --
    # matching the joint order in state[3:9].
    fixed_flags = (fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3)
    for joint_index, fixed in enumerate(fixed_flags):
        if fixed:
            assert math.isclose(initial_joint_positions[joint_index], joint_positions[joint_index], abs_tol=0.05)
    env.kill_sim()
@pytest.mark.commit
def test_success(env):
    """Moving the end effector onto the target pose must finish with 'success'."""
    targets = {
        'ur3': {'object_coords': [0.0, 0.194, 0.692, 0.0, 0.0, 0.0]},
        'ur3e': {'object_coords': [0.0, 0.223, 0.694, 0.0, 0.0, 0.0]},
        'ur5': {'object_coords': [0.0, 0.191, 1.001, 0.0, 0.0, 0.0]},
        'ur5e': {'object_coords': [0.0, 0.233, 1.079, 0.0, 0.0, 0.0]},
        'ur10': {'object_coords': [0.0, 0.256, 1.428, 0.0, 0.0, 0.0]},
        'ur10e': {'object_coords': [0.0, 0.291, 1.485, 0.0, 0.0, 0.0]},
        'ur16e': {'object_coords': [0.0, 0.291, 1.139, 0.0, 0.0, 0.0]},
    }
    env.reset(joint_positions=[0.0, -1.3, 0.0, -1.3, 0.0, 0.0], ee_target_pose=targets[env.ur.model]['object_coords'])
    action = env.ur.normalize_joint_values([0.0, -1.57, 0.0, -1.57, 0.0])
    done = False
    while not done:
        _, _, done, info = env.step(action)
    assert info['final_status'] == 'success'
@pytest.mark.commit
def test_continue_on_success(env):
    """After a success, reset(continue_on_success=True) must keep the arm pose."""
    targets = {
        'ur3': {'object_coords': [0.0, 0.194, 0.692, 0.0, 0.0, 0.0]},
        'ur3e': {'object_coords': [0.0, 0.223, 0.694, 0.0, 0.0, 0.0]},
        'ur5': {'object_coords': [0.0, 0.191, 1.001, 0.0, 0.0, 0.0]},
        'ur5e': {'object_coords': [0.0, 0.233, 1.079, 0.0, 0.0, 0.0]},
        'ur10': {'object_coords': [0.0, 0.256, 1.428, 0.0, 0.0, 0.0]},
        'ur10e': {'object_coords': [0.0, 0.291, 1.485, 0.0, 0.0, 0.0]},
        'ur16e': {'object_coords': [0.0, 0.291, 1.139, 0.0, 0.0, 0.0]},
    }
    env.reset(joint_positions=[0.0, -1.3, 0.0, -1.3, 0.0, 0.0], ee_target_pose=targets[env.ur.model]['object_coords'])
    action = env.ur.normalize_joint_values([0.0, -1.57, 0.0, -1.57, 0.0])
    done = False
    while not done:
        state, _, done, info = env.step(action)
    assert info['final_status'] == 'success'
    joint_positions = state[3:9]
    state = env.reset(continue_on_success=True)
    assert np.isclose(joint_positions, state[3:9], atol=0.05).all()
|
import sys
import os
import copy
from yaku.context \
import \
get_bld, get_cfg
from yaku.scheduler \
import \
run_tasks
def configure(ctx):
    """Configuration stage: register the C tasks ("ctasks") tool on the context."""
    ctx.use_tools(["ctasks"])
def build(ctx):
    """Build stage: demonstrates overriding vs extending builder options."""
    # To *override* options, you should clone a builder + replacing
    # options there
    builder = ctx.builders["ctasks"].clone()
    builder.env["CFLAGS"] = ["-g", "-DNDEBUG"]
    builder.program("main", sources=["src/main.c"], env={"CFLAGS": ["-O2"]})
    builder.program("main2", sources=["src/main2.c"])
    # env argument to methods *add* option - note that main3.c is not
    # built with -g nor -DNDEBUG
    program = ctx.builders["ctasks"].program
    # NOTE(review): the target name "main2" is reused here for src/main3.c --
    # looks like a copy-paste slip for "main3"; confirm the intended target name.
    program("main2", sources=["src/main3.c"], env={"CFLAGS": ["-Os"]})
if __name__ == "__main__":
    # Run configure then build; each stage stores its context so the
    # following stage (and subsequent runs) can pick it up.
    ctx = get_cfg()
    configure(ctx)
    ctx.store()
    ctx = get_bld()
    build(ctx)
    run_tasks(ctx)
    ctx.store()
|
import ConfigSpace as CS
import multiprocessing as mp
from .bayesopt.autogluon.searcher_factory import gp_fifo_searcher_factory, gp_multifidelity_searcher_factory, gp_fifo_searcher_defaults, gp_multifidelity_searcher_defaults
from .searcher import BaseSearcher
from ..utils.default_arguments import check_and_merge_defaults
__all__ = ['GPFIFOSearcher',
'GPMultiFidelitySearcher']
def _to_config_cs(config_space: CS.ConfigurationSpace, config: dict) \
        -> CS.Configuration:
    """Wrap a plain config dict as a CS.Configuration bound to *config_space*."""
    return CS.Configuration(config_space, values=config)
class GPFIFOSearcher(BaseSearcher):
"""Gaussian process Bayesian optimization for FIFO scheduler
This searcher must be used with `FIFOScheduler`. It provides Bayesian
optimization, based on a Gaussian process surrogate model. It is created
along with the scheduler, using `searcher='bayesopt'`:
Pending configurations (for which evaluation tasks are currently running)
are dealt with by fantasizing (i.e., target values are drawn from the
current posterior, and acquisition functions are averaged over this
sample, see `num_fantasy_samples`).
The GP surrogate model uses a Matern 5/2 covariance function with automatic
relevance determination (ARD) of input attributes, and a constant mean
function. The acquisition function is expected improvement (EI). All
hyperparameters of the surrogate model are estimated by empirical Bayes
(maximizing the marginal likelihood). In general, this hyperparameter
fitting is the most expensive part of a `get_config` call.
The following happens in `get_config`. For the first `num_init_random` calls,
a config is drawn at random (the very first call results in the default
config of the space). Afterwards, Bayesian optimization is used, unless
there are no finished evaluations yet.
First, model hyperparameter are refit. This step can be skipped (see
`opt_skip*` parameters). Next, `num_init_candidates` configs are sampled at
random, and ranked by a scoring function (`initial_scoring`). BFGS local
optimization is then run starting from the top scoring config, where EI
is minimized.
Parameters
----------
configspace : ConfigSpace.ConfigurationSpace
Config space of `train_fn`, equal to `train_fn.cs`
reward_attribute : str
Name of reward attribute reported by `train_fn`, equal to `reward_attr`
of `scheduler`
debug_log : bool (default: False)
If True, both searcher and scheduler output an informative log, from
which the configs chosen and decisions being made can be traced.
first_is_default : bool (default: True)
If True, the first config to be evaluated is the default one of the
config space. Otherwise, this first config is drawn at random.
random_seed : int
Seed for pseudo-random number generator used.
num_init_random : int
Number of initial `get_config` calls for which randomly sampled configs
are returned. Afterwards, Bayesian optimization is used
num_init_candidates : int
Number of initial candidates sampled at random in order to seed the
search for `get_config`
num_fantasy_samples : int
Number of samples drawn for fantasizing (latent target values for
pending candidates)
initial_scoring : str
Scoring function to rank initial candidates (local optimization of EI
is started from top scorer). Values are 'thompson_indep' (independent
Thompson sampling; randomized score, which can increase exploration),
'acq_func' (score is the same (EI) acquisition function which is afterwards
locally optimized).
opt_nstarts : int
Parameter for hyperparameter fitting. Number of random restarts
opt_maxiter : int
Parameter for hyperparameter fitting. Maximum number of iterations
per restart
opt_warmstart : bool
Parameter for hyperparameter fitting. If True, each fitting is started
from the previous optimum. Not recommended in general
opt_verbose : bool
Parameter for hyperparameter fitting. If True, lots of output
opt_skip_init_length : int
Parameter for hyperparameter fitting, skip predicate. Fitting is never
skipped as long as number of observations below this threshold
opt_skip_period : int
Parameter for hyperparameter fitting, skip predicate. If >1, and number
of observations above `opt_skip_init_length`, fitting is done only
K-th call, and skipped otherwise
map_reward : str or MapReward (default: '1_minus_x')
AutoGluon is maximizing reward, while internally, Bayesian optimization
is minimizing the criterion. States how reward is mapped to criterion.
This must be a strictly decreasing function. Values are '1_minus_x'
(criterion = 1 - reward), 'minus_x' (criterion = -reward).
From a technical standpoint, it does not matter what is chosen here,
because criterion is only used internally. Also note that criterion
data is always normalized to mean 0, variance 1 before fitted with a
GP.
Examples
--------
>>> import autogluon as ag
>>> @ag.args(
... lr=ag.space.Real(1e-3, 1e-2, log=True))
>>> def train_fn(args, reporter):
... reporter(accuracy = args.lr ** 2)
>>> searcher_options = {
... 'map_reward': 'minus_x',
... 'opt_skip_period': 2}
>>> scheduler = ag.scheduler.FIFOScheduler(
... train_fn, searcher='bayesopt', searcher_options=searcher_options,
... num_trials=10, reward_attr='accuracy')
"""
    def __init__(self, **kwargs):
        """Create the searcher, or wrap an existing internal one.

        `_gp_searcher` is an internal kwarg: `clone_from_state` passes a
        ready-made searcher here (copy construction). Otherwise, the
        internal searcher is built by the factory from `kwargs` merged
        with the FIFO searcher defaults.
        """
        _gp_searcher = kwargs.get('_gp_searcher')
        if _gp_searcher is None:
            _kwargs = check_and_merge_defaults(
                kwargs, *gp_fifo_searcher_defaults(),
                dict_name='search_options')
            _gp_searcher = gp_fifo_searcher_factory(**_kwargs)
        super().__init__(
            _gp_searcher.hp_ranges.config_space,
            reward_attribute=kwargs.get('reward_attribute'))
        self.gp_searcher = _gp_searcher
        # This lock protects gp_searcher. We are not using self.LOCK, this
        # can lead to deadlocks when superclass methods are called
        self._gp_lock = mp.Lock()
    def configure_scheduler(self, scheduler):
        """Reject any scheduler that is not a FIFOScheduler, then defer
        to the superclass hook.
        """
        # Local import, presumably to avoid a circular module dependency
        # between searcher and scheduler modules -- confirm if refactoring.
        from ..scheduler.fifo import FIFOScheduler
        assert isinstance(scheduler, FIFOScheduler), \
            "This searcher requires FIFOScheduler scheduler"
        super().configure_scheduler(scheduler)
def get_config(self, **kwargs):
with self._gp_lock:
config_cs = self.gp_searcher.get_config()
return config_cs.get_dictionary()
    def update(self, config, **kwargs):
        """Feed one (config, reward) observation to the GP model.

        `kwargs` must contain the reward value under key
        `self._reward_attribute`.
        """
        # Superclass bookkeeping runs outside the GP lock on purpose
        # (see lock comment in __init__ about deadlocks).
        super().update(config, **kwargs)
        with self._gp_lock:
            config_cs = self._to_config_cs(config)
            self.gp_searcher.update(
                config_cs, reward=kwargs[self._reward_attribute])
    def register_pending(self, config, milestone=None):
        """Mark `config` as pending (evaluation started, result not in).

        `milestone` is accepted for interface compatibility with the
        multi-fidelity searcher but is ignored here.
        """
        with self._gp_lock:
            config_cs = self._to_config_cs(config)
            self.gp_searcher.register_pending(config_cs)
def evaluation_failed(self, config, **kwargs):
with self._gp_lock:
config_cs = self._to_config_cs(config)
self.gp_searcher.evaluation_failed(config_cs)
def dataset_size(self):
with self._gp_lock:
return self.gp_searcher.dataset_size()
    def cumulative_profile_record(self):
        """Profiling record accumulated by the internal searcher."""
        with self._gp_lock:
            return self.gp_searcher.cumulative_profile_record()
    def model_parameters(self):
        """Current surrogate model (hyper)parameters.

        Note: delegates to `get_params()` on the internal searcher; the
        public name differs from the delegate's on purpose.
        """
        with self._gp_lock:
            return self.gp_searcher.get_params()
    def get_state(self):
        """Snapshot of the internal searcher state (see `clone_from_state`)."""
        with self._gp_lock:
            return self.gp_searcher.get_state()
    def clone_from_state(self, state):
        """Build a new GPFIFOSearcher from a `get_state` snapshot."""
        with self._gp_lock:
            _gp_searcher = self.gp_searcher.clone_from_state(state)
            # Use copy constructor: __init__ accepts a ready-made internal
            # searcher via the `_gp_searcher` kwarg.
            return GPFIFOSearcher(
                reward_attribute=self._reward_attribute,
                _gp_searcher=_gp_searcher)
    @property
    def debug_log(self):
        """Debug log object of the internal GP searcher."""
        with self._gp_lock:
            return self.gp_searcher.debug_log
    def _to_config_cs(self, config):
        # Delegate to the module-level helper, binding our config space.
        return _to_config_cs(self.gp_searcher.hp_ranges.config_space, config)
class GPMultiFidelitySearcher(BaseSearcher):
    """Gaussian process Bayesian optimization for Hyperband scheduler

    This searcher must be used with `HyperbandScheduler`. It provides a novel
    combination of Bayesian optimization, based on a Gaussian process surrogate
    model, with Hyperband scheduling. In particular, observations across
    resource levels are modelled jointly. It is created along with the
    scheduler, using `searcher='bayesopt'`:

    Most of `GPFIFOSearcher` comments apply here as well.
    In multi-fidelity HPO, we optimize a function f(x, r), x the configuration,
    r the resource (or time) attribute. The latter must be a positive integer.
    In most applications, `time_attr` == 'epoch', and the resource is the number
    of epochs already trained.

    We model the function f(x, r) jointly over all resource levels r at which
    it is observed (but see `searcher_data` in `HyperbandScheduler`). The kernel
    and mean function of our surrogate model are over (x, r). The surrogate
    model is selected by `gp_resource_kernel`. More details about the supported
    kernels is in:

        Tiao, Klein, Archambeau, Seeger (2020)
        Model-based Asynchronous Hyperparameter Optimization
        https://arxiv.org/abs/2003.10865

    The acquisition function (EI) which is optimized in `get_config`, is obtained
    by fixing the resource level r to a value which is determined depending on
    the current state. If `resource_acq` == 'bohb', r is the largest value
    <= max_t, where we have seen >= dimension(x) metric values. If
    `resource_acq` == 'first', r is the first milestone which config x would
    reach when started.

    Parameters
    ----------
    configspace : ConfigSpace.ConfigurationSpace
        Config space of `train_fn`, equal to `train_fn.cs`
    reward_attribute : str
        Name of reward attribute reported by `train_fn`, equal to `reward_attr`
        of scheduler
    resource_attribute : str
        Name of resource (or time) attribute reported by `train_fn`, equal to
        `time_attr` of scheduler
    debug_log : bool (default: False)
        If True, both searcher and scheduler output an informative log, from
        which the configs chosen and decisions being made can be traced.
    first_is_default : bool (default: True)
        If True, the first config to be evaluated is the default one of the
        config space. Otherwise, this first config is drawn at random.
    random_seed : int
        Seed for pseudo-random number generator used.
    num_init_random : int
        See `GPFIFOSearcher`
    num_init_candidates : int
        See `GPFIFOSearcher`
    num_fantasy_samples : int
        See `GPFIFOSearcher`
    initial_scoring : str
        See `GPFIFOSearcher`
    opt_nstarts : int
        See `GPFIFOSearcher`
    opt_maxiter : int
        See `GPFIFOSearcher`
    opt_warmstart : bool
        See `GPFIFOSearcher`
    opt_verbose : bool
        See `GPFIFOSearcher`
    opt_skip_init_length : int
        See `GPFIFOSearcher`
    opt_skip_period : int
        See `GPFIFOSearcher`
    map_reward : str or MapReward (default: '1_minus_x')
        See `GPFIFOSearcher`
    gp_resource_kernel : str
        Surrogate model over criterion function f(x, r), x the config, r the
        resource. Note that x is encoded to be a vector with entries in [0, 1],
        and r is linearly mapped to [0, 1], while the criterion data is
        normalized to mean 0, variance 1. The reference above provides details
        on the models supported here. For the exponential decay kernel, the
        base kernel over x is Matern 5/2 ARD.
        Values are 'matern52' (Matern 5/2 ARD kernel over [x, r]),
        'matern52-res-warp' (Matern 5/2 ARD kernel over [x, r], with additional
        warping on r),
        'exp-decay-sum' (exponential decay kernel, with delta=0. This is the
        additive kernel from Freeze-Thaw Bayesian Optimization),
        'exp-decay-delta1' (exponential decay kernel, with delta=1),
        'exp-decay-combined' (exponential decay kernel, with delta in [0, 1]
        a hyperparameter).
    resource_acq : str
        Determines how the EI acquisition function is used (see above).
        Values: 'bohb', 'first'
    opt_skip_num_max_resource : bool
        Parameter for hyperparameter fitting, skip predicate. If True, and
        number of observations above `opt_skip_init_length`, fitting is done
        only when there is a new datapoint at r = max_t, and skipped otherwise.

    Examples
    --------
    >>> import numpy as np
    >>> import autogluon as ag
    >>>
    >>> @ag.args(
    ...     lr=ag.space.Real(1e-3, 1e-2, log=True),
    ...     wd=ag.space.Real(1e-3, 1e-2))
    >>> def train_fn(args, reporter):
    ...     print('lr: {}, wd: {}'.format(args.lr, args.wd))
    ...     for e in range(9):
    ...         dummy_accuracy = 1 - np.power(1.8, -np.random.uniform(e, 2*e))
    ...         reporter(epoch=e+1, accuracy=dummy_accuracy, lr=args.lr,
    ...                  wd=args.wd)
    >>> searcher_options = {
    ...     'gp_resource_kernel': 'matern52-res-warp',
    ...     'opt_skip_num_max_resource': True}
    >>> scheduler = ag.scheduler.HyperbandScheduler(
    ...     train_fn, searcher='bayesopt', searcher_options=searcher_options,
    ...     num_trials=10, reward_attr='accuracy', time_attr='epoch',
    ...     max_t=10, grace_period=1, reduction_factor=3)

    See Also
    --------
    GPFIFOSearcher
    """
    def __init__(self, **kwargs):
        # `_gp_searcher` is an internal kwarg: `clone_from_state` passes a
        # ready-made searcher here (copy construction); otherwise one is
        # built by the factory from `kwargs` merged with the defaults.
        _gp_searcher = kwargs.get('_gp_searcher')
        if _gp_searcher is None:
            _kwargs = check_and_merge_defaults(
                kwargs, *gp_multifidelity_searcher_defaults(),
                dict_name='search_options')
            _gp_searcher = gp_multifidelity_searcher_factory(**_kwargs)
        super().__init__(
            _gp_searcher.hp_ranges.config_space,
            reward_attribute=kwargs.get('reward_attribute'))
        self.gp_searcher = _gp_searcher
        self._resource_attribute = kwargs.get('resource_attribute')
        # This lock protects gp_searcher. We are not using self.LOCK, this
        # can lead to deadlocks when superclass methods are called
        self._gp_lock = mp.Lock()
    def configure_scheduler(self, scheduler):
        """Check scheduler type and pull the resource mapping from it."""
        from ..scheduler.hyperband import HyperbandScheduler
        assert isinstance(scheduler, HyperbandScheduler), \
            "This searcher requires HyperbandScheduler scheduler"
        super().configure_scheduler(scheduler)
        with self._gp_lock:
            self.gp_searcher.set_map_resource_to_index(
                scheduler.map_resource_to_index())
        # Overrides whatever was passed at construction time.
        self._resource_attribute = scheduler._time_attr
    def get_config(self, **kwargs):
        """Suggest the next config to evaluate, as a plain dict.
        `kwargs` is forwarded to the internal searcher's `get_config`.
        """
        with self._gp_lock:
            config_cs = self.gp_searcher.get_config(**kwargs)
            return config_cs.get_dictionary()
    def update(self, config, **kwargs):
        """Feed one (config, reward, resource) observation to the model.
        `kwargs` must contain values under `self._reward_attribute` and
        `self._resource_attribute`.
        """
        super().update(config, **kwargs)
        with self._gp_lock:
            config_cs = self._to_config_cs(config)
            self.gp_searcher.update(
                config_cs, reward=kwargs[self._reward_attribute],
                resource=int(kwargs[self._resource_attribute]))
            # If evaluation task has terminated, cleanup pending evaluations
            # which may have been overlooked
            if kwargs.get('terminated', False):
                self.gp_searcher.cleanup_pending(config_cs)
    def register_pending(self, config, milestone=None):
        """Mark `config` as pending up to resource level `milestone`."""
        assert milestone is not None, \
            "This searcher works with a multi-fidelity scheduler only"
        with self._gp_lock:
            config_cs = self._to_config_cs(config)
            self.gp_searcher.register_pending(config_cs, milestone)
    def remove_case(self, config, **kwargs):
        """Drop the observation for `config` at the given resource level."""
        with self._gp_lock:
            config_cs = self._to_config_cs(config)
            self.gp_searcher.remove_case(
                config_cs, resource=int(kwargs[self._resource_attribute]))
    def evaluation_failed(self, config, **kwargs):
        """Inform the internal searcher that evaluating `config` failed."""
        with self._gp_lock:
            config_cs = self._to_config_cs(config)
            self.gp_searcher.evaluation_failed(config_cs)
    def dataset_size(self):
        """Number of observations the internal searcher holds."""
        with self._gp_lock:
            return self.gp_searcher.dataset_size()
    def cumulative_profile_record(self):
        """Profiling record accumulated by the internal searcher."""
        with self._gp_lock:
            return self.gp_searcher.cumulative_profile_record()
    def model_parameters(self):
        """Current surrogate model (hyper)parameters."""
        with self._gp_lock:
            return self.gp_searcher.get_params()
    def get_state(self):
        """Snapshot of the internal searcher state (see `clone_from_state`)."""
        with self._gp_lock:
            return self.gp_searcher.get_state()
    def clone_from_state(self, state):
        """Build a new GPMultiFidelitySearcher from a `get_state` snapshot."""
        with self._gp_lock:
            _gp_searcher = self.gp_searcher.clone_from_state(state)
            # Use copy constructor
            return GPMultiFidelitySearcher(
                reward_attribute=self._reward_attribute,
                resource_attribute=self._resource_attribute,
                _gp_searcher=_gp_searcher)
    @property
    def debug_log(self):
        """Debug log object of the internal GP searcher."""
        with self._gp_lock:
            return self.gp_searcher.debug_log
    def _to_config_cs(self, config):
        # Delegate to the module-level helper, binding our config space.
        return _to_config_cs(self.gp_searcher.hp_ranges.config_space, config)
|
# Write an efficient algorithm that searches for a target value in an
# m x n matrix. The matrix has the following properties:
#   - Integers in each row are sorted from left to right.
#   - The first integer of each row is greater than the last integer of
#     the previous row.
#
# Example 1:
#   Input: matrix = [[1,3,5,7],
#                    [10,11,16,20],
#                    [23,30,34,50]], target = 3
#   Output: true
# Example 2:
#   Input: matrix = [[1,3,5,7],
#                    [10,11,16,20],
#                    [23,30,34,50]], target = 13
#   Output: false
# Example 3:
#   Input: matrix = [], target = 0
#   Output: false
#
# Constraints:
#   m == matrix.length
#   n == matrix[i].length
#   0 <= m, n <= 100
#   -10^4 <= matrix[i][j], target <= 10^4
from typing import List
# O(log m + log n): binary search for the candidate row, then binary
# search within it (previously an O(m log n) linear row scan).
class Solution:
    """Search a target in an m x n matrix whose rows are sorted and whose
    row starts strictly increase (i.e. the matrix reads as one sorted list).
    """

    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Return True iff `target` occurs in `matrix`.

        Handles empty matrices and empty rows (constraints allow m or n
        to be 0); the original crashed with IndexError on `[[]]`.
        """
        if not matrix or not matrix[0]:
            return False
        if target < matrix[0][0] or target > matrix[-1][-1]:
            return False
        # Binary search for the last row whose first element is <= target;
        # by the matrix properties it is the only row that can hold target.
        low, high = 0, len(matrix) - 1
        while low < high:
            mid = (low + high + 1) // 2
            if matrix[mid][0] <= target:
                low = mid
            else:
                high = mid - 1
        return self.bin_search(matrix[low], target)

    def bin_search(self, nums: List[int], target: int) -> bool:
        """Binary search without returning target index."""
        low, high = 0, len(nums) - 1
        while low <= high:
            mid = (low + high) // 2
            if target == nums[mid]:
                return True
            elif target < nums[mid]:
                high = mid - 1
            else:
                low = mid + 1
        return False
import logging
from pandas import Categorical
from core import prepare
from core import draw
# Benchmark/experiment identifiers used in output file names.
BENCH_NAME = 'parsec_var_input'
EXP_NAME = BENCH_NAME
# Fixed left-to-right order of benchmarks on the plot axis.
BENCHMARK_ORDER = (
    "blackscholes",
    "streamcluster",
    "swaptions",
    "canneal",
)
# Per-metric annotation entries passed to the plot as "text_points".
# Presumably (x, y, label) coordinates for bars that overflow the
# y-range -- confirm against the draw module.
OVERFLOWS = {
    "perf": (
        (-1.87, 8.25, "~12",),
    ),
    "mem": (
        (-10.07, 6.85, "21-26",),  # streamcluster
        (-4.87, 6.85, "9-26",),  # swaptions
    ),
}
def filter_inputs(df):
    """Fix the input ordering 0 < 1 < 2 and sort the frame by it.

    Mutates `df` in place: the "input" column is converted to an ordered
    Categorical so the rows (and later plot groups) follow a fixed order.
    """
    input_order = [0, 1, 2]
    df["input"] = Categorical(df["input"], input_order, ordered=True)
    df.sort_values(["input"], inplace=True)
def process_type(t, df, plot_args, benchmark_order):
    """Prepare the data frame and build the plot object for metric `t`.

    t : "perf" (runtime overhead over "time") or "mem" (memory overhead
        over "maxsize").
    Returns (plot, columns); `columns` is currently always empty.
    Raises ValueError on an unsupported `t` (the original fell through
    and crashed later with UnboundLocalError on `plot`).
    """
    if t == "perf":
        column = "time"
        ylabel = "Normalized runtime\n(w.r.t. native)"
    elif t == "mem":
        column = "maxsize"
        ylabel = "Memory overhead\n(w.r.t. native)"
    else:
        raise ValueError("Unsupported measurement type: %r" % (t,))
    # NOTE(review): calculate_overhead returns a frame that is dropped at
    # return; the caller only sees in-place mutations. This mirrors the
    # original control flow -- confirm it is intended.
    df = prepare.calculate_overhead(df, column=column)
    prepare.reorder_and_rename_benchmarks(df, benchmark_order)
    prepare.reorder_compilers(df, t)
    filter_inputs(df)
    plot = draw.VarBarplotOverhead()
    plot_args.update({
        "ylabel": ylabel,
        "logy": True,
    })
    return plot, []
def main(t="perf"):
    """Process results for metric `t` ("perf" or "mem") and save the plot.

    The figure is written as "<BENCH_NAME>_<t>.pdf".
    """
    logging.info("Processing data")
    # common processing
    df = prepare.process_results(t)
    plot_args = {
        "ylim": (0.8, 10),
        "vline_position": 11.6,
        "title": "PARSEC",
        # Annotations for bars overflowing the y-range (empty if none).
        "text_points": OVERFLOWS.get(t, ())
    }
    plot, columns = process_type(t, df, plot_args, BENCHMARK_ORDER)
    plot.get_data(df, columns)
    plot.build_plot(**plot_args)
    plot.save_plot("%s_%s.pdf" % (BENCH_NAME, t))
|
##
## Artificial Intelligence Final Project
##
## ####
##
## James Clisham
##
##
## Ant Colony Optimization implemented in a variety of ways to solve the graph coloring problem.
## Primary focus on manipulating the various different components of ACO and pitting them against
## each other in trials.
##
import os, sys, time, random, curses
#
# DEBUG ISSUES
#
# If the program is crashing randomly on startup with an error saying something about addch error, or any kind of
# window initialization error, then the window is probably not large enough for the curses output. I'm developing
# and testing on a size of 1024x768, so running the program in a terminal at least as large as that shouldn't have
# this issue.
#
# ** Further Note **
# If experimenting with the number of nodes, the program will crash if the nodes go offscreen due to curses'
# handling of drawing. This could be fixed by having everything print out to a 'pad' instead of a 'window', if
# anyone so desires to go through the arduous process of doing so. I hate to phrase it like that, but hey, time
# limits.
#
#
# GLOBAL VALUES
#
NUM_OF_NODES_IN_GRAPH = 20  # total nodes generated for the graph
MAX_Y_VALUE_OF_GRAPH_NODE = 5  # vertical bound for node placement
NUM_OF_CYCLES = 1000  # upper bound on solver cycles per run
NUM_OF_ANTS = 5  # ants placed on the graph
PHEROMONE_STRENGTH = .05  # pheromone deposited per local conflict (Alg2/3)
INITIAL_PHEROMONE_QUANTITY = .5  # starting pheromone level of every node
PHEROMONE_DECAY_RATE = .02  # per-cycle pheromone decay
MOVEMENT_PHEROMONE_DECAY = .1  # extra movement-driven decay (Alg3 only)
RANDOM_MOVE_CHANCE = 30  # weight of random movement in the move decision
PHEROMONE_MOVE_CHANCE = 70  # weight of pheromone-guided movement
# Delay Value
#
# For delaying output for long enough between cycles to be visible to the human eye
#
# NOTICE: I am writing and testing this code on a laptop from 1999, and have less than 256mb of RAM.
# The delay will likely need to be adjusted for any other computer running this code.
#
# It is implemented as a counter and has to reach the specified value each frame before the frame is
# actually processed. 100 works for me, but I'm willing to bet any modern computer will need a significantly
# larger number for testing.
#
# With a value of 0, there is no delay and it will skip straight to the end of the calculation.
#
DISPLAY_DELAY = 0
#
# CLASS DEFINITIONS
#
class Ant:
    """A single ant agent, tracked only by its screen position."""

    def __init__(self):
        # Both coordinates start at the origin; the colony later places
        # the ant on a random graph node before solving begins.
        self.xCoord = 0
        self.yCoord = 0
#
# AntColony Algorithm 1
#
# -Random movement
# -Random color selection based on local conflicts
#
#
#
class AntColonyAlg1:
    """Ant colony variant 1: purely random movement.

    Ants wander the graph at random; whenever an ant sits on a node with
    a local color conflict (or an uncolored neighbor), that node is
    recolored at random. No pheromones are used.
    """
    def __init__(self,solution,numberOfAnts,numberOfCycles,displayDelay): #Initialization of class variables
        # if there are more ants than nodes, set number of ants to number of nodes
        if(numberOfAnts>solution.numOfNodes):
            numberOfAnts = solution.numOfNodes
        self.numberOfAnts = numberOfAnts
        self.numberOfCycles = numberOfCycles
        self.randomMoveChance = 200 # Always chooses random movement over pheromone movement
        self.displayDelay = displayDelay
        self.antList = []
        for i in range(0,numberOfAnts): # Sets up initial ant list
            newAnt = Ant()
            self.antList.append(newAnt)
    def solve(self,solution,outputWindow): # Main Solving Loop
        # The helpers below are nested so they close over `self` and the
        # method arguments; they are not part of the public interface.
        # ##
        # determineNumOfConflictingNodes
        # ##
        #
        # Returns the number of overall conflicting nodes in the entire problem (not used in this algorithm)
        #
        def determineNumOfConflictingNodes(solution):
            conflictingNum = 0
            for yList in solution.nodeList:
                for eachNode in yList:
                    for i in eachNode.connectedNodeList:
                        if(i.color==eachNode.color):
                            conflictingNum += 1
            return conflictingNum
        # ##
        # determineNumOfLocalConflicts
        # ##
        #
        # Returns the number of local color conflicts around the specified node
        # (uncolored nodes, color == 0, count as conflicts too)
        #
        def determineNumOfLocalConflicts(node):
            localConflicts = 0
            for i in node.connectedNodeList:
                if(i.color==node.color or node.color==0 or i.color==0):
                    localConflicts += 1
            return localConflicts
        # ##
        # changeColorOfNode
        # ##
        #
        # Randomly picks a new color (1..3) for the specified node,
        # guaranteed to differ from the current color
        #
        def changeColorOfNode(node):
            colorDifferent = False
            while(colorDifferent==False):
                newColor = random.randrange(1,4)
                if(node.color==newColor):
                    pass
                else:
                    node.color = newColor
                    colorDifferent = True
        # ##
        # moveAnt
        # ##
        #
        # Randomly chooses a new location based on the neighboring nodes of the ant.
        # Will not move into a node currently occupied by another ant.
        #
        def moveAnt(solution,ant):
            # Screen coordinates are divided by 6 to index the node grid
            # -- presumably the node spacing used by the drawing code.
            neighboringNodes = []
            neighboringNodes.extend(solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)].connectedNodeList)
            nodesToRemove = []
            for i in neighboringNodes: # collects neighbors occupied by other ants
                for g in self.antList:
                    if(g.xCoord==i.xCoord and g.yCoord==i.yCoord):
                        nodesToRemove.append(i)
            for eachNode in nodesToRemove:
                try:
                    neighboringNodes.remove(eachNode)
                except:
                    pass
            if(neighboringNodes==[]): # surrounded by other ants, so stays in current place
                pass
            else: # goes to random node
                nodeChoice = random.randrange(0,len(neighboringNodes))
                ant.xCoord = neighboringNodes[nodeChoice].xCoord
                ant.yCoord = neighboringNodes[nodeChoice].yCoord
        # Initial ant setup
        for ant in self.antList: # places each ant in a random and unique location in the graph
            antLocationUnique = False
            while(antLocationUnique==False):
                randX = random.randrange(0,len(solution.nodeList))
                randY = random.randrange(0,len(solution.nodeList[randX]))
                testXCoord = solution.nodeList[randX][randY].xCoord
                testYCoord = solution.nodeList[randX][randY].yCoord
                antConflict = 0
                for eachAnt in self.antList: # makes sure no other ant currently occupies the chosen space
                                             # otherwise, flags to not end the loop
                    if(eachAnt.xCoord==testXCoord and eachAnt.yCoord == testYCoord):
                        antConflict = 1
                if(antConflict==0):
                    antLocationUnique = True
                    ant.xCoord = testXCoord
                    ant.yCoord = testYCoord
        #
        # SOLUTION MAIN LOOP
        #
        # Delay Values
        #
        # For delaying output for long enough between cycles to be visible to the human eye
        #
        # NOTICE: I am writing and testing this code on a laptop from 1999, and have less than 256mb of RAM.
        # The delay will likely need to be adjusted for any other computer running this code.
        #
        # SEE GLOBAL VALUES AT TOP FOR FURTHER EXPLANATION (don't change these values, change the one at the top)
        displayDelay = self.displayDelay
        displayCounter = displayDelay
        currentCycle = 1
        cyclesToRun = self.numberOfCycles
        solutionSolved = False
        while(solutionSolved==False and cyclesToRun>0):
            if(displayCounter>=displayDelay):
                displayCounter = 0
                # Display cycle number
                cycleString = "Cycle Number: "+str(currentCycle)
                outputWindow.addstr(0,0,cycleString)
                stringLength = len(str(currentCycle))
                outputWindow.addstr(0,14+stringLength,' ')
                # Determines whether current solution is a solved state
                if(solution.isSolutionState()==True):
                    solutionSolved = True
                numOfConflictingNodes = determineNumOfConflictingNodes(solution)
                for ant in self.antList:
                    # Determines whether current solution is a solved state
                    if(solution.isSolutionState()==True):
                        solutionSolved = True
                    # Finds the working node
                    currentNode = solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)]
                    # Determines the number of local conflicts (including whether nearby nodes are uncolored or not)
                    numOfLocalConflicts = determineNumOfLocalConflicts(currentNode)
                    # If there are conflicts, swap the color
                    if(numOfLocalConflicts==0):
                        pass
                    else:
                        changeColorOfNode(currentNode)
                    # Process ant movement
                    moveAnt(solution,ant)
                # Display output
                solution.display(outputWindow)
                solution.drawAnts(outputWindow,self.antList)
                outputWindow.refresh()
                # Iterate cycle counters
                currentCycle += 1
                cyclesToRun -= 1
            else:
                # Iterate display counter
                displayCounter += 1
        # Display output
        solution.display(outputWindow)
        solution.drawAnts(outputWindow,self.antList)
        outputWindow.refresh()
        return currentCycle
#
# AntColony Algorithm 2
#
# -Random and pheromone-based movement
# -Random color selection based on local conflicts
#
# -Each node is filled with a given initial amount of pheromones, and pheromone level decays over time.
#
class AntColonyAlg2:
    """Ant colony variant 2: random plus pheromone-guided movement.

    Every node starts with an initial pheromone quantity which decays each
    cycle; conflicted nodes receive pheromone, attracting ants toward
    trouble spots. Movement is chosen randomly between a random step and a
    step toward the neighbor with the most pheromone, weighted by
    `randomMoveChance` vs `pheromoneMoveChance`.
    """
    def __init__(self,solution,numberOfAnts,numberOfCycles,pheromoneStrength,initialPheromoneQuantity,pheromoneDecayRate,displayDelay,randomMoveChance,pheromoneMoveChance): #Initialization of class variables
        # if there are more ants than nodes, set number of ants to number of nodes
        if(numberOfAnts>solution.numOfNodes):
            numberOfAnts = solution.numOfNodes
        self.numberOfAnts = numberOfAnts
        self.numberOfCycles = numberOfCycles
        self.initialPheromoneQuantity = initialPheromoneQuantity
        self.pheromoneStrength = pheromoneStrength
        self.pheromoneDecayRate = pheromoneDecayRate
        # Bug fix: honor the constructor arguments instead of hard-coding
        # 30/70 -- the two parameters were previously accepted but ignored.
        self.randomMoveChance = randomMoveChance
        self.pheromoneMoveChance = pheromoneMoveChance
        self.displayDelay = displayDelay
        self.antList = []
        for i in range(0,numberOfAnts): # Sets up initial ant list
            newAnt = Ant()
            self.antList.append(newAnt)
    def solve(self,solution,outputWindow): # Main Solving Loop
        # The helpers below are nested so they close over `self` and the
        # method arguments; they are not part of the public interface.
        # ##
        # determineNumOfConflictingNodes
        # ##
        #
        # Returns the number of overall conflicting nodes in the entire problem (not used in this algorithm)
        #
        def determineNumOfConflictingNodes(solution):
            conflictingNum = 0
            for yList in solution.nodeList:
                for eachNode in yList:
                    for i in eachNode.connectedNodeList:
                        if(i.color==eachNode.color):
                            conflictingNum += 1
            return conflictingNum
        # ##
        # determineNumOfLocalConflicts
        # ##
        #
        # Returns the number of local color conflicts around the specified node
        # (uncolored nodes, color == 0, count as conflicts too)
        #
        def determineNumOfLocalConflicts(node):
            localConflicts = 0
            for i in node.connectedNodeList:
                if(i.color==node.color or node.color==0 or i.color==0):
                    localConflicts += 1
            return localConflicts
        # ##
        # changeColorOfNode
        # ##
        #
        # Randomly picks a new color (1..3) for the specified node,
        # guaranteed to differ from the current color
        #
        def changeColorOfNode(node):
            colorDifferent = False
            while(colorDifferent==False):
                newColor = random.randrange(1,4)
                if(node.color==newColor):
                    pass
                else:
                    node.color = newColor
                    colorDifferent = True
        # ##
        # moveAnt
        # ##
        #
        # Chooses a new location among the ant's neighboring nodes: either a
        # random unoccupied neighbor, or the unoccupied neighbor with the
        # highest pheromone concentration, weighted by the move chances.
        # Will not move into a node currently occupied by another ant.
        #
        def moveAnt(solution,ant):
            moveDecision = random.randrange(0,self.randomMoveChance+self.pheromoneMoveChance)
            if(moveDecision<=self.randomMoveChance):
                # Screen coordinates are divided by 6 to index the node grid
                # -- presumably the node spacing used by the drawing code.
                neighboringNodes = []
                neighboringNodes.extend(solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)].connectedNodeList)
                nodesToRemove = []
                for i in neighboringNodes: # collects neighbors occupied by other ants
                    for g in self.antList:
                        if(g.xCoord==i.xCoord and g.yCoord==i.yCoord):
                            nodesToRemove.append(i)
                for eachNode in nodesToRemove:
                    try:
                        neighboringNodes.remove(eachNode)
                    except:
                        pass
                if(neighboringNodes==[]): # surrounded by other ants, so stays in current place
                    pass
                else: # goes to random node
                    nodeChoice = random.randrange(0,len(neighboringNodes))
                    ant.xCoord = neighboringNodes[nodeChoice].xCoord
                    ant.yCoord = neighboringNodes[nodeChoice].yCoord
            else: # move to neighbor with highest amount of pheromones
                neighboringNodes = []
                neighboringNodes.extend(solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)].connectedNodeList)
                removalList = []
                for i in neighboringNodes: # collects neighbors occupied by other ants
                    for g in self.antList:
                        if(g.xCoord==i.xCoord and g.yCoord==i.yCoord):
                            removalList.append(i)
                for i in removalList:
                    neighboringNodes.remove(i)
                if(neighboringNodes==[]): # surrounded by other ants
                    pass
                else: # goes to node with most pheromones, starting from a random candidate
                    currentHighestNode = neighboringNodes[random.randrange(0,len(neighboringNodes))]
                    for i in neighboringNodes:
                        if(i.pheromoneConcentration>currentHighestNode.pheromoneConcentration):
                            currentHighestNode = i
                    ant.xCoord = currentHighestNode.xCoord
                    ant.yCoord = currentHighestNode.yCoord
        # Initial ant setup
        for ant in self.antList: # places each ant in a random and unique location in the graph
            antLocationUnique = False
            while(antLocationUnique==False):
                randX = random.randrange(0,len(solution.nodeList))
                randY = random.randrange(0,len(solution.nodeList[randX]))
                testXCoord = solution.nodeList[randX][randY].xCoord
                testYCoord = solution.nodeList[randX][randY].yCoord
                antConflict = 0
                for eachAnt in self.antList: # makes sure no other ant currently occupies the chosen space
                                             # otherwise, flags to not end the loop
                    if(eachAnt.xCoord==testXCoord and eachAnt.yCoord == testYCoord):
                        antConflict = 1
                if(antConflict==0):
                    antLocationUnique = True
                    ant.xCoord = testXCoord
                    ant.yCoord = testYCoord
        # Seed every node with the initial pheromone quantity
        for yList in solution.nodeList:
            for eachNode in yList:
                eachNode.pheromoneConcentration = self.initialPheromoneQuantity
        #
        # SOLUTION MAIN LOOP
        #
        # Delay Values
        #
        # For delaying output for long enough between cycles to be visible to the human eye
        #
        # NOTICE: I am writing and testing this code on a laptop from 1999, and have less than 256mb of RAM.
        # The delay will likely need to be adjusted for any other computer running this code.
        #
        # SEE GLOBAL VALUES AT TOP FOR FURTHER EXPLANATION (don't change these values, change the one at the top)
        displayDelay = self.displayDelay
        displayCounter = displayDelay
        currentCycle = 1
        cyclesToRun = self.numberOfCycles
        solutionSolved = False
        while(solutionSolved==False and cyclesToRun>0):
            if(displayCounter>=displayDelay):
                displayCounter = 0
                # Decay all pheromones by some amount
                for yList in solution.nodeList:
                    for eachNode in yList:
                        eachNode.pheromoneConcentration -= self.pheromoneDecayRate
                # Display cycle number
                cycleString = "Cycle Number: "+str(currentCycle)
                outputWindow.addstr(0,0,cycleString)
                stringLength = len(str(currentCycle))
                outputWindow.addstr(0,14+stringLength,' ')
                # Determines whether current solution is a solved state
                if(solution.isSolutionState()==True):
                    solutionSolved = True
                numOfConflictingNodes = determineNumOfConflictingNodes(solution)
                for ant in self.antList:
                    # Determines whether current solution is a solved state
                    if(solution.isSolutionState()==True):
                        solutionSolved = True
                    # Finds the working node
                    currentNode = solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)]
                    # Determines the number of local conflicts (including whether nearby nodes are uncolored or not)
                    numOfLocalConflicts = determineNumOfLocalConflicts(currentNode)
                    # If there are conflicts, swap the color and add pheromones
                    if(numOfLocalConflicts==0):
                        pass
                    else:
                        changeColorOfNode(currentNode)
                        # NOTE(review): this SETS the pheromone level rather
                        # than accumulating it, although the surrounding
                        # comments say "add" -- confirm whether `+=` was
                        # intended. Behavior kept as in the original.
                        currentNode.pheromoneConcentration = numOfLocalConflicts * self.pheromoneStrength
                    # Process ant movement
                    moveAnt(solution,ant)
                # Display output
                solution.display(outputWindow)
                solution.drawAnts(outputWindow,self.antList)
                outputWindow.refresh()
                # Iterate cycle counters
                currentCycle += 1
                cyclesToRun -= 1
            else:
                # Iterate display counter
                displayCounter += 1
        # Display output
        solution.display(outputWindow)
        solution.drawAnts(outputWindow,self.antList)
        outputWindow.refresh()
        return currentCycle
#
# AntColony Algorithm 3
#
# -Random and pheromone-based movement
# -Random color selection based on local conflicts
#
# -Each node is filled with a given initial amount of pheromones, and pheromone level decays over time.
# -In addition, ant movement speeds up decay of pheromone levels in areas with no conflict
#
class AntColonyAlg3:
def __init__(self,solution,numberOfAnts,numberOfCycles,pheromoneStrength,initialPheromoneQuantity,pheromoneDecayRate,displayDelay,randomMoveChance,pheromoneMoveChance,movementPheromoneDecay): #Initialization of class variables
# if there are more ants than nodes, set number of ants to number of nodes
if(numberOfAnts>solution.numOfNodes):
numberOfAnts = solution.numOfNodes
self.numberOfAnts = numberOfAnts
self.numberOfCycles = numberOfCycles
self.initialPheromoneQuantity = initialPheromoneQuantity
self.pheromoneStrength = pheromoneStrength
self.pheromoneDecayRate = pheromoneDecayRate
self.movementPheromoneDecay = movementPheromoneDecay
self.randomMoveChance = 30
self.pheromoneMoveChance = 70
self.displayDelay = displayDelay
self.antList = []
for i in range(0,numberOfAnts): # Sets up initial ant list
newAnt = Ant()
self.antList.append(newAnt)
def solve(self,solution,outputWindow): # Main Solving Loop
    """Run the ant-colony colouring loop.

    Seeds every node with the initial pheromone quantity, places each ant
    on a unique random node, then cycles: decay all pheromones, let each
    ant recolour its node (or decay it further when conflict-free) and
    move, and redraw the curses display.

    Parameters:
        solution: the GraphSolution being coloured (mutated in place).
        outputWindow: curses window all drawing goes to.

    Returns:
        int: the cycle number reached when the loop ended (solved, or
        cycle budget exhausted).
    """
    # ##
    # determineNumOfConflictingNodes
    # ##
    #
    # Returns the number of overall conflicting nodes in the entire problem (not used in this algorithm)
    #
    def determineNumOfConflictingNodes(solution):
        conflictingNum = 0
        for yList in solution.nodeList:
            for eachNode in yList:
                for i in eachNode.connectedNodeList:
                    if(i.color==eachNode.color):
                        conflictingNum += 1
        return conflictingNum
    # ##
    # determineNumOfLocalConflicts
    # ##
    #
    # Returns the number of local color conflicts around the specified node.
    # An uncolored node (color 0) on either side of an edge also counts as a conflict.
    #
    def determineNumOfLocalConflicts(node):
        localConflicts = 0
        for i in node.connectedNodeList:
            if(i.color==node.color or node.color==0 or i.color==0):
                localConflicts += 1
        return localConflicts
    # ##
    # changeColorOfNode
    # ##
    #
    # Randomly picks a new color for the specified node
    # (loops until the draw differs from the current color)
    #
    def changeColorOfNode(node):
        colorDifferent = False
        while(colorDifferent==False):
            newColor = random.randrange(1,4)
            if(node.color==newColor):
                pass
            else:
                node.color = newColor
                colorDifferent = True
    # ##
    # moveAnt
    # ##
    #
    # Randomly chooses a new location based on the neighboring nodes of the ant.
    # Will not move into a node currently occupied by another ant.
    #
    def moveAnt(solution,ant):
        # NOTE(review): `<=` gives the random branch randomMoveChance+1 of the
        # randomMoveChance+pheromoneMoveChance outcomes - confirm the slight
        # bias over the nominal percentage is intended.
        moveDecision = random.randrange(0,self.randomMoveChance+self.pheromoneMoveChance)
        if(moveDecision<=self.randomMoveChance):
            # Node cells are laid out 6 characters apart with a +3 margin (see
            # GraphSolution), so integer-dividing a screen coordinate by 6
            # recovers the grid index.
            neighboringNodes = []
            neighboringNodes.extend(solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)].connectedNodeList)
            nodesToRemove = []
            for i in neighboringNodes: # builds a list of neighboring unoccupied nodes
                for g in self.antList:
                    if(g.xCoord==i.xCoord and g.yCoord==i.yCoord):
                        nodesToRemove.append(i)
            for eachNode in nodesToRemove:
                try:
                    neighboringNodes.remove(eachNode)
                except:
                    # a node can be queued more than once if several ants match it;
                    # the second remove() raises ValueError and is deliberately ignored
                    pass
            if(neighboringNodes==[]): # surrounded by other ants, so stays in current place
                pass
            else: # goes to random node
                nodeChoice = random.randrange(0,len(neighboringNodes))
                ant.xCoord = neighboringNodes[nodeChoice].xCoord
                ant.yCoord = neighboringNodes[nodeChoice].yCoord
        else: # move to neighbor with highest amount of pheromones
            neighboringNodes = []
            neighboringNodes.extend(solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)].connectedNodeList)
            removalList = []
            for i in neighboringNodes: # builds a list of neighboring unoccupied nodes
                for g in self.antList:
                    if(g.xCoord==i.xCoord and g.yCoord==i.yCoord):
                        removalList.append(i)
            # NOTE(review): unlike the branch above, this remove() is not guarded
            # against duplicates in removalList - confirm duplicates cannot occur here
            for i in removalList:
                neighboringNodes.remove(i)
            if(neighboringNodes==[]): # surrounded by other ants
                pass
            else: # goes to node with most pheromones, otherwise picks a random one
                # random starting candidate so ties do not always favour the first neighbor
                currentHighestNode = neighboringNodes[random.randrange(0,len(neighboringNodes))]
                for i in neighboringNodes:
                    if(i.pheromoneConcentration>currentHighestNode.pheromoneConcentration):
                        currentHighestNode = i
                ant.xCoord = currentHighestNode.xCoord
                ant.yCoord = currentHighestNode.yCoord
    # Initial ant setup
    for ant in self.antList: # places each ant in a random and unique location in the graph
        antLocationUnique = False
        while(antLocationUnique==False):
            randX = random.randrange(0,len(solution.nodeList))
            randY = random.randrange(0,len(solution.nodeList[randX]))
            testXCoord = solution.nodeList[randX][randY].xCoord
            testYCoord = solution.nodeList[randX][randY].yCoord
            antConflict = 0
            for eachAnt in self.antList: # makes sure no other ant is currently occupying the chosen space
                # otherwise, flags to not end the loop
                if(eachAnt.xCoord==testXCoord and eachAnt.yCoord == testYCoord):
                    antConflict = 1
            if(antConflict==0):
                antLocationUnique = True
                ant.xCoord = testXCoord
                ant.yCoord = testYCoord
    # every node starts out with the same pheromone level
    for yList in solution.nodeList:
        for eachNode in yList:
            eachNode.pheromoneConcentration = self.initialPheromoneQuantity
    #
    # SOLUTION MAIN LOOP
    #
    # Delay Values
    #
    # For delaying output for long enough between cycles to be visible to the human eye
    #
    # NOTICE: I am writing and testing this code on a laptop from 1999, and have less than 256mb of RAM.
    # The delay will likely need to be adjusted for any other computer running this code.
    #
    # SEE GLOBAL VALUES AT TOP FOR FURTHER EXPLANATION (don't change these values, change the one at the top)
    displayDelay = self.displayDelay
    # starting the counter at the threshold makes the very first iteration a "work" cycle
    displayCounter = displayDelay
    currentCycle = 1
    cyclesToRun = self.numberOfCycles
    solutionSolved = False
    while(solutionSolved==False and cyclesToRun>0):
        if(displayCounter>=displayDelay):
            displayCounter = 0
            # Decay all pheromones by some amount
            for yList in solution.nodeList:
                for eachNode in yList:
                    eachNode.pheromoneConcentration -= self.pheromoneDecayRate
            # Display cycle number
            cycleString = "Cycle Number: "+str(currentCycle)
            outputWindow.addstr(0,0,cycleString)
            stringLength = len(str(currentCycle))
            # blank the cell after the number in case a longer number was shown before
            outputWindow.addstr(0,14+stringLength,' ')
            # Determines whether current solution is a solved state
            if(solution.isSolutionState()==True):
                solutionSolved = True
            # overall conflict count - computed but unused by this algorithm
            numOfConflictingNodes = determineNumOfConflictingNodes(solution)
            for ant in self.antList:
                # Determines whether current solution is a solved state
                if(solution.isSolutionState()==True):
                    solutionSolved = True
                # Finds the working node
                currentNode = solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)]
                # Determines the number of local conflicts (including whether nearby nodes are uncolored or not)
                numOfLocalConflicts = determineNumOfLocalConflicts(currentNode)
                # If there are no conflicts, decay the current node's pheromones by given rate
                if(numOfLocalConflicts==0):
                    currentNode.pheromoneConcentration -= self.movementPheromoneDecay
                else:
                    changeColorOfNode(currentNode)
                    # Sets (not adds) pheromones proportional to the amount of conflicts around the current node
                    currentNode.pheromoneConcentration = numOfLocalConflicts * self.pheromoneStrength
                # Process ant movement
                moveAnt(solution,ant)
            # Display output
            solution.display(outputWindow)
            solution.drawAnts(outputWindow,self.antList)
            outputWindow.refresh()
            # Iterate cycle counters
            currentCycle += 1
            cyclesToRun -= 1
        else:
            # Iterate display counter
            displayCounter += 1
            # Display output
            solution.display(outputWindow)
            solution.drawAnts(outputWindow,self.antList)
            outputWindow.refresh()
    return currentCycle
#
# AntColony Algorithm 4
#
# -Random and pheromone-based movement
#
# -Each node is filled with a given initial amount of pheromones, and pheromone level decays over time.
# -In addition, ant movement speeds up decay of pheromone levels in areas with no conflict
#
class AntColonyAlg4:
    """Ant-colony graph colouring, variant 4.

    Same machinery as Algorithm 3 (random plus pheromone-guided movement,
    initial pheromone seeding, global decay, and extra decay when an ant
    crosses a conflict-free node), except that recolouring picks a
    locally-best colour: one that no neighbour currently uses, falling
    back to a random recolour only when all three colours appear among
    the neighbours.
    """
    def __init__(self,solution,numberOfAnts,numberOfCycles,pheromoneStrength,initialPheromoneQuantity,pheromoneDecayRate,displayDelay,randomMoveChance,pheromoneMoveChance,movementPheromoneDecay): #Initialization of class variables
        """Store the tuning parameters and build the ant population."""
        # if there are more ants than nodes, set number of ants to number of nodes
        if(numberOfAnts>solution.numOfNodes):
            numberOfAnts = solution.numOfNodes
        self.numberOfAnts = numberOfAnts
        self.numberOfCycles = numberOfCycles
        self.initialPheromoneQuantity = initialPheromoneQuantity
        self.pheromoneStrength = pheromoneStrength
        self.pheromoneDecayRate = pheromoneDecayRate
        self.movementPheromoneDecay = movementPheromoneDecay
        # BUG FIX: these were hard-coded to 30 and 70, silently discarding the
        # randomMoveChance/pheromoneMoveChance arguments supplied by the caller.
        self.randomMoveChance = randomMoveChance
        self.pheromoneMoveChance = pheromoneMoveChance
        self.displayDelay = displayDelay
        self.antList = []
        for i in range(0,numberOfAnts): # Sets up initial ant list
            newAnt = Ant()
            self.antList.append(newAnt)
    def solve(self,solution,outputWindow): # Main Solving Loop
        """Run the colouring loop; returns the cycle number reached when
        the loop ended (solved, or cycle budget exhausted)."""
        # Returns the number of overall conflicting nodes in the entire
        # problem (computed each cycle but unused by this algorithm)
        def determineNumOfConflictingNodes(solution):
            conflictingNum = 0
            for yList in solution.nodeList:
                for eachNode in yList:
                    for i in eachNode.connectedNodeList:
                        if(i.color==eachNode.color):
                            conflictingNum += 1
            return conflictingNum
        # Returns the number of local color conflicts around the specified node.
        # An uncolored node (color 0) on either side of an edge also counts as a conflict.
        def determineNumOfLocalConflicts(node):
            localConflicts = 0
            for i in node.connectedNodeList:
                if(i.color==node.color or node.color==0 or i.color==0):
                    localConflicts += 1
            return localConflicts
        # Randomly changes the color of the current node; loops until the
        # draw differs from the node's current color.
        def changeColorOfNodeRandom(node):
            colorDifferent = False
            while(colorDifferent==False):
                newColor = random.randrange(1,4)
                if(node.color!=newColor):
                    node.color = newColor
                    colorDifferent = True
        # Changes the color of the current node to a locally-best choice:
        # a color no neighbour currently uses, picked at random among the
        # candidates. Falls back to a random recolour when every color is taken.
        def changeColorOfNode(node):
            neighborColors = []
            for i in node.connectedNodeList:
                neighborColors.append(i.color)
            # (rewritten: the previous version removed entries from
            # possibleColors while iterating over it, which skips elements)
            possibleColors = [c for c in (1,2,3) if c not in neighborColors]
            if(possibleColors==[]):
                # surrounded by all possible colors, so no local best is possible
                changeColorOfNodeRandom(node)
            else:
                # picks randomly from the locally acceptable colors
                node.color = possibleColors[random.randrange(0,len(possibleColors))]
        # Randomly chooses a new location based on the neighboring nodes of the ant.
        # Will not move into a node currently occupied by another ant.
        def moveAnt(solution,ant):
            moveDecision = random.randrange(0,self.randomMoveChance+self.pheromoneMoveChance)
            if(moveDecision<=self.randomMoveChance):
                # node cells are 6 characters apart, so dividing a screen
                # coordinate by 6 recovers the grid index
                neighboringNodes = []
                neighboringNodes.extend(solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)].connectedNodeList)
                nodesToRemove = []
                for i in neighboringNodes: # builds a list of neighboring unoccupied nodes
                    for g in self.antList:
                        if(g.xCoord==i.xCoord and g.yCoord==i.yCoord):
                            nodesToRemove.append(i)
                for eachNode in nodesToRemove:
                    try:
                        neighboringNodes.remove(eachNode)
                    except:
                        # duplicates in nodesToRemove make the second remove()
                        # raise; deliberately ignored
                        pass
                if(neighboringNodes==[]): # surrounded by other ants, so stays in current place
                    pass
                else: # goes to random node
                    nodeChoice = random.randrange(0,len(neighboringNodes))
                    ant.xCoord = neighboringNodes[nodeChoice].xCoord
                    ant.yCoord = neighboringNodes[nodeChoice].yCoord
            else: # move to neighbor with highest amount of pheromones
                neighboringNodes = []
                neighboringNodes.extend(solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)].connectedNodeList)
                removalList = []
                for i in neighboringNodes: # builds a list of neighboring unoccupied nodes
                    for g in self.antList:
                        if(g.xCoord==i.xCoord and g.yCoord==i.yCoord):
                            removalList.append(i)
                for i in removalList:
                    neighboringNodes.remove(i)
                if(neighboringNodes==[]): # surrounded by other ants
                    pass
                else: # goes to node with most pheromones, otherwise picks a random one
                    # random starting candidate so ties do not always favour the first neighbor
                    currentHighestNode = neighboringNodes[random.randrange(0,len(neighboringNodes))]
                    for i in neighboringNodes:
                        if(i.pheromoneConcentration>currentHighestNode.pheromoneConcentration):
                            currentHighestNode = i
                    ant.xCoord = currentHighestNode.xCoord
                    ant.yCoord = currentHighestNode.yCoord
        # Initial ant setup
        for ant in self.antList: # places each ant in a random and unique location in the graph
            antLocationUnique = False
            while(antLocationUnique==False):
                randX = random.randrange(0,len(solution.nodeList))
                randY = random.randrange(0,len(solution.nodeList[randX]))
                testXCoord = solution.nodeList[randX][randY].xCoord
                testYCoord = solution.nodeList[randX][randY].yCoord
                antConflict = 0
                for eachAnt in self.antList: # makes sure no other ant is currently occupying the chosen space
                    if(eachAnt.xCoord==testXCoord and eachAnt.yCoord == testYCoord):
                        antConflict = 1
                if(antConflict==0):
                    antLocationUnique = True
                    ant.xCoord = testXCoord
                    ant.yCoord = testYCoord
        # every node starts out with the same pheromone level
        for yList in solution.nodeList:
            for eachNode in yList:
                eachNode.pheromoneConcentration = self.initialPheromoneQuantity
        #
        # SOLUTION MAIN LOOP
        #
        # displayDelay controls how many "display only" iterations happen
        # between work cycles so the output is visible to the human eye.
        # SEE GLOBAL VALUES AT TOP FOR FURTHER EXPLANATION.
        displayDelay = self.displayDelay
        # starting the counter at the threshold makes the first iteration a work cycle
        displayCounter = displayDelay
        currentCycle = 1
        cyclesToRun = self.numberOfCycles
        solutionSolved = False
        while(solutionSolved==False and cyclesToRun>0):
            if(displayCounter>=displayDelay):
                displayCounter = 0
                # Decay all pheromones by some amount
                for yList in solution.nodeList:
                    for eachNode in yList:
                        eachNode.pheromoneConcentration -= self.pheromoneDecayRate
                # Display cycle number
                cycleString = "Cycle Number: "+str(currentCycle)
                outputWindow.addstr(0,0,cycleString)
                stringLength = len(str(currentCycle))
                # blank the cell after the number in case a longer number was shown before
                outputWindow.addstr(0,14+stringLength,' ')
                # Determines whether current solution is a solved state
                if(solution.isSolutionState()==True):
                    solutionSolved = True
                # overall conflict count - computed but unused by this algorithm
                numOfConflictingNodes = determineNumOfConflictingNodes(solution)
                for ant in self.antList:
                    # Determines whether current solution is a solved state
                    if(solution.isSolutionState()==True):
                        solutionSolved = True
                    # Finds the working node
                    currentNode = solution.nodeList[int(ant.xCoord/6)][int(ant.yCoord/6)]
                    # Determines the number of local conflicts (including whether nearby nodes are uncolored or not)
                    numOfLocalConflicts = determineNumOfLocalConflicts(currentNode)
                    # If there are no conflicts, decay the current node's pheromones by given rate
                    if(numOfLocalConflicts==0):
                        currentNode.pheromoneConcentration -= self.movementPheromoneDecay
                    else:
                        changeColorOfNode(currentNode)
                        # Sets pheromones proportional to the amount of conflicts around the current node
                        currentNode.pheromoneConcentration = numOfLocalConflicts * self.pheromoneStrength
                    # Process ant movement
                    moveAnt(solution,ant)
                # Display output
                solution.display(outputWindow)
                solution.drawAnts(outputWindow,self.antList)
                outputWindow.refresh()
                # Iterate cycle counters
                currentCycle += 1
                cyclesToRun -= 1
            else:
                # Iterate display counter
                displayCounter += 1
                # Display output
                solution.display(outputWindow)
                solution.drawAnts(outputWindow,self.antList)
                outputWindow.refresh()
        return currentCycle
# Individual node for use by GraphSolution class
class GraphNode:
    """A single colourable cell of the graph.

    Attributes:
        xCoord, yCoord: screen position (yCoord is assigned later by
            GraphSolution's layout pass).
        color: 0 means uncoloured; 1-3 are the three usable colours.
        icon: character drawn for this node.
        removalFlag: marker flag (not referenced in the code shown here).
        connectedNodeList: adjacent GraphNodes (kept symmetric).
        pheromoneConcentration: current pheromone level on this node.
    """
    def __init__(self,xCoord):
        # position: x comes from the caller, y is filled in during layout
        self.xCoord = xCoord
        self.yCoord = 0
        # every node starts uncoloured with no neighbours and no pheromones
        self.color = 0
        self.connectedNodeList = []
        self.pheromoneConcentration = 0
        # drawing/bookkeeping defaults
        self.icon = '#'
        self.removalFlag = False
#
# GraphSolution Class
#
# ##
# Parameters:
# ##
#
# - numOfNodes: number of GraphNodes to generate in the solution set
# - window: curses window to output to
class GraphSolution:
    """Randomly generated graph for the 3-colouring problem.

    Nodes are laid out in vertical "tiers": each tier shares an x
    coordinate, tiers are 6 character cells apart, nodes within a tier
    are 6 cells apart vertically, and everything is shifted by a 3-cell
    margin. Nodes are connected to horizontal neighbours (same y, next
    tier) and vertical neighbours (same tier, adjacent y). Rendering is
    done with curses.

    Parameters:
        numOfNodes: number of GraphNodes to generate in the solution set.
        window: curses window to output to (not used by the constructor;
            the drawing methods take their own window argument).
    """
    def __init__(self,numOfNodes,window):
        #
        # Class Properties
        #
        self.nodeList = []          # list of tiers; each tier is a list of GraphNodes
        self.numOfNodes = numOfNodes
        self.lastTier = 0           # grid index of the right-most tier
        #
        # Solution Setup
        #
        def connectNodes(node1,node2):
            # symmetric adjacency
            node1.connectedNodeList.append(node2)
            node2.connectedNodeList.append(node1)
        currentTier = 0
        sameTierCounter = 1
        currentTierList = []
        for i in range(0,self.numOfNodes):
            # roughly 30% chance to start a new tier; forced once the
            # current tier reaches MAX_Y_VALUE_OF_GRAPH_NODE nodes
            tierIterChoice = random.randrange(0,100)
            if((tierIterChoice>70 or sameTierCounter>=MAX_Y_VALUE_OF_GRAPH_NODE) and sameTierCounter>1):
                currentTier += 6
                sameTierCounter = 1
                self.nodeList.append(currentTierList)
                currentTierList = []
            else:
                sameTierCounter += 1
            newNode = GraphNode(currentTier)
            currentTierList.append(newNode)
        self.nodeList.append(currentTierList)
        self.lastTier = int((currentTier/6))
        # Separates out each node on the same X coordinate
        # to different Y coordinates
        checkTier = -1
        checkTierCounter = 0
        for g in self.nodeList:
            for node in g:
                if(checkTier!=node.xCoord):
                    checkTier = node.xCoord
                    checkTierCounter = 0
                else:
                    checkTierCounter += 6
                node.yCoord = checkTierCounter
        # shift the whole graph away from the window border
        for k in self.nodeList:
            for node in k:
                node.xCoord += 3
                node.yCoord += 3
        #
        # Connect nodes to each other
        #
        nodelistCounter = 1
        for j in self.nodeList:
            for node in j:
                if(nodelistCounter>=len(self.nodeList)):
                    # last tier: no tier to the right to connect to
                    pass
                else:
                    # horizontal neighbour: same row in the next tier
                    for otherNode in self.nodeList[nodelistCounter]:
                        if(node.yCoord==otherNode.yCoord):
                            connectNodes(node,otherNode)
                # vertical neighbour: node directly above in the same tier
                for nodeOnSameY in j:
                    if(nodeOnSameY.yCoord+6==node.yCoord):
                        connectNodes(nodeOnSameY,node)
            nodelistCounter += 1
    def display(self,window):
        """Draw the whole graph: each node as a 3x3 coloured square, the
        connecting lines between adjacent nodes, and an info footer."""
        # An insane mess, but it works.
        #
        # Draws connections between each node and its immediate neighbors in the 4 cardinal directions.
        # Originally was intended to support all sorts of more nonlinear connections, but for the sake of
        # simplicity, this was removed. There's still some elements of that left in however.
        def drawConnectingLines(node1,node2):
            # Walks one character at a time from node1 towards node2 drawing
            # `icon`, until node2's bounding box is touched.
            # directionCode: 0 = moving vertically, 1 = moving horizontally.
            # diagonalCode: 1 allows one direction switch (a bent line).
            def drawUntilTouching(node1,node2,yOffset,xOffset,icon,directionCode,diagonalCode):
                notTouching = True
                xIncrease = 0
                yIncrease = 0
                diagonalSwitchCounter = 0
                while(notTouching==True):
                    window.addch(node1.yCoord+yOffset+yIncrease,node1.xCoord+xOffset+xIncrease,icon)
                    if(directionCode==0):
                        if(node1.yCoord+yOffset+yIncrease==node2.yCoord+3 and diagonalCode != 1 or node1.yCoord+yOffset+yIncrease==node2.yCoord-1 or node1.yCoord+yOffset+yIncrease==node2.yCoord+1):
                            if(diagonalCode==0):
                                notTouching = False
                            elif(diagonalCode==1 and diagonalSwitchCounter>0):
                                notTouching = False
                            else:
                                # bent line: switch to horizontal drawing
                                diagonalSwitchCounter += 1
                                directionCode = 1
                                icon = '-'
                                if(xOffset<0):
                                    xIncrease -= 1
                                else:
                                    xIncrease += 1
                        else:
                            if(node1.yCoord+yOffset+yIncrease-1==node2.yCoord+1 and diagonalCode==1):
                                icon = '/'
                            if(yOffset<0):
                                yIncrease -= 1
                            else:
                                yIncrease += 1
                    elif(directionCode==1):
                        if(node1.xCoord+xOffset+xIncrease==node2.xCoord-1 or node1.xCoord+xOffset+xIncrease==node2.xCoord+3 and diagonalCode != 1 or node1.xCoord+xOffset+xIncrease==node2.xCoord+1):
                            if(diagonalCode==0):
                                notTouching = False
                            elif(diagonalCode==1 and diagonalSwitchCounter>0):
                                notTouching = False
                            else:
                                # bent line: switch to vertical drawing
                                diagonalSwitchCounter += 1
                                directionCode = 0
                                icon = '|'
                                if(yOffset<0):
                                    yIncrease -= 1
                                else:
                                    yIncrease += 1
                        else:
                            if(xOffset<0):
                                xIncrease -= 1
                            else:
                                xIncrease += 1
            if(node1.xCoord==node2.xCoord):
                if(node1.yCoord>node2.yCoord):
                    drawUntilTouching(node1,node2,-1,1,'|',0,0)
                elif(node1.yCoord<node2.yCoord):
                    drawUntilTouching(node1,node2,3,1,'|',0,0)
                else:
                    window.addstr(20,21,'error in same X')
            else:
                if(node1.xCoord<node2.xCoord):
                    if(node1.yCoord==node2.yCoord):
                        drawUntilTouching(node1,node2,1,3,'-',1,0)
                    elif(node1.yCoord<node2.yCoord):
                        pass# drawUntilTouching(node1,node2,1,-2,'|',0,1)
                    elif(node1.yCoord>node2.yCoord):
                        window.addch(node1.yCoord+1,node1.xCoord+3,'-')
                        window.addch(node1.yCoord+1,node1.xCoord+4,'/')
                        window.addch(node1.yCoord,node1.xCoord+4,'|')
                        drawUntilTouching(node1,node2,-1,4,'|',0,1)
                elif(node1.xCoord>node2.xCoord):
                    if(node1.yCoord==node2.yCoord):
                        drawUntilTouching(node1,node2,1,-1,'-',1,0)
                    elif(node1.yCoord<node2.yCoord):
                        pass
                    # BUG FIX: this branch previously read `note1.yCoord`,
                    # an undefined name that raised NameError whenever the
                    # branch was reached
                    elif(node1.yCoord>node2.yCoord):
                        pass
                else:
                    window.addstr(20,20,'error in not X')
        for nodeList in self.nodeList:
            for node in nodeList:
                # draw 9 characters in a square to represent
                # each node
                window.attron(curses.color_pair(node.color))
                window.addch(node.yCoord,node.xCoord,node.icon)
                window.addch(node.yCoord+1,node.xCoord,node.icon)
                window.addch(node.yCoord,node.xCoord+1,node.icon)
                window.addch(node.yCoord+1,node.xCoord+1,node.icon)
                window.addch(node.yCoord+2,node.xCoord,node.icon)
                window.addch(node.yCoord+2,node.xCoord+1,node.icon)
                window.addch(node.yCoord+2,node.xCoord+2,node.icon)
                window.addch(node.yCoord+1,node.xCoord+2,node.icon)
                window.addch(node.yCoord,node.xCoord+2,node.icon)
                window.attroff(curses.color_pair(node.color))
                for i in node.connectedNodeList:
                    drawConnectingLines(node,i)
        ## INFO OUTPUTS ##
        # Number of Nodes
        nodeCounter = 0
        for xList in self.nodeList:
            for yList in xList:
                nodeCounter += 1
        numOfNodesString = "Number of Nodes: "+str(nodeCounter)
        window.addstr(40,0,numOfNodesString)
    def drawAnts(self,window,antList):
        """Overlay every ant as a '*' (curses colour pair 4) on top of
        the node it currently occupies."""
        for i in antList:
            window.attron(curses.color_pair(4))
            window.addch(i.yCoord+1,i.xCoord+1,'*')
            window.attroff(curses.color_pair(4))
    def isSolutionState(self):
        """Return True when every node is coloured and no two adjacent
        nodes share a colour."""
        solutionFailed = 0
        for eachY in self.nodeList:
            for eachNode in eachY:
                for i in eachNode.connectedNodeList:
                    # an uncoloured neighbour also disqualifies the solution
                    if(i.color==eachNode.color or i.color==0):
                        solutionFailed = 1
                        break
        if(solutionFailed==1):
            return False
        else:
            return True
    def resetSolution(self):
        """Uncolour every node and zero all pheromones (used between test runs)."""
        for yList in self.nodeList:
            for eachNode in yList:
                eachNode.color = 0
                eachNode.pheromoneConcentration = 0
# ##
# testAlg#
# ##
#
# These functions all do a single test of the given algorithm, with a provided graph and
# over the amount of trials inputted. They return the average number of cycles needed to find a solution.
def testAlg1(outputWindow,numOfTests,solution):
    """Run Algorithm 1 `numOfTests` times on the same graph and display
    the average number of cycles each run needed; returns that average."""
    cycleCounts = []
    for _ in range(numOfTests):
        solution.resetSolution()
        colony = AntColonyAlg1(solution,NUM_OF_ANTS,NUM_OF_CYCLES,DISPLAY_DELAY)
        cycleCounts.append(colony.solve(solution,outputWindow))
    outputWindow.clear()
    averageCycles = sum(cycleCounts)/len(cycleCounts)
    # report and wait for the user before handing the screen back
    outputWindow.addstr(2,3,"Current trial's average cycles of Algorithm 1 over the same graph:")
    outputWindow.addstr(3,3,str(averageCycles))
    outputWindow.addstr(5,3,"PRESS ANY KEY TO CONTINUE")
    outputWindow.getch()
    outputWindow.clear()
    return averageCycles
def testAlg2(outputWindow,numOfTests,solution):
    """Run Algorithm 2 `numOfTests` times on the same graph and display
    the average number of cycles each run needed; returns that average."""
    cycleCounts = []
    for _ in range(numOfTests):
        solution.resetSolution()
        colony = AntColonyAlg2(solution,NUM_OF_ANTS,NUM_OF_CYCLES,PHEROMONE_STRENGTH,INITIAL_PHEROMONE_QUANTITY,PHEROMONE_DECAY_RATE,DISPLAY_DELAY,RANDOM_MOVE_CHANCE,PHEROMONE_MOVE_CHANCE)
        cycleCounts.append(colony.solve(solution,outputWindow))
    outputWindow.clear()
    averageCycles = sum(cycleCounts)/len(cycleCounts)
    # report and wait for the user before handing the screen back
    outputWindow.addstr(2,3,"Current trial's average cycles of Algorithm 2 over the same graph:")
    outputWindow.addstr(3,3,str(averageCycles))
    outputWindow.addstr(5,3,"PRESS ANY KEY TO CONTINUE")
    outputWindow.getch()
    outputWindow.clear()
    return averageCycles
def testAlg3(outputWindow,numOfTests,solution):
    """Run Algorithm 3 `numOfTests` times on the same graph and display
    the average number of cycles each run needed; returns that average."""
    cycleCounts = []
    for _ in range(numOfTests):
        solution.resetSolution()
        colony = AntColonyAlg3(solution,NUM_OF_ANTS,NUM_OF_CYCLES,PHEROMONE_STRENGTH,INITIAL_PHEROMONE_QUANTITY,PHEROMONE_DECAY_RATE,DISPLAY_DELAY,RANDOM_MOVE_CHANCE,PHEROMONE_MOVE_CHANCE,MOVEMENT_PHEROMONE_DECAY)
        cycleCounts.append(colony.solve(solution,outputWindow))
    outputWindow.clear()
    averageCycles = sum(cycleCounts)/len(cycleCounts)
    # report and wait for the user before handing the screen back
    outputWindow.addstr(2,3,"Current trial's average cycles of Algorithm 3 over the same graph:")
    outputWindow.addstr(3,3,str(averageCycles))
    outputWindow.addstr(5,3,"PRESS ANY KEY TO CONTINUE")
    outputWindow.getch()
    outputWindow.clear()
    return averageCycles
def testAlg4(outputWindow,numOfTests,solution):
    """Run Algorithm 4 `numOfTests` times on the same graph and display
    the average number of cycles each run needed; returns that average."""
    cycleCounts = []
    for _ in range(numOfTests):
        solution.resetSolution()
        colony = AntColonyAlg4(solution,NUM_OF_ANTS,NUM_OF_CYCLES,PHEROMONE_STRENGTH,INITIAL_PHEROMONE_QUANTITY,PHEROMONE_DECAY_RATE,DISPLAY_DELAY,RANDOM_MOVE_CHANCE,PHEROMONE_MOVE_CHANCE,MOVEMENT_PHEROMONE_DECAY)
        cycleCounts.append(colony.solve(solution,outputWindow))
    outputWindow.clear()
    averageCycles = sum(cycleCounts)/len(cycleCounts)
    # report and wait for the user before handing the screen back
    outputWindow.addstr(2,3,"Current trial's average cycles of Algorithm 4 over the same graph:")
    outputWindow.addstr(3,3,str(averageCycles))
    outputWindow.addstr(5,3,"PRESS ANY KEY TO CONTINUE")
    outputWindow.getch()
    outputWindow.clear()
    return averageCycles
# ##
# comparativeTrialsOfAlgorithms
# ##
#
# Runs each algorithm against each other in several tests
#
# Each algorithm is tested 100 times on 3 different graphs, and the results
# are averaged together.
#
def comparativeTrialsOfAlgorithms(outputWindow):
    """Run the head-to-head benchmark of all four algorithms.

    Each of the three trials generates a fresh random graph; every
    algorithm is then tested 100 times on that graph (via testAlg1-4) and
    the per-trial averages plus a grand average are displayed.

    Fixes over the previous version:
    - the intro text omitted Algorithm 4, which is tested below
    - "TRIAL n OF ALGORITHM m" was written *before* clear(), so the
      message was erased before it could ever be displayed
    - the function mixed the module-global stdscr with its outputWindow
      parameter; it now uses outputWindow consistently
    - the four copy-pasted trial/report sections are collapsed into loops
      (addstr coordinates are unchanged)
    """
    outputWindow.addstr(3,3,"Comparative Trials")
    outputWindow.addstr(4,3,"------------------")
    outputWindow.addstr(5,3,"These tests will be measuring the efficiency of Algorithms 1, 2, 3 and 4")
    outputWindow.addstr(6,3,"on a graph whose layout will change between each trial. Each algorithm will be put through")
    outputWindow.addstr(7,3,"three trials of 100 tests each. The details of the algorithms are below.")
    outputWindow.addstr(10,3,"Algorithm 1")
    outputWindow.addstr(11,3,"-----------")
    outputWindow.addstr(12,3,"* Random movement of ants")
    outputWindow.addstr(13,3,"* New color of each node is chosen randomly")
    outputWindow.addstr(14,3,"* Aware of the amount of local conflicts of current node")
    outputWindow.addstr(16,3,"Algorithm 2")
    outputWindow.addstr(17,3,"-----------")
    outputWindow.addstr(18,3,"* Pheromone-based and random movement of ants")
    outputWindow.addstr(19,3,"* Pheromones are generated on the current node whenever a color is flipped")
    outputWindow.addstr(20,3,"* New color of each node is chosen randomly")
    outputWindow.addstr(21,3,"* Aware of the amount of local conflicts of current node")
    outputWindow.addstr(22,3,"* Each node is given an initial pheromone level")
    outputWindow.addstr(23,3,"* All pheromone levels decay over time")
    outputWindow.addstr(25,3,"Algorithm 3")
    outputWindow.addstr(26,3,"-----------")
    outputWindow.addstr(27,3,"* Pheromone-based and random movement of ants")
    outputWindow.addstr(28,3,"* Pheromones are generated on the current node whenever a color is flipped")
    outputWindow.addstr(29,3,"* Pheromones are directly decayed each time an ant moves over a locally optimal node.")
    outputWindow.addstr(30,3,"* New color of each node is chosen randomly")
    outputWindow.addstr(31,3,"* Aware of the amount of local conflicts of current node")
    outputWindow.addstr(32,3,"* Each node is given an initial pheromone level")
    outputWindow.addstr(33,3,"* All pheromone levels decay over time")
    outputWindow.addstr(35,3,"Algorithm 4")
    outputWindow.addstr(36,3,"-----------")
    outputWindow.addstr(37,3,"* Pheromone-based and random movement of ants")
    outputWindow.addstr(38,3,"* Pheromones are generated on the current node whenever a color is flipped")
    outputWindow.addstr(39,3,"* Pheromones are directly decayed each time an ant moves over a locally optimal node.")
    outputWindow.addstr(40,3,"* New color of each node is chosen by determing the locally best color")
    outputWindow.addstr(41,3,"* Aware of the amount of local conflicts of current node")
    outputWindow.addstr(42,3,"* Each node is given an initial pheromone level")
    outputWindow.addstr(43,3,"* All pheromone levels decay over time")
    outputWindow.addstr(46,3,"PRESS ANY KEY TO BEGIN THE TESTS")
    outputWindow.getch()
    # Run the trials: one fresh graph per trial, all four algorithms on it.
    algTests = [("1",testAlg1),("2",testAlg2),("3",testAlg3),("4",testAlg4)]
    trialResults = {}
    for trial in (1,2,3):
        trialGraph = GraphSolution(NUM_OF_NODES_IN_GRAPH,outputWindow)
        for algName,algTest in algTests:
            # clear first, THEN write and refresh, so the banner is actually visible
            outputWindow.clear()
            outputWindow.addstr(10,10,"TRIAL "+str(trial)+" OF ALGORITHM "+algName)
            outputWindow.refresh()
            trialResults[(algName,trial)] = algTest(outputWindow,100,trialGraph)
    # Final report: one 8-row section per algorithm, same layout as before.
    outputWindow.clear()
    outputWindow.refresh()
    outputWindow.addstr(3,3,"FINAL RESULTS")
    outputWindow.addstr(4,3,"-------------")
    row = 6
    for algName,algTest in algTests:
        outputWindow.addstr(row,3,"Algorithm "+algName)
        outputWindow.addstr(row+1,3,"-----------")
        resultSum = 0
        for trial in (1,2,3):
            result = trialResults[(algName,trial)]
            resultSum += result
            outputWindow.addstr(row+1+trial,3,"Trial "+str(trial)+": ")
            outputWindow.addstr(row+1+trial,13,str(result))
        outputWindow.addstr(row+6,3,"Average: ")
        outputWindow.addstr(row+6,13,str(round((resultSum/3),2)))
        row += 8
    outputWindow.getch()
    outputWindow.clear()
    outputWindow.refresh()
def printMainMenu(outputWindow):
    """Render the program's top-level menu on the given curses window."""
    menuLines = [
        (3,3,"Artificial Intelligence Research Project"),
        (4,3,"----------------------------------------"),
        (6,3,"By James Clisham"),
        (10,3,"Enter one of the following keys to make a selection: "),
        (12,5,"1 - Run a test of a single algorithm"),
        (13,5,"2 - Comparative Analysis of all algorithms"),
        (14,5,"3 - Exit the program"),
    ]
    for row,col,text in menuLines:
        outputWindow.addstr(row,col,text)
def singleAlgTestMenu(outputWindow):
    """Draw the single-algorithm test menu: key prompts plus a summary of all four algorithms."""
    # Prompt block first (rows 38-44), then the four algorithm descriptions
    # (rows 3-36) -- same draw order as the original hand-written calls.
    menu_lines = (
        (38, 3, "Enter one of the following keys to make a selection: "),
        (40, 3, "1 - Test Algorithm 1"),
        (41, 3, "2 - Test Algorithm 2"),
        (42, 3, "3 - Test Algorithm 3"),
        (43, 3, "4 - Test Algorithm 4"),
        (44, 3, "5 - Return to Main Menu"),
        (3, 3, "Algorithm 1"),
        (4, 3, "-----------"),
        (5, 3, "* Random movement of ants"),
        (6, 3, "* New color of each node is chosen randomly"),
        (7, 3, "* Aware of the amount of local conflicts of current node"),
        (9, 3, "Algorithm 2"),
        (10, 3, "-----------"),
        (11, 3, "* Pheromone-based and random movement of ants"),
        (12, 3, "* Pheromones are generated on the current node whenever a color is flipped"),
        (13, 3, "* New color of each node is chosen randomly"),
        (14, 3, "* Aware of the amount of local conflicts of current node"),
        (15, 3, "* Each node is given an initial pheromone level"),
        (16, 3, "* All pheromone levels decay over time"),
        (18, 3, "Algorithm 3"),
        (19, 3, "-----------"),
        (20, 3, "* Pheromone-based and random movement of ants"),
        (21, 3, "* Pheromones are generated on the current node whenever a color is flipped"),
        (22, 3, "* Pheromones are directly decayed each time an ant moves over a locally optimal node."),
        (23, 3, "* New color of each node is chosen randomly"),
        (24, 3, "* Aware of the amount of local conflicts of current node"),
        (25, 3, "* Each node is given an initial pheromone level"),
        (26, 3, "* All pheromone levels decay over time"),
        (28, 3, "Algorithm 4"),
        (29, 3, "-----------"),
        (30, 3, "* Pheromone-based and random movement of ants"),
        (31, 3, "* Pheromones are generated on the current node whenever a color is flipped"),
        (32, 3, "* Pheromones are directly decayed each time an ant moves over a locally optimal node."),
        (33, 3, "* New color of each node is chosen by determing the locally best color"),
        (34, 3, "* Aware of the amount of local conflicts of current node"),
        (35, 3, "* Each node is given an initial pheromone level"),
        (36, 3, "* All pheromone levels decay over time"),
    )
    for row, col, text in menu_lines:
        outputWindow.addstr(row, col, text)
#
#
# MAIN LOOP
#
#
# only executes if this file is being run itself, rather than being used as a library
# Interactive curses front-end: main menu -> single-algorithm submenu or
# comparative run.  Only executes when run as a script, not when imported.
if(__name__ == "__main__"):
    # curses initialization
    stdscr = curses.initscr()
    # raw-ish input: no key echo, react to keys without waiting for Enter
    curses.noecho()
    curses.cbreak()
    # translate special keys (arrows etc.) into curses constants
    stdscr.keypad(1)
    curses.start_color()
    # hide the terminal cursor while the menus are displayed
    curses.curs_set(0)
    # color initialization
    curses.init_pair(1,curses.COLOR_RED,curses.COLOR_BLACK)
    curses.init_pair(2,curses.COLOR_GREEN,curses.COLOR_BLACK)
    curses.init_pair(3,curses.COLOR_BLUE,curses.COLOR_BLACK)
    curses.init_pair(4,curses.COLOR_MAGENTA,curses.COLOR_BLACK)
    # main while loop, ensures input is continually captured in main screen until correct input is found
    # NOTE(review): mainLoopRunning is never set to False; the program only
    # leaves this loop via exit() on option '3'.
    mainLoopRunning = True
    while(mainLoopRunning):
        # clears and refreshes the screen
        stdscr.clear()
        stdscr.refresh()
        # outputs the main menu text
        printMainMenu(stdscr)
        # waits for user input and captures whatever key is pressed
        userInput = stdscr.getch()
        if(userInput==ord('1')):
            # ensures that input is continually captured until correct input is found
            inSingleTestMenu = True
            while(inSingleTestMenu):
                # creates a new graph for whichever algorithm is chosen
                # (a fresh GraphSolution is built on every pass of this menu)
                singleGraphSolution = GraphSolution(NUM_OF_NODES_IN_GRAPH,stdscr)
                stdscr.clear()
                stdscr.refresh()
                # outputs the single test menu text
                singleAlgTestMenu(stdscr)
                # waits for user input, captures keys, etc
                userInput = stdscr.getch()
                # statements handle what input is pressed
                if(userInput==ord('1')):
                    stdscr.clear()
                    stdscr.refresh()
                    testAlg1(stdscr,1,singleGraphSolution)
                elif(userInput==ord('2')):
                    stdscr.clear()
                    stdscr.refresh()
                    testAlg2(stdscr,1,singleGraphSolution)
                elif(userInput==ord('3')):
                    stdscr.clear()
                    stdscr.refresh()
                    testAlg3(stdscr,1,singleGraphSolution)
                elif(userInput==ord('4')):
                    stdscr.clear()
                    stdscr.refresh()
                    testAlg4(stdscr,1,singleGraphSolution)
                elif(userInput==ord('5')):
                    stdscr.clear()
                    stdscr.refresh()
                    inSingleTestMenu = False
        elif(userInput==ord('2')):
            stdscr.clear()
            stdscr.refresh()
            comparativeTrialsOfAlgorithms(stdscr)
        elif(userInput==ord('3')):
            # resets the shell back to normal mode
            curses.endwin()
            exit()
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import *
def deconv(out_channels, kernel_size, stride=2, padding=1, batch_norm=True):
    """Build a transposed-convolution layer, optionally followed by batch norm.

    Returns a list: [Conv2DTranspose] or [Conv2DTranspose, BatchNormalization].
    The `padding` argument is unused; 'same' padding is always applied and the
    layer operates channels-first.  Bias is disabled on the conv layer.
    """
    t_conv = Conv2DTranspose(
        out_channels,
        kernel_size,
        strides=stride,
        padding='same',
        use_bias=False,
        data_format="channels_first",
    )
    if batch_norm:
        return [t_conv, BatchNormalization()]
    return [t_conv]
class Generator(keras.Model):
    """DCGAN-style generator: maps a z_size noise vector to a 3-channel image (channels-first)."""
    ## outputsize = stride * (inputsize - 1) + 2 * padding - kernelsize + 2. if padding == 1 than outputsize == inputsize. So we use padding = 'same' in tf
    def __init__(self, z_size, conv_dim = 32):
        """Build the layers.

        Args:
            z_size: length of the input noise vector.
            conv_dim: base channel width; the first feature map has conv_dim * 4 channels.
        """
        ## inherit init method from class Model in keras, if you have no idea with what inherit methods from
        ## parent model, please Google "super python"
        super(Generator, self).__init__()
        # complete init function
        self.conv_dim = conv_dim
        # fully-connected projection: z -> flattened (conv_dim*4) x 4 x 4 feature map
        self.fc = Dense(conv_dim * 4 * 4 * 4, input_shape = (z_size,))
        # deconv() returns [Conv2DTranspose] plus an optional BatchNormalization
        t_conv1 = deconv(conv_dim * 2, 4)
        self.t_conv1 = t_conv1[0]
        if len(t_conv1) == 2:
            self.bn_1 = t_conv1[1]
        t_conv2 = deconv(conv_dim, 4)
        self.t_conv2 = t_conv2[0]
        if len(t_conv2) == 2:
            self.bn_2 = t_conv2[1]
        # desired depth for RGB image is 3
        ## output here is in CHW format
        self.t_conv3 = deconv(3, 4, batch_norm = False)[0]
    def call(self, xx, training = None):
        """Forward pass: noise vector(s) -> tanh-activated image tensor."""
        # call in tf is an equivalent with forward in torch
        out = self.fc(xx)
        # reshape the flat projection to channels-first (N, C, 4, 4)
        out = tf.reshape(out, [-1, self.conv_dim * 4, 4, 4])
        out = self.t_conv1(out)
        # NOTE(review): self.bn_1 / self.bn_2 only exist when deconv() returned
        # a batch-norm layer; this also relies on a Keras layer being truthy.
        if self.bn_1:
            out = self.bn_1(out, training = training)
        out = tf.nn.relu(out)
        out = self.t_conv2(out)
        if self.bn_2:
            out = self.bn_2(out, training = training)
        out = tf.nn.relu(out)
        out = self.t_conv3(out)
        # tanh squashes pixel values into [-1, 1]
        out = tf.tanh(out)
        ## to HWC format
        ## Time complexity of numpy.transpose is O(1), according to: https://www.thetopsites.net/article/58279082.shtml
        # out = tf.transpose(out, perm = [0, 3, 1, 2])
        return out
def conv(out_channels, kernel_size, stride=2, padding=1, batch_norm=True):
    """Build a Conv2D layer, optionally followed by batch normalization.

    Returns a list: [Conv2D] or [Conv2D, BatchNormalization].  The `padding`
    argument is unused; 'same' padding is always applied, channels-first.
    Bias is disabled so activations are not offset by the conv layer itself.
    """
    conv_layer = Conv2D(
        out_channels,
        kernel_size,
        strides=stride,
        padding='same',
        use_bias=False,
        data_format="channels_first",
    )
    if batch_norm:
        return [conv_layer, BatchNormalization()]
    return [conv_layer]
class Discriminator(keras.Model):
    """DCGAN-style discriminator: scores channels-first images with a single raw logit."""
    ## outputsize = (inputsize - kernelsize + 2 * padding)/stride + 1, so when stride = 2, kernel_size = 4. if padding == 1 than outputsize == inputsize. So we use padding = 'same' in tf
    ## if you want to custom padding size, please read helper here https://stackoverflow.com/questions/37659538/custom-padding-for-convolutions-in-tensorflow
    ## tf.pad is still available in tf 2.0+
    ## you can also create a sequence and use sequence.add(layer) to add layers to model, see the tutorial here:
    ## https://www.tensorflow.org/tutorials/generative/dcgan
    def __init__(self, conv_dim=32):
        """Build the layers; conv_dim is the base channel width (doubles per stage)."""
        super(Discriminator, self).__init__()
        self.conv_dim = conv_dim
        # first stage deliberately skips batch norm, so conv() returns [Conv2D]
        self.conv1 = conv(conv_dim, 4, batch_norm= False)[0]
        # conv() returns [Conv2D] plus an optional BatchNormalization
        conv2 = conv(conv_dim * 2, 4)
        self.conv2 = conv2[0]
        if len(conv2) == 2:
            self.bn_1 = conv2[1]
        conv3 = conv(conv_dim * 4, 4)
        self.conv3 = conv3[0]
        if len(conv3) == 2:
            self.bn_2 = conv3[1]
        self.flatten = Flatten()
        # single raw logit output; pair with a from_logits=True loss
        self.fc = Dense(1)
    def call(self, xx, training = None):
        """Forward pass: image tensor -> unbounded realness logit."""
        out = self.conv1(xx)
        out = tf.nn.leaky_relu(out, alpha = 0.2)
        out = self.conv2(out)
        # NOTE(review): bn_1 / bn_2 only exist when conv() appended batch norm;
        # this also relies on a Keras layer being truthy.
        if self.bn_1:
            out = self.bn_1(out, training = training)
        out = tf.nn.leaky_relu(out, alpha = 0.2)
        out = self.conv3(out)
        if self.bn_2:
            out = self.bn_2(out, training = training)
        out = tf.nn.leaky_relu(out, alpha = 0.2)
        out = self.flatten(out)
        out = self.fc(out)
        return out
def real_loss(D_out, smooth=False):
    """BCE-with-logits loss of discriminator logits against "real" targets.

    With smooth=True the real label is softened to 0.9 (one-sided label
    smoothing); otherwise real labels are 1.  Equivalent of torch's
    BCEWithLogitsLoss, per:
    https://stackoverflow.com/questions/55683729/bcewithlogitsloss-in-keras
    https://www.tensorflow.org/tutorials/generative/dcgan
    """
    batch_size = D_out.shape[0]
    if smooth:
        labels = tf.ones(batch_size) * 0.9
    else:
        labels = tf.ones(batch_size)
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    return bce(labels, D_out)
def fake_loss(D_out):
    """BCE-with-logits loss of discriminator logits against all-zero ("fake") targets."""
    labels = tf.zeros(D_out.shape[0])
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    return bce(labels, D_out)
## I put in the loss calculation here instead of main function
def dis_loss(generator, discriminator, input_noise, real_image, is_training):
    """Total discriminator loss: real images scored as real plus fakes scored as fake."""
    fake_image = generator(input_noise, is_training)
    real_logits = discriminator(real_image, is_training)
    fake_logits = discriminator(fake_image, is_training)
    return real_loss(real_logits) + fake_loss(fake_logits)
def gen_loss(generator, discriminator, input_noise, is_training):
    """Generator loss: discriminator's "real" loss on generated images.

    The generator improves by making the discriminator score its fakes as
    real, hence real_loss() on the fake logits (no label smoothing here).
    Fix: the intermediate logits were previously bound to a local named
    `fake_loss`, shadowing the module-level fake_loss() helper.
    """
    fake_image = generator(input_noise, is_training)
    d_fake_logits = discriminator(fake_image, is_training)
    loss = real_loss(d_fake_logits)
    return loss
'''
The following iterative sequence is defined for the set of positive integers:
n -> n/2 (n is even)
n -> 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
'''
# Project Euler 14: find the starting number below one million with the
# longest Collatz chain.  Prints "<starting number> <chain length in steps>".
numbers = range(1, 1000000)
max_length = 0
max_target_number = 1
for x in numbers:
    temp_target_number = x
    temp_length = 0
    while temp_target_number != 1:
        if temp_target_number % 2 == 0:
            # Fix: use floor division -- true division `/` silently converted
            # the chain to floats; `//` keeps the arithmetic in exact ints.
            temp_target_number = temp_target_number // 2
        else:
            temp_target_number = temp_target_number * 3 + 1
        temp_length = temp_length + 1
    # track the longest chain seen so far (length counted in steps, not terms)
    if temp_length > max_length:
        max_length = temp_length
        max_target_number = x
print(max_target_number, max_length)
|
#Feature engineering functions for Bike sharing project
import pandas as pd
import numpy as np
import math
from sklearn.preprocessing import MinMaxScaler, StandardScaler, FunctionTransformer
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.metrics import accuracy_score
# feature engineer training set data
def fe_train(data):
    """Feature-engineer the bike-sharing training set.

    Drops columns correlated with the kept ones (atemp/temp, season/weather,
    plus holiday/workingday/humidity), encodes the hour of day as a sin/cos
    pair so midnight and 23:00 stay close, standard-scales temp and windspeed,
    and returns a DataFrame with columns
    ['sin_time', 'cos_time', 'temp', 'windspeed'] on the original index.
    """
    reduced = data.drop(columns=['season', 'holiday', 'workingday', 'humidity', 'weather', 'atemp'])
    # cyclic hour-of-day encoding
    hour_angle = 2 * np.pi * data.index.hour / 24
    reduced['sin_time'] = np.sin(hour_angle)
    reduced['cos_time'] = np.cos(hour_angle)
    c_transformer = ColumnTransformer([
        ('pass', 'passthrough', ['sin_time', 'cos_time']),
        ('scale1', StandardScaler(), ['temp']),
        ('scale3', StandardScaler(), ['windspeed']),
    ])
    return pd.DataFrame(
        c_transformer.fit_transform(reduced),
        index=data.index,
        columns=['sin_time', 'cos_time', 'temp', 'windspeed'],
    )
def fe_test(data2, transformer=None):
    """Feature-engineer the bike-sharing test set to match fe_train's columns.

    Args:
        data2: DataFrame with the raw bike-sharing columns, datetime index.
        transformer: optional ColumnTransformer already fitted on the training
            data; when given, only `transform` is applied.

    Returns:
        DataFrame with ['sin_time', 'cos_time', 'temp', 'windspeed'].

    NOTE(review): the default (transformer=None) keeps the old behavior of
    re-fitting the scalers on the test data itself, which leaks test
    statistics and scales test features differently from training.  Pass the
    transformer fitted during training to avoid that.
    """
    # drop correlated columns atemp/temp and season/weather
    data2_temp = data2.drop(columns=['season', 'holiday', 'workingday', 'humidity', 'weather', 'atemp'])
    # cyclic hour-of-day encoding, same as fe_train
    data2_temp['sin_time'] = np.sin(2 * np.pi * data2.index.hour / 24)
    data2_temp['cos_time'] = np.cos(2 * np.pi * data2.index.hour / 24)
    if transformer is None:
        transformer = ColumnTransformer([
            ('pass', 'passthrough', ['sin_time', 'cos_time']),
            ('scale1', StandardScaler(), ['temp']),
            ('scale3', StandardScaler(), ['windspeed']),
        ])
        transformed = transformer.fit_transform(data2_temp)
    else:
        transformed = transformer.transform(data2_temp)
    return pd.DataFrame(
        transformed,
        index=data2.index,
        columns=['sin_time', 'cos_time', 'temp', 'windspeed'],
    )
def prediction_to_csv(testdata, model, path='data/bike-sharing-demand/bikeshare_submission.csv'):
    """Predict bike counts for testdata and write a submission CSV.

    Args:
        testdata: feature-engineered DataFrame; its index becomes the CSV index.
        model: fitted estimator exposing a `predict` method.
        path: output CSV path; default preserved for backward compatibility.
    """
    ytarget_pred = model.predict(testdata)
    submission = pd.DataFrame(ytarget_pred, index=testdata.index, columns=['count'])
    # negative predicted counts are not meaningful; clip them to zero
    submission.loc[submission["count"] < 0, "count"] = 0
    submission.to_csv(path)
|
import discord, datetime, parsing as tool, pytz, asyncio
from discord.ext import tasks, commands
def status():
    """Print the asyncio event loop currently running.

    Raises RuntimeError when called while no event loop is running.
    """
    loop = asyncio.get_running_loop()
    print("now running", loop)
class MyCog(commands.Cog):
    """Cog that polls four notice boards every 3 minutes and broadcasts new posts.

    NOTE(review): `self.channels` is created as a dict but iterated directly,
    so its *keys* are expected to be sendable channel objects (they expose
    .guild, .id and .send) -- confirm against the code that populates it.
    """
    def __init__(self):
        self.channels = dict()
        # per-board cache of already-announced post titles (dedup)
        self.info = [dict() for _ in range(4)]
        self.prev_date = "22.02.09"
        self.notice.start()
    @tasks.loop(minutes=3)
    async def notice(self):
        where = ["๋ฐฑ๋ง ๊ด์ฅ", "ํ์ฌ ๊ณต์ง", "์ผ๋ฐ ์์", "์ฌ์๋จ ์์"]
        KST = pytz.timezone("Asia/Seoul")
        withtime = str(datetime.datetime.now(KST)).replace("-", ".")[2:].split()
        date = withtime[0]
        time = withtime[1].split(".")[0]
        # a new day: clear the dedup caches
        if date != self.prev_date:
            self.info = [dict() for _ in range(4)]
            self.prev_date = date
        uploaded = []
        for i in range(4):
            # ret[0] has a value that how many posts are uploaded in today
            ret, cnt = tool.what_you_want(i, date), 0
            temp = discord.Embed(title=where[i], description=ret[0], color=0x62c1cc)
            uploaded.append(str(ret[0]).strip())
            for j in range(1, len(ret)):
                title = ret[j][1]
                if title in self.info[i]:
                    continue
                cnt += 1
                self.info[i][title] = 1
                title = str(ret[j][0] + " " + ret[j][1])
                temp.add_field(name=title, value=ret[j][-1], inline=False)
            # BUG FIX: removed a stray `await ch.channel.send("", embed=temp)`
            # that lived inside the field loop and referenced `ch` before any
            # definition (NameError on the first new post); the embed is
            # broadcast below, once per registered channel.
            if cnt:
                for ch in self.channels:
                    print(f"send to : {ch.guild, ch.id}")
                    await ch.send("", embed=temp)
        print(f"Update : {uploaded}")
|
import logging
import requests
from .api_endpoints import API_ENDPOINTS
LOG = logging.getLogger(__name__)
# TODO: create nice docs
def requests_url(request, url):
    """GET `url` using the session's OAuth bearer token; return the parsed JSON body."""
    access_token = request.session.get("access_token")
    headers = {"Authorization": "Bearer {}".format(access_token)}
    response = requests.get(url, headers=headers)
    return response.json()
def get_new_releases(request):  # pragma: no cover
    """Return the album items from Spotify's new-releases endpoint."""
    payload = requests_url(request, API_ENDPOINTS["new_releases"])
    return payload["albums"]["items"]
def get_user_recently_played(request):  # pragma: no cover
    """Return the track objects from the user's recently-played history.

    Fix: the endpoint returns {"items": [{"track": {...}}, ...]}, i.e.
    "items" is a *list* of play-history objects; the previous
    results["items"]["track"] indexed that list with a string and raised
    TypeError.  Extract the "track" object from each item instead.
    """
    url = API_ENDPOINTS["user_recently_played"]
    results = requests_url(request, url)
    return [item["track"] for item in results["items"]]
def get_album(request, album_id):  # pragma: no cover
    """Fetch a single album object by its Spotify id."""
    return requests_url(request, API_ENDPOINTS["album"] + album_id)
def get_track(request, track_id):  # pragma: no cover
    """Fetch a single track object by its Spotify id."""
    return requests_url(request, API_ENDPOINTS["track"] + track_id)
def get_track_audio_features(request, track_id):  # pragma: no cover
    """Fetch the audio-features object for a track by its Spotify id."""
    return requests_url(request, API_ENDPOINTS["track_audio_feature"] + track_id)
def get_search_results(request, searching):  # pragma: no cover
    """Search artists; return (list of artist items, total number of matches)."""
    prefix, suffix = API_ENDPOINTS["search"][0], API_ENDPOINTS["search"][1]
    results = requests_url(request, prefix + searching + suffix)
    artist_block = results['artists']
    return artist_block['items'], artist_block['total']
def get_artist(request, artist_id):  # pragma: no cover
    """Fetch a single artist object by its Spotify id."""
    return requests_url(request, API_ENDPOINTS['artist'] + artist_id)
def get_artist_and_albums(request, artist_id):  # pragma: no cover
    """Return the artist's name together with their album items."""
    albums_url = API_ENDPOINTS["artist_albums"][0] + artist_id + API_ENDPOINTS["artist_albums"][1]
    # fetch the artist first (matches the original call order), then the albums
    artist_name = get_artist(request, artist_id)['name']
    albums = requests_url(request, albums_url)
    return artist_name, albums["items"]
|
import os
from datetime import datetime
from app import db
from app import filesystem
from app.utils import get_random_string
class User(db.Model):
    """A user account, keyed to a Dropbox account id."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    # Dropbox account this user authenticated with.
    dropbox_id = db.Column(db.Integer)
    name = db.Column(db.String(80))
    email = db.Column(db.String(120))
    # Unique inbound "mailer+<random>@mail.getbookdrop.com" address
    # (generated in set_new_emailer below).
    emailer = db.Column(db.String(120), unique=True)
    added_bookmarklet = db.Column(db.Boolean)
    uploaded_welcome_pdf = db.Column(db.Boolean)
    active = db.Column(db.Boolean)
    # OAuth access token -- presumably for the Dropbox API; confirm at the call sites.
    access_token = db.Column(db.Text)
    # Sync cursor -- presumably a Dropbox delta cursor; confirm at the call sites.
    cursor = db.Column(db.Text)
    # @kindle.com email addresses.
    kindle_names = db.relationship('KindleName', backref='user', lazy='dynamic',
                                   cascade='delete')
    # Hashes of the user's current books.
    books = db.relationship('Book', backref='user', lazy='dynamic',
                            cascade='delete')
    def __init__(self, dropbox_id):
        """Create a new, inactive user with all onboarding flags cleared."""
        self.dropbox_id = dropbox_id
        self.added_bookmarklet = False
        self.uploaded_welcome_pdf = False
        self.active = False
    def set_active(self, active):
        """Set the user's active flag."""
        self.active = active
    def set_new_emailer(self):
        """Assign a fresh randomized emailer address; return the random part."""
        random_base = get_random_string()
        emailer_address = 'mailer+%s@mail.getbookdrop.com' % random_base
        self.emailer = emailer_address
        return random_base
    def set_added_bookmarklet(self):
        """Record that the user installed the bookmarklet."""
        self.added_bookmarklet = True
    def set_uploaded_welcome_pdf(self):
        """Record that the welcome PDF was uploaded for this user."""
        self.uploaded_welcome_pdf = True
class Book(db.Model):
    """A user's book file and its delivery state."""
    __tablename__ = 'book'
    id = db.Column(db.Integer, primary_key=True)
    # Content hash of the book file (see User.books: "hashes of the user's current books").
    book_hash = db.Column(db.Text)
    pathname = db.Column(db.Text)
    size = db.Column(db.Integer)
    # True while the book has not been sent yet.
    unsent = db.Column(db.Boolean)
    # Number of send attempts made so far.
    num_attempts = db.Column(db.Integer)
    date_created = db.Column(db.DateTime)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    def __init__(self, user_id, pathname, size, unsent=False, book_hash=''):
        """Create a book record.

        Note: the `unsent` argument is currently ignored -- every new book is
        forced to unsent via mark_unsent(True) below.
        """
        self.user_id = user_id
        self.pathname = pathname
        self.book_hash = book_hash
        self.size = size
        # Books are always unsent at first
        self.mark_unsent(True)
        self.num_attempts = 0
        if self.date_created is None:
            self.date_created = datetime.utcnow()
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "<Book: {0}>".format(self.pathname)
    def mark_unsent(self, unsent):
        """Set the unsent flag."""
        self.unsent = unsent
    def get_size(self):
        """Return the stored size, treating a missing size as 0."""
        if self.size is None:
            return 0
        else:
            return self.size
    def get_tmp_pathname(self, tag):
        """Return this book's path inside the user's `tag` working directory."""
        return os.path.join(filesystem.get_user_directory(self.user_id, tag),
                            self.pathname.strip('/'))
class KindleName(db.Model):
    """A @kindle.com address name belonging to a user (see User.kindle_names)."""
    __tablename__ = 'kindle_name'
    id = db.Column(db.Integer, primary_key=True)
    kindle_name = db.Column(db.String(120))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    def __init__(self, user_id, kindle_name):
        self.user_id = user_id
        self.kindle_name = kindle_name
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "<KindleName: {0}>".format(self.kindle_name)
|
from typing import List, Optional
from fastapi import Path, Depends, APIRouter
from starlette.responses import Response, RedirectResponse
from sqlalchemy.ext.asyncio import AsyncSession
from pol import sa, res, wiki
from pol.res import ErrorDetail, not_found_exception
from pol.utils import subject_images
from pol.config import CACHE_KEY_PREFIX
from pol.router import ErrorCatchRoute
from pol.depends import get_db, get_redis
from pol.db.const import Gender, get_character_rel
from pol.db.tables import (
ChiiPerson,
ChiiSubject,
ChiiCharacter,
ChiiPersonField,
ChiiCrtCastIndex,
ChiiCrtSubjectIndex,
)
from pol.api.v0.utils import person_images
from pol.api.v0.models import RelatedSubject, CharacterDetail, CharacterPerson
from pol.redis.json_cache import JSONRedis
router = APIRouter(tags=["่ง่ฒ"], route_class=ErrorCatchRoute)
api_base = "/v0/characters"
# Character detail endpoint; responses are cached in redis for 60 seconds.
@router.get(
    "/characters/{character_id}",
    description="cache with 60s",
    response_model=CharacterDetail,
    responses={
        404: res.response(model=ErrorDetail),
    },
)
async def get_character_detail(
    response: Response,
    db: AsyncSession = Depends(get_db),
    not_found: res.HTTPException = Depends(not_found_exception),
    character_id: int = Path(..., gt=0),
    redis: JSONRedis = Depends(get_redis),
):
    # serve from the redis cache when a cached model is available
    cache_key = CACHE_KEY_PREFIX + f"character:{character_id}"
    if value := await redis.get_with_model(cache_key, CharacterDetail):
        response.headers["x-cache-status"] = "hit"
        return value
    character: Optional[ChiiCharacter] = await db.scalar(
        sa.select(ChiiCharacter).where(ChiiCharacter.crt_id == character_id).limit(1)
    )
    if character is None:
        raise not_found
    # redirected characters point at a canonical record elsewhere
    if character.crt_redirect:
        return RedirectResponse(f"{api_base}/{character.crt_redirect}")
    # banned characters are hidden behind a 404
    if character.crt_ban:
        raise not_found
    data = {
        "id": character.crt_id,
        "name": character.crt_name,
        "type": character.crt_role,
        "summary": character.crt_summary,
        "images": person_images(character.crt_img),
        "locked": character.crt_lock,
        "stat": {
            "comments": character.crt_comment,
            "collects": character.crt_collects,
        },
    }
    # optional biographical fields live in a separate table keyed by the same id
    field = await db.get(ChiiPersonField, character_id)
    if field is not None:
        if field.gender:
            data["gender"] = Gender(field.gender).str()
        # zero/empty values are normalized to None
        data["blood_type"] = field.bloodtype or None
        data["birth_year"] = field.birth_year or None
        data["birth_mon"] = field.birth_mon or None
        data["birth_day"] = field.birth_day or None
    # a malformed infobox simply omits the "infobox" key
    try:
        data["infobox"] = wiki.parse(character.crt_infobox).info
    except wiki.WikiSyntaxError:  # pragma: no cover
        pass
    response.headers["x-cache-status"] = "miss"
    await redis.set_json(cache_key, value=data, ex=60)
    return data
# Subjects (works) a character appears in.
# NOTE(review): despite the name, this handles *character* subjects; a rename
# to get_character_subjects would better match the route.
@router.get(
    "/characters/{character_id}/subjects",
    summary="get character related subjects",
    response_model=List[RelatedSubject],
    responses={
        404: res.response(model=ErrorDetail),
    },
)
async def get_person_subjects(
    db: AsyncSession = Depends(get_db),
    not_found: res.HTTPException = Depends(not_found_exception),
    character_id: int = Path(..., gt=0),
):
    # eager-load the subject relation to avoid per-row queries below
    character: Optional[ChiiCharacter] = await db.scalar(
        sa.select(ChiiCharacter)
        .options(
            sa.selectinload(ChiiCharacter.subjects).joinedload(
                ChiiCrtSubjectIndex.subject
            )
        )
        .where(ChiiCharacter.crt_id == character_id, ChiiCharacter.crt_ban == 0)
        .limit(1)
    )
    if character is None:
        raise not_found
    subjects = []
    for s in character.subjects:
        # use the "grid" size image when the subject has images at all
        if v := subject_images(s.subject.subject_image):
            image = v["grid"]
        else:
            image = None
        subjects.append(
            {
                "id": s.subject_id,
                "name": s.subject.subject_name,
                "name_cn": s.subject.subject_name_cn,
                "staff": get_character_rel(s.crt_type),
                "image": image,
            }
        )
    return subjects
# Persons (e.g. voice actors) related to a character, with the subject the
# relation comes from.
@router.get(
    "/characters/{character_id}/persons",
    summary="get character related persons",
    response_model=List[CharacterPerson],
    responses={
        404: res.response(model=ErrorDetail),
    },
)
async def get_character_persons(
    db: AsyncSession = Depends(get_db),
    not_found: res.HTTPException = Depends(not_found_exception),
    character_id: int = Path(..., gt=0),
):
    # existence/visibility check first: banned or missing characters 404
    character: Optional[ChiiCharacter] = await db.scalar(
        sa.select(ChiiCharacter)
        .where(ChiiCharacter.crt_id == character_id, ChiiCharacter.crt_ban == 0)
        .limit(1)
    )
    if character is None:
        raise not_found
    # one row per distinct (person, subject) cast entry; banned persons excluded
    query = (
        sa.select(
            ChiiCrtCastIndex.crt_id,
            ChiiCrtCastIndex.prsn_id,
            ChiiPerson.prsn_name,
            ChiiPerson.prsn_type,
            ChiiPerson.prsn_img,
            ChiiSubject.subject_id,
            ChiiSubject.subject_name,
            ChiiSubject.subject_name_cn,
        )
        .distinct()
        .join(ChiiPerson, ChiiPerson.prsn_id == ChiiCrtCastIndex.prsn_id)
        .join(ChiiSubject, ChiiSubject.subject_id == ChiiCrtCastIndex.subject_id)
        .where(
            ChiiCrtCastIndex.crt_id == character.crt_id,
            ChiiPerson.prsn_ban == 0,
        )
    )
    persons = [
        {
            "id": r["prsn_id"],
            "name": r["prsn_name"],
            "type": r["prsn_type"],
            "images": person_images(r["prsn_img"]),
            "subject_id": r["subject_id"],
            "subject_name": r["subject_name"],
            "subject_name_cn": r["subject_name_cn"],
        }
        for r in (await db.execute(query)).mappings().fetchall()
    ]
    return persons
|
import argparse
import copy
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from cogdl.data import Dataset
from cogdl.models.supervised_model import (
SupervisedHomogeneousNodeClassificationModel,
)
from cogdl.trainers.supervised_model_trainer import SupervisedHomogeneousNodeClassificationTrainer
from cogdl.utils.self_auxiliary_task import (
EdgeMask,
PairwiseDistance,
Distance2Clusters,
PairwiseAttrSim,
Distance2ClustersPP,
)
from . import register_trainer
@register_trainer("self_auxiliary_task")
class SelfAuxiliaryTaskTrainer(SupervisedHomogeneousNodeClassificationTrainer):
    """Node-classification trainer that adds a self-supervised auxiliary loss.

    The auxiliary task (edge masking, pairwise distance, clustering distance,
    attribute similarity, ...) is selected via --auxiliary-task and its loss
    is weighted by --alpha.
    """
    @staticmethod
    def add_args(parser: argparse.ArgumentParser):
        """Add trainer-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--auxiliary-task', default="none", type=str)
        parser.add_argument('--alpha', default=10, type=float)
        parser.add_argument('--label-mask', default=0, type=float)
        # fmt: on
    @classmethod
    def build_trainer_from_args(cls, args):
        """Construct the trainer from parsed CLI arguments."""
        return cls(args)
    def __init__(self, args):
        # "cpu" or the first configured GPU id
        self.device = args.device_id[0] if not args.cpu else "cpu"
        # NOTE(review): patience is stored but not used in this class's visible code.
        self.patience = args.patience
        self.max_epoch = args.max_epoch
        self.lr = args.lr
        self.weight_decay = args.weight_decay
        self.auxiliary_task = args.auxiliary_task
        self.hidden_size = args.hidden_size
        # weight of the auxiliary loss term in the total training loss
        self.alpha = args.alpha
        # fraction of training labels to move out of the train mask (see resplit_data)
        self.label_mask = args.label_mask
    def resplit_data(self, data):
        """Randomly move a `label_mask` fraction of training nodes into the test mask."""
        trained = torch.where(data.train_mask)[0]
        perm = np.random.permutation(trained.shape[0])
        preserve_nnz = int(len(perm) * (1 - self.label_mask))
        preserved = trained[perm[:preserve_nnz]]
        masked = trained[perm[preserve_nnz:]]
        data.train_mask = torch.full((data.train_mask.shape[0],), False, dtype=torch.bool)
        data.train_mask[preserved] = True
        data.test_mask[masked] = True
    def fit(self, model: SupervisedHomogeneousNodeClassificationModel, dataset: Dataset):
        """Train `model` with the configured auxiliary task; return a copy of the best model."""
        # self.resplit_data(dataset.data)
        self.data = dataset.data
        # NOTE(review): self.original_data aliases self.data (same object, not
        # a copy); the "restore" in _test_step is a no-op and correctness
        # relies on data.local_graph() in _train_step undoing its changes.
        self.original_data = dataset.data
        self.data.apply(lambda x: x.to(self.device))
        self.original_data.apply(lambda x: x.to(self.device))
        # instantiate the auxiliary-task agent chosen on the command line
        if self.auxiliary_task == "edgemask":
            self.agent = EdgeMask(self.data.edge_index, self.data.x, self.hidden_size, self.device)
        elif self.auxiliary_task == "pairwise-distance":
            self.agent = PairwiseDistance(self.data.edge_index, self.data.x, self.hidden_size, 3, self.device)
        elif self.auxiliary_task == "distance2clusters":
            self.agent = Distance2Clusters(self.data.edge_index, self.data.x, self.hidden_size, 30, self.device)
        elif self.auxiliary_task == "pairwise-attr-sim":
            self.agent = PairwiseAttrSim(self.data.edge_index, self.data.x, self.hidden_size, 5, self.device)
        elif self.auxiliary_task == "distance2clusters++":
            self.agent = Distance2ClustersPP(
                self.data.edge_index, self.data.x, self.data.y, self.hidden_size, 5, 1, self.device
            )
        else:
            raise Exception(
                "auxiliary task must be edgemask, pairwise-distance, distance2clusters, distance2clusters++ or pairwise-attr-sim"
            )
        self.model = model
        # jointly optimize the model and the agent's projection head
        self.optimizer = torch.optim.Adam(
            list(model.parameters()) + list(self.agent.linear.parameters()), lr=self.lr, weight_decay=self.weight_decay
        )
        self.model.to(self.device)
        epoch_iter = tqdm(range(self.max_epoch))
        best_score = 0
        best_loss = np.inf
        max_score = 0
        min_loss = np.inf
        for epoch in epoch_iter:
            # distance2clusters++ periodically refreshes its cluster assignments
            if self.auxiliary_task == "distance2clusters++" and epoch % 40 == 0:
                self.agent.update_cluster()
            self._train_step()
            train_acc, _ = self._test_step(split="train")
            val_acc, val_loss = self._test_step(split="val")
            epoch_iter.set_description(f"Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}")
            # keep a deep copy of the model at the best validation loss seen so far
            if val_loss <= min_loss or val_acc >= max_score:
                if val_loss <= best_loss:  # and val_acc >= best_score:
                    best_loss = val_loss
                    best_score = val_acc
                    best_model = copy.deepcopy(self.model)
                min_loss = np.min((min_loss, val_loss))
                max_score = np.max((max_score, val_acc))
        print(f"Valid accurracy = {best_score}")
        return best_model
    def _train_step(self):
        """One optimization step on classification loss + alpha * auxiliary loss."""
        # local_graph() scopes the agent's graph/feature modifications to this step
        with self.data.local_graph():
            self.data.edge_index, self.data.x = self.agent.transform_data()
            self.model.train()
            self.optimizer.zero_grad()
            embeddings = self.model.get_embeddings(self.data)
            loss = self.model.node_classification_loss(self.data) + self.alpha * self.agent.make_loss(embeddings)
            loss.backward()
            self.optimizer.step()
    def _test_step(self, split="val"):
        """Evaluate on the given split; return (accuracy, nll loss)."""
        self.data = self.original_data
        self.model.eval()
        if split == "train":
            mask = self.data.train_mask
        elif split == "val":
            mask = self.data.val_mask
        else:
            mask = self.data.test_mask
        logits = self.model.predict(self.data)
        loss = F.nll_loss(logits[mask], self.data.y[mask]).item()
        pred = logits[mask].max(1)[1]
        acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
        return acc, loss
|
# Time complexity: O(len(n)!)
# Approach: Sort the initial array and run the nextPermute algorithm till it is completely converted in non-increasing.
class Solution:
    def permute(self, nums: List[int]) -> List[List[int]]:
        """Return all distinct permutations of nums, in lexicographic order.

        Sorts the input first, then repeatedly applies the classic
        next-permutation step until the array becomes non-increasing.
        """
        nums = sorted(nums)
        size = len(nums)
        if size == 1:
            return [nums]
        result = [nums.copy()]
        while True:
            # locate the rightmost ascent: nums[pivot] < nums[pivot + 1]
            pivot = size - 2
            while pivot >= 0 and nums[pivot] >= nums[pivot + 1]:
                pivot -= 1
            if pivot < 0:
                # fully non-increasing: the last permutation was reached
                break
            # rightmost element strictly greater than the pivot value
            successor = size - 1
            while successor > pivot and nums[successor] <= nums[pivot]:
                successor -= 1
            nums[pivot], nums[successor] = nums[successor], nums[pivot]
            # reversing the suffix restores ascending order after the pivot
            nums[pivot + 1:] = nums[pivot + 1:][::-1]
            result.append(nums.copy())
        return result
import re
from dataclasses import dataclass
from datetime import timedelta
from typing import Optional
# Regex fragment for an integer-or-decimal hours component, e.g. "7", "1.5", "2,25".
# Module-level default only; parse_HHMMSS builds its own complete pattern locally.
HHHMMSS = r"(?P<hours>[0-9]+([,.][0-9]+)?)"
# Compiled once at import: ISO-8601 duration of the form [-+]P[nW][nD][T[nH][nM][nS]],
# where each number may use "." or "," as the decimal mark.
_ISO_DURATION = re.compile(
    r"^(?P<sign>[-+]?)P"
    r"(?:(?P<weeks>[0-9]+(?:[.,][0-9]+)?)W)?"
    r"(?:(?P<days>[0-9]+(?:[.,][0-9]+)?)D)?"
    r"(?:T"
    r"(?:(?P<hours>[0-9]+(?:[.,][0-9]+)?)H)?"
    r"(?:(?P<minutes>[0-9]+(?:[.,][0-9]+)?)M)?"
    r"(?:(?P<seconds>[0-9]+(?:[.,][0-9]+)?)S)?"
    r")?$"
)
def parse_isoformatDuration(durationString: str) -> timedelta:
    """Parse an ISO-8601 duration string into a datetime.timedelta.

    Implements the previously-stubbed function.  Supports the common
    PnW / PnDTnHnMnS forms with an optional leading sign and fractional
    components, e.g. "PT1H30M", "P1DT2H", "PT0.5S", "P2W", "-PT30S".
    Year/month designators are not supported (they have no fixed length).

    Raises:
        ValueError: if the string is not a recognized ISO-8601 duration.
    """
    match = _ISO_DURATION.match(durationString.strip())
    keys = ("weeks", "days", "hours", "minutes", "seconds")
    if not match or all(match.group(k) is None for k in keys):
        raise ValueError(f"{durationString!r} is not a valid ISO-8601 duration")
    parts = {
        key: float((match.group(key) or "0").replace(",", "."))
        for key in keys
    }
    delta = timedelta(
        weeks=parts["weeks"],
        days=parts["days"],
        hours=parts["hours"],
        minutes=parts["minutes"],
        seconds=parts["seconds"],
    )
    return -delta if match.group("sign") == "-" else delta
def parse_HHMMSS(
    duration_string: str, hm_separator: str = ":", ms_separator: str = ":"
) -> timedelta:
    """Parse a "H[:MM[:SS[.fff]]]" style duration string into a timedelta.

    The hours field may be fractional with "." or "," as the decimal mark
    (e.g. "1.5" -> 1h30m); minutes and seconds are optional two-digit fields
    introduced by the given separators.

    Raises:
        TypeError: if any argument is not a string.
        ValueError: if the string does not match the pattern.
    """
    check_is_string(duration_string, "duration_string")
    check_is_string(hm_separator, "hm_separator")
    check_is_string(ms_separator, "ms_separator")
    # NOTE(review): [0-6][0-9] also accepts 60-69 for minutes/seconds; kept
    # as-is so previously accepted inputs are not rejected.
    HHHMMSS = (
        r"^(?P<hours>[0-9]+([,.][0-9]+)?)("
        + hm_separator
        + r"(?P<minutes>[0-6][0-9])("
        + ms_separator
        + r"(?P<seconds>[0-6][0-9](\.[0-9]+)?))?)?$"
    )
    pattern = re.compile(HHHMMSS)
    result = pattern.match(duration_string)
    if not result:
        raise ValueError(
            f"{duration_string} does not match pattern HHHMMSS with hours-minutes separator {hm_separator} and minutes-seconds separator {ms_separator}"
        )
    # BUG FIX: the decimal mark was previously *stripped* from the hours
    # field, turning "1.5" hours into 15 hours; normalize "," to "." and
    # parse as a float instead so fractional hours are honored.
    hours = float((result.group("hours") or "0").replace(",", "."))
    minutes = int(result.group("minutes") or "0")
    seconds = float(result.group("seconds") or "0")
    return timedelta(hours=hours, minutes=minutes, seconds=seconds)
def HHHMMSS_to_seconds_int(
    duration_string: str, hm_separator: str = ":", ms_separator: str = ":"
) -> int:
    """Convert a HHHMMSS-style duration string to a whole number of seconds."""
    delta = parse_HHMMSS(
        duration_string=duration_string,
        hm_separator=hm_separator,
        ms_separator=ms_separator,
    )
    return int(delta.total_seconds())
# FIXME move to arg checking lib?
def check_is_string(value: str, arg_name: Optional[str] = None):
    """Raise TypeError unless value is a str; arg_name labels the error message."""
    if isinstance(value, str):
        return
    raise TypeError(
        f"With arg: {arg_name} expected a string, got {value} with type: {type(value)}"
    )
# def parse_HHdotMM_To_timedelta(durationString: str, separator: str = ".") -> timedelta:
# """
# parses a string in the format "34.23", assuming HH.MM
# """
# hours, minutes = durationString.split(separator)
# hours, minutes = map(int, (hours, minutes)) # type: ignore
# return timedelta(hours=hours, minutes=minutes) # type: ignore
# def parse_HHscMM_to_timedelta(duration_string: str):
# result = parse_HHdotMM_To_timedelta(duration_string, ":")
# return result
@dataclass
class TimeDeltaSplit:
days: int = 0
hours: int = 0
minutes: int = 0
seconds: int = 0
microseconds: int = 0
def timedelta_split(timeDelta: timedelta) -> TimeDeltaSplit:
    """Split a timedelta into days/hours/minutes/seconds/microseconds.

    NOTE(review): the day count is folded in via abs(), so negative
    timedeltas do not round-trip — this looks intended only for
    non-negative durations; confirm with callers.
    """
    total_seconds = abs(timeDelta.days) * 86400 + timeDelta.seconds
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return TimeDeltaSplit(
        days=days,
        hours=hours,
        minutes=minutes,
        seconds=seconds,
        microseconds=timeDelta.microseconds,
    )
def timeDelta_TO_HHMMSS(
    timeDelta: timedelta,
    hm_separator: str = ":",
    ms_separator: str = ":",
    timespec=None,
):
    """Format a timedelta as "H:MM:SS[.ffffff]" (hours are not zero-padded).

    With timespec == "M" the seconds field is omitted entirely ("H:MM").

    Fix: the seconds field is now zero-padded to two digits, matching the
    minutes field. Previously "1:02:5" was produced instead of "1:02:05",
    which this module's own parse_HHMMSS (pattern "[0-6][0-9]") could not
    re-parse.
    """
    timeSplit = timedelta_split(timeDelta)
    # Days are folded into the hour count, hence potentially > 24 hours.
    totalHours = (timeSplit.days * 24) + timeSplit.hours
    if timespec == "M":
        return f"{totalHours}{hm_separator}{timeSplit.minutes:02d}"
    if timeSplit.microseconds:
        decimalSecondsString = f".{timeSplit.microseconds:06d}"
    else:
        decimalSecondsString = ""
    return (
        f"{totalHours}{hm_separator}{timeSplit.minutes:02d}"
        f"{ms_separator}{timeSplit.seconds:02d}{decimalSecondsString}"
    )
def timedelta_To_isoformat(timeDelta: timedelta, strict=True) -> str:
    """Render a timedelta as an ISO-8601-style duration string (P..DT..H..M..S).

    Zero-valued fields are omitted, except that a seconds field is always
    emitted when the whole time part would otherwise be empty, so the result
    always has content after "T" (e.g. "PT0S", "P2DT0S").

    NOTE: *strict* is accepted but not implemented (it was meant to limit
    output fields to PddDThhHmmMss.sS).

    Cleanup: removed a stale commented-out copy of timedelta_split() and a
    no-op `timeSplit.seconds = 0` assignment (seconds is already 0 whenever
    it is falsy).
    """
    timeSplit = timedelta_split(timeDelta)
    daystext = hourstext = minutestext = secondstext = microtext = ""
    if timeSplit.days:
        daystext = f"{timeSplit.days}D"
    if timeSplit.hours:
        hourstext = f"{timeSplit.hours}H"
    if timeSplit.minutes:
        minutestext = f"{timeSplit.minutes}M"
    if timeSplit.microseconds:
        # Fractional seconds rendered as a fixed six-digit suffix.
        microtext = f".{timeSplit.microseconds:06d}"
    if timeSplit.seconds or timeSplit.microseconds:
        secondstext = f"{timeSplit.seconds}{microtext}S"
    if not (
        timeSplit.hours
        or timeSplit.minutes
        or timeSplit.seconds
        or timeSplit.microseconds
    ):
        # Guarantee a non-empty time part even for whole-day / zero durations.
        secondstext = f"{timeSplit.seconds}S"
    return f"P{daystext}T{hourstext}{minutestext}{secondstext}"
def duration_to_seconds_int(
    days: int = 0, hours: int = 0, minutes: int = 0, seconds: int = 0
) -> int:
    """Return the total number of seconds in the given day/hour/minute/second counts."""
    return ((days * 24 + hours) * 60 + minutes) * 60 + seconds
|
import asyncio
from aiohttp import web
from aiohttp_basicauth_middleware import basic_auth_middleware
def hello(request):
    """Trivial handler shared by both routes; always answers "Hello"."""
    response = web.Response(text='Hello')
    return response
def get_app(loop, auth_dict=None, strategy=lambda x: x):
    """Build the demo application: two GET /hello routes, basic auth on /admin."""
    credentials = {} if auth_dict is None else auth_dict
    app = web.Application(loop=loop)
    for path in ('/hello', '/admin/hello'):
        app.router.add_route('GET', path, hello)
    app.middlewares.append(
        basic_auth_middleware(('/admin',), credentials, strategy)
    )
    return app
if __name__ == '__main__':
    # Manual run: serve the demo app on aiohttp's default port (8080).
    loop = asyncio.get_event_loop()
    web.run_app(get_app(loop))
|
#!/usr/bin/env python
"""
TODO: Modify unittest doc.
"""
from __future__ import division
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "4/10/14"
import unittest2 as unittest
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.symmetry.groups import PointGroup, SpaceGroup
class PointGroupTest(unittest.TestCase):
    """Tests for PointGroup operation counts, orbits and sub/supergroup relations."""

    def test_order(self):
        # Fix: the loop unpacked `v` but then redundantly re-looked-up
        # order[k]; use the unpacked value directly.
        order = {"mmm": 8, "432": 24, "-6m2": 12}
        for symbol, expected_order in order.items():
            pg = PointGroup(symbol)
            self.assertEqual(expected_order, len(pg.symmetry_ops))

    def test_get_orbit(self):
        pg = PointGroup("mmm")
        # General position: full orbit of 8; special position on an axis: 2.
        self.assertEqual(len(pg.get_orbit([0.1, 0.1, 0.1])), 8)
        self.assertEqual(len(pg.get_orbit([0, 0, 0.1])), 2)
        self.assertEqual(len(pg.get_orbit([1.2, 1.2, 1])), 8)

    def test_is_sub_super_group(self):
        pgmmm = PointGroup("mmm")
        pgmm2 = PointGroup("mm2")
        pg222 = PointGroup("222")
        pg4 = PointGroup("4")
        self.assertTrue(pgmmm.is_supergroup(pgmm2))
        self.assertTrue(pgmm2.is_subgroup(pgmmm))
        self.assertTrue(pgmmm.is_supergroup(pg222))
        self.assertFalse(pgmmm.is_supergroup(pg4))
        pgm3m = PointGroup("m-3m")
        pg6mmm = PointGroup("6/mmm")
        pg3m = PointGroup("-3m")
        # TODO: Fix the test below.
        # self.assertTrue(pg3m.is_subgroup(pgm3m))
        self.assertTrue(pg3m.is_subgroup(pg6mmm))
        self.assertFalse(pgm3m.is_supergroup(pg6mmm))
class SpaceGroupTest(unittest.TestCase):
    """Tests for SpaceGroup symbols, attributes, orbits and lattice compatibility."""

    def test_abbrev_symbols(self):
        # Abbreviated Hermann-Mauguin symbols resolve to the right IT numbers.
        for symbol, int_number in (("P2/c", 13), ("R-3mH", 166)):
            self.assertEqual(SpaceGroup(symbol).int_number, int_number)

    def test_attr(self):
        sg = SpaceGroup("Fm-3m")
        self.assertEqual(sg.full_symbol, "F4/m-32/m")
        self.assertEqual(sg.patterson_symmetry, "Fm-3m")
        self.assertEqual(sg.point_group, "m-3m")

    def test_full_symbols(self):
        # A full symbol collapses to its short form.
        self.assertEqual(SpaceGroup("P2/m2/m2/m").symbol, "Pmmm")

    def test_order_symm_ops(self):
        # Every space group exposes exactly `order` symmetry operations.
        for name in SpaceGroup.SG_SYMBOLS:
            sg = SpaceGroup(name)
            self.assertEqual(len(sg.symmetry_ops), sg.order)

    def test_crystal_system(self):
        # Both settings of R-3c are trigonal.
        for symbol in ("R-3c", "R-3cH"):
            self.assertEqual(SpaceGroup(symbol).crystal_system, "trigonal")

    def test_get_orbit(self):
        sg = SpaceGroup("Fm-3m")
        point = np.random.randint(0, 100 + 1, size=(3,)) / 100
        # An orbit can never exceed the group order.
        self.assertLessEqual(len(sg.get_orbit(point)), sg.order)

    def test_is_compatible(self):
        cubic = Lattice.cubic(1)
        hexagonal = Lattice.hexagonal(1, 2)
        rhom = Lattice.rhombohedral(3, 80)
        tet = Lattice.tetragonal(1, 2)
        ortho = Lattice.orthorhombic(1, 2, 3)
        # (space group symbol, compatible lattices, incompatible lattices)
        cases = (
            ("Fm-3m", (cubic,), (hexagonal,)),
            ("R-3mH", (hexagonal,), (cubic,)),
            ("R-3m", (cubic, rhom), (hexagonal,)),
            ("Pnma", (cubic, tet, ortho), (rhom, hexagonal)),
            ("P12/c1", (cubic, tet, ortho), (rhom, hexagonal)),
            ("P-1", (cubic, tet, ortho, rhom, hexagonal), ()),
        )
        for symbol, compatible, incompatible in cases:
            sg = SpaceGroup(symbol)
            for lattice in compatible:
                self.assertTrue(sg.is_compatible(lattice))
            for lattice in incompatible:
                self.assertFalse(sg.is_compatible(lattice))

    def test_subgroup_supergroup(self):
        self.assertTrue(SpaceGroup('Pma2').is_subgroup(SpaceGroup('Pccm')))
        self.assertFalse(SpaceGroup.from_int_number(229).is_subgroup(
            SpaceGroup.from_int_number(230)))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
# NOTE(review): autogenerated IronPython stub for the .NET
# System.Windows.IContentHost interface — bodies are intentionally empty;
# the real implementation lives in the CLR. Do not add logic here.
class IContentHost:
    """ This interface is implemented by layouts which host System.Windows.ContentElement. """
    def GetRectangles(self,child):
        """
        GetRectangles(self: IContentHost,child: ContentElement) -> ReadOnlyCollection[Rect]

        Returns a collection of bounding rectangles for a child element.

        child: The child element that the bounding rectangles are returned for.
        Returns: A collection of bounding rectangles for a child element.
        """
        pass
    def InputHitTest(self,point):
        """
        InputHitTest(self: IContentHost,point: Point) -> IInputElement

        Performs hit-testing for child elements.

        point: Mouse coordinates relative to the ContentHost.
        Returns: A descendant of System.Windows.IInputElement,or NULL if no such element exists.
        """
        pass
    def OnChildDesiredSizeChanged(self,child):
        """
        OnChildDesiredSizeChanged(self: IContentHost,child: UIElement)

        Called when a System.Windows.UIElement-derived class which is hosted by a
        System.Windows.IContentHost changes its System.Windows.UIElement.DesiredSize.

        child: Child element whose System.Windows.UIElement.DesiredSize has changed
        """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    # Read-only in practice: the three lambdas are get/set/delete placeholders.
    HostedElements=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets an enumeration containing all descendant System.Windows.ContentElement-derived classes,as well as all System.Windows.UIElement-derived classes that are a direct descendant of the System.Windows.IContentHost or one of its descendant System.Windows.ContentElement classes.

    Get: HostedElements(self: IContentHost) -> IEnumerator[IInputElement]
    """
|
import logging

# Module-level logger: DEBUG output to stderr with a timestamped format,
# shared by all handler classes below.
logger = logging.getLogger('Handler')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class Handler:
    """Base class for dialog action handlers.

    Subclasses override does_supports() to claim an action name and
    find_response() to produce a list of response command names.
    """

    def does_supports(self, action_name, intent, ctx):
        """Return True when this handler can handle *action_name*."""
        return False

    def find_response(self, action_name, intent, ctx):
        """Default response when no specific handler logic applies.

        Bug fix: `self` was missing from the signature, so calling this
        default implementation on an instance raised TypeError (four
        arguments passed to a three-parameter function).
        """
        logger.debug(["Handler", ctx])
        return ["cmd_not_clear"]
class NameUnderstoodHandler(Handler):
    """Handles "action_check_name": verifies a person_name entity was extracted."""

    def does_supports(self, action_name, intent, ctx):
        return action_name == "action_check_name"

    def find_response(self, action_name, intent, ctx):
        logger.debug("[NameUnderstoodHandler]Check if person_name exists in : {}".format(intent.entities))
        if(not "person_name" in intent.entities):
            return ["cmd_not_clear"]
        # NOTE(review): falls through to an implicit None when the name IS
        # present — looks like "no extra response needed", but confirm the
        # caller handles None rather than expecting a list.
class DontTravelHandler(Handler):
    """Handles "action_dont_travel": reacts based on the previous intent."""

    def does_supports(self, action_name, intent, ctx):
        return action_name == "action_dont_travel"

    def find_response(self, action_name, intent, ctx):
        last_intent = ctx.previous_intents[-1].key
        logger.debug("[DontTravelHandler]previous intents : {}".format(last_intent))
        # A refusal or uncertainty ends the conversation; otherwise encourage.
        if last_intent in ("deny", "dont_know"):
            return ["utter_please_comeback", "utter_bye"]
        return ["utter_encourage"]
class TranportationOptionsHandler(Handler):
    """Handles "action_transportation_options"; intentionally produces no utterances."""

    def does_supports(self, action_name, intent, ctx):
        return "action_transportation_options" == action_name

    def find_response(self, action_name, intent, ctx):
        # Nothing to say for this action.
        return []
class IdentifyFirstOstacleHandler(Handler):
    """Handles "action_identify_first_obstacle": maps the obstacle entity to utterances."""

    def does_supports(self, action_name, intent, ctx):
        return action_name == "action_identify_first_obstacle"

    def find_response(self, action_name, intent, ctx):
        if "first_obstacle" not in intent.entities:
            return ["cmd_not_clear"]
        obstacle = intent.entities["first_obstacle"]
        if obstacle == "bala":
            return ["utter_no_swamp_in_map", "cmd_utterance_reverted"]
        # Ocean and sea get the same "too big" reply.
        if obstacle in ("vandenynas", "jลซra"):
            return ["utter_see_too_big", "cmd_utterance_reverted"]
        return []
class SolveSecondChallangeHandler(Handler):
    """Handles "action_identify_second_chalange": reacts to the chosen vessel."""

    def does_supports(self, action_name, intent, ctx):
        return action_name == "action_identify_second_chalange"

    def find_response(self, action_name, intent, ctx):
        if "second_chalange" not in intent.entities:
            return ["cmd_not_clear"]
        choice = intent.entities["second_chalange"]
        if choice == "burlaiviu":
            # Correct choice: restart the dialog.
            return ["cmd_restart_dialog"]
        # Raft and paddle board share the same "too slow" reply.
        if choice in ("plaustu", "irklente"):
            return ["utter_manpower_boat_too_slow", "cmd_utterance_reverted"]
        if choice == "motoru":
            return ["utter_motor_boat_requires_maintainances", "cmd_utterance_reverted"]
        return []
|
from .knowledge import *
class HebbianKnowledge(Knowledge):
    """Hebbian-learning variant of Knowledge; storage is not implemented yet."""
    def __init__(self):
        # Intentionally stateless for now.
        pass
    def store_all_knowledge(self):
        # No-op placeholder; see Knowledge for the expected contract.
        pass
|
"""Function to work on a population in dynamic mode."""
import sys
from redis import StrictRedis
import cloudpickle as pickle
from time import sleep, time
import logging
from ..util import any_particle_preliminary
from .cmd import (
N_EVAL, N_ACC, N_REQ, N_FAIL, ALL_ACCEPTED, N_WORKER, N_LOOKAHEAD_EVAL,
SSA, QUEUE, BATCH_SIZE, IS_LOOK_AHEAD, ANALYSIS_ID, MAX_N_EVAL_LOOK_AHEAD,
SLEEP_TIME, DONE_IXS, idfy)
from .cli import KillHandler
logger = logging.getLogger("ABC.Sampler")
def work_on_population_dynamic(
        analysis_id: str,
        t: int,
        redis: StrictRedis,
        catch: bool,
        start_time: float,
        max_runtime_s: float,
        kill_handler: KillHandler):
    """Work on population in dynamic mode.
    Here the actual sampling happens.

    Parameters
    ----------
    analysis_id: Id of the current analysis; used to detect stale/replaced runs.
    t: Index of the generation (population) to work on.
    redis: Connection to the redis server coordinating all workers.
    catch: If True, simulation errors are logged and counted but not re-raised.
    start_time: Wall-clock time (s) at which this worker started.
    max_runtime_s: Maximum allowed worker runtime in seconds.
    kill_handler: Allows graceful termination at safe points only.
    """
    # short-form
    ana_id = analysis_id

    def get_int(var: str):
        """Convenience function to read an int variable."""
        return int(redis.get(idfy(var, ana_id, t)).decode())

    # set timers
    population_start_time = time()
    cumulative_simulation_time = 0

    # read from pipeline
    pipeline = redis.pipeline()
    # extract bytes
    (ssa_b, batch_size_b, all_accepted_b, is_look_ahead_b,
     max_eval_look_ahead_b) = (
        pipeline.get(idfy(SSA, ana_id, t))
        .get(idfy(BATCH_SIZE, ana_id, t))
        .get(idfy(ALL_ACCEPTED, ana_id, t))
        .get(idfy(IS_LOOK_AHEAD, ana_id, t))
        .get(idfy(MAX_N_EVAL_LOOK_AHEAD, ana_id, t)).execute())

    # if the ssa object does not exist, something went wrong, return
    if ssa_b is None:
        return

    # notify sign up as worker
    n_worker = redis.incr(idfy(N_WORKER, ana_id, t))
    logger.info(
        f"Begin generation {t}, I am worker {n_worker}")

    # only allow stopping the worker at particular points
    kill_handler.exit = False

    # convert from bytes
    simulate_one, sample_factory = pickle.loads(ssa_b)
    batch_size = int(batch_size_b.decode())
    all_accepted = bool(int(all_accepted_b.decode()))
    is_look_ahead = bool(int(is_look_ahead_b.decode()))
    max_n_eval_look_ahead = float(max_eval_look_ahead_b.decode())

    # counter for number of simulations
    internal_counter = 0

    # create empty sample
    sample = sample_factory(is_look_ahead=is_look_ahead)

    # loop until no more particles required
    # all numbers are re-loaded in each iteration as they can dynamically
    #  update
    while get_int(N_ACC) < get_int(N_REQ) and (
            not all_accepted or
            get_int(N_EVAL) - get_int(N_FAIL) < get_int(N_REQ)):
        # check whether the process was externally asked to stop
        if kill_handler.killed:
            logger.info(
                f"Worker {n_worker} received stop signal. "
                "Terminating in the middle of a population "
                f"after {internal_counter} samples.")
            # notify quit
            redis.decr(idfy(N_WORKER, ana_id, t))
            sys.exit(0)

        # check whether time's up
        current_runtime = time() - start_time
        if current_runtime > max_runtime_s:
            logger.info(
                f"Worker {n_worker} stops during population because "
                f"runtime {current_runtime} exceeds "
                f"max runtime {max_runtime_s}")
            # notify quit
            redis.decr(idfy(N_WORKER, ana_id, t))
            # return to task queue
            return

        # check whether the analysis was terminated or replaced by a new one
        ana_id_new_b = redis.get(ANALYSIS_ID)
        if ana_id_new_b is None or str(ana_id_new_b.decode()) != ana_id:
            logger.info(
                f"Worker {n_worker} stops during population because "
                "the analysis seems to have been stopped.")
            # notify quit
            redis.decr(idfy(N_WORKER, ana_id, t))
            # return to task queue
            return

        # check if the analysis left the look-ahead mode
        if is_look_ahead and not bool(int(
                redis.get(idfy(IS_LOOK_AHEAD, ana_id, t)).decode())):
            # reload SSA object
            ssa_b = redis.get(idfy(SSA, ana_id, t))
            simulate_one, sample_factory = pickle.loads(ssa_b)
            # cache
            is_look_ahead = False
            # create new empty sample for clean split
            sample = sample_factory(is_look_ahead=is_look_ahead)

        # check if in look-ahead mode and should sleep
        if is_look_ahead and get_int(N_EVAL) >= max_n_eval_look_ahead:
            # sleep ... seconds
            sleep(SLEEP_TIME)
            continue

        # increase global evaluation counter (before simulation!)
        particle_max_id: int = redis.incr(
            idfy(N_EVAL, ana_id, t), batch_size)

        if is_look_ahead:
            # increment look-ahead evaluation counter
            redis.incr(idfy(N_LOOKAHEAD_EVAL, ana_id, t), batch_size)

        # timer for current simulation until batch_size acceptances
        this_sim_start = time()

        # collect accepted particles
        accepted_samples = []
        # whether any particle in this iteration is preliminary
        any_prel = False

        # make batch_size attempts
        for n_batched in range(batch_size):
            # increase evaluation counter
            internal_counter += 1
            try:
                # simulate
                new_sim = simulate_one()
            except Exception as e:
                logger.warning(f"Redis worker number {n_worker} failed. "
                               f"Error message is: {e}")
                # increment the failure counter
                redis.incr(idfy(N_FAIL, ana_id, t), 1)
                if not catch:
                    raise e
                continue

            # append to current sample
            sample.append(new_sim)
            # check for acceptance
            if new_sim.accepted:
                # The order of the IDs is reversed, but this does not
                # matter. Important is only that the IDs are specified
                # before the simulation starts

                # append to accepted list
                accepted_samples.append(
                    pickle.dumps((particle_max_id - n_batched, sample)))
                any_prel = any_prel or any_particle_preliminary(sample)
                # initialize new sample
                sample = sample_factory(is_look_ahead=is_look_ahead)

        # update total simulation-specific time
        cumulative_simulation_time += time() - this_sim_start

        # new pipeline
        pipeline = redis.pipeline()
        # push to pipeline if at least one sample got accepted
        if len(accepted_samples) > 0:
            # update particles counter if nothing is preliminary,
            #  otherwise final acceptance is done by the sampler
            if not any_prel:
                pipeline.incr(idfy(N_ACC, ana_id, t), len(accepted_samples))
            # note: samples are appended 1-by-1
            pipeline.rpush(idfy(QUEUE, ana_id, t), *accepted_samples)
        # append to list of done simulations
        pipeline.rpush(
            idfy(DONE_IXS, ana_id, t),
            *range(particle_max_id - batch_size + 1, particle_max_id + 1),
        )
        # execute all commands
        pipeline.execute()

    # end of sampling loop

    # notify quit
    redis.decr(idfy(N_WORKER, ana_id, t))
    kill_handler.exit = True
    population_total_time = time() - population_start_time
    logger.info(
        f"Finished generation {t}, did {internal_counter} samples. "
        f"Simulation time: {cumulative_simulation_time:.2f}s, "
        f"total time {population_total_time:.2f}.")
|
# Placeholder Twitter API credentials — replace with real values before use.
# NOTE: never commit real secrets; load them from the environment instead.
consumer_key = 'YOUR CONSUMER KEY'
consumer_secret = 'YOUR CONSUMER SECRET'
access_token = 'YOUR ACCESS TOKEN'
access_token_secret = 'YOUR ACCESS TOKEN SECRET'
|
#################################
# --- Day 23: Safe Cracking --- #
#################################
import AOCUtils
from math import factorial
class VM:
    """Assembunny interpreter (AoC 2016) supporting cpy/inc/dec/jnz/tgl.

    A fully pre-toggled copy of the program is built up front; the
    `toggled` set records which instruction indices currently use the
    toggled form (toggling twice restores the original).
    """

    def __init__(self, program):
        self.program = program[:]
        self.pc = 0
        self.registers = {"a": 0, "b": 0, "c": 0, "d": 0}
        self.toggled = set()
        # Precompute the toggled variant of every instruction:
        #   one-argument:  inc <-> dec (anything else becomes inc)
        #   two-argument:  jnz <-> cpy (anything else becomes jnz)
        toggled_cmds = []
        for line in program:
            parts = line.split()
            if len(parts) == 2:
                parts[0] = "dec" if parts[0] == "inc" else "inc"
            else:
                parts[0] = "cpy" if parts[0] == "jnz" else "jnz"
            toggled_cmds.append(" ".join(parts))
        self.toggledProgram = toggled_cmds

    def run(self):
        """Execute until the program counter falls off the end of the program."""
        regs = self.registers
        while self.pc < len(self.program):
            source = self.toggledProgram if self.pc in self.toggled else self.program
            cmd = source[self.pc].split()
            inst, x = cmd[0], cmd[1]
            xVal = regs[x] if x.isalpha() else int(x)
            if len(cmd) > 2:
                y = cmd[2]
                yVal = regs[y] if y.isalpha() else int(y)
            if inst == "cpy":
                # Invalid targets (literals) are skipped.
                if y.isalpha():
                    regs[y] = xVal
            elif inst == "inc":
                if x.isalpha():
                    regs[x] += 1
            elif inst == "dec":
                if x.isalpha():
                    regs[x] -= 1
            elif inst == "jnz":
                if xVal != 0:
                    self.pc += yVal - 1
            elif inst == "tgl":
                target = self.pc + xVal
                # Out-of-range toggles do nothing.
                if 0 <= target < len(self.program):
                    if target in self.toggled:
                        self.toggled.remove(target)
                    else:
                        self.toggled.add(target)
            self.pc += 1
#################################

# Instead of simulating the input program (commented out below, very slow
# for part 2), use the closed form a = n! + X*Y, where X and Y are the
# literal operands of instructions 19 and 20 of the input.
program = AOCUtils.loadInput(23)

# vm = VM(program)
# vm.registers["a"] = 7
# vm.run()
# print("Part 1: {}".format(vm.registers["a"]))

# vm = VM(program)
# vm.registers["a"] = 12
# vm.run()
# print("Part 2: {}".format(vm.registers["a"]))

X = int(program[19].split()[1])
Y = int(program[20].split()[1])

a = factorial(7) + X * Y
print("Part 1: {}".format(a))

a = factorial(12) + X * Y
print("Part 2: {}".format(a))

AOCUtils.printTimeTaken()
'''
0 | cpy a b | jnz a b | b = 12 | |
1 | dec b | inc b | b -= 1 | b -= 1 |
2 | cpy a d | jnz a d | d = b <<<<<<<<<<<<<<<<<<< | d = b | d = 11*12, 10*11*12, ...
3 | cpy 0 a | jnz 0 a | a = 0 ^ | a = 0 |
4 | cpy b c | jnz b c | c = b <<<<<<<<<<<<<<< ^ | c = b | c = 11, 10, 9, ...
| | | ^ ^ | |
5 | inc a | dec a | a += 1 <<<<<<<<<<< ^ ^ | a += c * d | 11*12, 10*11*12, 9*10*11*12, ...
6 | dec c | inc c | c -= 1 ^ ^ ^ | |
7 | jnz c -2 | cpy c -2 | while c != 0: >>>> ^ ^ | |
| | | ^ ^ | |
8 | dec d | inc d | d -= 1 ^ ^ | |
9 | jnz d -5 | cpy d -5 | while d != 0: >>>>>>> ^ | |
| | | ^ | |
10 | dec b | inc b | b -= 1 ^ | b -= 1 | b = 10, 9, 8, ...
11 | cpy b c | jnz b c | c = b ^ | |
12 | cpy c d | jnz c d | d = b ^ | |
| | | ^ | |
13 | dec d | inc d | d -= 1 <<<<<<<<<<< ^ | c = 2*b | c = 20, 18, 16, ...
14 | inc c | dec c | c += 1 ^ ^ | |
15 | jnz d -2 | cpy d -2 | while d != 0: >>>> ^ | |
| | | ^ | |
16 | tgl c | inc c | tgl c ^ | tgld | c += 1 | toggles all inst below on even offsets
17 | cpy -16 c | jnz -16 c | c = -16 ^ | c = -16 |
| | | ^ | |
18 | jnz 1 c | cpy 1 c | >>>>.. / c = 1 ..>>>>>>> | tgld |
| | | |
| | | | After calculating a!, c becomes 0 and inst 16
| | | | toggles itself, thus exiting the loop and
| | | | running the (modified/toggled) code below
| | | |
19 | cpy 75 c | jnz 75 c | c = 75 | c = 75 | c = 75
| | | | |
20 | jnz 97 d | cpy 97 d | >>...? / d = 97 <<<<< | tgld | d = 97
| | | ^ | |
21 | inc a | dec a | a += 1 <<<<<<<<<<< ^ | | a += c * d
22 | inc d | dec d | d += 1 / d -= 1 ^ ^ | tgld |
23 | jnz d -2 | cpy d -2 | while d != 0: >>>> ^ | |
| | | ^ | |
24 | inc c | dec c | c += 1 / c -= 1 ^ | tgld |
25 | jnz c -5 | cpy c -5 | while c != 0: >>>>>>> | |
''' |
'''
Created on Sep 9, 2011
@author: Ankhazam & Piotr & OpenCV team
'''
from algorithms.AlgorithmBase import AlgorithmBase
import classification.SURFFlannMatcher as SFM
import classification.TrainedObject as TO
import image.ImageDescriptionReader as IDR
import image.ImageDescriptionWriter as IDW
import common.Utils as TU
import cv2
import os
import shutil
class SURFFlannMatchingAlgorithm(AlgorithmBase):
    '''
    Simple algorithm used for matching orientations using Flann matching method.

    NOTE(review): this is Python 2 code (print statements); it will not run
    under Python 3 without porting.
    '''

    def __init__(self, threshold=400):
        '''
        Constructor

        @param threshold: SURF Hessian threshold used both for training and testing.
        '''
        self.threshold = threshold

    def __train(self, learningPath):
        '''
        Trains the system with new object data
        @param learningPath: Has to be root of the following structure
        @param threshold: SURF Hessian threshold used for training
        learningPath
        |_ObjectA
        | |_1.imd, 1.ren
        | |_...
        |_ObjectB
        | |_...
        |_ObjectC
        |_...
        @return: List of @see: TrainedObject
        '''
        trainedObjects = list()  # list of trained objects
        trainingUtils = TU.Utils(self.threshold)
        for (root, dirs, files) in os.walk(learningPath):
            # Leaf directories are object folders; each file pair is one orientation.
            if len(dirs) == 0: # we're in an object folder
                # ObjectFilename
                objName = os.path.basename(root)
                print "root: ", objName
                # currently trained object
                trainedObject = TO.TrainedObject(objName, self.threshold)
                # real training
                for file1 in files: # we won't implement natural human sorting
                    # do not use .* and *.imd files
                    if file1.startswith('.') or file1.endswith(".imd"):
                        continue
                    # fetching ImageDescription
                    imDescPath = os.path.join(root, file1[:-4]) + ".imd"
                    print "imd: ", imDescPath
                    with open(imDescPath, 'r') as imDF:
                        # read this file using reader
                        reader = IDR.ImageDescriptionReader()
                        imageDesc = reader.read(imDF)
                    # fetching relevant SURF features
                    imagePath = os.path.join(root, file1)
                    image = cv2.imread(imagePath)
                    (keypoints, descriptors) = trainingUtils.findSURF(image, self.threshold)
                    # adding orientation to trainedObject
                    trainedObject.addOrientation(self.threshold, (imageDesc, keypoints, descriptors, imagePath))
                # once trained all orientations we can add the object to the DBase
                trainedObjects.append(trainedObject)
        return trainedObjects

    def learn(self, inputFolder):
        # Trains on inputFolder and caches the result for test().
        self.trainedObjects = self.__train(inputFolder)

    def test(self, inputFolder, outputFolder):
        # Matches every image in inputFolder against the trained objects,
        # writing keypoint images and computed .imd descriptions to outputFolder.
        cvUtilities = TU.Utils(self.threshold)
        imageDescWriter = IDW.ImageDescriptionWriter()
        for file1 in os.listdir(inputFolder):
            # do not use .* files
            if file1.startswith("."):
                continue
            # save output (the name of the object without .bmp / .jpg etc)
            fileName = os.path.basename(file1)
            fileName = os.path.splitext(fileName)[0]
            imgOutPath = os.path.join(outputFolder, fileName)
            if not os.path.exists(imgOutPath):
                os.mkdir(imgOutPath)
            # with image files do ...
            if not file1.endswith(".imd"):
                # flags are set to 0 = meaning grey scale
                testImage = cv2.imread(os.path.join(inputFolder, file1), flags=0)
                utils = TU.Utils(self.threshold)
                (kp, desc) = utils.findSURF(testImage, self.threshold)
                print "Loaded test image : '%s'" % file1
                kpImage = cv2.imread(os.path.join(inputFolder, file1))
                utils.drawKeypoints(kpImage, kp, color=(255, 255, 0))
                cv2.imwrite(os.path.join(outputFolder, file1), kpImage)
                matcher = SFM.SURFFlannMatcher(self.trainedObjects, self.threshold)
                match = matcher.matchObject(testImage)
                print "Finished processing file '%s'" % file1
                for obj in match:
                    print "Object Name: ", obj[0].name
                    print "OrientationName: ", obj[0].orientations[obj[1]][0].name
                    with open(os.path.join(imgOutPath, "computed") + ".imd", 'w') as fileStream:
                        imageDescWriter.write(fileStream, obj[0].orientations[obj[1]][0])
                    matchedPath = obj[0].orientations[obj[1]][3]
                    #show the match
                    matchedImage = cv2.imread(matchedPath, cv2.IMREAD_GRAYSCALE)
                    vis = utils.draw_match(matchedImage, testImage, obj[4][0], obj[4][1], obj[2], obj[3])
                    # show image
                    cv2.imshow("match!", vis)
                    cv2.waitKey()
            # with .imd files to this
            else :
                src = os.path.join(inputFolder, file1)
                dst = os.path.join(imgOutPath, "expected.imd")
                print "Coping the file '%s' into '%s'" % (src, dst)
                shutil.copyfile(src, dst)
|
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
import warnings
class NetworkAddresses(APIClassTemplate):
    """
    The NetworkAddresses Object in the FMC.

    This endpoint is effectively read-only: the FMC API does not support
    POST, PUT, or DELETE for it, so those methods only log and return.
    """

    URL_SUFFIX = "/object/networkaddresses"

    def __init__(self, fmc, **kwargs):
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for NetworkAddresses class.")
        self.parse_kwargs(**kwargs)

    def post(self):
        """POST is not supported for this object type."""
        logging.info("POST method for API for NetworkAddresses not supported.")

    def put(self):
        """PUT is not supported for this object type."""
        logging.info("PUT method for API for NetworkAddresses not supported.")

    def delete(self):
        """DELETE is not supported for this object type."""
        logging.info("DELETE method for API for NetworkAddresses not supported.")
class IPAddresses(NetworkAddresses):
    """Dispose of this Class after 20210101."""

    def __init__(self, fmc, **kwargs):
        # Deprecated alias kept for backward compatibility; emits a warning
        # and otherwise behaves exactly like NetworkAddresses.
        warnings.resetwarnings()
        warnings.warn(
            "Deprecated: IPAddresses() should be called via NetworkAddresses()."
        )
        super().__init__(fmc, **kwargs)
|
from dynet import *

# Toy single-sentence POS-tagging network used to demonstrate print_graphviz.
train_sentence = [('the','D'), ('dog','N'), ('walks','V')]
# Word/tag <-> index vocabularies built from the single training sentence.
i2w = dict(enumerate(set(w for w,t in train_sentence)))
w2i = dict((w,i) for i,w in i2w.items())
t2i = dict((t,i) for i,t in enumerate(set(t for w,t in train_sentence)))
num_words = len(w2i)
num_tags = len(t2i)

model = Model()
sgd = SimpleSGDTrainer(model)

WEMB_DIM = 128
RNN_HIDDEN_DIM = 64
HIDDEN_DIM = 32

# Parameters: word embeddings, hidden layer (H, Hb) and output layer (O, Ob).
pWembs = model.add_lookup_parameters((num_words, WEMB_DIM))
pH = model.add_parameters((HIDDEN_DIM, RNN_HIDDEN_DIM))
pHb = model.add_parameters(HIDDEN_DIM)
pO = model.add_parameters((num_tags, HIDDEN_DIM))
pOb = model.add_parameters(num_tags)
rnn_builder = BiRNNBuilder(1, WEMB_DIM, RNN_HIDDEN_DIM, model, LSTMBuilder)

renew_cg()
H = parameter(pH)
Hb = parameter(pHb)
O = parameter(pO)
Ob = parameter(pOb)

indexed_words, indexed_gold_tags = zip(*[(w2i[w], t2i[t]) for w,t in train_sentence])
wembs = [pWembs[wi] for wi in indexed_words]
# Gaussian noise on embeddings acts as a simple regularizer.
noised_wembs = [noise(we, 0.1) for we in wembs]
rnn_outputs = rnn_builder.transduce(noised_wembs)

# Per-token negative log-likelihood, summed over the sentence.
errs = []
for rnn_output, gold_tag in zip(rnn_outputs, indexed_gold_tags):
    hidden = tanh(affine_transform([Hb, H, rnn_output]))
    model_tag = affine_transform([Ob, O, hidden])
    err = pickneglogsoftmax(model_tag, gold_tag)
    errs.append(err)
sum_errs = esum(errs)

print_graphviz(compact=False,
               show_dims=True,
               expression_names={ pWembs: "word_emb",
                                  H: "H", Hb: "Hb",
                                  O: "O", Ob: "Ob",
                                  wembs[0]: "first word"},
               lookup_names={"word_emb": i2w},
               collapse_birnns=True)

# Alternatively:
# expression_names = dict((v,k) for (k,v) in dict(globals().items()+locals().items()).iteritems() if isinstance(v,Expression))
|
#encoding=utf-8
from whoosh.analysis import RegexAnalyzer,LowercaseFilter,StopFilter,StemFilter
from whoosh.analysis import Tokenizer,Token
from whoosh.lang.porter import stem
import jieba
import re
# English stop words plus a few common Chinese function words
# (NOTE(review): the last four entries look mojibake-damaged — confirm encoding).
STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
                        'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
                        'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
                        'to', 'us', 'we', 'when', 'will', 'with', 'yet',
                        'you', 'your',u'็',u'ไบ',u'ๅ',u'ไปไน'))
# Matches runs of CJK unified ideographs (Python 2 ur"" literal).
accepted_chars = re.compile(ur"[\u4E00-\u9FA5]+")
class ChineseTokenizer(Tokenizer):
    """Whoosh tokenizer backed by jieba's search-mode segmentation."""

    def __call__(self, text, **kargs):
        token = Token()
        for word, start, stop in jieba.tokenize(text, mode="search"):
            # Keep pure-CJK words; keep non-CJK ones only if longer than one char.
            if not accepted_chars.match(word) and len(word) <= 1:
                continue
            token.original = token.text = word
            token.pos = start
            token.startchar = start
            token.endchar = stop
            yield token
def ChineseAnalyzer(stoplist=STOP_WORDS, minsize=1, stemfn=stem, cachesize=50000):
    """Build the Chinese analysis chain: tokenize, lowercase, stop-filter, stem."""
    chain = ChineseTokenizer() | LowercaseFilter()
    chain = chain | StopFilter(stoplist=stoplist, minsize=minsize)
    return chain | StemFilter(stemfn=stemfn, ignore=None, cachesize=cachesize)
|
from Microsoft.Scripting.Silverlight import DynamicApplication
# Shared IronRuby engine and scope, reused by every execute() call so that
# definitions persist between snippets.
engine = DynamicApplication.Current.Runtime.GetEngine('IronRuby')
scope = engine.CreateScope()
def execute(str, type = 'file'):
    """Run *str* as IronRuby code in the shared module scope.

    *type* is accepted for API compatibility but is not used. The previous
    `global` declaration was redundant (both names are only read).
    """
    return engine.Execute(str, scope)
|
#!/usr/bin/env python3
"""
****************************************************************************
Copyright (C) 2018 Datirium. LLC.
All rights reserved.
Contact: Datirium, LLC (datirium@datirium.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************"""
import os
import logging
from json import dumps
_logger = logging.getLogger(__name__)
def available(workflow=None):
    """Collect the *.cwl workflow files shipped in ./workflows.

    With *workflow* given, return its path (raising if unknown); otherwise
    return the whole {filename: absolute path} mapping.
    """
    workflows_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/workflows")
    all_workflows = {}
    for root, dirs, files in os.walk(workflows_folder):
        for filename in files:
            if os.path.splitext(filename)[1] == '.cwl':
                all_workflows[filename] = os.path.join(root, filename)
    _logger.debug("all_workflows: {0}".format(dumps(all_workflows, indent=4)))
    if workflow and workflow not in all_workflows:
        raise Exception("Can't find workflow %s" % workflow)
    return all_workflows[workflow] if workflow else all_workflows
from .workflows_create import create_biowardrobe_workflow as workflow
|
"""The official genshin map
Gets data from the official genshin map such as categories, points and similar.
"""
import json
from typing import Any, Dict, List, Optional
from urllib.parse import urljoin

from .caching import permanent_cache
from .genshinstats import fetch_endpoint
# Base url of the overseas (os) static map API; all endpoints are joined onto it.
OS_MAP_URL = "https://api-os-takumi-static.hoyoverse.com/common/map_user/ys_obc/v1/map/"

# Public API of this module.
__all__ = [
    "fetch_map_endpoint",
    "get_map_image",
    "get_map_icons",
    "get_map_labels",
    "get_map_locations",
    "get_map_points",
    "get_map_tile",
]
def fetch_map_endpoint(endpoint: str, **kwargs) -> Dict[str, Any]:
    """Fetch an endpoint from mihoyo's webstatic map api.

    Only liyue (map_id 2) is currently supported. The endpoint url is joined
    with the base url, a request is sent, and the parsed response returned.
    """
    params = kwargs.setdefault("params", {})
    params.update({"map_id": 2, "app_sn": "ys_obc", "lang": "en-us"})
    return fetch_endpoint(urljoin(OS_MAP_URL, endpoint), cookie={}, **kwargs)
@permanent_cache()
def get_map_image() -> str:
    """Get the url to the entire map image"""
    detail = fetch_map_endpoint("info")["info"]["detail"]
    slices = json.loads(detail)["slices"]
    return slices[0][0]["url"]
@permanent_cache()
def get_map_icons() -> Dict[int, str]:
    """Get all icons for the map"""
    icons = fetch_map_endpoint("spot_kind/get_icon_list")["icons"]
    return dict((icon["id"], icon["url"]) for icon in icons)
@permanent_cache()
def get_map_labels() -> List[Dict[str, Any]]:
    """Get labels and label categories"""
    data = fetch_map_endpoint("label/tree")
    return data["tree"]
def get_map_locations() -> List[Dict[str, Any]]:
    """Get all locations on the map"""
    data = fetch_map_endpoint("map_anchor/list")
    return data["list"]
def get_map_points() -> List[Dict[str, Any]]:
    """Get points on the map"""
    data = fetch_map_endpoint("point/list")
    return data["point_list"]
def get_map_tile(
    x: int, y: int, width: int, height: int, resolution: int = 1, image: Optional[str] = None
) -> str:
    """Gets a map tile at a position

    You may set an x, y, width and height of the resulting image
    however you should prefer to use multiples of 256 because they are cached
    on the mihoyo servers.

    Resolution dictates the resolution of the image as a percentage. 100 is highest and 0 is lowest.
    You should pick values from 100, 50, 25 and 12.5

    Args:
        x: Left edge of the crop, in map pixels.
        y: Top edge of the crop, in map pixels.
        width: Crop width in pixels.
        height: Crop height in pixels.
        resolution: Percentage resize applied before cropping.
        image: Base map image url; fetched via ``get_map_image()`` when omitted.

    Returns:
        The image url with an oss image-processing query string appended.
    """
    # Fall back to the full map image url when none was supplied.
    image = image or get_map_image()
    # oss processing: resize by percentage first, then crop the tile.
    return (
        image
        + f"?x-oss-process=image/resize,p_{round(resolution)}/crop,x_{x},y_{y},w_{width},h_{height}"
    )
|
# Discord bot: "!end" shutdown command, a Google-search mode, and canned replies.
import sys
# Discord API client library
import discord
# Google search helper (third-party `googlesearch` package)
from googlesearch import search

# Connect to discord
client = discord.Client()
# Mode switch: 1 => the next message received is used as a Google search query
ModeFlag = 0

# Startup handler: log identity and set the bot's presence
@client.event
async def on_ready():
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
    await client.change_presence(activity=discord.Game(name='ใใฏใใฉไผ่ญฐ'))

# Handler invoked for every message the bot can see
@client.event
async def on_message(message):
    # The mode flag must survive between events, so use the module-level global
    global ModeFlag
    # Ignore messages from bots (avoids an infinite reply loop with itself)
    if message.author.bot:
        return
    # Shutdown command
    if message.content == '!end':
        await message.channel.send('ไผ่ญฐ็ตใใ๏ผ')
        sys.exit()
    # Google-search mode: the next message typed is used as the search query
    if ModeFlag == 1:
        kensaku = message.content
        ModeFlag = 0
        count = 0
        # Search (Japanese locale) and post the top 5 results in order
        for url in search(kensaku, lang="jp",num = 5):
            await message.channel.send(url)
            count += 1
            if(count == 5):
                break
    # Switch into Google-search mode
    if message.content == '!og':
        ModeFlag = 1
        await message.channel.send('ๆฐใซใชใใใจใgoogleใงๆค็ดขใใใใจใใใฎใ๏ผใใฃใใใซ็บ่จใใฆใฟใใ')
    # Simple canned response
    if message.content == 'ใฎใซใฌใกใใทใฅ๏ผ':
        await message.channel.send('ไบบ้ใใ ใๆญปใซๅคใใใ')
    # Reply when a message starts with a specific prefix
    if message.content.startswith('็กๆจ'):
        lose = message.author.name + "่ปฝใใใ็งใฎๅใๅผใถใงใชใใๆญปใซๅคใใใ"
        await message.channel.send(lose)
    # Reply when the bot itself is mentioned
    if client.user in message.mentions:
        reply = f'{message.author.mention} ๆฐใซๅฅใฃใใ็งใฎ่กใใตใใ ใใซๅใใฆใใใใ'
        await message.channel.send(reply)

# SECURITY NOTE(review): the bot token is hard-coded in source. It should be
# revoked and loaded from an environment variable or config file instead.
client.run('NjkyNjU4NzY0MDc1MjM3Mzk3.XoLeJg.6Y7jEt1LzRwOetL9sr7XnTQiZhw')
from dcsolve import dcsolver

# Word-ladder test pairs and the cost functions to try on each pair.
word_pairs = [
    ('line', 'cake'), ('kit', 'zap'), ('ask', 'why'), ('face', 'pill'),
    ('oozy', 'aqua'), ('icky', 'poop'), ('quiz', 'kook'),
]
costs = ['steps', 'scrabble', 'frequency']

for w1, w2 in word_pairs:
    for cost in costs:
        dcsolver(w1, w2, cost)
    # BUG FIX: a bare `print` is a no-op expression in Python 3 (Python 2
    # leftover); call it to emit the intended blank separator line.
    print()
|
'''
Created on Mar 30, 2021
@author: jpannizzo
'''
from core.utilities.experiment_utils import hcl_mix, update_dispense_action_set, update_lsr_edoc
from core.models.view_tables import Edocument
import numpy as np
def liquid_solid_extraction(data, q3,experiment_copy_uuid,exp_name, exp_template):
    '''
    Logic for the liquid-solid extraction experiment: computes HCl/H2O
    dispense volumes from the requested concentrations and writes them into
    the copied experiment's dispense action sets and LSR e-document.

    Returns a (new_lsr_pk, lsr_msg) tuple from update_lsr_edoc.

    NOTE(review): `q3` and `xls_edoc` are never used in this function —
    confirm whether they can be dropped.
    '''
    #workflow for experiment: ORM lookup path from a WorkflowActionSet to its experiment
    related_exp = 'workflow__experiment_workflow_workflow__experiment'
    '''
    # q3 contains concentration logic
    # original code contained an if nested in a class within experiment.py
    # I don't believe it is necessary in order to run properly now that it is factored out
    # if there is an issue down the line uncomment the if...else and re-indent the logic in order to reimplement
    '''
    # Template documents attached to the experiment template.
    lsr_edoc = Edocument.objects.get(ref_edocument_uuid=exp_template.uuid, title='LSR file')
    xls_edoc = Edocument.objects.get(ref_edocument_uuid=exp_template.uuid, title='XLS file')
    # Volumes of stock HCl and water needed to reach each target concentration.
    # NOTE(review): np.fromstring is deprecated in modern numpy; np.fromstring
    # with sep=',' should eventually become np.fromstring -> np.fromstring/np.array split.
    hcl_vols, h2o_vols = hcl_mix(data['stock_concentration'],
                                 data['total_vol'],
                                 np.fromstring(data['hcl_concentrations'].strip(']['), sep=',')
                                 )
    # NOTE(review): WorkflowActionSet is referenced below but never imported in
    # this module — as written these lines raise NameError at runtime. It
    # presumably comes from core.models.view_tables like Edocument; TODO confirm
    # and add the import.
    h2o_dispense_action_set = WorkflowActionSet.objects.get(**{f'{related_exp}': experiment_copy_uuid,
                                                               'description__contains': 'H2O'})
    hcl_dispense_action_set = WorkflowActionSet.objects.get(**{f'{related_exp}': experiment_copy_uuid,
                                                               'description__contains': 'HCl'})
    update_dispense_action_set(h2o_dispense_action_set, h2o_vols)
    update_dispense_action_set(hcl_dispense_action_set, hcl_vols)
    # Update the LSR e-document with the computed volumes (scaled by 1000 —
    # presumably a mL -> uL conversion; TODO confirm units).
    new_lsr_pk, lsr_msg = update_lsr_edoc(lsr_edoc,
                                          experiment_copy_uuid,
                                          exp_name,
                                          vol_hcl=list(hcl_vols*1000),
                                          vol_h2o=list(h2o_vols*1000))
    #else:
    #    new_lsr_pk = None
    return new_lsr_pk, lsr_msg
import torch
from torch import nn
from ..builder import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class Recognizer2D(BaseRecognizer):
    """2D recognizer model framework.

    Frame-level (2D backbone) recognizer: input clips are flattened into the
    batch dimension, features are extracted per frame/segment, then the head
    (optionally via a neck) aggregates segments into clip-level predictions.
    """

    def forward_train(self, imgs, labels, **kwargs):
        """Defines the computation performed at every call when training.

        Args:
            imgs: Input clip tensor; the segment axis is folded into the
                batch axis before feature extraction.
            labels: Ground-truth labels.

        Returns:
            dict: Losses (classification loss plus any auxiliary neck loss).
        """
        assert self.with_cls_head
        batches = imgs.shape[0]
        # Merge the segment axis into the batch: [N, S, ...] -> [N * S, ...]
        imgs = imgs.reshape((-1, ) + imgs.shape[2:])
        num_segs = imgs.shape[0] // batches
        losses = dict()
        x = self.extract_feat(imgs)
        if self.backbone_from in ['torchvision', 'timm']:
            # External backbones may return un-pooled 4D feature maps.
            if len(x.shape) == 4 and (x.shape[2] > 1 or x.shape[3] > 1):
                # apply adaptive avg pooling
                x = nn.AdaptiveAvgPool2d(1)(x)
            # Flatten, then restore singleton spatial dims expected downstream.
            x = x.reshape((x.shape[0], -1))
            x = x.reshape(x.shape + (1, 1))
        if self.with_neck:
            # Regroup per-segment features: [N * S, C, ...] -> [N, C, S, ...]
            x = [
                each.reshape((-1, num_segs) +
                             each.shape[1:]).transpose(1, 2).contiguous()
                for each in x
            ]
            x, loss_aux = self.neck(x, labels.squeeze())
            x = x.squeeze(2)
            # The neck has already aggregated the segments.
            num_segs = 1
            losses.update(loss_aux)
        cls_score = self.cls_head(x, num_segs)
        gt_labels = labels.squeeze()
        loss_cls = self.cls_head.loss(cls_score, gt_labels, **kwargs)
        losses.update(loss_cls)
        return losses

    def _do_test(self, imgs):
        """Defines the computation performed at every call when evaluation,
        testing and gradcam.

        Returns raw features when ``self.feature_extraction`` is set,
        otherwise clip-averaged classification scores.
        """
        batches = imgs.shape[0]
        # Merge the segment axis into the batch: [N, S, ...] -> [N * S, ...]
        imgs = imgs.reshape((-1, ) + imgs.shape[2:])
        num_segs = imgs.shape[0] // batches
        x = self.extract_feat(imgs)
        if self.backbone_from in ['torchvision', 'timm']:
            if len(x.shape) == 4 and (x.shape[2] > 1 or x.shape[3] > 1):
                # apply adaptive avg pooling
                x = nn.AdaptiveAvgPool2d(1)(x)
            x = x.reshape((x.shape[0], -1))
            x = x.reshape(x.shape + (1, 1))
        if self.with_neck:
            x = [
                each.reshape((-1, num_segs) +
                             each.shape[1:]).transpose(1, 2).contiguous()
                for each in x
            ]
            x, _ = self.neck(x)
            x = x.squeeze(2)
            num_segs = 1
        if self.feature_extraction:
            # perform spatial pooling
            avg_pool = nn.AdaptiveAvgPool2d(1)
            x = avg_pool(x)
            # squeeze dimensions
            x = x.reshape((batches, num_segs, -1))
            # temporal average pooling
            x = x.mean(axis=1)
            return x
        # When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes]
        # When using `TSMHead`, shape is [batch_size * num_crops, num_classes]
        # `num_crops` is calculated by:
        #   1) `twice_sample` in `SampleFrames`
        #   2) `num_sample_positions` in `DenseSampleFrames`
        #   3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline`
        #   4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
        # should have cls_head if not extracting features
        cls_score = self.cls_head(x, num_segs)
        assert cls_score.size()[0] % batches == 0
        # calculate num_crops automatically
        cls_score = self.average_clip(cls_score,
                                      cls_score.size()[0] // batches)
        return cls_score

    def _do_fcn_test(self, imgs):
        """Spatially fully-convolutional testing (no global spatial pooling)."""
        # [N, num_crops * num_segs, C, H, W] ->
        # [N * num_crops * num_segs, C, H, W]
        batches = imgs.shape[0]
        imgs = imgs.reshape((-1, ) + imgs.shape[2:])
        num_segs = self.test_cfg.get('num_segs', self.backbone.num_segments)
        if self.test_cfg.get('flip', False):
            # Horizontal-flip augmentation at test time.
            imgs = torch.flip(imgs, [-1])
        x = self.extract_feat(imgs)
        if self.with_neck:
            x = [
                each.reshape((-1, num_segs) +
                             each.shape[1:]).transpose(1, 2).contiguous()
                for each in x
            ]
            x, _ = self.neck(x)
        else:
            x = x.reshape((-1, num_segs) +
                          x.shape[1:]).transpose(1, 2).contiguous()
        # When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes]
        # When using `TSMHead`, shape is [batch_size * num_crops, num_classes]
        # `num_crops` is calculated by:
        #   1) `twice_sample` in `SampleFrames`
        #   2) `num_sample_positions` in `DenseSampleFrames`
        #   3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline`
        #   4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
        cls_score = self.cls_head(x, fcn_test=True)
        assert cls_score.size()[0] % batches == 0
        # calculate num_crops automatically
        cls_score = self.average_clip(cls_score,
                                      cls_score.size()[0] // batches)
        return cls_score

    def forward_test(self, imgs):
        """Defines the computation performed at every call when evaluation and
        testing."""
        if self.test_cfg.get('fcn_test', False):
            # If specified, spatially fully-convolutional testing is performed
            assert not self.feature_extraction
            assert self.with_cls_head
            return self._do_fcn_test(imgs).cpu().numpy()
        return self._do_test(imgs).cpu().numpy()

    def forward_dummy(self, imgs, softmax=False):
        """Used for computing network FLOPs.

        See ``tools/analysis/get_flops.py``.

        Args:
            imgs (torch.Tensor): Input images.

        Returns:
            Tensor: Class score.
        """
        assert self.with_cls_head
        batches = imgs.shape[0]
        imgs = imgs.reshape((-1, ) + imgs.shape[2:])
        num_segs = imgs.shape[0] // batches
        x = self.extract_feat(imgs)
        if self.with_neck:
            x = [
                each.reshape((-1, num_segs) +
                             each.shape[1:]).transpose(1, 2).contiguous()
                for each in x
            ]
            x, _ = self.neck(x)
            x = x.squeeze(2)
            num_segs = 1
        outs = self.cls_head(x, num_segs)
        if softmax:
            # NOTE(review): softmax without an explicit `dim` is deprecated in
            # torch; presumably dim=1 is intended — confirm before changing.
            outs = nn.functional.softmax(outs)
        return (outs, )

    def forward_gradcam(self, imgs):
        """Defines the computation performed at every call when using gradcam
        utils."""
        assert self.with_cls_head
        return self._do_test(imgs)
|
# -*- coding: utf-8 -*-
from __future__ import annotations
__all__ = ["Transform", "Linear", "Cholesky", "Subspace"]
from functools import partial
from typing import Any, Callable, Sequence, Union
import jax.numpy as jnp
from jax.scipy import linalg
from .kernels import Kernel
from .types import JAXArray
class Transform(Kernel):
    """Apply a transformation to the input coordinates of the kernel

    Args:
        transform: (Callable): A callable object that accepts coordinates as
            inputs and returns transformed coordinates.
        kernel (Kernel): The kernel to use in the transformed space.
    """

    # This type signature is a hack for Sphinx sphinx-doc/sphinx#9736
    def __init__(self, transform: Callable[[Any], Any], kernel: Kernel):
        self.transform = transform
        self.kernel = kernel

    def evaluate(self, X1: JAXArray, X2: JAXArray) -> JAXArray:
        # Map both inputs through the transform, then delegate to the
        # wrapped kernel.
        t = self.transform
        return self.kernel.evaluate(t(X1), t(X2))
class Linear(Transform):
    """Apply a linear transformation to the input coordinates of the kernel

    For example, the following transformed kernels are all equivalent, but the
    second supports more flexible transformations:

    .. code-block:: python

        >>> import numpy as np
        >>> from tinygp import kernels, transforms
        >>> kernel0 = kernels.Matern32(4.5)
        >>> kernel1 = transforms.Linear(1.0 / 4.5, kernels.Matern32())
        >>> np.testing.assert_allclose(
        ...     kernel0.evaluate(0.5, 0.1), kernel1.evaluate(0.5, 0.1)
        ... )

    Args:
        scale (JAXArray): A 0-, 1-, or 2-dimensional array specifying the
            scale of this transform.
        kernel (Kernel): The kernel to use in the transformed space.
    """

    def __init__(self, scale: JAXArray, kernel: Kernel):
        self.scale = scale
        ndim = jnp.ndim(scale)
        if ndim == 2:
            # A full matrix acts by matrix-vector product.
            self.transform = partial(jnp.dot, scale)
        elif ndim < 2:
            # Scalars and vectors act elementwise.
            self.transform = partial(jnp.multiply, scale)
        else:
            raise ValueError("'scale' must be 0-, 1-, or 2-dimensional")
        self.kernel = kernel
class Cholesky(Transform):
    """Apply a Cholesky transformation to the input coordinates of the kernel

    For example, the following transformed kernels are all equivalent, but the
    second supports more flexible transformations:

    .. code-block:: python

        >>> import numpy as np
        >>> from tinygp import kernels, transforms
        >>> kernel0 = kernels.Matern32(4.5)
        >>> kernel1 = transforms.Cholesky(4.5, kernels.Matern32())
        >>> np.testing.assert_allclose(
        ...     kernel0.evaluate(0.5, 0.1), kernel1.evaluate(0.5, 0.1)
        ... )

    Args:
        factor (JAXArray): A 0-, 1-, or 2-dimensional array specifying the
            Cholesky factor. If 2-dimensional, this must be a lower or
            upper triangular matrix as specified by ``lower``, but this is
            not checked.
        kernel (Kernel): The kernel to use in the transformed space.
        lower: (bool, optional): Is ``factor`` lower (vs upper) triangular.
    """

    def __init__(
        self, factor: JAXArray, kernel: Kernel, *, lower: bool = True
    ):
        self.factor = factor
        if jnp.ndim(factor) < 2:
            # Scalar/vector factors act by elementwise division.
            self.transform = partial(jnp.multiply, 1.0 / factor)
        elif jnp.ndim(factor) == 2:
            # Matrix factors act by a triangular solve.
            self.transform = partial(
                linalg.solve_triangular, factor, lower=lower
            )
        else:
            # BUG FIX: the message previously referred to 'scale' (copied from
            # Linear) although this class's parameter is named 'factor'.
            raise ValueError("'factor' must be 0-, 1-, or 2-dimensional")
        self.kernel = kernel

    @classmethod
    def from_parameters(
        cls, diagonal: JAXArray, off_diagonal: JAXArray, kernel: Kernel
    ) -> "Cholesky":
        """Build a Cholesky transform with a sensible parameterization

        Args:
            diagonal (JAXArray): An ``(ndim,)`` array with the diagonal
                elements of ``factor``. These must be positive, but this
                is not checked.
            off_diagonal (JAXArray): An ``((ndim - 1) * ndim,)`` array
                with the off-diagonal elements of ``factor``.
            kernel (Kernel): The kernel to use in the transformed space.

        Raises:
            ValueError: If ``off_diagonal`` does not have exactly
                ``(ndim - 1) * ndim / 2`` elements.
        """
        ndim = diagonal.size
        if off_diagonal.size != ((ndim - 1) * ndim) // 2:
            raise ValueError(
                "Dimension mismatch: expected "
                f"(ndim-1)*ndim/2 = {((ndim - 1) * ndim) // 2} elements in "
                f"'off_diagonal'; got {off_diagonal.size}"
            )
        # Assemble the lower-triangular factor from its flat parameterization.
        factor = jnp.zeros((ndim, ndim))
        factor = factor.at[jnp.diag_indices(ndim)].add(diagonal)
        factor = factor.at[jnp.tril_indices(ndim, -1)].add(off_diagonal)
        return cls(factor, kernel, lower=True)
class Subspace(Transform):
    """A kernel transform that selects a subset of the input dimensions

    For example, the following kernel only depends on the coordinates in the
    second (`1`-th) dimension:

    .. code-block:: python

        >>> import numpy as np
        >>> from tinygp import kernels, transforms
        >>> kernel = transforms.Subspace(1, kernels.Matern32())
        >>> np.testing.assert_allclose(
        ...     kernel.evaluate(np.array([0.5, 0.1]), np.array([-0.4, 0.7])),
        ...     kernel.evaluate(np.array([100.5, 0.1]), np.array([-70.4, 0.7])),
        ... )

    Args:
        axis: (Axis, optional): An integer or tuple of integers specifying the
            axes to select.
        kernel (Kernel): The kernel to use in the transformed space.
    """

    def __init__(self, axis: Union[Sequence[int], int], kernel: Kernel):
        # Closure selecting the requested coordinate(s) from each input.
        def select(X: JAXArray) -> JAXArray:
            return X[axis]

        self.transform = select
        self.kernel = kernel
|
import os
import argparse
import tarfile
import re
import json
import logging
from collections import defaultdict
from bgcore import tsv
from bgcore.re import ReContext
from fannsdb.utils import RatedProgress
# Complement table for DNA bases; characters outside the table are kept as-is.
__CB = {"A": "T", "T": "A", "G": "C", "C": "G"}


def complementary_sequence(seq):
    """Return the complementary sequence of *seq* (case-insensitive).

    Bases without a complement entry (e.g. 'N') pass through unchanged.
    """
    return "".join(__CB.get(base, base) for base in seq.upper())
# CDS-level substitution, e.g. "c.123A>T"; groups: (ref, alt).
MUT_CDS_RE = re.compile(r"^c.\d+([ACGT]+)>([ACGT]+)$")
# Protein-level substitution, e.g. "p.G12D"; groups: (aa_ref, aa_pos, aa_alt).
MUT_AA_RE = re.compile(r"^p.([ARNDCEQGHILKMFPSTWYV]+)(\d+)([ARNDCEQGHILKMFPSTWYV]+)$")
# Genomic position "chrom:start" with an optional "-end"; groups: (chrom, start, end).
MUT_POS_RE = re.compile(r"(.+):(\d+)(-\d+)?")
class Dataset(object):
    """A named TSV output file that counts the lines written to it.

    Intended for use as a context manager: the file is (re)opened on
    ``__enter__`` and closed on ``__exit__``.
    """

    def __init__(self, name):
        self.name = name
        self.f = None
        self._size = 0

    def __enter__(self):
        self.f = tsv.open(self.name, "w")
        self._size = 0
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.f is None:
            return
        self.f.close()

    def write(self, line):
        """Write *line* followed by a newline, bumping the line counter."""
        self._size += 1
        self.f.write("{}\n".format(line))

    @property
    def size(self):
        """Number of lines written so far."""
        return self._size
def get_transcripts(fanns_db, mut_cds, mut_aa, mut_pos, mut_strand, acc, logger):
    """Yield score rows from *fanns_db* matching a COSMIC mutation record.

    Tries the CDS-level description first (genomic coordinate query), then
    falls back to the protein-level description. Python 2 code (``xrange``).
    """
    # Kept together only for log messages below.
    fields = [mut_cds, mut_aa, mut_pos, mut_strand, acc]
    cds_ctx = ReContext(mut_cds)
    aa_ctx = ReContext(mut_aa)
    if cds_ctx.match(MUT_CDS_RE):
        ref, alt = [cds_ctx.group(i) for i in xrange(1, 3)]
        aa_ref_len = len(ref)
        if aa_ref_len != len(alt):
            logger.warn("Found substitution with different alleles: {}".format(fields))
        if mut_strand == "-":
            # Genomic queries are strand-aware: complement minus-strand alleles.
            ref = complementary_sequence(ref)
            alt = complementary_sequence(alt)
        pos_ctx = ReContext(mut_pos)
        if not pos_ctx.match(MUT_POS_RE):
            logger.warn("Unexpected mutation position: {}".format(fields))
            return
        chrom, start = [pos_ctx.group(i) for i in xrange(1, 3)]
        if chrom == "25":
            # presumably COSMIC's code for mitochondrial DNA — skipped; TODO confirm.
            return
        start = int(start)
        # One query per substituted base of a possibly multi-base substitution.
        for i in xrange(aa_ref_len):
            for row in fanns_db.query_scores(chr=chrom, start=start + i, ref=ref[i], alt=alt[i],
                                             strand=mut_strand, transcript=acc, maps=["symbol"]):
                yield row
    elif aa_ctx.match(MUT_AA_RE):
        # Fallback: query by protein accession and amino-acid change.
        aa_ref, aa_pos, aa_alt = [aa_ctx.group(i) for i in xrange(1, 4)]
        aa_ref_len = len(aa_ref)
        if aa_ref_len != len(aa_alt):
            logger.warn("Found substitution with different alleles: {}".format(fields))
        aa_pos = int(aa_pos)
        for i in xrange(aa_ref_len):
            for row in fanns_db.query_scores(protein=acc, aa_pos=aa_pos + i, aa_ref=aa_ref[i], aa_alt=aa_alt[i],
                                             maps=["symbol", "prot_transcript"]):
                yield row
def extract_snvs(fanns_db, data_path, logger=None):
    """Read COSMIC mutation rows from *data_path* and collect missense SNVs.

    Returns a dict keyed by (protein, aa_pos, aa_ref, aa_alt) whose values
    hold transcript, gene symbol and per-mutation sample counts split by
    genome-wide-screen status. Python 2 code (``itervalues``).
    """
    logger = logger or logging.getLogger("perf-cosmic")
    snvs = dict()
    logger.info("Reading mutations ...")
    progress = RatedProgress(logger, name="mutations")
    with tsv.open(data_path, "r") as df:
        columns = [
            "Genome-wide screen",
            "Mutation Description",
            "Mutation CDS",
            "Mutation AA",
            "Mutation GRCh37 genome position",
            "Mutation GRCh37 strand",
            "Accession Number",
            "ID_sample"]
        total_rows = queried_rows = dbfound_rows = 0
        for fields in tsv.rows(df, columns=columns, header=True):
            total_rows += 1
            wide_screen, mut_desc, mut_cds, mut_aa, mut_pos, mut_strand, acc, sample_id = fields
            # wide_screen != "y"
            if mut_desc != "Substitution - Missense":
                continue
            queried_rows += 1
            for row in get_transcripts(fanns_db, mut_cds, mut_aa, mut_pos, mut_strand, acc, logger):
                dbfound_rows += 1
                k = tuple([row[k] for k in ["protein", "aa_pos", "aa_ref", "aa_alt"]])
                if k not in snvs:
                    snvs[k] = snv = dict(
                        transcript=row["transcript"],
                        symbol=row["xrefs"]["symbol"],
                        msamples=set(), wsamples=set())
                else:
                    snv = snvs[k]
                # Track distinct sample ids; 'w' = genome-wide screens,
                # 'm' = targeted (non-genome-wide) screens.
                if wide_screen == "y":
                    snv["wsamples"].add(sample_id)
                else:
                    snv["msamples"].add(sample_id)
            progress.update()
    progress.log_totals()
    logger.info("Counting the number of samples per mutation ...")
    # Collapse the sample-id sets into plain counts.
    for data in snvs.itervalues():
        data["msamples"] = len(data["msamples"])
        data["wsamples"] = len(data["wsamples"])
    logger.info("Total: total_rows={}, queried_rows={}, found_rows={}, protein_changes={}".format(total_rows, queried_rows, dbfound_rows, len(snvs)))
    return snvs
def save_snvs(snvs, path, header=False):
    """Write SNVs to a tab-separated file (the inverse of ``load_snvs``).

    Columns: PROTEIN, POS, REF, ALT, SYM (comma-joined), TRS, MSAMPLES,
    WSAMPLES. Python 2 code (``iteritems``/``basestring``).
    """
    with open(path, "w") as f:
        if header:
            f.write("\t".join(["PROTEIN", "POS", "REF", "ALT", "SYM", "TRS", "MSAMPLES", "WSAMPLES"]) + "\n")
        for snv, data in snvs.iteritems():
            f.write("\t".join([str(v) for v in snv]) + "\t")
            symbols = data.get("symbol") or ""
            # Normalize a single symbol string to a list for the join below.
            if isinstance(symbols, basestring):
                symbols = [symbols]
            f.write(",".join(symbols) + "\t" + (data.get("transcript") or "") + "\t")
            f.write("{}\t{}\n".format(data["msamples"], data["wsamples"]))
def load_snvs(path):
    """Load SNVs previously written by ``save_snvs``.

    Returns a dict keyed by (protein, pos, ref, alt). A single symbol is
    stored as a plain string, multiple symbols as a set.
    """
    snvs = {}
    with open(path) as fh:
        for raw in fh:
            cols = raw.rstrip("\n").split("\t")
            protein, pos, ref, alt = cols[:4]
            syms = cols[4].split(",")
            snvs[(protein, int(pos), ref, alt)] = dict(
                symbol=syms[0] if len(syms) == 1 else set(syms),
                transcript=cols[5],
                msamples=int(cols[6]),
                wsamples=int(cols[7]))
    return snvs
def create_datasets(snvs, cgc_path, tdrivers_path, pdrivers_path, output_prefix, logger=None):
    """Partition SNVs into recurrence- and driver-gene-based datasets.

    Writes one file per dataset (recurrence 1/2/4, CGC vs non-CGC, TD/PD
    drivers vs non-drivers) using *output_prefix* as the filename prefix.
    """
    logger = logger or logging.getLogger("perf-cosmic")
    prefix = output_prefix or "cosmic-"
    logger.info("Loading CGC genes ...")
    cgc_genes = set()
    with open(cgc_path, "r") as f:
        for line in f:
            cgc_genes.add(line.rstrip("\n"))
    logger.info("Loading TD drivers ...")
    # Driver files: gene symbol in the first tab-separated column.
    tdrivers = set()
    with open(tdrivers_path, "r") as f:
        for line in f:
            tdrivers.add(line.rstrip("\n").split("\t")[0])
    logger.info("Loading PD drivers ...")
    pdrivers = set()
    with open(pdrivers_path, "r") as f:
        for line in f:
            pdrivers.add(line.rstrip("\n").split("\t")[0])
    logger.info("Creating datasets ...")
    progress = RatedProgress(logger, name="mutations")
    with Dataset(prefix + "1") as rec1,\
        Dataset(prefix + "2") as rec2,\
        Dataset(prefix + "4") as rec4,\
        Dataset(prefix + "CGC") as cgc,\
        Dataset(prefix + "noCGC") as nocgc,\
        Dataset(prefix + "TD") as td,\
        Dataset(prefix + "noTD") as notd,\
        Dataset(prefix + "PD") as pd,\
        Dataset(prefix + "noPD") as nopd:
        for (protein, aa_pos, aa_ref, aa_alt), snv in snvs.items():
            # NOTE(review): extract_snvs/load_snvs produce "msamples"/"wsamples"
            # (ints), not a "samples" collection — this line raises KeyError as
            # written. Presumably snv["msamples"] (or msamples + wsamples) is
            # intended; TODO confirm against the original dataset definitions.
            num_samples = len(snv["samples"])
            line = "\t".join([str(v) for v in [protein, aa_pos, aa_ref, aa_alt]])
            # Normalize symbol to a set for the driver-gene intersections below.
            symbol = snv["symbol"] or ""
            if isinstance(symbol, basestring):
                symbol = set([symbol])
            elif isinstance(symbol, list):
                symbol = set(symbol)
            # Recurrence-based datasets (thresholds are cumulative).
            if num_samples == 1:
                rec1.write(line)
            if num_samples >= 2:
                rec2.write(line)
            if num_samples >= 4:
                rec4.write(line)
            # Driver-gene datasets; singletons in non-driver genes are negatives.
            if len(symbol & cgc_genes) > 0:
                cgc.write(line)
            elif num_samples == 1:
                nocgc.write(line)
            if len(symbol & tdrivers) > 0:
                td.write(line)
            elif num_samples == 1:
                notd.write(line)
            if len(symbol & pdrivers) > 0:
                pd.write(line)
            elif num_samples == 1:
                nopd.write(line)
            progress.update()
    progress.log_totals()
    logger.info("Datasets: {}".format(", ".join(["{}={}".format(os.path.basename(d.name), d.size) for d in [
        rec1, rec2, rec4, cgc, nocgc, td, notd, pd, nopd]])))
|
import pytest
from alchemtest.gmx import load_benzene
from alchemlyb.parsing import gmx
from alchemlyb.convergence import forward_backward_convergence
@pytest.fixture()
def gmx_benzene():
    """Extract dHdl and u_nk series from the benzene Coulomb leg."""
    files = load_benzene()['data']['Coulomb']
    dHdl = [gmx.extract_dHdl(f, T=300) for f in files]
    u_nk = [gmx.extract_u_nk(f, T=300) for f in files]
    return dHdl, u_nk
def test_convergence_ti(gmx_benzene):
    """TI estimator: check shape and spot-check forward/backward values."""
    dHdl, _u_nk = gmx_benzene
    df = forward_backward_convergence(dHdl, 'TI')
    assert df.shape == (10, 5)
    checks = [(0, 0, 3.07), (0, 2, 3.11), (-1, 0, 3.09), (-1, 2, 3.09)]
    for row, col, expected in checks:
        assert df.iloc[row, col] == pytest.approx(expected, 0.01)
def test_convergence_mbar(gmx_benzene):
    """MBAR estimator: check shape and spot-check forward/backward values."""
    _dHdl, u_nk = gmx_benzene
    df = forward_backward_convergence(u_nk, 'MBAR')
    assert df.shape == (10, 5)
    checks = [(0, 0, 3.02), (0, 2, 3.06), (-1, 0, 3.05), (-1, 2, 3.04)]
    for row, col, expected in checks:
        assert df.iloc[row, col] == pytest.approx(expected, 0.01)
def test_convergence_bar(gmx_benzene):
    """BAR estimator: check shape and spot-check forward/backward values."""
    _dHdl, u_nk = gmx_benzene
    df = forward_backward_convergence(u_nk, 'BAR')
    assert df.shape == (10, 5)
    checks = [(0, 0, 3.02), (0, 2, 3.06), (-1, 0, 3.05), (-1, 2, 3.04)]
    for row, col, expected in checks:
        assert df.iloc[row, col] == pytest.approx(expected, 0.01)
def test_convergence_wrong_estimator(gmx_benzene):
    """An unknown estimator name raises ValueError with a clear message."""
    _dHdl, u_nk = gmx_benzene
    with pytest.raises(ValueError, match="{} is not a valid estimator".format("www")):
        forward_backward_convergence(u_nk, 'www')
|
import pytest
def test_createTask():
    # TODO: placeholder test — no assertions implemented yet.
    pass
from django.db import migrations, models
import django.db.models.deletion
import morphodict.lexicon.models
class Migration(migrations.Migration):
    """Squashed initial migration for the lexicon app.

    Replaces 0001_initial, 0002_alter_wordform_linguist_info and
    0003_remove_unused_dictionarysource_fields. Auto-generated by Django;
    do not hand-edit the operations.
    """

    replaces = [
        ("lexicon", "0001_initial"),
        ("lexicon", "0002_alter_wordform_linguist_info"),
        ("lexicon", "0003_remove_unused_dictionarysource_fields"),
    ]

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="DictionarySource",
            fields=[
                (
                    "abbrv",
                    models.CharField(max_length=8, primary_key=True, serialize=False),
                ),
                (
                    "title",
                    models.CharField(
                        help_text="What is the primary title of the dictionary source?",
                        max_length=256,
                    ),
                ),
                (
                    "author",
                    models.CharField(
                        blank=True,
                        help_text="Separate multiple authors with commas. See also: editor",
                        max_length=512,
                    ),
                ),
                (
                    "editor",
                    models.CharField(
                        blank=True,
                        help_text="Who edited or compiled this volume? Separate multiple editors with commas.",
                        max_length=512,
                    ),
                ),
                (
                    "year",
                    models.IntegerField(
                        blank=True,
                        help_text="What year was this dictionary published?",
                        null=True,
                    ),
                ),
                (
                    "publisher",
                    models.CharField(
                        blank=True, help_text="What was the publisher?", max_length=128
                    ),
                ),
                (
                    "city",
                    models.CharField(
                        blank=True,
                        help_text="What is the city of the publisher?",
                        max_length=64,
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Wordform",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("text", models.CharField(max_length=60)),
                (
                    "raw_analysis",
                    models.JSONField(
                        encoder=morphodict.lexicon.models.DiacriticPreservingJsonEncoder,
                        null=True,
                    ),
                ),
                (
                    "paradigm",
                    models.CharField(
                        default=None,
                        help_text="If provided, this is the name of a static paradigm that this wordform belongs to. This name should match the filename in res/layouts/static/ WITHOUT the file extension.",
                        max_length=60,
                        null=True,
                    ),
                ),
                (
                    "is_lemma",
                    models.BooleanField(
                        default=False,
                        help_text="The wordform is chosen as lemma. This field defaults to true if according to fst the wordform is not analyzable or it's ambiguous",
                    ),
                ),
                (
                    "slug",
                    models.CharField(
                        help_text="\n A stable unique identifier for a lemma. Used in public-facing URLs,\n and for import reconciliation.\n ",
                        max_length=60,
                        null=True,
                        unique=True,
                    ),
                ),
                (
                    "linguist_info",
                    models.JSONField(
                        blank=True,
                        help_text="\n Various pieces of information about wordforms/lemmas that are of\n interest to linguists, and are available for display in templates,\n but that are not used by any of the logic in the morphodict code.\n ",
                    ),
                ),
                (
                    "lemma",
                    models.ForeignKey(
                        help_text="The identified lemma of this wordform. Defaults to self",
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="inflections",
                        to="lexicon.wordform",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="TargetLanguageKeyword",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("text", models.CharField(max_length=60)),
                (
                    "wordform",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="target_language_keyword",
                        to="lexicon.wordform",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="SourceLanguageKeyword",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("text", models.CharField(max_length=60)),
                (
                    "wordform",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="lexicon.wordform",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Definition",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("text", models.CharField(max_length=200)),
                (
                    "auto_translation_source",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="lexicon.definition",
                    ),
                ),
                ("citations", models.ManyToManyField(to="lexicon.DictionarySource")),
                (
                    "wordform",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="definitions",
                        to="lexicon.wordform",
                    ),
                ),
            ],
        ),
        # Lookup indexes used by search and lemma resolution.
        migrations.AddIndex(
            model_name="wordform",
            index=models.Index(
                fields=["raw_analysis"], name="lexicon_wor_raw_ana_99bdb4_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="wordform",
            index=models.Index(fields=["text"], name="lexicon_wor_text_f6cec4_idx"),
        ),
        migrations.AddIndex(
            model_name="wordform",
            index=models.Index(
                fields=["is_lemma", "text"], name="lexicon_wor_is_lemm_916282_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="targetlanguagekeyword",
            index=models.Index(fields=["text"], name="lexicon_tar_text_69f04a_idx"),
        ),
        migrations.AddIndex(
            model_name="sourcelanguagekeyword",
            index=models.Index(fields=["text"], name="lexicon_sou_text_d9d495_idx"),
        ),
        # From 0002: linguist_info becomes nullable.
        migrations.AlterField(
            model_name="wordform",
            name="linguist_info",
            field=models.JSONField(
                blank=True,
                help_text="\n Various pieces of information about wordforms/lemmas that are of\n interest to linguists, and are available for display in templates,\n but that are not used by any of the logic in the morphodict code.\n ",
                null=True,
            ),
        ),
        # From 0003: drop the unused DictionarySource bibliography fields.
        migrations.RemoveField(
            model_name="dictionarysource",
            name="author",
        ),
        migrations.RemoveField(
            model_name="dictionarysource",
            name="city",
        ),
        migrations.RemoveField(
            model_name="dictionarysource",
            name="editor",
        ),
        migrations.RemoveField(
            model_name="dictionarysource",
            name="publisher",
        ),
        migrations.RemoveField(
            model_name="dictionarysource",
            name="title",
        ),
        migrations.RemoveField(
            model_name="dictionarysource",
            name="year",
        ),
    ]
|
import logging
from peewee import *
from ..db import db, BarbotModel, ModelError, addModel
from ..bus import bus

_logger = logging.getLogger('Models.Glass')


class Glass(BarbotModel):
    """A glassware definition (type/size/units triple, e.g. "12 oz tumbler")."""

    type = CharField()
    size = IntegerField()
    units = CharField()
    # Optional free-form description.
    description = TextField(null = True)

    @staticmethod
    def saveFromDict(item):
        """Create or update a glass from a plain dict (e.g. a client payload).

        When *item* carries a truthy 'id' the existing row is updated,
        otherwise a new row is created.
        """
        if 'id' in item.keys() and item['id'] != False:
            g = Glass.get(Glass.id == item['id'])
        else:
            g = Glass()
        g.set(item)
        g.save()

    @staticmethod
    def deleteById(id):
        """Delete the glass with the given primary key.

        NOTE(review): the parameter shadows the builtin ``id``.
        """
        g = Glass.get(Glass.id == id)
        g.delete_instance()

    # override
    def save(self, *args, **kwargs):
        """Save, enforcing uniqueness of (type, size, units).

        Emits 'model/glass/saved' on the bus only when peewee reports a write
        (with ``only_save_dirty`` an unchanged row saves as falsy).
        """
        g = Glass.select().where(Glass.type == self.type, Glass.size == self.size, Glass.units == self.units).first()
        if g and self.id != g.id:
            raise ModelError('The same glass already exists!')
        if super().save(*args, **kwargs):
            bus.emit('model/glass/saved', self)

    # override
    def delete_instance(self, *args, **kwargs):
        """Delete, refusing while any drink still references this glass."""
        # `drinks` is presumably a backref declared on the Drink model — confirm.
        if self.drinks.execute():
            raise ModelError('This glass is used by at least one drink!')
        super().delete_instance(*args, **kwargs)
        bus.emit('model/glass/deleted', self)

    def set(self, dict):
        """Copy recognized fields from the given mapping onto this model.

        NOTE(review): the parameter shadows the builtin ``dict``.
        """
        if 'type' in dict:
            self.type = str(dict['type'])
        if 'size' in dict:
            self.size = int(dict['size'])
        if 'units' in dict:
            self.units = str(dict['units'])
        if 'description' in dict:
            self.description = str(dict['description'])

    def name(self):
        """Human-readable name, e.g. "12 oz tumbler"."""
        return str(self.size) + ' ' + self.units + ' ' + self.type

    def toDict(self, drinks = False):
        """Serialize to a plain dict; include related drinks when requested."""
        out = {
            'id': self.id,
            'type': self.type,
            'size': self.size,
            'units': self.units,
            'description' : self.description,
            'name': self.name()
        }
        if drinks:
            out['drinks'] = [d.toDict() for d in self.drinks]
        return out

    class Meta:
        database = db
        only_save_dirty = True
        # One glass per (type, size, units) combination.
        indexes = (
            (('type', 'size', 'units'), True),
        )

addModel(Glass)
|
"""Tests for Erisoglu 2011 algorithm"""
import unittest
import numpy as np
import sklearn.datasets as skdatasets
from initialisations.erisoglu import Erisoglu
# pylint: disable=R0201
class ErisogluTestSuite(unittest.TestCase):
    """Tests for Erisoglu 2011 algorithm"""

    def setUp(self):
        # Minimal instance (2 one-feature samples, k=3) — enough for the
        # calculation helpers exercised below.
        self._e = Erisoglu(np.array([[1], [2]]), 3)
        self._set_up_data()

    # Test a few calculation functions ----------------------------------------

    # TODO: this is currently pending due to the fact the original
    # method won't work with standardised data
    # (the double-underscore prefix keeps unittest from collecting it)
    def __test_variation_coefficient(self):
        """Test calculation of variation coefficient"""
        self.assertEqual(self._e.variation_coefficient([1, 1, 1]), 0)
        self.assertAlmostEqual(self._e.variation_coefficient([1, 2, 3]),
                               0.40824829046)
        self.assertAlmostEqual(self._e.variation_coefficient([-1, -2, -3]),
                               0.40824829046)

    def test_correlation_coefficient(self):
        """Note discrepancy between Erisoglu and Pearson. Currently I've used
        Pearson which provides the published numbers"""
        self.assertAlmostEqual(
            self._e.correlation_coefficient([1, 2], [2, 4]),
            1)
        self.assertAlmostEqual(
            self._e.correlation_coefficient([2, 1], [2, 4]),
            -1)
        self.assertAlmostEqual(
            self._e.correlation_coefficient([1, 2, 3, 4, 5],
                                            [2, 4, 6, 8, 10]), 1)
        self.assertAlmostEqual(
            self._e.correlation_coefficient([10, 2, 3, 4, 5, 6, 99],
                                            [1, 2, 3, 4, 3, 2, 1]),
            -0.546, 4)

    def test_distances(self):
        """Tests distances between points"""
        # Between two points
        self.assertEqual(self._e.distance([-7, -4], [17, 6]), 26)
        self.assertAlmostEqual(
            self._e.distance([1, 7, 98, 56, 89], [8, 6, 56, 5, 0]),
            111.0675470)
        # Between n points
        self.assertEqual(self._e.distance([0, 1], [1, 1], [1, 1]), 2)
        self.assertEqual(self._e.distance([0, 1], [1, 1], [1, 1], [0, 3]), 4)
        # And by unpacking a list
        mypoints = [[1, 1], [1, 1], [0, 3]]
        self.assertEqual(self._e.distance([0, 1], *mypoints), 4)

    # Test the actual algorithm -----------------------------------------------

    # TODO: again, this won't work now we've changed the variation coefficient
    # to work with standardised data
    def __test_iris(self):
        """Test against the Iris dataset"""
        num_clusters = 3
        dataset = skdatasets.load_iris()
        data = dataset.data
        # direct from the paper
        m_1 = [5.1774, 3.6516, 1.4903, 0.2677]
        m_2 = [6.4024, 2.9506, 5.1193, 1.7916]
        m_3 = [5.1278, 2.7917, 2.5722, 0.6361]
        expected = [m_1, m_2, m_3]
        my_e = Erisoglu(data, num_clusters)
        np.testing.assert_array_almost_equal(my_e.find_centers(),
                                             expected,
                                             decimal=4)

    # misc setup methods ------------------------------------------------------

    def _set_up_data(self):
        # Center is [1,8] - means of columns 3 and 4
        self._data1 = np.array([
            [0, 190, 3, 1000, 9],  # Furthest from ...,-999,8 so 3rd centroid
            [1, 200, 2, -999, 8],  # Furthest from ...,1001,7 so 2nd centroid
            [1, 190, 3, 1001, 7],  # Furthest from ...,1,8 so initial centroid
            [1, 189, 1, -998, 8]
        ])
        self._data2 = np.array([
            [9, 0, 2, 3, 1000, 9],
            [9, 1, 3, 2, -999, 8],
            [7, 1, 2, 3, 1001, 7],
            [8, 1, 3, 1, -998, 8]
        ])
|
from .coil_agent import CoILAgent
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Post , personal_details , details
def projects(request):
    """Render the project listing page ('pro.html') with every Post."""
    posts = Post.objects.all()
    return render(request, 'pro.html', {'posts': posts})
# Create your views here.
|
#! /usr/bin/env python
from os import makedirs
from os.path import join, dirname, exists
from sys import path
path.insert(0, '.')
from time import time
from argparse import ArgumentParser
# Command-line interface: hyper-parameters for the (DP-)WGAN-GP training run.
parser = ArgumentParser(description="Train MNIST generator with DP-WGAN-GP")
parser.add_argument('--capacity', type=int, default=64,
                    help="number-of-filters factor in GAN")
parser.add_argument('--critic-steps', type=int, default=4,
                    help="number of critic steps per generator step")
parser.add_argument('--nodp', action='store_true',
                    help="Train without differential privacy")
# Differential-privacy knobs (ignored when --nodp is set).
parser.add_argument('--sigma', type=float, default=0.5,
                    help="Ratio of noise std. dev. and mechanism L2-norm")
parser.add_argument('--grad-clip', type=float, default=1.0,
                    help="L2-norm clipping parameter")
parser.add_argument('--epochs', type=int, default=100,
                    help="number of epochs to train the GAN")
parser.add_argument('--batch-size', type=int, default=128,
                    help="set mini-batch size for training")
# Logging / evaluation cadence.
parser.add_argument('--print-every', type=int, default=25,
                    help="print every x steps")
parser.add_argument('--eval-every', type=int, default=500,
                    help="evaluate every x steps")
parser.add_argument('--seed', type=int, default=42 * 42, help="pytorch seed")
parser.add_argument('--continue-from', type=str, default=None,
                    help="continue training from a checkpoint")
opt = parser.parse_args()
import torch
import numpy as np
from PIL import Image
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
torch.manual_seed(opt.seed)
from ganlib import scripts
from ganlib.gan import GenerativeAdversarialNet
from ganlib.logger import Logger
from ganlib.dataset import Dataset
from ganlib.privacy import compute_renyi_privacy
from ganlib.trainer import DPWGANGPTrainer, WGANGPTrainer
from ganlib.generator import MNISTGenerator, Optimizable
cuda = torch.cuda.is_available()
class MNISTCritic(Optimizable):
    """Convolutional WGAN critic for MNIST images.

    Three strided convolutions downsample the input to a 4x4 feature map,
    which is flattened and projected to one scalar "criticism" per image.
    """

    def __init__(self, capacity):
        super().__init__()
        conv_args = dict(kernel_size=5, stride=2, padding=2)
        self.activation = nn.LeakyReLU(negative_slope=0.2)
        self.conv1 = nn.Conv2d(1, capacity, **conv_args)
        self.conv2 = nn.Conv2d(capacity, 2 * capacity, **conv_args)
        self.conv3 = nn.Conv2d(2 * capacity, 4 * capacity, **conv_args)
        self.flatten = nn.Flatten()
        # Three stride-2 convolutions leave a 4x4 map (28 -> 14 -> 7 -> 4),
        # hence a 4 * 4 * (4 * capacity) feature vector.
        self.projection = nn.Linear(4 * 4 * 4 * capacity, 1)

    def forward(self, images):
        """Map a batch of images to one scalar critic score per image."""
        hidden = self.activation(self.conv1(images))
        hidden = self.activation(self.conv2(hidden))
        hidden = self.activation(self.conv3(hidden))
        hidden = self.flatten(hidden)
        scores = self.projection(hidden)
        return scores.squeeze(-1)
def log(logger, info, tag, network, global_step):
    """Periodically record scalar metrics and evaluate checkpoints.

    Every ``opt.print_every`` steps the metrics in ``info`` are written to
    the logger and echoed to stdout; every ``opt.eval_every`` steps a
    checkpoint is saved and the evaluation scripts are run on it.
    """
    if global_step % opt.print_every == 0:
        logger.add_scalars(tag, info, global_step)
        metrics = ' '.join(f"{tag}/{k} = {v:.3g}" for k, v in info.items())
        print(f"[Step {global_step}] " + metrics)
    if global_step % opt.eval_every == 0:
        ckpt = logger.add_checkpoint(network, global_step)
        scripts.generate(logger=logger, params=ckpt,
                         step=global_step)
        # Only score samples when the local classifier checkpoint exists.
        if exists(join("cache", "mnist_classifier.ckpt")):
            scripts.inception(logger=logger, params=ckpt,
                              step=global_step)
        # Make sure the network is back in training mode after evaluation.
        network.train()
# Set default parameters
delta = 1e-5
lr_per_example = 3.125e-6

# Process parameters
# Scale the learning rate linearly with the batch size.
learning_rate = opt.batch_size * lr_per_example
logdir = join('cache', 'logs')
logdir = join(logdir, f"cap_{opt.capacity}-steps_{opt.critic_steps}-batchsize_{opt.batch_size}")
if not opt.nodp:
    # Record the privacy hyper-parameters in the run-directory name.
    logdir += f"-sig_{opt.sigma}-clip_{opt.grad_clip}"

# Initialize generator and critic. We wrap generator and critic into
# `GenerativeAdversarialNet` and provide methods `cuda` and `state_dict`
generator = MNISTGenerator(opt.capacity)
critic = MNISTCritic(opt.capacity)
gan = GenerativeAdversarialNet(generator, critic)
gan = gan.cuda() if cuda else gan

dset = Dataset()
dataloader = DataLoader(dset, batch_size=opt.batch_size,
                        shuffle=True, num_workers=4)

# Initialize optimization. We make optimizers part of the network and provide
# methods `.zero_grad` and `.step` to simplify the code.
generator.init_optimizer(torch.optim.Adam, lr=learning_rate, betas=(0.5, 0.9))
critic.init_optimizer(torch.optim.Adam, lr=learning_rate, betas=(0.5, 0.9))

if opt.nodp:
    trainer = WGANGPTrainer(opt.batch_size)
else:
    print("training with differential privacy")
    print(f"> delta = {delta}")
    print(f"> sigma = {opt.sigma}")
    print(f"> L2-clip = {opt.grad_clip}")
    trainer = DPWGANGPTrainer(opt.sigma, opt.grad_clip, batch_size=opt.batch_size)

print(f"> learning rate = {learning_rate} (at {opt.batch_size}-minibatches)")

if opt.continue_from:
    # Resume training: restore parameters and the global step counter.
    ckpt = torch.load(opt.continue_from)
    global_step = ckpt['global_step'] + 1
    ckpt = ckpt['state_dict']
    gan.generator.load_state_dict(ckpt['generator']['params'])
    gan.critic.load_state_dict(ckpt['critic']['params'])
    print(f"Continuing from step {global_step} ..")
else:
    global_step = 0

logs = {}
logger = Logger(logdir=logdir)
for epoch in range(opt.epochs):
    t0 = time()
    for imgs in dataloader:
        # Update the generator only every `critic_steps`-th iteration,
        # i.e. multiple critic updates per generator update.
        if global_step % opt.critic_steps == 0:
            genlog = trainer.generator_step(gan)
            logs.update(**genlog)
        critlog = trainer.critic_step(gan, imgs)
        t1 = time()
        logs.update(**critlog)
        logs['sampling_rate'] = imgs.shape[0] / (t1 - t0)
        if not opt.nodp and global_step % opt.print_every == 0:
            # Track the cumulative privacy budget spent so far.
            spent = compute_renyi_privacy(
                len(dset), opt.batch_size, global_step + 1, opt.sigma, delta)
            logs['epsilon'] = spent.eps
        log(logger, logs, 'train', gan, global_step)
        global_step += 1
        t0 = t1
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example program adds the AD info field to a VCF file.
It assumes that the AD field of the individual variant calls is already
populated.
Sample usage:
$ add_ad_to_vcf input.vcf.gz output.vcf.gz
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl import app
import six
from nucleus.io import vcf
from nucleus.util import variant_utils
from nucleus.util import variantcall_utils
from nucleus.util import vcf_constants
def get_variant_ad(variant):
  """Returns the allele depth for the Variant, calculated across its calls.

  Args:
    variant: A Variant whose calls all carry a populated 'AD' format field.

  Returns:
    A list of length num_alleles (reference + alternates) where entry i is
    the sum of the i-th allele depth over all calls of the variant.
  """
  num_alleles = len(variant.alternate_bases) + 1
  call_ads = [variantcall_utils.get_format(vc, 'AD') for vc in
              variant.calls]
  # The original `assert(<genexpr>)` asserted a generator object, which is
  # always truthy; wrap it in all() so malformed AD fields actually fail.
  assert all(len(call_ad) == num_alleles for call_ad in call_ads)
  return [sum(call_ad[i] for call_ad in call_ads)
          for i in six.moves.xrange(num_alleles)]
def main(argv):
  """Stream input VCF to output VCF, adding an AD INFO field to the header
  and a computed AD value to every variant record."""
  if len(argv) != 3:
    print('Usage: %s <input_vcf> <output_vcf>' % argv[0])
    sys.exit(-1)
  in_vcf = argv[1]
  out_vcf = argv[2]

  with vcf.VcfReader(in_vcf) as reader:
    # Refuse to run if the input already defines an AD INFO field.
    if 'AD' in [info.id for info in reader.header.infos]:
      print('%s already contains AD field.' % in_vcf)
      sys.exit(-1)
    out_header = reader.header
    out_header.infos.extend([vcf_constants.reserved_info_field('AD')])

    with vcf.VcfWriter(out_vcf, header=out_header) as writer:
      for variant in reader:
        # Compute AD across calls and attach it before writing the record.
        variant_utils.set_info(variant, 'AD', get_variant_ad(variant),
                               writer)
        writer.write(variant)


if __name__ == '__main__':
  app.run(main)
|
from __future__ import annotations
import dataclasses
import functools
import operator
import random
import timeit
from typing import Optional
import fire
from pipe import select
from tqdm import tqdm
# Energy cost of a single step for each piece type.
COSTS = {
    "A": 1,
    "B": 10,
    "C": 100,
    "D": 1000,
}
def check_move_from_home(source: Node, target: Node) -> bool:
    """A piece leaving a home may stop on any non-home node, or move
    directly into a home if that entry is itself legal."""
    if target.is_a_home:
        return check_move_into_home(source, target)
    return True
def check_move_into_home(source: Node, target: Node) -> bool:
    """A piece may enter a home only if it is that piece's own home and
    everything already inside belongs there."""
    if not target.is_a_home:
        return False
    return source.occupant == target.home and target.all_occupants_from_home
def reachable_nodes(board: Board, source: Node) -> list[tuple[Node, Node, int]]:
    """Expand every unoccupied node reachable from `source`, returning the
    legal moves as (source, target, distance) triples."""
    targets_with_distances = source.get_relevant_neighbors_with_distance(board)
    # NOTE: the list is deliberately extended while being iterated -- newly
    # discovered nodes are appended and later expanded themselves, so the
    # loop acts as a breadth-first search frontier.
    for target, distance in targets_with_distances:
        for neighbor, neighbor_distance in target.get_relevant_neighbors_with_distance(board):
            # Skip the origin and anything already queued for expansion.
            if neighbor != source and all(neighbor != added for added, _ in targets_with_distances):
                targets_with_distances.append((neighbor, neighbor_distance + distance))
    return trim(source, targets_with_distances)
def trim(source: Node, targets_with_distances: list[tuple[Node, int]]) -> list[tuple[Node, int]]:
    """Keep only targets that are legal destinations for source's occupant."""
    is_legal = check_move_from_home if source.is_a_home else check_move_into_home
    return [
        (source, target, distance)
        for target, distance in targets_with_distances
        if is_legal(source, target)
    ]
@dataclasses.dataclass
class Board:
    """The puzzle board: a mapping from node id to Node.

    Nodes are frozen, so moving a piece replaces the two affected nodes in a
    shallow copy of the grid; boards stay hashable for memoization.
    """

    grid: dict[int, Node] = dataclasses.field(default_factory=dict)

    def add_nodes(self, *nodes: Node) -> None:
        """Register several nodes at once."""
        for node in nodes:
            self.add_node(node)

    def add_node(self, node: Node) -> None:
        self.grid[node.node_id] = node

    def get_node(self, node_id: int) -> Node:
        return self.grid[node_id]

    def move_occupant(self, from_node: Node, to_node: Node, distance: int) -> tuple[Board, int]:
        """Return a new board with the move applied, plus its energy cost.

        The cost includes the extra steps reported by Node.pop/Node.push for
        leaving/entering a partially filled home, on top of `distance`.
        """
        new_board = self.copy()
        new_board.grid[from_node.node_id], occupant, extra_from_dist = from_node.pop()
        new_board.grid[to_node.node_id], extra_to_dist = to_node.push(occupant)
        move_cost = COSTS[occupant] * (extra_from_dist + distance + extra_to_dist)
        return new_board, move_cost

    @property
    def completed(self) -> bool:
        """True when homes hold only their own kind and nothing is parked elsewhere."""
        for node in self.grid.values():
            if node.is_a_home:
                if any(node.home != occupant for occupant in node.occupants):
                    return False
            elif node.occupied:
                return False
        return True

    def generate_moves(self) -> list[tuple[Node, Node, int]]:
        """All legal (from, to, distance) moves for every movable piece."""
        # Pass an initial value so reduce() also works when no node qualifies;
        # the original raised TypeError on an empty sequence.
        return functools.reduce(
            operator.add,
            [
                reachable_nodes(self, from_node)
                for from_node in self.grid.values()
                if from_node.has_occupant and not from_node.completed
            ],
            [],
        )

    def copy(self) -> Board:
        # A shallow grid copy is sufficient: nodes are immutable.
        return dataclasses.replace(self, grid=self.grid.copy())

    def __hash__(self):
        return hash((frozenset(self.grid.items())))
@dataclasses.dataclass(frozen=True)
class Node:
    """One board position (hallway cell, or a home when `home` is set).

    `occupants` is a string of piece letters; index 0 is the piece nearest
    the hallway.
    """

    node_id: int
    _neighbors: tuple[int, ...]
    distances: tuple[int, ...]
    home: str = ""
    occupants: str = ""
    max_occupants: int = 1

    def get_relevant_neighbors_with_distance(self, board: Board) -> list[tuple[Node, int]]:
        """Adjacent nodes with room to spare, paired with the step distance."""
        result = []
        for node_id, distance in zip(self._neighbors, self.distances):
            neighbor = board.get_node(node_id)
            if not neighbor.occupied:
                result.append((neighbor, distance))
        return result

    @property
    def occupant(self) -> str:
        """The piece closest to the hallway."""
        return self.occupants[0]

    @property
    def is_a_home(self) -> bool:
        return bool(self.home)

    @property
    def has_occupant(self) -> bool:
        return bool(self.occupants)

    @property
    def occupied(self) -> bool:
        """True when no further piece fits."""
        return len(self.occupants) == self.max_occupants

    @property
    def all_occupants_from_home(self) -> bool:
        """True when every deeper occupant matches the front occupant."""
        return all(self.occupant == other for other in self.occupants[1:])

    @property
    def completed(self) -> bool:
        """A full home containing only its own kind."""
        return self.is_a_home and self.occupied and all(self.home == o for o in self.occupants)

    def __hash__(self):
        return hash((self.node_id, self.occupants))

    def __str__(self):
        return f"{self.node_id} {self.occupants}"

    def pop(self) -> tuple[Node, str, int]:
        """Remove the front occupant; return (new node, piece, extra steps out)."""
        assert len(self.occupants) > 0
        departing, staying = self.occupants[0], self.occupants[1:]
        extra_steps = self.max_occupants - len(self.occupants)
        return dataclasses.replace(self, occupants=staying), departing, extra_steps

    def push(self, occupant: str) -> tuple[Node, int]:
        """Insert a piece at the front; return (new node, extra steps in)."""
        assert len(self.occupants) < self.max_occupants
        extra_steps = self.max_occupants - len(self.occupants) - 1
        return dataclasses.replace(self, occupants=occupant + self.occupants), extra_steps
def initial_state(input_file: str, part_2: bool = True) -> Board:
    """Build the starting Board from the puzzle input file.

    Node ids 0-6 are the hallway stopping cells; ids 7/9/11/13 are the homes
    for A/B/C/D. When part_2 is True, the fixed extra rows (DD / CB / BA /
    AC) are spliced into the middle of each home and capacity doubles.
    """
    with open(input_file) as f:
        # Scan only the piece letters, in reading order: the four top-row
        # letters first, then the four bottom-row letters.
        starting_configuration = [char for char in f.read() if char in "ABCD"]
    # Interleave so room i gets its top letter (index i) followed by its
    # bottom letter (index i + 4).
    starting_configuration = "".join(
        "".join(pair) for pair in zip(starting_configuration[:4], starting_configuration[4:])
    )
    board = Board()
    board.add_nodes(
        Node(node_id=0, _neighbors=(1,), distances=(1,)),
        Node(node_id=1, _neighbors=(0, 2, 7), distances=(1, 2, 2)),
        Node(node_id=2, _neighbors=(1, 3, 7, 9), distances=(2, 2, 2, 2)),
        Node(node_id=3, _neighbors=(2, 4, 9, 11), distances=(2, 2, 2, 2)),
        Node(node_id=4, _neighbors=(3, 5, 11, 13), distances=(2, 2, 2, 2)),
        Node(node_id=5, _neighbors=(4, 6, 13), distances=(2, 1, 2)),
        Node(node_id=6, _neighbors=(5,), distances=(1,)),
        Node(
            node_id=7,
            _neighbors=(1, 2),
            distances=(2, 2),
            home="A",
            occupants=starting_configuration[0] + ("DD" if part_2 else "") + starting_configuration[1],
            max_occupants=4 if part_2 else 2,
        ),
        Node(
            node_id=9,
            _neighbors=(2, 3),
            distances=(2, 2),
            home="B",
            occupants=starting_configuration[2] + ("CB" if part_2 else "") + starting_configuration[3],
            max_occupants=4 if part_2 else 2,
        ),
        Node(
            node_id=11,
            _neighbors=(3, 4),
            distances=(2, 2),
            home="C",
            occupants=starting_configuration[4] + ("BA" if part_2 else "") + starting_configuration[5],
            max_occupants=4 if part_2 else 2,
        ),
        Node(
            node_id=13,
            _neighbors=(4, 5),
            distances=(2, 2),
            home="D",
            occupants=starting_configuration[6] + ("AC" if part_2 else "") + starting_configuration[7],
            max_occupants=4 if part_2 else 2,
        ),
    )
    return board
# Upper bound on the answer; also the value returned when no solution is found.
MIN_COST = 100000


def solve_board(board: Board):
    """Cost-bounded breadth-first search for the cheapest solved board.

    Explores board states level by level, pruning any path whose running
    cost already reaches the best known total.

    Returns
    -------
    int
        The cheapest total cost found (MIN_COST if no solution was reached).
        The return value is new and backward-compatible: the original
        returned None, which callers ignored.
    """
    known_boards = {board: 0}
    min_cost = MIN_COST
    open_boards = {board: 0}
    completions = 0
    while open_boards:
        new_boards = {}
        t = tqdm(open_boards.items())
        t.set_description(f"Open: {len(open_boards): >10}, Completed: {completions: >10}, Min: {min_cost: >10}")
        for board, current_cost in t:
            moves = board.generate_moves()
            # BUG FIX: the original called sorted(moves, ...) and discarded
            # the result (a no-op); sort in place so the intended
            # longest-distance-first ordering is actually applied.
            moves.sort(key=operator.itemgetter(2), reverse=True)
            for from_node, to_node, distance in moves:
                new_board, additional_cost = board.move_occupant(from_node, to_node, distance)
                total_cost = current_cost + additional_cost
                if new_board.completed:
                    if total_cost < min_cost:
                        min_cost = total_cost
                    completions += 1
                    t.set_description(
                        f"Open: {len(open_boards): >10}, Completed: {completions: >10}, Min: {min_cost: >10}"
                    )
                elif total_cost >= min_cost:
                    # Prune: this path can no longer beat the best solution.
                    continue
                elif new_board not in known_boards or total_cost < known_boards[new_board]:
                    known_boards[new_board] = total_cost
                    new_boards[new_board] = total_cost
        open_boards = new_boards
    return min_cost
def main(input_file: str = "input.txt") -> None:
    """Parse the puzzle input and search for the cheapest solution."""
    solve_board(initial_state(input_file))


if __name__ == "__main__":
    # Time a single full run; fire exposes main's arguments on the CLI.
    timer = timeit.Timer(lambda: fire.Fire(main))
    print(timer.timeit(1))
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This module implements fMRI Design Matrix creation.
Design matrices are represented by Pandas DataFrames
Computations of the different parts of the design matrix are confined
to the make_first_level_design_matrix function, that create a DataFrame
All the others are ancillary functions.
Design matrices contain three different types of regressors:
1. Task-related regressors, that result from the convolution
of the experimental paradigm regressors with hemodynamic models
A hemodynamic model is one of:
- 'spm' : linear filter used in the SPM software
- 'glover' : linear filter estimated by G.Glover
- 'spm + derivative', 'glover + derivative': the same linear models,
plus their time derivative (2 regressors per condition)
- 'spm + derivative + dispersion', 'glover + derivative + dispersion':
idem plus the derivative wrt the dispersion parameter of the hrf
(3 regressors per condition)
- 'fir' : finite impulse response model, generic linear filter
2. User-specified regressors, that represent information available on
the data, e.g. motion parameters, physiological data resampled at
the acquisition rate, or sinusoidal regressors that model the
signal at a frequency of interest.
3. Drift regressors, that represent low_frequency phenomena of no
interest in the data; they need to be included to reduce variance
estimates.
Author: Bertrand Thirion, 2009-2015
"""
from __future__ import with_statement
import sys
from warnings import warn
import numpy as np
import pandas as pd
from scipy import linalg
from .experimental_paradigm import check_events
from .hemodynamic_models import compute_regressor, _orthogonalize
from .utils import full_rank, _basestring
######################################################################
# Ancillary functions
######################################################################
def _poly_drift(order, frame_times):
    """Create a polynomial drift matrix

    Parameters
    ----------
    order : int,
        Number of polynomials in the drift model.

    frame_times : array of shape(n_scans),
        Time stamps used to sample polynomials.

    Returns
    -------
    pol : ndarray, shape(n_scans, order + 1)
        Orthogonalized polynomial drifts, with the constant regressor
        moved to the last column.
    """
    order = int(order)
    pol = np.zeros((np.size(frame_times), order + 1))
    tmax = float(frame_times.max())
    # Column k holds the k-th power of the normalized frame times.
    scaled_times = frame_times / tmax
    for k in range(order + 1):
        pol[:, k] = scaled_times ** k
    pol = _orthogonalize(pol)
    # By convention the constant (degree-0) term goes last.
    pol = np.hstack((pol[:, 1:], pol[:, :1]))
    return pol
def _cosine_drift(high_pass, frame_times):
"""Create a cosine drift matrix with frequencies or equal to
high_pass.
Parameters
----------
high_pass : float
Cut frequency of the high-pass filter in Hz
frame_times : array of shape (n_scans,)
The sampling times in seconds
Returns
-------
cosine_drift : array of shape(n_scans, n_drifts)
Cosine drifts plus a constant regressor at cosine_drift[:, -1]
Ref: http://en.wikipedia.org/wiki/Discrete_cosine_transform DCT-II
"""
n_frames = len(frame_times)
n_times = np.arange(n_frames)
dt = (frame_times[-1] - frame_times[0]) / (n_frames - 1)
if high_pass * dt >= .5:
warn('High-pass filter will span all accessible frequencies '
'and saturate the design matrix. '
'You may want to reduce the high_pass value.'
'The provided value is {0} Hz'.format(high_pass))
order = np.minimum(n_frames - 1,
int(np.floor(2 * n_frames * high_pass * dt)))
cosine_drift = np.zeros((n_frames, order + 1))
normalizer = np.sqrt(2.0 / n_frames)
for k in range(1, order + 1):
cosine_drift[:, k - 1] = normalizer * np.cos(
(np.pi / n_frames) * (n_times + .5) * k)
cosine_drift[:, -1] = 1.
return cosine_drift
def _none_drift(frame_times):
""" Create an intercept vector
Returns
-------
np.ones_like(frame_times)
"""
return np.reshape(np.ones_like(frame_times), (np.size(frame_times), 1))
def _make_drift(drift_model, frame_times, order, high_pass):
    """Create the drift matrix and its column names.

    Parameters
    ----------
    drift_model : {'polynomial', 'cosine', None},
        string that specifies the desired drift model

    frame_times : array of shape(n_scans),
        list of values representing the desired TRs

    order : int, optional,
        order of the drift model (in case it is polynomial)

    high_pass : float, optional,
        high-pass frequency in case of a cosine model (in Hz)

    Returns
    -------
    drift : array of shape(n_scans, n_drifts),
        the drift matrix

    names : list of length(n_drifts),
        the associated names
    """
    if isinstance(drift_model, _basestring):
        drift_model = drift_model.lower()  # for robust comparisons
    if drift_model == 'polynomial':
        drift = _poly_drift(order, frame_times)
    elif drift_model == 'cosine':
        drift = _cosine_drift(high_pass, frame_times)
    elif drift_model is None:
        drift = _none_drift(frame_times)
    else:
        raise NotImplementedError("Unknown drift model %r" % (drift_model))
    # One name per drift regressor, plus 'constant' for the last column.
    names = ['drift_%d' % k for k in range(1, drift.shape[1])]
    names.append('constant')
    return drift, names
def _convolve_regressors(events, hrf_model, frame_times, fir_delays=[0],
                         min_onset=-24, oversampling=50):
    """ Creation of a matrix that comprises
    the convolution of the conditions onset with a certain hrf model

    Parameters
    ----------
    events : DataFrame instance,
        Events data describing the experimental paradigm
        see nistats.experimental_paradigm to check the specification
        for these to be valid paradigm descriptors

    hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',
        'glover', 'glover + derivative', 'glover + derivative + dispersion',
        'fir', None}
        String that specifies the hemodynamic response function

    frame_times : array of shape (n_scans,)
        The targeted timing for the design matrix.

    fir_delays : array-like of shape (n_onsets,), optional,
        In case of FIR design, yields the array of delays
        used in the FIR model (in scans).

    min_onset : float, optional (default: -24),
        Minimal onset relative to frame_times[0] (in seconds) events
        that start before frame_times[0] + min_onset are not considered.

    oversampling: int optional, default:50,
        Oversampling factor used in temporal convolutions.

    Returns
    -------
    regressor_matrix : array of shape (n_scans, n_regressors),
        Contains the convolved regressors associated with the
        experimental conditions.

    regressor_names : list of strings,
        The regressor names, that depend on the hrf model used
        if 'glover' or 'spm' then this is identical to the input names
        if 'glover + derivative' or 'spm + derivative', a second name is output
            i.e. '#name_derivative'
        if 'spm + derivative + dispersion' or
            'glover + derivative + dispersion',
            a third name is used, i.e. '#name_dispersion'
        if 'fir', the regressors are numbered according to '#name_#delay'
    """
    regressor_names = []
    regressor_matrix = None
    # Split the events into per-condition (onset, duration, modulation) data.
    trial_type, onset, duration, modulation = check_events(events)
    for condition in np.unique(trial_type):
        condition_mask = (trial_type == condition)
        exp_condition = (onset[condition_mask],
                         duration[condition_mask],
                         modulation[condition_mask])
        reg, names = compute_regressor(
            exp_condition, hrf_model, frame_times, con_id=condition,
            fir_delays=fir_delays, oversampling=oversampling,
            min_onset=min_onset)
        regressor_names += names
        # Accumulate per-condition regressors column-wise.
        if regressor_matrix is None:
            regressor_matrix = reg
        else:
            regressor_matrix = np.hstack((regressor_matrix, reg))
    return regressor_matrix, regressor_names
######################################################################
# Design matrix creation
######################################################################
def make_first_level_design_matrix(
        frame_times, events=None, hrf_model='glover',
        drift_model='cosine', high_pass=.01, drift_order=1, fir_delays=[0],
        add_regs=None, add_reg_names=None, min_onset=-24, oversampling=50):
    """Generate a design matrix from the input parameters

    Parameters
    ----------
    frame_times : array of shape (n_frames,)
        The timing of acquisition of the scans in seconds.

    events : DataFrame instance, optional
        Events data that describes the experimental paradigm.
         The DataFrame instance might have these keys:
            'onset': column to specify the start time of each events in
                     seconds. An error is raised if this key is missing.
            'trial_type': column to specify per-event experimental conditions
                          identifier. If missing each event are labelled
                          'dummy' and considered to form a unique condition.
            'duration': column to specify the duration of each events in
                        seconds. If missing the duration of each events is set
                        to zero.
            'modulation': column to specify the amplitude of each
                          events. If missing the default is set to
                          ones(n_events).

        An experimental paradigm is valid if it has an 'onset' key
        and a 'duration' key.
        If these keys are missing an error will be raised.
        For the others keys a warning will be displayed.
        Particular attention should be given to the 'trial_type' key
        which defines the different conditions in the experimental paradigm.

    hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',
        'glover', 'glover + derivative', 'glover + derivative + dispersion',
        'fir', None}, optional,
        Specifies the hemodynamic response function

    drift_model : {'polynomial', 'cosine', None}, optional
        Specifies the desired drift model,

    high_pass : float, optional
        Cut frequency of the high-pass filter in Hz.
        Used only if drift_model is 'cosine'.

    drift_order : int, optional
        Order of the drift model (in case it is polynomial).

    fir_delays : array of shape(n_onsets) or list, optional,
        In case of FIR design, yields the array of delays used in the FIR
        model (in scans).

    add_regs : array of shape(n_frames, n_add_reg), optional
        additional user-supplied regressors, e.g. data driven noise regressors
        or seed based regressors.

    add_reg_names : list of (n_add_reg,) strings, optional
        If None, while add_regs was provided, these will be termed
        'reg_%i', i = 0..n_add_reg - 1

    min_onset : float, optional
        Minimal onset relative to frame_times[0] (in seconds)
        events that start before frame_times[0] + min_onset are not considered.

    oversampling: int, optional,
        Oversampling factor used in temporal convolutions.

    Returns
    -------
    design_matrix : DataFrame instance,
        holding the computed design matrix, the index being the frames_times
        and each column a regressor.

    Raises
    ------
    ValueError
        If the additional regressors are mis-specified or column names
        collide.
    """
    # check arguments
    # check that additional regressor specification is correct
    n_add_regs = 0
    if add_regs is not None:
        if add_regs.shape[0] == np.size(add_regs):
            # A 1D array was passed: treat it as a single-column regressor.
            add_regs = np.reshape(add_regs, (np.size(add_regs), 1))
        n_add_regs = add_regs.shape[1]
        if add_regs.shape[0] != np.size(frame_times):
            # The original used `assert cond, ValueError(...)`, which raises
            # AssertionError (and is stripped under `python -O`); raise the
            # intended ValueError explicitly instead.
            raise ValueError(
                'Incorrect specification of additional regressors: '
                'length of regressors provided: %d, number of '
                'time-frames: %d' % (add_regs.shape[0], np.size(frame_times)))

    # check that additional regressor names are well specified
    if add_reg_names is None:
        add_reg_names = ['reg%d' % k for k in range(n_add_regs)]
    elif len(add_reg_names) != n_add_regs:
        raise ValueError(
            'Incorrect number of additional regressor names was provided'
            '(%d provided, %d expected' % (len(add_reg_names),
                                           n_add_regs))

    # computation of the matrix
    names = []
    matrix = None

    # step 1: events-related regressors
    if events is not None:
        # create the condition-related regressors
        if isinstance(hrf_model, _basestring):
            hrf_model = hrf_model.lower()
        matrix, names = _convolve_regressors(
            events, hrf_model, frame_times, fir_delays, min_onset,
            oversampling)

    # step 2: additional regressors
    if add_regs is not None:
        # add user-supplied regressors and corresponding names
        if matrix is not None:
            matrix = np.hstack((matrix, add_regs))
        else:
            matrix = add_regs
        names += add_reg_names

    # step 3: drifts
    drift, dnames = _make_drift(drift_model, frame_times, drift_order,
                                high_pass)
    if matrix is not None:
        matrix = np.hstack((matrix, drift))
    else:
        matrix = drift
    names += dnames

    # check column names are all unique
    if len(np.unique(names)) != len(names):
        raise ValueError('Design matrix columns do not have unique names')

    # step 4: Force the design matrix to be full rank at working precision
    matrix, _ = full_rank(matrix)

    design_matrix = pd.DataFrame(
        matrix, columns=names, index=frame_times)
    return design_matrix
def check_design_matrix(design_matrix):
    """ Check that the provided DataFrame is indeed a valid design matrix
    descriptor, and returns a triplet of fields

    Parameters
    ----------
    design matrix : pandas DataFrame,
        Describes a design matrix.

    Returns
    -------
    frame_times : array of shape (n_frames,),
        Sampling times of the design matrix in seconds.

    matrix : array of shape (n_frames, n_regressors), dtype='f'
        Numerical values for the design matrix.

    names : list of str,
        Names of the design-matrix columns (one per regressor).
    """
    return (design_matrix.index,
            design_matrix.values,
            list(design_matrix.keys()))
def make_second_level_design_matrix(subjects_label, confounds=None):
    """Sets up a second level design.

    Construct a design matrix with an intercept and subject specific confounds.

    Parameters
    ----------
    subjects_label: list of str
        Contain subject labels to extract confounders in the right order,
        corresponding with the images, to create the design matrix.

    confounds: pandas DataFrame, optional
        If given, contains at least two columns, 'subject_label' and one
        confound. The subjects list determines the rows to extract from
        confounds thanks to its 'subject_label' column. All subjects must
        have confounds specified. There should be only one row per subject.

    Returns
    -------
    design_matrix: pandas DataFrame
        The second level design matrix
    """
    confounds_name = []
    if confounds is not None:
        confounds_name = confounds.columns.tolist()
        confounds_name.remove('subject_label')
    design_columns = confounds_name + ['intercept']
    # check column names are unique
    if len(np.unique(design_columns)) != len(design_columns):
        raise ValueError('Design matrix columns do not have unique names')
    # float dtype necessary for linalg
    design_matrix = pd.DataFrame(columns=design_columns, dtype=float)
    for row_idx, subject_label in enumerate(subjects_label):
        design_matrix.loc[row_idx] = [0] * len(design_columns)
        design_matrix.loc[row_idx, 'intercept'] = 1
        if confounds is None:
            continue
        subject_rows = confounds['subject_label'] == subject_label
        n_rows = np.sum(subject_rows)
        if n_rows > 1:
            raise ValueError('confounds contain more than one row for '
                             'subject %s' % subject_label)
        if n_rows == 0:
            raise ValueError('confounds not specified for subject %s' %
                             subject_label)
        for conf_name in confounds_name:
            design_matrix.loc[row_idx, conf_name] = (
                confounds[subject_rows][conf_name].values[0])
    # check design matrix is not singular
    if np.linalg.cond(design_matrix.values) > design_matrix.size:
        warn('Attention: Design matrix is singular. Aberrant estimates '
             'are expected.')
    return design_matrix
|
import discord
from discord.ext import commands
from main import *
import asyncio
import datetime
import cogs.utils.slash as slash
class Bot(commands.Cog):
    """Commands and events related to the bot."""

    def __init__(self, bot):
        self.bot = bot

    # Emoji shown next to this cog in the help command.
    display_emoji = "๐พ"

    @commands.Cog.listener()
    async def on_ready(self):
        """Set presence and log startup once the bot is connected."""
        await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(f'{prefix}help'))
        print("Running.")
        print(self.bot.user)

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Central command-error handler.

        BUG FIX: the original used a series of independent `if` statements
        with the final `else` bound only to the last check, so every
        *handled* error (e.g. NotOwner) also fell through to the fallback,
        sending a duplicate message and re-raising. An `elif` chain handles
        each error exactly once.
        """
        if isinstance(error, commands.CommandNotFound):
            return
        if isinstance(error, commands.NotOwner):
            await ctx.send("You do not own this bot.")
        elif isinstance(error, (commands.MissingRequiredArgument, commands.UserInputError)):
            # Bad or missing arguments: show the command's help page.
            await ctx.send_help(ctx.command)
        elif isinstance(error, commands.MaxConcurrencyReached):
            name = error.per.name
            suffix = "per %s" % name if error.per.name != "default" else "globally"
            plural = "%s times %s" if error.number > 1 else "%s time %s"
            fmt = plural % (error.number, suffix)
            await ctx.send(f"This command can only be used **{fmt}** at the same time. Use `{prefix}{ctx.full_parent_name} stop` to stop it.")
        elif isinstance(error, commands.MissingPermissions):
            missing = ["`" + perm.replace("_", " ").replace("guild", "server").title() + "`"
                       for perm in error.missing_perms]
            fmt = "\n".join(missing)
            message = f"You need the following permissions to run this command:\n{fmt}."
            await ctx.send(message)
        elif isinstance(error, commands.BotMissingPermissions):
            missing = ["`" + perm.replace("_", " ").replace("guild", "server").title() + "`"
                       for perm in error.missing_perms]
            fmt = "\n".join(missing)
            message = f"I need the following permissions to run this command:\n{fmt}."
            await ctx.send(message)
        elif isinstance(error, commands.CommandOnCooldown):
            await ctx.send(f"That command is on cooldown for **{round(error.retry_after, 2)}s**")
        elif isinstance(error, commands.DisabledCommand):
            await ctx.send(f'Command `{ctx.command}` has been disabled by the developer for updates, debugging or due to some other issue.')
        else:
            # Unrecognized error: report it and re-raise so it gets logged.
            await ctx.send(str(error))
            raise error

    # logs
    @commands.Cog.listener(name="on_command")
    async def on_command(self, ctx):
        """Log every invoked command to the configured log channel."""
        try:
            log_ch = await self.bot.fetch_channel(log_channel)
            user = ctx.author
            message_content = str(ctx.message.content)
            message_id = ctx.message.id
            channel = str(ctx.channel)
            channel_id = ctx.channel.id
            em = discord.Embed(colour=embed_colour)
            em.set_author(name=user, icon_url=user.avatar.url)
            em.add_field(name="Command used", value=message_content, inline=False)
            em.timestamp = datetime.datetime.utcnow()
            if ctx.guild:
                server = ctx.guild.name
                server_id = ctx.guild.id
                # Jump link back to the invoking message.
                em.add_field(name="Go to", value=f"[Warp](https://discord.com/channels/{server_id}/{channel_id}/{message_id})")
                em.set_footer(text=f"{server} | #{channel}")
            else:
                em.set_footer(text="Direct messages")
            await log_ch.send(embed=em)
        except Exception as e:
            raise e

    # prefix
    @commands.command(name="prefix",
                      aliases=["prefixes"],
                      brief="Shows prefixes.",
                      help="Shows the prefixes of the bot. Cannot be changed.")
    async def _prefix(self, ctx):
        """List the bot's (fixed) prefixes."""
        n = "\n> "
        await ctx.send(f"My prefixes are:\n> {n.join(get_prefix(bot, ctx)[1:])}\nThey cannot be changed.")

    # ping
    @commands.command(name="ping",
                      brief="Bot's latency",
                      help="Responds with 'Pong!' and the bot's latency")
    async def ping(self, ctx):
        """Measure round-trip latency from the message timestamps."""
        message = await ctx.send('Pong!')
        ms = int((message.created_at - ctx.message.created_at).total_seconds() * 1000)
        await message.edit(content=f"Pong! {ms} ms")

    # invite
    @commands.command(name="invite",
                      brief="Bot's invite link",
                      help="Sends the bot's invite link.")
    async def invite(self, ctx):
        """Send an embed containing the bot's OAuth2 invite link."""
        embed = discord.Embed(title="Add the bot to your server using the following link.", color=embed_colour)
        embed.set_thumbnail(url=self.bot.user.avatar.url)
        embed.add_field(name="Invite Bot", value=f"[Invite link.](https://discord.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=8&scope=bot%20applications.commands)", inline=False)
        await ctx.send(embed=embed)
def setup(bot):
    """Extension entry point used by discord.py to register this cog."""
    cog = Bot(bot)
    bot.add_cog(cog)
"""Utility functions for tf-based Reinforcement learning algorithms."""
import numpy as np
from metarl.misc import tensor_utils as np_tensor_utils
from metarl.tf.misc import tensor_utils
def paths_to_tensors(paths, max_path_length, baseline_predictions, discount,
                     gae_lambda):
    """Return processed sample data based on the collected paths.

    Args:
        paths (list[dict]): A list of collected paths.
        max_path_length (int): Maximum length of a single rollout.
        baseline_predictions (numpy.ndarray): Predicted value of GAE
            (Generalized Advantage Estimation) Baseline.
        discount (float): Environment reward discount.
        gae_lambda (float): Lambda used for generalized advantage
            estimation.

    Returns:
        dict: Processed sample data, with key
            * observations: (numpy.ndarray)
            * actions: (numpy.ndarray)
            * rewards: (numpy.ndarray)
            * baselines: (numpy.ndarray)
            * returns: (numpy.ndarray)
            * valids: (numpy.ndarray)
            * agent_infos: (dict)
            * env_infos: (dict)
            * paths: (list[dict])

    """
    baselines = []
    returns = []
    # First pass: per-path GAE advantages from the one-step TD deltas.
    # (The previously accumulated `total_steps` counter was unused and removed.)
    for idx, path in enumerate(paths):
        # Append a terminal value of 0 so deltas can be computed at the end.
        path_baselines = np.append(baseline_predictions[idx], 0)
        deltas = (path['rewards'] + discount * path_baselines[1:] -
                  path_baselines[:-1])
        path['advantages'] = np_tensor_utils.discount_cumsum(
            deltas, discount * gae_lambda)
        path['deltas'] = deltas
    # Second pass: per-path baselines and discounted returns.
    for idx, path in enumerate(paths):
        # baselines
        path['baselines'] = baseline_predictions[idx]
        baselines.append(path['baselines'])
        # returns
        path['returns'] = np_tensor_utils.discount_cumsum(
            path['rewards'], discount)
        returns.append(path['returns'])
    # make all paths the same length
    obs = [path['observations'] for path in paths]
    obs = tensor_utils.pad_tensor_n(obs, max_path_length)
    actions = [path['actions'] for path in paths]
    actions = tensor_utils.pad_tensor_n(actions, max_path_length)
    rewards = [path['rewards'] for path in paths]
    rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
    returns = [path['returns'] for path in paths]
    returns = tensor_utils.pad_tensor_n(returns, max_path_length)
    baselines = tensor_utils.pad_tensor_n(baselines, max_path_length)
    agent_infos = [path['agent_infos'] for path in paths]
    agent_infos = tensor_utils.stack_tensor_dict_list([
        tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos
    ])
    env_infos = [path['env_infos'] for path in paths]
    env_infos = tensor_utils.stack_tensor_dict_list(
        [tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos])
    # valids marks real (unpadded) timesteps with 1.
    valids = [np.ones_like(path['returns']) for path in paths]
    valids = tensor_utils.pad_tensor_n(valids, max_path_length)
    lengths = np.asarray([v.sum() for v in valids])
    samples_data = dict(
        observations=obs,
        actions=actions,
        rewards=rewards,
        baselines=baselines,
        returns=returns,
        valids=valids,
        lengths=lengths,
        agent_infos=agent_infos,
        env_infos=env_infos,
        paths=paths,
    )
    return samples_data
|
"""Some utilities that may help.
"""
from .iterables import (flatten, group, take, subsets,
variations, numbered_symbols, cartes, capture, dict_merge,
postorder_traversal, preorder_traversal, interactive_traversal,
prefixes, postfixes, sift, topological_sort)
from .lambdify import lambdify
from .source import source
from .decorator import threaded, xthreaded
from .runtests import test, doctest
from .cythonutils import cythonized
from .timeutils import timed
from .misc import default_sort_key
|
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook copula.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Copula - Multivariate joint distribution
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import stats

sns.set_style("darkgrid")
sns.mpl.rc("figure", figsize=(8, 8))

# When modeling a system, there are often cases where multiple parameters
# are involved. Each of these parameters could be described with a given
# Probability Density Function (PDF). If we would like to be able to generate a
# new set of parameter values, we need to be able to sample from these
# distributions - also called marginals. There are mainly two cases: *(i)*
# PDFs are independent; *(ii)* there is a dependency. One way to model the
# dependency is to use a **copula**.

# ## Sampling from a copula
#
# Let's use a bi-variate example and assume first that we have a prior and
# know how to model the dependence between our 2 variables.
#
# In this case, we are using the Gumbel copula and fix its hyperparameter
# `theta=2`. We can visualize its 2-dimensional PDF.
from statsmodels.distributions.copula.api import (CopulaDistribution,
                                                  GumbelCopula,
                                                  IndependenceCopula)

copula = GumbelCopula(theta=2)
_ = copula.plot_pdf()  # returns a matplotlib figure

# And we can sample the PDF.
sample = copula.rvs(10000)
h = sns.jointplot(x=sample[:, 0], y=sample[:, 1], kind="hex")
_ = h.set_axis_labels("X1", "X2", fontsize=16)

# Let's come back to our 2 variables for a second. In this case we
# consider them to be gamma and normally distributed. If they would be
# independent from each other, we could sample from each PDF individually.
# Here we use a convenient class to do the same operation.
#
# ### Reproducibility
#
# Generating reproducible random values from copulas requires explicitly
# setting the `seed` argument.
# `seed` accepts either an initialized NumPy `Generator` or `RandomState`,
# or any argument acceptable
# to `np.random.default_rng`, e.g., an integer or a sequence of integers.
# This example uses an
# integer.
#
# The singleton `RandomState` that is directly exposed in the `np.random`
# distributions is
# not used, and setting `np.random.seed` has no effect on the values
# generated.
marginals = [stats.gamma(2), stats.norm]
joint_dist = CopulaDistribution(copula=IndependenceCopula(),
                                marginals=marginals)
sample = joint_dist.rvs(512, random_state=20210801)
h = sns.jointplot(x=sample[:, 0], y=sample[:, 1], kind="scatter")
_ = h.set_axis_labels("X1", "X2", fontsize=16)

# Now, above we have expressed the dependency between our variables using
# a copula, we can use this copula to sample a new set of observations with
# the same convenient class.
joint_dist = CopulaDistribution(copula, marginals)
# Use an initialized Generator object
rng = np.random.default_rng([2, 0, 2, 1, 0, 8, 0, 1])
sample = joint_dist.rvs(512, random_state=rng)
h = sns.jointplot(x=sample[:, 0], y=sample[:, 1], kind="scatter")
_ = h.set_axis_labels("X1", "X2", fontsize=16)

# There are two things to note here. *(i)* as in the independent case, the
# marginals are correctly showing a gamma and normal distribution; *(ii)*
# the dependence is visible between the two variables.

# ## Estimating copula parameters
#
# Now, imagine we already have experimental data and we know that there is
# a dependency that can be expressed using a Gumbel copula. But we don't
# know what is the hyperparameter value for our copula. In this case, we can
# estimate the value.
#
# We are going to use the sample we just generated as we already know the
# value of the hyperparameter we should get: `theta=2`.
copula = GumbelCopula()
theta = copula.fit_corr_param(sample)
print(theta)

# We can see that the estimated hyperparameter value is close to the value
# set previously.
|
# Generated by Django 2.1.2 on 2018-10-29 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two optional API-key fields to the `profile` model."""

    dependencies = [
        ('accounts', '0014_auto_20171227_1530'),
    ]

    operations = [
        # Optional CharField (up to 128 chars); may be left blank.
        migrations.AddField(
            model_name='profile',
            name='api_key_id',
            field=models.CharField(blank=True, max_length=128),
        ),
        # Optional CharField (up to 128 chars); may be left blank.
        migrations.AddField(
            model_name='profile',
            name='api_key_readonly',
            field=models.CharField(blank=True, max_length=128),
        ),
    ]
|
'''
Version: 0419
Utility Maximizing Learning Plan Simulations
# Brute force, Greedy, ILP Solver
# Support Additive cost function
'''
import networkx as nx
import numpy as np
import random,copy, time, json, os, argparse, csv, datetime
from gurobipy import *
from UMLP_solver import *
import utils
# INPUT description:
# G <- a DAG object representing n knowledge points' dependencies
# B <- a number describing total budget
# C <- a row vector of length n describing cost of learning Ki
# U <- a row vector of length n describing the value of learning Ki
# type <- type of cost function
def generate_random_dag(nodes, density):
    """Generate a random DAG with `nodes` vertices via random edge toggling.

    Args:
        nodes: number of vertices.
        density: fraction of the maximum possible degree sum to allow.

    Returns:
        networkx.DiGraph: a directed acyclic graph.
    """
    G = nx.DiGraph()
    # Degree-sum budget; each edge contributes 2 to the total degree.
    edges = density * nodes * (nodes - 1)
    for i in range(nodes):
        G.add_node(i)
    for _ in range(nodes**2):
        a = random.randint(0, nodes - 1)
        b = a
        while b == a:
            b = random.randint(0, nodes - 1)
        if G.has_edge(a, b):
            # Removing an edge can never create a cycle or exceed the budget.
            G.remove_edge(a, b)
        else:
            G.add_edge(a, b)
            # BUGFIX: only roll back after an *addition*.  The original ran
            # this check after removals too, so `G.remove_edge(a, b)` could
            # be called on an edge that no longer exists and raise.
            current_degree = sum(dict(G.degree()).values())
            if not (nx.is_directed_acyclic_graph(G) and current_degree <= edges):
                G.remove_edge(a, b)
    return G
def get_index(A):
    """Map an iterable of knowledge-point indices to its unique bitmask int."""
    return sum(2 ** kp for kp in A)
def generate_cost(G, cost_type = "add"):
    """Generate a random cost structure for graph G.

    Args:
        G: DAG of knowledge points.
        cost_type: "add" for additive costs (a length-N vector), or
            "monotone" for subset-dependent costs (a (2**N, N) matrix whose
            row index is the bitmask of the already-learned subset).

    Returns:
        numpy.ndarray: the cost vector ("add") or matrix ("monotone");
        None for any other cost_type (unchanged original behavior).
    """
    import itertools  # BUGFIX: used below but never imported at module level

    N = G.order()
    C0 = np.random.uniform(1, 10, N)
    if cost_type == "add":
        return C0
    elif cost_type == "monotone":
        C = np.zeros((2**N, N))
        C[0] = C0

        def generate_part_cost(C, A, i):
            # Cost of learning item i given subset A: bounded above by the
            # cheapest cost over all single-removal subsets of A (monotone).
            # BUGFIX: the loop variable used to shadow the target index `i`,
            # so `cost` was evaluated for the wrong knowledge point.
            maximum_cost = cost(C, [], i, cost_type)
            for j in range(len(A)):
                curA = A[:j] + A[j + 1:]
                curCost = cost(C, curA, i, cost_type)
                if curCost <= maximum_cost:
                    maximum_cost = curCost
            return np.random.uniform(maximum_cost * (1 - 1.0 / N), maximum_cost)

        def all_subsets(N, x):
            return itertools.combinations(list(range(N)), x)

        for x in range(N):
            subsets = all_subsets(N, x)
            for subset in subsets:
                index = get_index(subset)  # hoisted: invariant in i
                for i in range(N):
                    C[index][i] = generate_part_cost(C, subset, i)
        return C
def generate_utility(G):
    """Draw one uniform utility value in [1, 10) per knowledge point of G."""
    num_points = G.order()
    return np.random.uniform(1, 10, num_points)
def simulate():
    """Benchmark the configured solvers on random UMLP instances.

    Iterates over every (N, density, budget, cost-type) combination from the
    command line, building (or loading previously saved) `nsim` random test
    instances per combination, timing each requested solver on them, and
    exporting aggregated results via `utils.export`.  A KeyboardInterrupt
    exports whatever has been collected so far.
    """
    args = utils.process_args(vars(utils.parser.parse_args()))
    print(args)
    Ns, densities, solvers, budgets, nsim, costType, verbose, loadPrev, standardize = args
    result_dict = []
    result_colnums_names = ['N','Density','Solver','Budget','Cost',
                            'Time_avg','Time_sd','Sol_avg','Sol_sd']
    total_simulations = utils.getTotalSimulation([Ns, densities, budgets, costType])
    total_simulations *= nsim
    progress = 0
    loadPrev_outer = loadPrev
    try:
        for N in Ns:
            for density in densities:
                for budget in budgets:
                    for cost in costType:
                        sols = np.zeros((nsim, len(solvers)))
                        times = np.zeros((nsim, len(solvers)))
                        # BUGFIX: initialize the bookkeeping flags up front.
                        # They were previously assigned only on the successful
                        # load path, so creating fresh instances hit a
                        # NameError on `new_budget` at the save check below.
                        new_budget = False
                        update_cost = False
                        if loadPrev:
                            try:
                                print("\nLoading previously saved test instances...")
                                try:
                                    update_cost = False
                                    sims, new_budget = utils.load_saved_instance(N, density, budget, cost)
                                except Exception:
                                    print("Need to update costs...")
                                    update_cost = True
                                    sims, new_budget = utils.load_saved_instance(N, density, budget, None)
                            except Exception:
                                print("Failed to load... Creating new instances...")
                                sims = []
                                loadPrev = False
                        else:
                            print("Creating new instances...")
                            sims = []
                        for sim in range(nsim):
                            if loadPrev and sim < len(sims):
                                changed_instance = False
                                G, B, U, C = sims[sim]
                                if update_cost:
                                    print("\nUpdating costs...")
                                    C = generate_cost(G, cost)
                                    sims[sim] = G, B, U, C
                                    changed_instance = True
                                if new_budget:
                                    print("\nReusing test cases but with different budget...")
                                    B = 5 * G.order() * budget
                            else:
                                changed_instance = True
                                G = generate_random_dag(N, density)
                                B = 5 * N * budget
                                U = generate_utility(G)
                                C = generate_cost(G, cost)
                                sims.append((G, B, U, C))
                            for solver_index in range(len(solvers)):
                                solver = solvers[solver_index]
                                # Dispatch to the requested solver implementation.
                                if solver == "ilp":
                                    if cost == "monotone":
                                        # ILP only supports additive costs; use
                                        # the empty-set cost row as fallback.
                                        C_ilp = C[0]
                                        s_time, s_sol = ilp_time(G, C[0], B, U)
                                    elif cost == "add":
                                        s_time, s_sol = ilp_time(G, C, B, U)
                                elif solver == "bf":
                                    s_time, s_sol = brute_force_time(G, C, B, U, cost)
                                elif solver == "gd":
                                    s_time, s_sol = greedy_time(G, C, B, U, cost)
                                elif solver == "gd2":
                                    s_time, s_sol = greedy2_time(G, C, B, U, cost)
                                sols[sim, solver_index] = s_sol
                                times[sim, solver_index] = s_time
                            progress += 1
                            if verbose: utils.update_progress(progress / total_simulations)
                            if changed_instance or new_budget:
                                print("\nTest instances saved for future use.")
                                utils.save_instance(sims, N, density, budget, cost)
                        result_dict.extend(utils.generate_result_dict(N, density, budget,
                                           cost, solvers, sols, times,
                                           standardize))
                        # Restore the caller's preference for the next combo.
                        loadPrev = loadPrev_outer
        utils.export(result_colnums_names, result_dict)
    except KeyboardInterrupt:
        utils.export(result_colnums_names, result_dict)


if __name__ == '__main__':
    simulate()
|
import os
import threading
from castero import helpers
from castero.config import Config
from castero.datafile import DataFile
class Episode:
    """The Episode class.

    This class represents a single episode from a podcast feed.
    """

    def __init__(self, feed, title=None, description=None, link=None,
                 pubdate=None, copyright=None, enclosure=None) -> None:
        """Initializes the object.

        At least one of a title or description must be specified.

        Args:
            feed: the feed that this episode is a part of
            title: (optional) the title of the episode
            description: (optional) the description of the episode
            link: (optional) a link to the episode
            pubdate: (optional) the date the episode was published, as a string
            copyright: (optional) the copyright notice of the episode
            enclosure: (optional) a url to a media file
        """
        assert title is not None or description is not None

        self._feed = feed
        self._title = title
        self._description = description
        self._link = link
        self._pubdate = pubdate
        self._copyright = copyright
        self._enclosure = enclosure

    def __str__(self) -> str:
        """Represent this object as a single-line string.

        Returns:
            string: this episode's title, if it exists, else its description
        """
        if self._title is not None:
            representation = self._title
        else:
            representation = self._description
        # Only the first line -- titles/descriptions may contain newlines.
        return representation.split('\n')[0]

    def _feed_directory(self) -> str:
        """Gets the path to the downloaded episode's feed directory.

        This method does not ensure whether the directory exists -- it simply
        acts as a single definition of where it _should_ be.

        Returns:
            str: a path to the feed directory
        """
        feed_dirname = helpers.sanitize_path(str(self._feed))
        if Config is None or Config["custom_download_dir"] == "":
            path = DataFile.DEFAULT_DOWNLOADED_DIR
        else:
            path = Config["custom_download_dir"]
        return os.path.join(path, feed_dirname)

    def get_playable(self) -> str:
        """Gets a playable path for this episode.

        This method checks whether the episode is available on the disk, giving
        the path to that file if so. Otherwise, simply return the episode's
        enclosure, which is probably a URL.

        Returns:
            str: a path to a playable file for this episode
        """
        playable = self.enclosure
        episode_partial_filename = helpers.sanitize_path(str(self))
        feed_directory = self._feed_directory()
        if os.path.exists(feed_directory):
            for File in os.listdir(feed_directory):
                if File.startswith(episode_partial_filename + '.'):
                    playable = os.path.join(feed_directory, File)
        return playable

    def download(self, download_queue, display=None):
        """Downloads this episode to the file system.

        This method currently only supports downloading from an external URL.
        In the future, it may be worthwhile to determine whether the episode's
        source is a local file and simply copy it instead.

        Args:
            download_queue: the download_queue overseeing this download
            display: (optional) the display to write status updates to
        """
        if self._enclosure is None:
            if display is not None:
                display.change_status("Download failed: episode does not have"
                                      " a valid media source")
            return

        feed_directory = self._feed_directory()
        episode_partial_filename = helpers.sanitize_path(str(self))
        # Strip any query string from the enclosure URL's extension.
        extension = os.path.splitext(self._enclosure)[1].split('?')[0]
        output_path = os.path.join(feed_directory,
                                   episode_partial_filename + str(extension))
        DataFile.ensure_path(output_path)
        if display is not None:
            display.change_status("Starting episode download...")

        # Download on a background thread so the UI stays responsive.
        t = threading.Thread(
            target=DataFile.download_to_file,
            args=[
                self._enclosure, output_path, str(self),
                download_queue, display
            ],
            name="download_%s" % str(self)
        )
        t.start()

    def delete(self, display=None):
        """Deletes the episode file from the file system.

        Args:
            display: (optional) the display to write status updates to
        """
        # BUGFIX: `downloaded` is a plain method, not a property, so the
        # original `if self.downloaded:` tested a bound-method object and
        # was always truthy.  It must be called.
        if self.downloaded():
            episode_partial_filename = helpers.sanitize_path(str(self))
            feed_directory = self._feed_directory()

            if os.path.exists(feed_directory):
                for File in os.listdir(feed_directory):
                    if File.startswith(episode_partial_filename + '.'):
                        os.remove(os.path.join(feed_directory, File))
                        if display is not None:
                            display.change_status(
                                "Successfully deleted the downloaded episode"
                            )
                # if there are no more files in the feed directory, delete it
                if len(os.listdir(feed_directory)) == 0:
                    os.rmdir(feed_directory)

    def downloaded(self) -> bool:
        """Determines whether the episode is downloaded.

        Returns:
            bool: whether or not the episode is downloaded
        """
        found_downloaded = False
        episode_partial_filename = helpers.sanitize_path(str(self))
        feed_directory = self._feed_directory()
        if os.path.exists(feed_directory):
            for File in os.listdir(feed_directory):
                if File.startswith(episode_partial_filename + '.'):
                    found_downloaded = True
        return found_downloaded

    @property
    def title(self) -> str:
        """str: the title of the episode"""
        result = self._title
        if result is None:
            result = "Title not available."
        return result

    @property
    def description(self) -> str:
        """str: the description of the episode"""
        result = self._description
        if result is None:
            result = "Description not available."
        return result

    @property
    def link(self) -> str:
        """str: the link of/for the episode"""
        result = self._link
        if result is None:
            result = "Link not available."
        return result

    @property
    def pubdate(self) -> str:
        """str: the publish date of the episode"""
        result = self._pubdate
        if result is None:
            result = "Publish date not available."
        return result

    @property
    def copyright(self) -> str:
        """str: the copyright of the episode"""
        result = self._copyright
        if result is None:
            result = "No copyright specified."
        return result

    @property
    def enclosure(self) -> str:
        """str: the enclosure of the episode"""
        result = self._enclosure
        if result is None:
            result = "Enclosure not available."
        return result
|
#
# Copyright 2018-2022 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Optional
# this may raise an ImportError if the python-gitlab package is not installed
from gitlab import Gitlab
# this may raise an ImportError if the python-gitlab package is not installed
from gitlab.exceptions import GitlabError # noqa H306
from traitlets.config import LoggingConfigurable
from urllib3.util import parse_url
class GitLabClient(LoggingConfigurable):
    """Thin wrapper around python-gitlab used by Elyra to push DAG files."""

    def __init__(
        self,
        token: str,
        project: str,
        branch: Optional[str] = None,
        server_url: Optional[str] = "https://gitlab.com",
        **kwargs,
    ):
        """
        Creates a GitLab client for Elyra
        :param token: Personal Access Token for use with GitLab
        :param project: GitLab project to use. Use format [namespace]/[project] e.g. elyra/examples
        :param branch: Project branch to use. If not provided, this will use the default branch configured in the
            target project
        :param server_url: GitLab API endpoint to use for the client. This can be an Enterprise
            GitLab instance. By default the client will attempt to connect to the main GitLab API at
            https://gitlab.com
        """
        super().__init__(**kwargs)
        # Remove trailing slash(es) from server URL to prevent failure
        self.server_url = server_url.rstrip("/")
        self.project_name = project
        self.branch = branch
        try:
            # Resolve the project eagerly so config errors surface at
            # construction time rather than at upload time.
            self.client = Gitlab(self.server_url, private_token=token)
            self.gitlab_project = self.client.projects.get(self.project_name)
        except GitlabError as gle:
            self.log.error(f"Error accessing project {self.project_name}: {gle}")
            raise RuntimeError(
                f"Error accessing repository {self.project_name}: {gle}. "
                "Please validate your runtime configuration details and retry."
            ) from gle

    def upload_dag(self, pipeline_filepath: str, pipeline_name: str) -> None:
        """
        Push a DAG to a gitlab project
        :param pipeline_filepath: filepath to the location of the DAG in the local filesystem
        :param pipeline_name: the name of the file to be created in the gitlab project
        :return:
        """
        try:
            # Upload DAG to gitlab project
            with open(pipeline_filepath) as input_file:
                content = input_file.read()
                git_file_name = f"{pipeline_name}.py"
                self.gitlab_project.files.create(
                    {
                        "file_path": git_file_name,
                        "branch": self.branch,
                        "content": content,
                        "commit_message": f"Pushed DAG {pipeline_name}",
                    }
                )
                self.log.info(f"DAG file {git_file_name} was successfully uploaded to branch {self.branch}.")
        except FileNotFoundError as fnfe:
            self.log.error(f"Unable to locate local DAG file to upload: {pipeline_filepath}: " + str(fnfe))
            raise RuntimeError(f"Unable to locate local DAG file to upload: {pipeline_filepath}: {str(fnfe)}") from fnfe
        except GitlabError as gle:
            self.log.error(f"Error uploading DAG to branch {self.branch}: {gle}")
            raise RuntimeError(
                f"Error uploading DAG to branch {self.branch}: {gle} "
                "Please validate your runtime configuration details and try again."
            ) from gle

    @staticmethod
    def get_git_url(api_url: str, repository_name: str, repository_branch: str) -> str:
        """
        Generates the URL to the location of the pushed DAG
        :param api_url: git API endpoint URL
        :param repository_name: name of the GitLab project in the form [namespace]/[project]
        :param repository_branch: name of the project branch
        :return: a URL in string format
        """
        parsed_url = parse_url(api_url)
        scheme = f"{parsed_url.scheme}://"
        host = parsed_url.host
        port = ""
        # Strip a leading "api." component so the link points at the web UI
        # host rather than the API host.
        if parsed_url.host.split(".")[0] == "api":
            host = ".".join(parsed_url.host.split(".")[1:])
        if parsed_url.port:
            port = f":{parsed_url.port}"
        return f"{scheme}{host}{port}/{repository_name}/tree/{repository_branch}"
|
"""insert new fields in user table
Revision ID: 948eefcc2e28
Revises: a9c4c250382f
Create Date: 2020-02-05 11:00:03.687495
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '948eefcc2e28'
down_revision = 'a9c4c250382f'
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable `about_me` and `last_seen` columns to the `user` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('about_me', sa.String(length=140), nullable=True))
    op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert `upgrade` by dropping the `last_seen` and `about_me` columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'last_seen')
    op.drop_column('user', 'about_me')
    # ### end Alembic commands ###
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class PdbStatusDetails(object):
    """
    The number and status of PDBs in a Container Database.
    """

    #: A constant which can be used with the status property of a PdbStatusDetails.
    #: This constant has a value of "UP"
    STATUS_UP = "UP"

    #: A constant which can be used with the status property of a PdbStatusDetails.
    #: This constant has a value of "DOWN"
    STATUS_DOWN = "DOWN"

    #: A constant which can be used with the status property of a PdbStatusDetails.
    #: This constant has a value of "UNKNOWN"
    STATUS_UNKNOWN = "UNKNOWN"

    def __init__(self, **kwargs):
        """
        Initializes a new PdbStatusDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param status:
            The value to assign to the status property of this PdbStatusDetails.
            Allowed values for this property are: "UP", "DOWN", "UNKNOWN", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type status: str

        :param count:
            The value to assign to the count property of this PdbStatusDetails.
        :type count: int

        """
        # Maps attribute names to their declared Swagger types.
        self.swagger_types = {
            'status': 'str',
            'count': 'int'
        }

        # Maps attribute names to their JSON keys in API payloads.
        self.attribute_map = {
            'status': 'status',
            'count': 'count'
        }

        self._status = None
        self._count = None

    @property
    def status(self):
        """
        Gets the status of this PdbStatusDetails.
        The status of the PDBs with this count.

        Allowed values for this property are: "UP", "DOWN", "UNKNOWN", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.


        :return: The status of this PdbStatusDetails.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """
        Sets the status of this PdbStatusDetails.
        The status of the PDBs with this count.


        :param status: The status of this PdbStatusDetails.
        :type: str
        """
        allowed_values = ["UP", "DOWN", "UNKNOWN"]
        # Unrecognized values are coerced rather than rejected so that newer
        # service responses do not break older client code.
        if not value_allowed_none_or_none_sentinel(status, allowed_values):
            status = 'UNKNOWN_ENUM_VALUE'
        self._status = status

    @property
    def count(self):
        """
        Gets the count of this PdbStatusDetails.
        The number of PDBs with this status.


        :return: The count of this PdbStatusDetails.
        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """
        Sets the count of this PdbStatusDetails.
        The number of PDBs with this status.


        :param count: The count of this PdbStatusDetails.
        :type: int
        """
        self._count = count

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Attribute-wise comparison; mirrors the other generated OCI models.
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
|
from nncf.common.graph.patterns import GraphPattern
from nncf.common.graph.patterns import HWFusedPatterns
from nncf.torch.graph.pattern_operations import ARITHMETIC_OPERATIONS
from nncf.torch.graph.pattern_operations import ATOMIC_ACTIVATIONS_OPERATIONS
from nncf.torch.graph.pattern_operations import BATCH_NORMALIZATION_OPERATIONS
from nncf.torch.graph.pattern_operations import GROUP_NORMALIZATION_OPERATIONS
from nncf.torch.graph.pattern_operations import LINEAR_OPERATIONS
from nncf.torch.graph.pattern_operations import MATMUL_OPERATIONS
from nncf.torch.graph.pattern_operations import RELU_OPERATIONS
from nncf.torch.graph.patterns import create_fc_conv_mul
from nncf.torch.graph.patterns import create_h_sigmoid_act
from nncf.torch.graph.patterns import create_h_swish_act
from nncf.torch.graph.patterns import create_swish_act
from nncf.torch.graph.patterns import create_l2_norm
def _get_torch_hw_fused_patterns() -> HWFusedPatterns:
    """Build the registry of HW-fusable operation patterns for the torch backend."""
    retval = HWFusedPatterns()

    linear_ops = GraphPattern()
    linear_ops.add_node(**LINEAR_OPERATIONS)
    retval.register(linear_ops, LINEAR_OPERATIONS['label'], match=False)

    matmul_ops = GraphPattern()
    matmul_ops.add_node(**MATMUL_OPERATIONS)
    # BUGFIX: the original registered `linear_ops` here, so the MATMUL label
    # pointed at the wrong pattern and matmul ops were never registered.
    retval.register(matmul_ops, MATMUL_OPERATIONS['label'], match=False)

    batch_norm = GraphPattern()
    batch_norm.add_node(**BATCH_NORMALIZATION_OPERATIONS)
    retval.register(batch_norm, BATCH_NORMALIZATION_OPERATIONS['label'], match=False)

    atomic_activations = GraphPattern()
    atomic_activations.add_node(**ATOMIC_ACTIVATIONS_OPERATIONS)
    swish = create_swish_act()
    h_sigmoid = create_h_sigmoid_act()
    h_swish = create_h_swish_act()
    activations = atomic_activations | swish | h_swish | h_sigmoid
    retval.register(activations, 'ACTIVATIONS', match=False)

    arithmetic_ops = GraphPattern()
    arithmetic_ops.add_node(**ARITHMETIC_OPERATIONS)
    retval.register(arithmetic_ops, ARITHMETIC_OPERATIONS['label'], match=False)

    # NOTE: `+` (sequence) binds tighter than `|` (alternative), so this is
    # (bn+act) | (act+bn) | bn | act.
    batch_norm_activations_permutation = batch_norm + activations | activations + batch_norm | batch_norm | activations

    retval.register(linear_ops + batch_norm_activations_permutation, 'LINEAR + BN_ACT_PERM',
                    match=True)
    retval.register(matmul_ops + arithmetic_ops, 'MATMUL + ARITHMETIC',
                    match=True)
    retval.register(batch_norm + activations, 'BN + ACTIVATIONS', match=True)
    retval.register(activations + batch_norm, 'ACTIVATIONS + BN', match=True)
    retval.register(arithmetic_ops + batch_norm_activations_permutation,
                    'ARITHMETIC + BN_ACT_PERM', match=True)

    group_norm = GraphPattern()
    group_norm.add_node(**GROUP_NORMALIZATION_OPERATIONS)
    relu = GraphPattern()
    relu.add_node(**RELU_OPERATIONS)
    retval.register(group_norm + relu, 'GROUP_NORM + RELU', match=True)

    l2_norm = create_l2_norm()
    retval.register(l2_norm, 'L2_NORM', match=True)
    fc_mul = create_fc_conv_mul()
    retval.register(fc_mul, 'FC_MUL_CONST', match=True)
    return retval


PT_HW_FUSED_PATTERNS = _get_torch_hw_fused_patterns()
|
# coding: utf-8
class Node:
    """Node of the doubly linked list backing LRUCache.

    `precursor` points toward the head (more recently used); `successor`
    points toward the tail (least recently used).
    """
    def __init__(self, *_, key=None, value=None, p=None, s=None):
        self.key = key
        self.value = value
        self.precursor = p
        self.successor = s


class LRUCache:
    """Least-recently-used cache: O(1) get/put via dict + doubly linked list."""

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.capacity = capacity
        self.count = 0
        self._map = dict()
        self._head = None  # most recently used
        self._tail = None  # least recently used

    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if key not in self._map:
            return -1
        node = self._map[key]
        # Promote the node to the front on every successful lookup.
        if self._head != node:
            self._pop_node(node)
            self._push_node(node)
        return self._map[key].value

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        # BUGFIX: the fresh node must be created and pushed on BOTH paths;
        # in the original the insertion lines sat inside the miss branch, so
        # updating an existing key unlinked the node without re-inserting it.
        if key in self._map:
            # Existing entry: unlink the old node; it is replaced below.
            self._pop_node(self._map[key])
        elif self.count == self.capacity:
            # Cache full: evict the least recently used entry.
            self._map.pop(self._tail.key)
            self._pop_node(self._tail)
        node = Node(key=key, value=value)
        self._map[key] = node
        self._push_node(node)

    def _pop_node(self, node):
        """Unlink `node` from the list and return it."""
        p = node.precursor
        s = node.successor
        if node == self._tail:
            self._tail = p
        else:
            s.precursor = p
        if node == self._head:
            self._head = s
        else:
            p.successor = s
        self.count -= 1
        return node

    def _push_node(self, node):
        """Link `node` in at the head (most-recently-used position)."""
        if self.count == 0:
            self._head = node
            self._tail = node
            self.count += 1
            return
        old_head = self._head
        self._head = node
        old_head.precursor = node
        node.successor = old_head
        self.count += 1
if __name__ == '__main__':
    # Smoke tests for the LRUCache implementation.
    c = LRUCache(1)
    assert c.get(1) == -1  # miss on an empty cache
    c.put(1, 1)
    assert c.get(1) == 1
    assert c.get(1) == 1  # repeated hit must not evict
    c = LRUCache(4)
    c.put(1, 1)
    c.put(2, 2)
    c.put(3, 3)
    c.put(4, 4)
    assert c.get(3) == 3  # 3 4 2 1
    assert c.get(1) == 1  # 1 3 4 2
    assert c.get(2) == 2  # 2 1 3 4
    # Capacity-1 cache: inserting a second key evicts the first.
    c = LRUCache(1)
    c.put(2, 1)
    c.get(2)
    c.put(3, 2)
    c.get(2)
    c.get(3)
|
import datetime
import json
import logging
import os
import random
from collections import defaultdict
from dict_recursive_update import recursive_update
from tuw_nlp.common.utils import ensure_dir
from tuw_nlp.grammar.alto import get_rule_string, run_alto
from tuw_nlp.grammar.utils import get_dummy_input
class IRTGCache():
    """Persistent nested cache for IRTG parse results.

    Layout: cache[input_interpretation][input_obj][output_interpretation]
    [output_codec] -> output_obj, serialized to/from a JSON file.
    """
    @staticmethod
    def load(fn):
        """Load a cache previously dumped to the JSON file fn."""
        with open(fn) as f:
            cache = json.load(f)
        ints = sorted(cache.keys())
        logging.warning(f'loaded cache from {fn} with interpretations: {ints}')
        obj = IRTGCache(ints, fn)
        obj.cache.update(cache)
        return obj

    def update_file(self, fn):
        """Merge this in-memory cache into the file fn and rewrite it."""
        old = IRTGCache.load(fn)
        assert old.interpretations == self.interpretations
        recursive_update(old.cache, self.cache)
        with open(fn, 'w') as f:
            json.dump(old.cache, f)
        logging.warning(f'updated cache in {fn}')

    def __init__(self, interpretations, fn, new=False):
        self.fn = fn
        self.interpretations = interpretations
        self.cache = {i: {} for i in interpretations}
        if new:
            # start the on-disk cache as an empty mapping per interpretation
            with open(fn, 'w') as f:
                json.dump(self.cache, f)

    def get(
            self, input_obj, input_int,
            output_int, output_codec, create_path=False):
        """Return the cached output object, or None on a miss.

        BUGFIX: the original performed the lookup in the branch where
        input_obj was NOT cached (raising KeyError on a plain miss) and
        returned None when it WAS cached, so cache hits were never
        returned. The branches are now the right way around, and the
        lookup tolerates plain dicts loaded from JSON.
        """
        by_input = self.cache[input_int]
        if input_obj not in by_input:
            if create_path:
                by_input[input_obj] = defaultdict(
                    lambda: defaultdict(dict))
            return None
        return by_input[input_obj].get(output_int, {}).get(output_codec)

    def add(self, input_obj, input_int, output_int, output_codec, output_obj):
        """Record a parse result; the target slot must currently be empty."""
        assert self.get(
            input_obj, input_int, output_int, output_codec,
            create_path=True) is None
        # setdefault keeps this safe for entries loaded from JSON, where the
        # nested levels are plain dicts rather than defaultdicts
        entry = self.cache[input_int].setdefault(input_obj, {})
        entry.setdefault(output_int, {})[output_codec] = output_obj
class IRTGGrammar():
    """Base class for IRTG (Interpreted Regular Tree Grammar) parsers that
    shell out to the Alto tool.

    Subclasses must provide ``self.interpretations`` (a dict mapping
    interpretation name -> algebra string) and implement ``gen_rules``.
    Parse results are memoized in an IRTGCache persisted as JSON.
    """
    def __init__(self, **kwargs):
        # temp dir for the generated Alto input/grammar/output files
        self.tmpdir = os.getenv("TUWNLP_TMPDIR", "tmp")
        ensure_dir(self.tmpdir)
        self.load_cache(**kwargs)
    def load_cache(self, **kwargs):
        """Open (or create) the JSON cache file for this grammar class."""
        cache_path = kwargs.get('cache_dir') or 'cache'
        # default cache file is named after the concrete subclass
        cache_fn = kwargs.get('cache_fn') or f'{self.__class__.__name__}.json'
        ensure_dir(cache_path)
        fn = os.path.join(cache_path, cache_fn)
        if not os.path.exists(fn):
            logging.warning(f'setting up new cache file: {fn}')
            self.cache = IRTGCache(
                sorted(self.interpretations.keys()), fn, new=True)
        else:
            logging.warning(f'loading cache from file: {fn}')
            self.cache = IRTGCache.load(fn)
        self.cache_fn = fn
    def preprocess_input(self, input_obj, **kwargs):
        """Subclass hook; identity by default."""
        return input_obj
    def postprocess_output(self, output_obj, **kwargs):
        """Subclass hook; identity by default."""
        return output_obj
    def _parse(self, input_obj, input_int, output_int, output_codec, **kwargs):
        # uncached path: run Alto, then post-process its raw output
        output = self.run(
            input_obj, input_int, output_int, output_codec)
        return self.postprocess_output(output, **kwargs)
    def parse(self, orig_input, input_int, output_int, output_codec, **kwargs):
        """Parse orig_input from interpretation input_int to output_int,
        serialized with output_codec, consulting and updating the cache.

        Raises ValueError for unknown interpretation names.
        NOTE(review): the cache keys on the *preprocessed* input only, so
        kwargs that change postprocessing are not part of the cache key.
        """
        if input_int not in self.interpretations:
            raise ValueError(f"unknown interpretation: {input_int}")
        if output_int not in self.interpretations:
            raise ValueError(f"unknown interpretation: {output_int}")
        input_obj = self.preprocess_input(orig_input, **kwargs)
        cached = self.cache.get(
            input_obj, input_int, output_int, output_codec)
        if cached is None:
            output_obj = self._parse(
                input_obj, input_int, output_int, output_codec, **kwargs)
            self.cache.add(
                input_obj, input_int, output_int, output_codec, output_obj)
            # write-through: merge the new entry into the cache file right away
            self.cache.update_file(self.cache_fn)
            return output_obj
        return cached
    def gen_file_names(self):
        """Create a unique tmp subdir; return the three Alto file paths."""
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        rand_id = random.randrange(100000, 999999)
        path = os.path.join(self.tmpdir, f"{timestamp}_{rand_id}")
        ensure_dir(path)
        return tuple(os.path.join(path, fn) for fn in (
            "input.txt", "grammar.irtg", "output.txt"))
    def gen_grammar_header(self):
        """Yield the interpretation declarations of the grammar file."""
        for name, algebra in self.interpretations.items():
            yield f"interpretation {name}: {algebra}"
    def gen_input_header(self, input_int):
        """Yield the header lines of the Alto corpus file for input_int."""
        algebra = self.interpretations[input_int]
        yield "# IRTG unannotated corpus file, v1.0"
        yield f"# interpretation {input_int}: {algebra}"
    def write_input_file(self, transformed_input, input_fn, input_int):
        """Write the corpus file: header, blank line, input, dummy input."""
        input_alg = self.interpretations[input_int]
        # a dummy second corpus entry — presumably required by Alto's
        # corpus format (TODO confirm)
        dummy_input = get_dummy_input(input_alg)
        with open(input_fn, 'w') as f:
            for line in self.gen_input_header(input_int):
                f.write(f"{line}\n")
            f.write('\n')
            f.write(f"{transformed_input}\n")
            f.write(f"{dummy_input}\n")
    def write_grammar_file(self, grammar_fn):
        """Write the grammar file: interpretations, blank line, rules."""
        with open(grammar_fn, 'w') as f:
            for line in self.gen_grammar_header():
                f.write(f"{line}\n")
            f.write('\n')
            for rule_string in self.gen_rule_strings():
                f.write(f"{rule_string}\n")
    def gen_rule_strings(self):
        """Yield rule strings, deferring all terminal rules to the end."""
        term_rule_strings = []
        for irtg_rule, interpretations, rule_type in self.gen_rules():
            rule_string = get_rule_string(irtg_rule, interpretations)
            if rule_type == 'terminal':
                term_rule_strings.append(rule_string)
                continue
            yield rule_string
        yield from term_rule_strings
    def create_alto_files(self, transformed_input, input_int):
        """Materialize input and grammar files; return all three paths."""
        input_fn, grammar_fn, output_fn = self.gen_file_names()
        self.write_input_file(transformed_input, input_fn, input_int)
        self.write_grammar_file(grammar_fn)
        return input_fn, grammar_fn, output_fn
    def run(self, transformed_input, input_int, output_int, output_codec):
        """Run Alto on freshly generated files.

        Returns the output for the first corpus entry (the real input; the
        dummy entry comes second), or None when Alto reports failure.
        """
        input_fn, grammar_fn, output_fn = self.create_alto_files(
            transformed_input, input_int)
        success = run_alto(
            input_fn, grammar_fn, output_fn, input_int, output_int,
            output_codec)
        if success:
            outputs, _ = self.parse_output(output_fn)
            return outputs[0]
        return None
    def parse_output(self, output_fn):
        """Read Alto output: lines alternate derivation / output.

        'null' / '<null>' lines are mapped to None.
        """
        derivs, outputs = [], []
        with open(output_fn) as f:
            for i, raw_line in enumerate(f):
                line = raw_line.strip()
                if line in ('null', '<null>'):
                    line = None
                if i % 2 == 0:
                    derivs.append(line)
                else:
                    outputs.append(line)
        return outputs, derivs
    def gen_rules(self):
        """Subclasses yield (irtg_rule, interpretations, rule_type) triples."""
        raise NotImplementedError
|
from .VerificationModel import VerificationModel |
# Generated by Django 3.2.4 on 2021-06-13 07:11
from django.db import migrations, models
import user.models
class Migration(migrations.Migration):
    """Second migration of the `user` app: custom manager, is_staff flag,
    and a reworked created_at field."""

    dependencies = [
        ('user', '0001_initial'),
    ]

    operations = [
        # Route User.objects through the project's custom UserManager.
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', user.models.UserManager()),
            ],
        ),
        # NOTE(review): verbose_name appears mojibake-encoded (presumably
        # Korean originally) — verify the source file's encoding.
        migrations.AddField(
            model_name='user',
            name='is_staff',
            field=models.BooleanField(default=False, verbose_name='์คํํ ๊ถํ'),
        ),
        # NOTE(review): the default is a Unix-timestamp float frozen at
        # migration-generation time (2021-06-13), stored in a TextField, so
        # every migrated row receives this same value — confirm intended.
        migrations.AlterField(
            model_name='user',
            name='created_at',
            field=models.TextField(default=1623568318.789693),
        ),
    ]
|
from bs4 import BeautifulSoup
class Kata:
    """Wraps the soup of a single solved-kata list item and exposes its
    metadata as read-only properties."""

    def __init__(self, soup):
        self.soup = soup

    @property
    def source_codes(self):
        """All solution texts, one joined string per markdown block."""
        blocks = self.soup.find_all('div', {'class': 'markdown'})
        return [''.join(block.findAll(text=True)) for block in blocks]

    @property
    def languages(self):
        """Lower-cased language names taken from the h6 headers."""
        headers = self.soup.find_all('h6')
        return [header.text.rstrip(':').lower() for header in headers]

    @property
    def difficulty(self):
        """Slugified difficulty label from the item-title span."""
        title_div = self.soup.find('div', {'class': 'item-title'})
        label = title_div.find('span').text
        return label.replace(' ', '-').lower()

    @property
    def title(self):
        """Slugified kata title from the item-title link."""
        title_div = self.soup.find('div', {'class': 'item-title'})
        raw = title_div.find('a').text
        return raw.replace(' ', '-').lower()

    @property
    def kata_id(self):
        """Kata id: the last path segment of the title link's href."""
        link = self.soup.find('div', {'class': 'item-title'}).find('a')
        return link['href'].split('/')[-1]

    @property
    def get_languages_and_source_codes(self):
        """Pairs of (language, source code) zipped from the markdown blocks."""
        documents = self.soup.findAll(
            'div', {'class': 'markdown prose max-w-none'})
        languages = [doc.find('pre').find('code').get('data-language')
                     for doc in documents]
        codes = [''.join(doc.findAll(text=True)) for doc in documents]
        return zip(languages, codes)
class KataParser:
    """Parses a profile page and exposes its solved-kata list items."""

    def __init__(self, html):
        page = BeautifulSoup(html, 'html.parser')
        self.elems = page.find_all('div', {'class': 'list-item-solutions'})

    def parse_katas(self):
        """Return one Kata wrapper per solution list item."""
        return [Kata(elem) for elem in self.elems]
|
from typing import List
from unicodedata import normalize
from tokenizer.base import BaseTokenizer
class JamoTokenizer(BaseTokenizer):
    """Character-level tokenizer that decomposes text into Unicode jamo.

    Each word is NFKD-normalized (splitting Hangul syllables into their
    jamo) and words are glued with the sentinel character "โ", which
    stands in for spaces so that detokenize can restore them.
    """

    def __init__(self):
        pass

    def tokenize(self, text: str) -> List[str]:
        """Return the text as a flat list of single characters."""
        words = text.strip().split(" ")
        decomposed = (normalize("NFKD", word) for word in words)
        return list("โ".join(decomposed))

    def detokenize(self, tokens: List[str]) -> str:
        """Invert tokenize: recompose jamo and restore the spaces."""
        recomposed = normalize("NFKC", "".join(tokens))
        return recomposed.replace("โ", " ")
|
# SETR ConvFuse-PUP config: ADE20K (150 classes), 512x512 crops, 240k schedule.
_base_ = [
    '../_base_/models/setr_convfuse_pup.py',
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_240k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        img_size=512,
        align_corners=False,
        pos_embed_interp=True,
        drop_rate=0.,
        num_classes=150,
        embed_dim=768,
        depth=12,
        num_heads=12,
        conv_type='resnet18',
    ),
    decode_head=dict(
        img_size=512,
        align_corners=False,
        num_conv=4,
        upsampling_method='bilinear',
        embed_dim=768,
        in_index=11,
        # NOTE(review): key name looks misspelled ("upsampe"), but downstream
        # code consumes it as-is — do not rename.
        num_upsampe_layer=4,
        num_classes=150,
    ),
)
optimizer = dict(
    lr=0.001,
    weight_decay=0.0,
    # decode head learns 10x faster than the backbone
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}),
)
crop_size = (512, 512)
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(384, 384))
find_unused_parameters = True
data = dict(samples_per_gpu=6)
|
'''Aggregates item files in data/libs/items'''
import json
import os
import re
def pack_folder(folder="./dat"):
    '''
    Takes a string containing a relative path to a folder containing multiple .json files,
    combines them into one array, returns that array. Uses the containing folder to apply
    a category to the item as well.
    '''
    full_list = []
    contents = os.listdir(folder)
    item_errors = 0
    err_files = []
    # category depends only on the folder, so compute it once outside the loop
    category = strip_path(["libs/", "dat/"], folder, 1).lstrip("./")
    for file in contents:
        # BUGFIX: plain string concatenation broke whenever `folder` lacked a
        # trailing slash (the "./dat" default produced paths like "./datfoo.json")
        this_file = os.path.join(folder, file)
        with open(this_file) as current_file:
            try:
                items = json.load(current_file)
            except ValueError:
                # keep a placeholder entry so one bad file doesn't abort the run
                items = {"flags": ["null"]}
                item_errors = item_errors + 1
                err_files.append(file)
        items['category'] = category
        full_list.append(items)
    print("File read errors: " + str(item_errors))
    if item_errors > 0:
        print("Files with errors:")
        print(err_files)
    return full_list
def write_list(items, name='items.json'):
    '''
    Serializes an array of JSON-compatible objects to a new .json file
    called `name`, pretty-printed with 4-space indentation.
    '''
    with open(name, mode="w") as out:
        json.dump(items, out, indent=4)
    print("Successfully wrote to file: " + name)
def strip_path(regex, path, times):
    '''
    Takes a string, makes a regex from it, strips path of it `times` times.
    If regex is a list of strings, strips path of each one once, in order
    (the `times` argument is ignored in that case).
    '''
    if isinstance(regex, list):
        for pat in regex:
            path = re.compile(pat).sub("", path, count=1)
        return path
    return re.compile(regex).sub("", path, count=times)
# Below is mostly for testing purposes, remove later.
if __name__ == "__main__":
    # Guarded so merely importing this module no longer triggers the test run.
    write_list(items=pack_folder(folder='../../libs/items/gear/worn/armor/light/dat/'),
               name="items_test.json")
|
# Exploratory script: inspect the MPII human-pose annotation .mat file.
import numpy as np
import matplotlib,pylab as plt  # NOTE(review): binds the `pylab` module as plt (not matplotlib.pylab); plt is unused below
import scipy.io
# Hard-coded local Windows path to the MPII v1 annotation archive.
matfile_name = "C:\\dataset\\mpii_human_pose_v1_u12_2\\mpii_human_pose_v1_u12_1.mat"
# struct_as_record=False loads MATLAB structs as objects with attribute access.
matfile = scipy.io.loadmat(matfile_name, struct_as_record=False)
print(matfile.get('RELEASE')[0, 0].get('annolist'))
# print(matfile['re'])
# # print(type(matfile))
# for i in matfile:
#     print(i)
# print(type(matfile['RELEASE'])) # <class 'numpy.ndarray'>
# Unwrap the 1x1 object array around the top-level RELEASE struct.
temp = matfile['RELEASE']
temp = temp[0, 0]
print(type(temp))
# Drill into the first annotation's first rect's keypoints via the raw __dict__.
print(temp.__dict__['annolist'][0, 0].__dict__['annorect'][0, 0].__dict__['annopoints.point'][0, 0].__dict__['point'])
# temp = np.squeeze(temp)
# print(temp)
exit(0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.