repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
jstone-lucasfilm/tkInterFit | tkInterFit.py | import tkinter as tk
import FittingInterface
# Note that you can embed the application into
# your own tkinter programs as shown here
# Build the application window, embedding the fitting interface frame.
# Note that you can embed InterfaceFrame into your own tkinter programs
# in exactly the same way.
root = tk.Tk()
interface = FittingInterface.InterfaceFrame(root)
interface.pack()
root.title("tkinterFit - Curve And Surface Fitting Interface")

# Manually center the application window on the user's display:
# realize the window first so the size queries return real values.
root.update_idletasks()
windowWidth = root.winfo_width()
windowHeight = root.winfo_height()
offsetX = (root.winfo_screenwidth() // 2) - (windowWidth // 2)  # integer division
offsetY = (root.winfo_screenheight() // 2) - (windowHeight // 2)  # integer division
root.geometry('{}x{}+{}+{}'.format(windowWidth, windowHeight, offsetX, offsetY))

root.mainloop()
|
jstone-lucasfilm/tkInterFit | FittingThread.py | import os, sys, time, threading
import pyeq3
class FittingThread(threading.Thread):
    """Background worker that fits an equation off the GUI thread.

    Progress is reported by putting status strings on the notify
    window's queue and raising a <<status_update>> virtual event; the
    final item put on the queue is the fitted equation object itself,
    which the event handler distinguishes by data type.
    """

    def __init__(self, notify_window, equation):
        threading.Thread.__init__(self)
        self.notify_window = notify_window
        self.equation = equation
        # This starts the thread running on creation, but you could
        # also make the GUI thread responsible for calling this
        self.start()

    def _post(self, item):
        # queue the item and wake the GUI thread with a virtual event
        self.notify_window.queue.put(item)
        self.notify_window.event_generate('<<status_update>>')

    def run(self):
        self._post('Fitting data...')
        time.sleep(0.5)  # allow users a moment to see the update
        self.equation.Solve()

        self._post('Calculating model errors...')
        time.sleep(0.5)  # allow users a moment to see the update
        self.equation.CalculateModelErrors(self.equation.solvedCoefficients, self.equation.dataCache.allDataCacheDictionary)

        self._post('Calculating coefficient and fit statistics...')
        time.sleep(0.5)  # allow users a moment to see the update
        self.equation.CalculateCoefficientAndFitStatistics()

        self._post('Fitting complete, creating graphs and reports...')
        time.sleep(0.5)  # allow users a moment to see the update

        # the fitted equation object - not a status string - goes last;
        # the event handler checks the queued item's data type
        self._post(self.equation)
|
jstone-lucasfilm/tkInterFit | AdditionalInfo.py | <reponame>jstone-lucasfilm/tkInterFit
# Help-text blocks shown by the GUI's "additional information" views.
# The trailing backslashes inside the triple-quoted strings are line
# continuations, so each backslash-joined run renders as one paragraph line.

# Useful project and documentation URLs.
links = '''
URL for animated "Common Problems In Curve Fitting":
http://commonproblems.readthedocs.io/
URL for source code of this computer program:
https://github.com/zunzun/tkInterFit
URL for web version of this code, which generates \
PDF files and animated 3D surface rotations:
https://github.com/zunzun/zunzunsite3
URL for the pyeq3 fitting library, which has hundreds \
of known 2D and 3D equations:
https://github.com/zunzun/pyeq3
'''

# Background information about the original author.
author = '''
This is <NAME>, author of tkInterFit. My background is in \
nuclear engineering and industrial radiation physics, as I started \
working in the U.S. Navy as a submarine nuclear reactor operator \
many, many neutrons ago.
I have quite a bit of international experience calibrating industrial \
metal thickness and coating gauges. For example the thicker a piece of \
steel the more radiation it absorbs, and measuring the amount of radiation \
that passes through a sheet of steel can tell you how thick it is without \
touching it. Another example is that the thicker a zinc coating on steel \
sheets, the more zinc X-ray fluorescence energy it can emit - again allowing \
accurate thickness measurement for industrial manufacture.
My post-Navy employer originally used ad-hoc spreadsheets to very \
tediously create 4th-order polynomials calibrating to readings from \
known samples. So I started writing my own curve-fitting software in C.
When X-rays pass through aluminium, the atomic number of the alloying \
elements is much greater than that of the aluminium itself such that \
small changes in alloy composition lead to large changes in X-ray \
transmission for the same thickness. Alloy changes look like thickness \
changes, egad! However, alloy changes also cause changes to the X-rays \
that are scattered back from the aluminium, so that if both the transmitted \
and backscattered radiation is measured a more alloy-insensitive thickness \
measurement can be made - but this is now a 3D surface fit, and I started \
writing surface fitting software. I began to do considerable international work.
This finally led to the development of my Python fitting libraries, and \
this example tkinter curve and surface fitter. I also have Python 2 and 3 \
wxPython and django versions on GitHub.
James
'''

# A short history of curve fitting and the genetic-algorithm approach
# used by the pyeq3 library for nonlinear solver initial estimates.
history = '''
Prior to the invention of electronic calculation, only manual methods \
were available, of course - meaning that creating mathematical models \
from experimental data was done by hand. Even Napier's invention of \
logarithms did not help much in reducing the tediousness of this task. \
Linear regression techniques worked, but how to then compare models? \
And so the F-statistic was created for the purpose of model selection, \
since graphing models and their confidence intervals was practically \
out of the question. Forward and backward regression techniques used \
linear methods, requiring less calculation than nonlinear methods, but \
limited the possible mathematical models to linear combinations of functions.
With the advent of computerized calculations, nonlinear methods which \
were impractical in the past could be automated and made practical. \
However, the nonlinear fitting methods all required starting points \
for their solvers - meaning in practice you had to have a good idea of \
the final equation parameters to begin with!
If however a genetic or monte carlo algorithm searched error space for \
initial parameters prior to running the nonlinear solvers, this problem \
could be strongly mitigated. This meant that instead of hit-or-miss \
forward and backward regression, large numbers of known linear *and* \
nonlinear equations could be fitted to an experimental data set, and \
then ranked by a fit statistic such as AIC or SSQ errors.
Note that for an initial guesstimate of parameter values, not all data \
need be used. A reduced size data set with min, max, and (hopefully) \
evenly spaced additional data points in between are used. The total \
number of data points required is the number of equation parameters \
plus a few extra points.
Reducing the data set size used by the code's genetic algorithm greatly \
reduces total processing time. I tested many different methods before \
choosing the one in the code, a genetic algorithm named \
"Differential Evolution".
I hope you find this code useful, and to that end I have sprinkled \
explanatory comments throughout the code. If you have any questions, \
comments or suggestions please e-mail me directly at <EMAIL> \
or by posting to the user group at the URL
https://groups.google.com/forum/#!forum/zunzun_dot_com
I will be glad to help you.
<NAME>
2548 Vera Cruz Drive
Birmingham, AL 35235 USA
email: <EMAIL>
'''
|
jstone-lucasfilm/tkInterFit | IndividualReports.py | <filename>IndividualReports.py
import pickle, inspect, re
import pyeq3
import numpy, scipy
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm # to colormap 3D surfaces from blue to red
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import ttk as ttk
from tkinter import messagebox as tk_mbox
import tkinter.scrolledtext as tk_stxt
import XYscrolledtext as xy_stxt
textboxWidth = 60 # units are characters
textboxHeight = 12 # units are characters
graphWidth = 800 # units are pixels
graphHeight = 600 # units are pixels
# 3D contour plot lines
numberOfContourLines = 16
# this is used in several reports
def DataArrayStatisticsReport(parent, titleString, tempdata):
    """Return a ScrolledText widget reporting basic statistics of tempdata.

    parent      -- tkinter container for the widget
    titleString -- heading inserted at the top of the report
    tempdata    -- sequence of numeric values to summarize

    Each statistic is calculated independently; a failure in any one
    calculation is reported in-line rather than aborting the report.
    """
    scrolledText = tk_stxt.ScrolledText(parent, width=textboxWidth, height=textboxHeight, wrap=tk.NONE)
    scrolledText.insert(tk.END, titleString + '\n\n')

    # must at least have max and min
    minData = min(tempdata)
    maxData = max(tempdata)

    if maxData == minData:
        scrolledText.insert(tk.END, 'All data has the same value,\n')
        scrolledText.insert(tk.END, "value = %-.16E\n" % (minData))
        scrolledText.insert(tk.END, 'statistics cannot be calculated.')
    else:
        scrolledText.insert(tk.END, "max = %-.16E\n" % (maxData))
        scrolledText.insert(tk.END, "min = %-.16E\n" % (minData))

        # (label, callable) pairs.  numpy is used for the basic moments:
        # the scipy.mean / scipy.median / scipy.var / scipy.std top-level
        # aliases were deprecated and removed from modern SciPy, so the
        # original calls always raised and reported a calculation error.
        statistics = (
            ('mean', numpy.mean),
            ('standard error of mean', scipy.stats.sem),
            ('median', numpy.median),
            ('variance', numpy.var),
            ('std. deviation', numpy.std),
            ('skew', scipy.stats.skew),
            ('kurtosis', scipy.stats.kurtosis),
        )
        for statisticName, statisticFunction in statistics:
            try:
                temp = statisticFunction(tempdata)
                scrolledText.insert(tk.END, "%s = %-.16E\n" % (statisticName, temp))
            except Exception:
                # report the failure for this statistic and continue
                scrolledText.insert(tk.END, statisticName + " gave error in calculation\n")
    return scrolledText
def CoefficientAndFitStatistics(parent, equation):
    """Return a ScrolledText widget with the fitted equation's statistics.

    Reports degrees of freedom, overall fit statistics (RMSE, R-squared,
    F-statistic, log-likelihood, AIC, BIC), per-coefficient statistics
    (value, standard error, t-stat, p-stat, 95% confidence interval) and
    the coefficient covariance matrix.  Any statistic pyeq3 could not
    calculate is shown as 'n/a'.
    """
    scrolledText = tk_stxt.ScrolledText(parent, width=80, height=25, wrap=tk.NONE)

    if equation.upperCoefficientBounds or equation.lowerCoefficientBounds:
        scrolledText.insert(tk.END, 'This model has coefficient bounds. Parameter statistics may\n')
        scrolledText.insert(tk.END, 'not be valid for parameter values at or near the bounds.\n')
        scrolledText.insert(tk.END, '\n')

    # fixed spelling: was "Degress"
    scrolledText.insert(tk.END, 'Degrees of freedom error ' + str(equation.df_e) + '\n')
    scrolledText.insert(tk.END, 'Degrees of freedom regression ' + str(equation.df_r) + '\n')

    def insertStatistic(label, value):
        # each statistic may be None when pyeq3 could not calculate it
        if value is None:
            scrolledText.insert(tk.END, label + ': n/a\n')
        else:
            scrolledText.insert(tk.END, label + ': ' + str(value) + '\n')

    insertStatistic('Root Mean Squared Error (RMSE)', equation.rmse)
    insertStatistic('R-squared', equation.r2)
    insertStatistic('R-squared adjusted', equation.r2adj)
    insertStatistic('Model F-statistic', equation.Fstat)
    insertStatistic('Model F-statistic p-value', equation.Fpv)
    insertStatistic('Model log-likelihood', equation.ll)
    insertStatistic('Model AIC', equation.aic)
    insertStatistic('Model BIC', equation.bic)

    scrolledText.insert(tk.END, '\n')
    scrolledText.insert(tk.END, "Individual Parameter Statistics:\n")
    for i in range(len(equation.solvedCoefficients)):
        # t-stat and p-stat arrays may be None as a whole
        if equation.tstat_beta is None:
            tstat = 'n/a'
        else:
            tstat = '%-.5E' % (equation.tstat_beta[i])
        if equation.pstat_beta is None:
            pstat = 'n/a'
        else:
            pstat = '%-.5E' % (equation.pstat_beta[i])
        if equation.sd_beta is not None:
            scrolledText.insert(tk.END, "Coefficient %s = %-.16E, std error: %-.5E\n" % (equation.GetCoefficientDesignators()[i], equation.solvedCoefficients[i], equation.sd_beta[i]))
        else:
            scrolledText.insert(tk.END, "Coefficient %s = %-.16E, std error: n/a\n" % (equation.GetCoefficientDesignators()[i], equation.solvedCoefficients[i]))
        scrolledText.insert(tk.END, " t-stat: %s, p-stat: %s, 95 percent confidence intervals: [%-.5E, %-.5E]\n" % (tstat, pstat, equation.ci[i][0], equation.ci[i][1]))

    scrolledText.insert(tk.END, '\n')
    scrolledText.insert(tk.END, "Coefficient Covariance Matrix:\n")
    for covarianceRow in equation.cov_beta:
        scrolledText.insert(tk.END, str(covarianceRow) + '\n')
    return scrolledText
def CoefficientListing(parent, equation):
    """Return a ScrolledText widget listing each solved coefficient value."""
    listing = tk_stxt.ScrolledText(parent, width=textboxWidth, height=textboxHeight, wrap=tk.NONE)
    designators = equation.GetCoefficientDesignators()
    for index, coefficientValue in enumerate(equation.solvedCoefficients):
        listing.insert(tk.END, "%s = %-.16E\n" % (designators[index], coefficientValue))
    return listing
def SourceCodeReport(parent, equation, lanuageNameString):
    """Return a ScrolledText widget with the fitted model rendered as source
    code in the named output language.

    lanuageNameString is the suffix of a pyeq3 generator method name, e.g.
    'PYTHON' or 'CPP' selects GetOutputSourceCodePYTHON / ...CPP.

    The generator is dispatched with getattr() rather than the original
    eval() on a concatenated string, which avoided nothing and executed
    an arbitrarily constructed expression.
    """
    scrolledText = tk_stxt.ScrolledText(parent, width=textboxWidth, height=textboxHeight, wrap=tk.NONE)
    generator = getattr(pyeq3.outputSourceCodeService(), 'GetOutputSourceCode' + lanuageNameString)
    scrolledText.insert(tk.END, generator(equation))
    return scrolledText
def AbsoluteErrorGraph(parent, equation):
    """Return [tk widget, Figure] plotting absolute error against the
    dependent data values."""
    figure = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    canvas = FigureCanvasTkAgg(figure, master=parent)
    axes = figure.add_subplot(111)
    dependentData = equation.dataCache.allDataCacheDictionary['DependentData']
    axes.plot(dependentData, equation.modelAbsoluteError, 'D')
    # the dependent axis is called X for 2D fits and Z for 3D fits
    if equation.GetDimensionality() == 2:
        dependentAxisName = 'X Data'
    else:
        dependentAxisName = 'Z Data'
    axes.set_title('Absolute Error vs. ' + dependentAxisName)
    axes.set_xlabel(dependentAxisName)
    axes.set_ylabel(" Absolute Error")  # Y axis label is always absolute error
    canvas.draw()
    plt.close('all')  # release pyplot state, avoiding memory and process problems
    return [canvas.get_tk_widget(), figure]
def PercentErrorGraph(parent, equation):
    """Return [tk widget, Figure] plotting percent error against the
    dependent data values."""
    figure = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    canvas = FigureCanvasTkAgg(figure, master=parent)
    axes = figure.add_subplot(111)
    dependentData = equation.dataCache.allDataCacheDictionary['DependentData']
    axes.plot(dependentData, equation.modelPercentError, 'D')
    # the dependent axis is called X for 2D fits and Z for 3D fits
    if equation.GetDimensionality() == 2:
        dependentAxisName = 'X Data'
    else:
        dependentAxisName = 'Z Data'
    axes.set_title('Percent Error vs. ' + dependentAxisName)
    axes.set_xlabel(dependentAxisName)
    axes.set_ylabel(" Percent Error")  # Y axis label is always percent error
    canvas.draw()
    plt.close('all')  # release pyplot state, avoiding memory and process problems
    return [canvas.get_tk_widget(), figure]
def AbsoluteErrorHistogram(parent, equation):
    """Return [tk widget, Figure] with a histogram of the model's
    absolute errors."""
    figure = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    canvas = FigureCanvasTkAgg(figure, master=parent)
    axes = figure.add_subplot(111)
    abs_error = equation.modelAbsoluteError
    # half as many bins as data points, clamped to the range [5, 25]
    bincount = max(5, min(25, len(abs_error) // 2))
    n, bins, patches = axes.hist(abs_error, bincount, rwidth=0.8)
    # add some axis space at the top of the graph if a bar touches it
    ylim = axes.get_ylim()
    if ylim[1] == max(n):
        axes.set_ylim(0.0, ylim[1] + 1)
    axes.set_title('Absolute Error Histogram')
    axes.set_xlabel('Absolute Error')
    axes.set_ylabel(" Frequency")
    canvas.draw()
    plt.close('all')  # release pyplot state, avoiding memory and process problems
    return [canvas.get_tk_widget(), figure]
def PercentErrorHistogram(parent, equation):
    """Return [tk widget, Figure] with a histogram of the model's
    percent errors."""
    figure = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    canvas = FigureCanvasTkAgg(figure, master=parent)
    axes = figure.add_subplot(111)
    per_error = equation.modelPercentError
    # half as many bins as data points, clamped to the range [5, 25]
    bincount = max(5, min(25, len(per_error) // 2))
    n, bins, patches = axes.hist(per_error, bincount, rwidth=0.8)
    # add some axis space at the top of the graph if a bar touches it
    ylim = axes.get_ylim()
    if ylim[1] == max(n):
        axes.set_ylim(0.0, ylim[1] + 1)
    axes.set_title('Percent Error Histogram')
    axes.set_xlabel('Percent Error')
    axes.set_ylabel(" Frequency")
    canvas.draw()
    plt.close('all')  # release pyplot state, avoiding memory and process problems
    return [canvas.get_tk_widget(), figure]
def ModelScatterConfidenceGraph(parent, equation, scatterplotOnlyFlag):
    """Return [tk widget, Figure] with a scatter plot of the raw data and,
    unless scatterplotOnlyFlag is true, the fitted model curve plus its
    95% confidence interval bands."""
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    canvas = FigureCanvasTkAgg(f, master=parent)
    axes = f.add_subplot(111)
    y_data = equation.dataCache.allDataCacheDictionary['DependentData']
    x_data = equation.dataCache.allDataCacheDictionary['IndependentData'][0]
    # first the raw data as a scatter plot
    axes.plot(x_data, y_data, 'D')
    if not scatterplotOnlyFlag:
        # create data for the fitted equation plot over the X data range
        xModel = numpy.linspace(min(x_data), max(x_data))
        # temporarily swap in a fresh data cache so the model can be
        # evaluated on the plot grid instead of the original data points
        tempcache = equation.dataCache # store the data cache
        equation.dataCache = pyeq3.dataCache()
        equation.dataCache.allDataCacheDictionary['IndependentData'] = numpy.array([xModel, xModel])
        equation.dataCache.FindOrCreateAllDataCache(equation)
        yModel = equation.CalculateModelPredictions(equation.solvedCoefficients, equation.dataCache.allDataCacheDictionary)
        equation.dataCache = tempcache # restore the original data cache
        # now the model as a line plot
        axes.plot(xModel, yModel)
        # now calculate confidence intervals; formula references:
        # http://support.sas.com/documentation/cdl/en/statug/63347/HTML/default/viewer.htm#statug_nlin_sect026.htm
        # http://www.staff.ncl.ac.uk/tom.holderness/software/pythonlinearfit
        mean_x = numpy.mean(x_data)
        n = equation.nobs
        t_value = scipy.stats.t.ppf(0.975, equation.df_e) # (1.0 - (a/2)) is used for two-sided t-test critical value, here a = 0.05
        confs = t_value * numpy.sqrt((equation.sumOfSquaredErrors/equation.df_e)*(1.0/n + (numpy.power((xModel-mean_x),2.0)/
                ((numpy.sum(numpy.power(x_data,2.0)))-n*(numpy.power(mean_x,2.0))))))
        # get lower and upper confidence limits based on predicted y and confidence intervals
        upper = yModel + abs(confs)
        lower = yModel - abs(confs)
        # mask off any numbers outside the existing plot limits
        booleanMask = yModel > axes.get_ylim()[0]
        booleanMask &= (yModel < axes.get_ylim()[1])
        # solid white under dashed blue improves visibility whether the
        # bands cross dark points/lines or empty background
        axes.plot(xModel[booleanMask], lower[booleanMask], linestyle='solid', color='white')
        axes.plot(xModel[booleanMask], upper[booleanMask], linestyle='solid', color='white')
        axes.plot(xModel[booleanMask], lower[booleanMask], linestyle='dashed', color='blue')
        axes.plot(xModel[booleanMask], upper[booleanMask], linestyle='dashed', color='blue')
    if not scatterplotOnlyFlag:
        axes.set_title('Model With 95% Confidence Intervals') # add a title
    else:
        axes.set_title('Scatter Plot') # add a title
    axes.set_xlabel('X Data') # X axis data label
    axes.set_ylabel('Y Data') # Y axis data label
    canvas.draw()
    plt.close('all') # clean up after using pyplot or else there can be memory and process problems
    return [canvas.get_tk_widget(), f]
def SurfacePlot(parent, equation):
    """Return [tk widget, Figure] with a rotatable 3D surface plot of the
    fitted model over the data's X/Y range, raw data points overlaid."""
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    canvas = FigureCanvasTkAgg(f, master=parent)
    matplotlib.pyplot.grid(True)
    axes = Axes3D(f, auto_add_to_figure=False)
    f.add_axes(axes)
    x_data = equation.dataCache.allDataCacheDictionary['IndependentData'][0]
    y_data = equation.dataCache.allDataCacheDictionary['IndependentData'][1]
    z_data = equation.dataCache.allDataCacheDictionary['DependentData']
    # evaluate the fitted model on a 20x20 grid spanning the data range
    xModel = numpy.linspace(min(x_data), max(x_data), 20)
    yModel = numpy.linspace(min(y_data), max(y_data), 20)
    X, Y = numpy.meshgrid(xModel, yModel)
    # temporarily swap in a fresh data cache so the model can be
    # evaluated on the grid instead of the original data points
    tempcache = equation.dataCache # store the data cache
    equation.dataCache = pyeq3.dataCache()
    equation.dataCache.allDataCacheDictionary['IndependentData'] = numpy.array([X, Y])
    equation.dataCache.FindOrCreateAllDataCache(equation)
    Z = equation.CalculateModelPredictions(equation.solvedCoefficients, equation.dataCache.allDataCacheDictionary)
    equation.dataCache = tempcache # restore the original data cache
    axes.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=1, antialiased=True, alpha=0.75)
    axes.scatter(x_data, y_data, z_data, depthshade=False, color='k')
    axes.set_title('Surface Plot (click-drag with mouse)') # add a title for surface plot
    axes.set_xlabel('X Data') # X axis data label
    axes.set_ylabel('Y Data') # Y axis data label
    axes.set_zlabel('Z Data') # Z axis data label
    canvas.draw()
    plt.close('all') # clean up after using pyplot or else there can be memory and process problems
    return [canvas.get_tk_widget(), f]
def ContourPlot(parent, equation):
    """Return [tk widget, Figure] with a labeled contour plot of the fitted
    3D model over the data's X/Y range, raw data points overlaid."""
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    canvas = FigureCanvasTkAgg(f, master=parent)
    axes = f.add_subplot(111)
    x_data = equation.dataCache.allDataCacheDictionary['IndependentData'][0]
    y_data = equation.dataCache.allDataCacheDictionary['IndependentData'][1]
    z_data = equation.dataCache.allDataCacheDictionary['DependentData']
    # evaluate the fitted model on a 20x20 grid spanning the data range
    xModel = numpy.linspace(min(x_data), max(x_data), 20)
    yModel = numpy.linspace(min(y_data), max(y_data), 20)
    X, Y = numpy.meshgrid(xModel, yModel)
    # temporarily swap in a fresh data cache so the model can be
    # evaluated on the grid instead of the original data points
    tempcache = equation.dataCache # store the data cache
    equation.dataCache = pyeq3.dataCache()
    equation.dataCache.allDataCacheDictionary['IndependentData'] = numpy.array([X, Y])
    equation.dataCache.FindOrCreateAllDataCache(equation)
    Z = equation.CalculateModelPredictions(equation.solvedCoefficients, equation.dataCache.allDataCacheDictionary)
    equation.dataCache = tempcache # restore the original data cache
    axes.plot(x_data, y_data, 'o')
    axes.set_title('Contour Plot') # add a title for contour plot
    axes.set_xlabel('X Data') # X axis data label
    axes.set_ylabel('Y Data') # Y axis data label
    CS = matplotlib.pyplot.contour(X, Y, Z, numberOfContourLines, colors='k')
    matplotlib.pyplot.clabel(CS, inline=1, fontsize=10) # labels for contours
    canvas.draw()
    plt.close('all') # clean up after using pyplot or else there can be memory and process problems
    return [canvas.get_tk_widget(), f]
def ScatterPlot(parent, equation):
    """Return [tk widget, Figure] with a rotatable 3D scatter plot of the
    raw data."""
    figure = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    canvas = FigureCanvasTkAgg(figure, master=parent)
    matplotlib.pyplot.grid(True)
    axes = Axes3D(figure, auto_add_to_figure=False)
    figure.add_axes(axes)
    cachedData = equation.dataCache.allDataCacheDictionary
    axes.scatter(cachedData['IndependentData'][0],
                 cachedData['IndependentData'][1],
                 cachedData['DependentData'],
                 depthshade=False, color='k')
    axes.set_title('Scatter Plot (click-drag with mouse)')
    axes.set_xlabel('X Data')
    axes.set_ylabel('Y Data')
    axes.set_zlabel('Z Data')
    canvas.draw()
    plt.close('all')  # release pyplot state, avoiding memory and process problems
    return [canvas.get_tk_widget(), figure]
def AllEquationReport(parent, dim):
    """Return an XYScrolledText widget listing every fittable equation in
    pyeq3's 2D or 3D model collection (selected by dim), rendering pyeq3's
    HTML superscript/subscript markup with tkinter text tags."""
    xyscrolledText = xy_stxt.XYScrolledText(parent, width=textboxWidth, height=textboxHeight, wrap=tk.NONE)
    xyscrolledText.tag_configure("sup", offset=5) # superscript is +5 pixels
    xyscrolledText.tag_configure("sub", offset=-5) # subscript is -5 pixels
    xyscrolledText.tag_configure("bold", font='-weight bold')
    xyscrolledText.tag_configure("italic", font='-slant italic')
    if dim == 2:
        module = pyeq3.Models_2D
    else:
        module = pyeq3.Models_3D
    # walk every equation class in every submodule of the selected collection
    for submodule in inspect.getmembers(module):
        if inspect.ismodule(submodule[1]):
            for equationClass in inspect.getmembers(submodule[1]):
                if inspect.isclass(equationClass[1]):
                    # list a 'Default' and an 'Offset' variant of each equation
                    for extendedVersionName in ['Default', 'Offset']:
                        # if the equation *already* has an offset,
                        # do not add an offset version here
                        if (-1 != extendedVersionName.find('Offset')) and (equationClass[1].autoGenerateOffsetForm == False):
                            continue
                        equation = equationClass[1]('SSQABS', extendedVersionName)
                        equationName = equation.GetDisplayName()
                        moduleName = str(dim) + 'D ' + submodule[0]
                        xyscrolledText.insert(tk.END, moduleName, 'bold')
                        xyscrolledText.insert(tk.END, ' ')
                        xyscrolledText.insert(tk.END, equationName, 'italic')
                        xyscrolledText.insert(tk.END, ' ')
                        html = equation.GetDisplayHTML()
                        # html <br> tags become new line characters
                        html = html.replace('<br>', '\n')
                        # display pyeq3's html superscript and subscript tags;
                        # tkinter has no native HTML widget, and pyeq3's html is
                        # simple and has no *nested* HTML tags - no recursion needed
                        findIter = re.finditer(r'<su.>|</su.>', html)
                        currentIndex = 0
                        endingIndex = len(html)
                        itemCount = 0
                        # matches alternate: even items are opening tags, odd
                        # items are the corresponding closing tags
                        for item in findIter:
                            span = item.span()
                            if not itemCount % 2: # text is *not* within HTML tags
                                t = html[currentIndex:span[0]]
                                xyscrolledText.insert(tk.END, t)
                                currentIndex = span[1] # beginning tag
                            else: # text *is* within html tags
                                if html[span[1]-2] == 'b': # subscript tag ends '...b>'
                                    tag = 'sub'
                                else: # html superscript tag
                                    tag = 'sup'
                                # span[1]-6 excludes the 6-character closing tag
                                t = html[currentIndex:span[1]-6]
                                xyscrolledText.insert(tk.END, t, tag)
                                currentIndex = span[1] # ending tag
                            itemCount += 1
                        # any ending text, or if no tags were found
                        if currentIndex < endingIndex:
                            t = html[currentIndex:endingIndex]
                            xyscrolledText.insert(tk.END, t)
                        xyscrolledText.insert(tk.END, '\n')
    return xyscrolledText
if __name__ == "__main__":
    # stand-alone smoke test: load a previously pickled equation
    # and show it in the results viewer
    import FittingResultsViewer
    root = tk.Tk()
    viewer = FittingResultsViewer.ResultsFrame(root, 'pickledEquationFile')
    viewer.pack()
    root.title("Example tkinterFit - Fitting Results Viewer")
    # manually center the application window on the user display;
    # realize the window first so the size queries return real values
    root.update_idletasks()
    windowWidth = root.winfo_width()
    windowHeight = root.winfo_height()
    offsetX = (root.winfo_screenwidth() // 2) - (windowWidth // 2)  # integer division
    offsetY = (root.winfo_screenheight() // 2) - (windowHeight // 2)  # integer division
    root.geometry('{}x{}+{}+{}'.format(windowWidth, windowHeight, offsetX, offsetY))
    root.mainloop()
|
jstone-lucasfilm/tkInterFit | FittingInterface.py | import os, sys, queue, pickle, time, inspect
import pyeq3
import matplotlib # ensure this dependency imports for later use in fitting results
import tkinter as tk
import tkinter.ttk
from tkinter import messagebox as tk_mbox
import tkinter.scrolledtext as tk_stxt
import DataForControls as dfc
import FittingThread
class InterfaceFrame(tk.Frame):
def __init__(self, parent):
    """Build the main fitting interface on a tk grid: 2D and 3D data text
    editors (rows 1-2), equation-selection comboboxes (rows 4-5),
    fitting-target radio buttons (rows 7-8) and the Fit buttons (row 10),
    with empty-label rows/columns used as visual spacing."""
    tk.Frame.__init__(self, parent)
    # the worker thread posts status strings (and finally the fitted
    # equation object) to this queue, then raises <<status_update>>
    self.queue = queue.Queue()
    self.equationSelect_2D = tk.IntVar()
    self.equationSelect_3D = tk.IntVar()
    self.fittingTargetSelect_2D = tk.IntVar()
    self.fittingTargetSelect_3D = tk.IntVar()
    # ROW 0 - empty labels as visual buffers
    row, col = (0, 0) # left edge
    l = tk.Label(self, text=" ")
    l.grid(column=col, row=row)
    row, col = (0, 2) # center
    l = tk.Label(self, text=" ")
    l.grid(column=col, row=row)
    row, col = (0, 4) # right edge
    l = tk.Label(self, text=" ")
    l.grid(column=col, row=row)
    # ROW 1 - text data entry labels
    # no "self" needed as no later references exist
    row, col = (1, 1)
    l = tk.Label(self, text="--- 2D Data Text Editor ---", font="-weight bold")
    l.grid(column=col, row=row)
    row, col = (1, 3)
    l = tk.Label(self, text="--- 3D Data Text Editor ---", font="-weight bold")
    l.grid(column=col, row=row)
    # ROW 2 - text data input, no line wrap
    row, col = (2, 1)
    self.text_2D = tk_stxt.ScrolledText(self, width=45, height=12, wrap=tk.NONE)
    self.text_2D.insert(tk.END, dfc.exampleText_2D) # inital text data
    self.text_2D.grid(column=col, row=row, sticky=(tk.N, tk.W, tk.E, tk.S))
    row, col = (2, 3)
    self.text_3D = tk_stxt.ScrolledText(self, width=45, height=12, wrap=tk.NONE)
    self.text_3D.insert(tk.END, dfc.exampleText_3D) # inital text data
    self.text_3D.grid(column=col, row=row, sticky=(tk.N, tk.W, tk.E, tk.S))
    # ROW 3 - empty label as visual buffer
    row, col = (3, 0)
    l = tk.Label(self, text=" ")
    l.grid(column=col, row=row)
    # ROW 4 - equation selection labels
    # no "self" needed as no later references exist
    row, col = (4, 1)
    l = tk.Label(self, text="--- Standard 2D Equations ---", font="-weight bold")
    l.grid(column=col, row=row)
    row, col = (4, 3)
    l = tk.Label(self, text="--- Standard 3D Equations ---", font="-weight bold")
    l.grid(column=col, row=row)
    # ROW 5 - equation selection: a module combobox above an equation
    # combobox; the module selection drives the equation list contents
    row, col = (5, 1)
    f = tk.Frame(self)
    f.grid(column=col, row=row)
    self.cb_Modules2D = tk.ttk.Combobox(f, state='readonly')
    self.cb_Modules2D['values'] = sorted(list(dfc.eq_od2D.keys()))
    self.cb_Modules2D.bind("<<ComboboxSelected>>", self.moduleSelectChanged_2D)
    self.cb_Modules2D.set('Polynomial')
    self.cb_Modules2D.pack(anchor=tk.W)
    self.cb_Equations2D = tk.ttk.Combobox(f, state='readonly')
    self.cb_Equations2D['width'] = 50
    self.cb_Equations2D['values'] = sorted(list(dfc.eq_od2D['Polynomial'].keys()))
    self.cb_Equations2D.set('1st Order (Linear)')
    self.cb_Equations2D.pack(anchor=tk.W)
    row, col = (5, 3)
    f = tk.Frame(self)
    f.grid(column=col, row=row)
    self.cb_Modules3D = tk.ttk.Combobox(f, state='readonly')
    self.cb_Modules3D['values'] = sorted(list(dfc.eq_od3D.keys()))
    self.cb_Modules3D.bind("<<ComboboxSelected>>", self.moduleSelectChanged_3D)
    self.cb_Modules3D.set('Polynomial')
    self.cb_Modules3D.pack(anchor=tk.W)
    self.cb_Equations3D = tk.ttk.Combobox(f, state='readonly')
    self.cb_Equations3D['width'] = 50
    self.cb_Equations3D['values'] = sorted(list(dfc.eq_od3D['Polynomial'].keys()))
    self.cb_Equations3D.set('Linear')
    self.cb_Equations3D.pack(anchor=tk.W)
    # ROW 6 - empty label as visual buffer
    row, col = (6, 0)
    l = tk.Label(self, text=" ")
    l.grid(column=col, row=row)
    # ROW 7 - fitting target selection labels
    # no "self" needed as no later references exist
    row, col = (7, 1)
    l = tk.Label(self, text="--- Fitting Target 2D ---", font="-weight bold")
    l.grid(column=col, row=row)
    row, col = (7, 3)
    l = tk.Label(self, text="--- Fitting Target 3D ---", font="-weight bold")
    l.grid(column=col, row=row)
    # ROW 8 - fitting target selection radio buttons; each button's value
    # is its index into dfc.fittingTargetList
    row, col = (8, 1)
    f = tk.Frame(self)
    f.grid(column=col, row=row)
    index=0
    for fittingTargetText in dfc.fittingTargetList:
        rb = tk.Radiobutton(f, text=fittingTargetText, variable=self.fittingTargetSelect_2D, value=index)
        rb.pack(anchor=tk.W)
        index += 1
    row, col = (8, 3)
    f = tk.Frame(self)
    f.grid(column=col, row=row)
    index=0
    for fittingTargetText in dfc.fittingTargetList:
        rb = tk.Radiobutton(f, text=fittingTargetText, variable=self.fittingTargetSelect_3D, value=index)
        rb.pack(anchor=tk.W)
        index += 1
    # ROW 9 - empty label as visual buffer
    row, col = (9, 0)
    l = tk.Label(self, text=" ")
    l.grid(column=col, row=row)
    # ROW 10 - fitting buttons (kept as attributes so the fit handlers
    # can disable them while a fitting thread is running)
    row, col = (10, 1)
    self.buttonFit_2D = tk.Button(self, text="Fit 2D Text Data", command=self.OnFit_2D)
    self.buttonFit_2D.grid(column=col, row=row)
    row, col = (10, 3)
    self.buttonFit_3D = tk.Button(self, text="Fit 3D Text Data", command=self.OnFit_3D)
    self.buttonFit_3D.grid(column=col, row=row)
    # ROW 11 - empty label as visual buffer
    row, col = (11, 0)
    l = tk.Label(self, text=" ")
    l.grid(column=col, row=row)
    # now bind our custom ""status_update"" event to the handler function
    self.bind('<<status_update>>', self.StatusUpdateHandler)
def moduleSelectChanged_2D(self, event):
    # repopulate the 2D equation combobox for the newly selected module
    selectedModule = event.widget.get()
    self.cb_Equations2D['values'] = sorted(list(dfc.eq_od2D[selectedModule].keys()))
    self.cb_Equations2D.current(0)
def moduleSelectChanged_3D(self, event):
    # repopulate the 3D equation combobox for the newly selected module
    selectedModule = event.widget.get()
    self.cb_Equations3D['values'] = sorted(list(dfc.eq_od3D[selectedModule].keys()))
    self.cb_Equations3D.current(0)
def OnFit_2D(self):
textData = self.text_2D.get("1.0", tk.END)
moduleSelection = self.cb_Modules2D.get()
equationSelection = self.cb_Equations2D.get()
fittingTargetSelection = dfc.fittingTargetList[self.fittingTargetSelect_2D.get()]
# the GUI's fitting target string contains what we need - extract it
fittingTarget = fittingTargetSelection.split('(')[1].split(')')[0]
item = dfc.eq_od2D[moduleSelection][equationSelection]
eqString = 'pyeq3.Models_2D.' + moduleSelection + '.' + item[0] + "('" + fittingTarget + "','" + item[1] + "')"
self.equation = eval(eqString)
# convert text to numeric data checking for log of negative numbers, etc.
try:
pyeq3.dataConvertorService().ConvertAndSortColumnarASCII(textData, self.equation, False)
except:
tk_mbox.showerror("Error", self.equation.reasonWhyDataRejected)
return
# check for number of coefficients > number of data points to be fitted
coeffCount = len(self.equation.GetCoefficientDesignators())
dataCount = len(self.equation.dataCache.allDataCacheDictionary['DependentData'])
if coeffCount > dataCount:
tk_mbox.showerror("Error", "This equation requires a minimum of " + str(coeffCount) + " data points, you have supplied " + repr(dataCount) + ".")
return
# Now the status dialog is used. Disable fitting buttons until thread completes
self.buttonFit_2D.config(state=tk.DISABLED)
self.buttonFit_3D.config(state=tk.DISABLED)
# create simple top-level text dialog to display status as fitting progresses
# when the fitting thread completes, it will close the status box
self.statusBox = tk.Toplevel()
self.statusBox.title("Fitting Status")
self.statusBox.text = tk.Text(self.statusBox)
self.statusBox.text.pack()
# in tkinter the status box must be manually centered
self.statusBox.update_idletasks()
width = self.statusBox.winfo_width()
height = self.statusBox.winfo_height()
x = (self.statusBox.winfo_screenwidth() // 2) - (width // 2) # integer division
y = (self.statusBox.winfo_screenheight() // 2) - (height // 2) # integer division
self.statusBox.geometry('{}x{}+{}+{}'.format(width, height, x, y))
# thread will automatically start to run
# "status update" handler will re-enable buttons
self.fittingWorkerThread = FittingThread.FittingThread(self, self.equation)
def OnFit_3D(self):
textData = self.text_3D.get("1.0", tk.END)
moduleSelection = self.cb_Modules3D.get()
equationSelection = self.cb_Equations3D.get()
fittingTargetSelection = dfc.fittingTargetList[self.fittingTargetSelect_3D.get()]
# the GUI's fitting target string contains what we need - extract it
fittingTarget = fittingTargetSelection.split('(')[1].split(')')[0]
item = dfc.eq_od3D[moduleSelection][equationSelection]
eqString = 'pyeq3.Models_3D.' + moduleSelection + '.' + item[0] + "('" + fittingTarget + "','" + item[1] + "')"
self.equation = eval(eqString)
# convert text to numeric data checking for log of negative numbers, etc.
try:
pyeq3.dataConvertorService().ConvertAndSortColumnarASCII(textData, self.equation, False)
except:
tk_mbox.showerror("Error", self.equation.reasonWhyDataRejected)
return
# check for number of coefficients > number of data points to be fitted
coeffCount = len(self.equation.GetCoefficientDesignators())
dataCount = len(self.equation.dataCache.allDataCacheDictionary['DependentData'])
if coeffCount > dataCount:
tk_mbox.showerror("Error", "This equation requires a minimum of " + str(coeffCount) + " data points, you have supplied " + repr(dataCount) + ".")
return
# Now the status dialog is used. Disable fitting buttons until thread completes
self.buttonFit_2D.config(state=tk.DISABLED)
self.buttonFit_3D.config(state=tk.DISABLED)
# create simple top-level text dialog to display status as fitting progresses
# when the fitting thread completes, it will close the status box
self.statusBox = tk.Toplevel()
self.statusBox.title("Fitting Status")
self.statusBox.text = tk.Text(self.statusBox)
self.statusBox.text.pack()
# in tkinter the status box must be manually centered
self.statusBox.update_idletasks()
width = self.statusBox.winfo_width()
height = self.statusBox.winfo_height()
x = (self.statusBox.winfo_screenwidth() // 2) - (width // 2) # integer division
y = (self.statusBox.winfo_screenheight() // 2) - (height // 2) # integer division
self.statusBox.geometry('{}x{}+{}+{}'.format(width, height, x, y))
# thread will automatically start to run
# "status update" handler will re-enable buttons
self.fittingWorkerThread = FittingThread.FittingThread(self, self.equation)
# When "status_update" event is generated, get
# text data from queue and display it to the user.
# If the queue data is not text, it is the fitted equation.
def StatusUpdateHandler(self, *args):
data = self.queue.get_nowait()
if type(data) == type(''): # text is used for status box display to user
self.statusBox.text.insert(tk.END, data + '\n')
else: # the queue data is now the fitted equation.
# write the fitted equation to a pickle file. This
# allows the possibility of archiving the fitted equations
pickledEquationFile = open("pickledEquationFile", "wb")
pickle.dump(data, pickledEquationFile)
pickledEquationFile.close()
# view fitting results
p = os.popen(sys.executable + ' FittingResultsViewer.py')
p.close()
# re-enable fitting buttons
self.buttonFit_2D.config(state=tk.NORMAL)
self.buttonFit_3D.config(state=tk.NORMAL)
# destroy the now-unused status box
self.statusBox.destroy()
if __name__ == "__main__":
    # Run the fitting interface as a stand-alone application.
    root = tk.Tk()
    interface = InterfaceFrame(root)
    interface.pack()
    root.title("tkinterFit - Curve And Surface Fitting Interface")

    # manually center the application window on the user display
    root.update_idletasks()
    windowWidth = root.winfo_width()
    windowHeight = root.winfo_height()
    xPosition = (root.winfo_screenwidth() // 2) - (windowWidth // 2)  # integer division
    yPosition = (root.winfo_screenheight() // 2) - (windowHeight // 2)  # integer division
    root.geometry('{}x{}+{}+{}'.format(windowWidth, windowHeight, xPosition, yPosition))

    root.mainloop()
|
jstone-lucasfilm/tkInterFit | FittingResultsViewer.py | <gh_stars>0
import pickle
import pyeq3
import tkinter as tk
from tkinter import ttk as ttk
from tkinter import messagebox as tk_mbox
import tkinter.scrolledtext as tk_stxt
from tkinter import filedialog as filedialog
import IndividualReports
import AdditionalInfo
class ResultsFrame(tk.Frame):
    """Notebook-based viewer for a fitted pyeq3 equation.

    Loads a pickled equation and presents graph reports, text reports,
    generated source code in several languages, additional project
    information, the list of all standard equations, and a "Save To PDF"
    tab.  The three *ListForPDF attributes collect [content, title] pairs
    that createPDF() later writes to a PDF file.
    """

    def __init__(self, parent, pickledEquationFileName):
        tk.Frame.__init__(self, parent)
        self.graphReportsListForPDF = []
        self.textReportsListForPDF = []
        self.sourceCodeReportsListForPDF = []

        # first, load the fitted equation
        with open(pickledEquationFileName, 'rb') as equationFile:
            self.equation = pickle.load(equationFile)

        topLevelNotebook = ttk.Notebook(self)
        topLevelNotebook.pack()

        # one helper per top-level tab, in display order
        self._createGraphReportsTab(topLevelNotebook)
        self._createTextReportsTab(topLevelNotebook)
        self._createSourceCodeTab(topLevelNotebook)
        self._createAdditionalInfoTab(topLevelNotebook)
        self._createAllEquationsTab(topLevelNotebook)
        self._createSaveToPDFTab(topLevelNotebook)

    def _createGraphReportsTab(self, topLevelNotebook):
        # the "graph reports" notebook tab; each report is a
        # (matplotlib canvas widget, figure) pair from IndividualReports
        nbGraphReports = ttk.Notebook(topLevelNotebook)
        nbGraphReports.pack()
        topLevelNotebook.add(nbGraphReports, text='Graph Reports')

        if self.equation.GetDimensionality() == 2:
            graphReports = [
                (IndividualReports.ModelScatterConfidenceGraph(nbGraphReports, self.equation, scatterplotOnlyFlag=False), "Model With 95% Confidence"),
                (IndividualReports.ModelScatterConfidenceGraph(nbGraphReports, self.equation, scatterplotOnlyFlag=True), "Scatter Plot"),
            ]
        else:
            graphReports = [
                (IndividualReports.SurfacePlot(nbGraphReports, self.equation), "Surface Plot"),
                (IndividualReports.ContourPlot(nbGraphReports, self.equation), "Contour Plot"),
                (IndividualReports.ScatterPlot(nbGraphReports, self.equation), "Scatter Plot"),
            ]

        graphReports.append((IndividualReports.AbsoluteErrorGraph(nbGraphReports, self.equation), "Absolute Error"))
        graphReports.append((IndividualReports.AbsoluteErrorHistogram(nbGraphReports, self.equation), "Absolute Error Histogram"))

        # percent error is undefined when any dependent data value is zero
        if self.equation.dataCache.DependentDataContainsZeroFlag != 1:
            graphReports.append((IndividualReports.PercentErrorGraph(nbGraphReports, self.equation), "Percent Error"))
            graphReports.append((IndividualReports.PercentErrorHistogram(nbGraphReports, self.equation), "Percent Error Histogram"))

        for report, reportTitle in graphReports:
            nbGraphReports.add(report[0], text=reportTitle)
            self.graphReportsListForPDF.append([report[1], reportTitle])

    def _createTextReportsTab(self, topLevelNotebook):
        # the "text reports" notebook tab; each report is a tk.Text widget
        nbTextReports = ttk.Notebook(topLevelNotebook)
        nbTextReports.pack()
        topLevelNotebook.add(nbTextReports, text='Text Reports')

        textReports = [
            (IndividualReports.CoefficientAndFitStatistics(nbTextReports, self.equation), "Coefficient And Fit Statistics"),
            (IndividualReports.CoefficientListing(nbTextReports, self.equation), "Coefficient Listing"),
            (IndividualReports.DataArrayStatisticsReport(nbTextReports, 'Absolute Error Statistics', self.equation.modelAbsoluteError), "Absolute Error Statistics"),
        ]

        # percent error is undefined when any dependent data value is zero
        if self.equation.dataCache.DependentDataContainsZeroFlag != 1:
            textReports.append((IndividualReports.DataArrayStatisticsReport(nbTextReports, 'Percent Error Statistics', self.equation.modelPercentError), "Percent Error Statistics"))

        for report, reportTitle in textReports:
            nbTextReports.add(report, text=reportTitle)
            self.textReportsListForPDF.append([report.get("1.0", tk.END), reportTitle])

    def _createSourceCodeTab(self, topLevelNotebook):
        # the "source code" notebook tab: generated code in each language.
        # (pyeq3 language code, tab title) pairs - only C++ has a differing title
        nbSourceCodeReports = ttk.Notebook(topLevelNotebook)
        nbSourceCodeReports.pack()
        topLevelNotebook.add(nbSourceCodeReports, text='Source Code')

        languages = [('CPP', 'C++'), ('CSHARP', 'CSHARP'), ('VBA', 'VBA'),
                     ('PYTHON', 'PYTHON'), ('JAVA', 'JAVA'),
                     ('JAVASCRIPT', 'JAVASCRIPT'), ('JULIA', 'JULIA'),
                     ('SCILAB', 'SCILAB'), ('MATLAB', 'MATLAB'),
                     ('FORTRAN90', 'FORTRAN90')]
        for languageCode, reportTitle in languages:
            report = IndividualReports.SourceCodeReport(nbSourceCodeReports, self.equation, languageCode)
            nbSourceCodeReports.add(report, text=reportTitle)
            self.sourceCodeReportsListForPDF.append([report.get("1.0", tk.END), reportTitle + " Source Code"])

    def _createAdditionalInfoTab(self, topLevelNotebook):
        # the "additional information" notebook tab
        nbAdditionalInfo = ttk.Notebook(topLevelNotebook)
        nbAdditionalInfo.pack()
        topLevelNotebook.add(nbAdditionalInfo, text='Additional Information')

        for infoText, tabTitle in [(AdditionalInfo.history, "Fitting History"),
                                   (AdditionalInfo.author, "Author History"),
                                   (AdditionalInfo.links, "Web Links")]:
            scrolledText = tk_stxt.ScrolledText(nbAdditionalInfo, width=IndividualReports.textboxWidth, height=IndividualReports.textboxHeight, wrap=tk.WORD)
            nbAdditionalInfo.add(scrolledText, text=tabTitle)
            scrolledText.insert(tk.END, infoText)

    def _createAllEquationsTab(self, topLevelNotebook):
        # the "list of all standard equations" notebook tab
        dim = self.equation.GetDimensionality()
        allEquations = IndividualReports.AllEquationReport(topLevelNotebook, dim)
        allEquations.pack()
        topLevelNotebook.add(allEquations, text="List Of All Standard " + str(dim) + "D Equations")

    def _createSaveToPDFTab(self, topLevelNotebook):
        # the "Save To PDF" tab
        fsaveFrame = tk.Frame(self)

        # this label is only for visual spacing
        l = tk.Label(fsaveFrame, text="\n\n\n")
        l.pack()

        buttonSavePDF = tk.Button(fsaveFrame, text="Save To PDF", command=self.createPDF, height=0, width=0)
        buttonSavePDF.pack()
        topLevelNotebook.add(fsaveFrame, text="Save To PDF File")

    def createPDF(self):
        """Prompt for a file name and write all collected reports to a PDF file."""
        try:
            import reportlab
        except ImportError:
            tk_mbox.showerror("Error", "\nCould not import reportlab.\n\nPlease install using the command\n\n'pip3 install reportlab'")
            return

        # see https://bugs.python.org/issue22810 for the
        # "alloc: invalid block" error on application close
        fName = filedialog.asksaveasfilename(
            filetypes=(("PDF Files", "*.pdf"), ("All Files", "*.*")),
            title="PDF file name"
        )

        if fName:
            import pdfCode
            pdfCode.CreatePDF(fName,
                              self.equation,
                              self.graphReportsListForPDF,
                              self.textReportsListForPDF,
                              self.sourceCodeReportsListForPDF
                              )
            tk_mbox.showinfo("Success", "\nSuccessfully created PDF file.")
if __name__ == "__main__":
    # Run the results viewer as a stand-alone application, loading the
    # equation that the fitting interface pickled to disk.
    root = tk.Tk()
    interface = ResultsFrame(root, 'pickledEquationFile')
    interface.pack()
    root.title("Example tkinterFit - Fitting Results Viewer")

    # manually center the application window on the user display
    root.update_idletasks()
    windowWidth = root.winfo_width()
    windowHeight = root.winfo_height()
    xPosition = (root.winfo_screenwidth() // 2) - (windowWidth // 2)  # integer division
    yPosition = (root.winfo_screenheight() // 2) - (windowHeight // 2)  # integer division
    root.geometry('{}x{}+{}+{}'.format(windowWidth, windowHeight, xPosition, yPosition))

    root.mainloop()
|
jstone-lucasfilm/tkInterFit | DataForControls.py | import collections, inspect
import pyeq3
# Default contents of the GUI's 2D data-entry text box.  Per the notes
# embedded in the example itself, header lines and trailing text on a
# data line are ignored by the data converter.
exampleText_2D = """\
Example 2D data for testing
Paste your own 2D data here
X Y
5.357 10.376
5.457 10.489
5.936 11.049
6.161 11.327 ending text is ignored
6.697 12.054
8.442 14.744
9.769 17.068
9.861 17.104
"""
# Default contents of the GUI's 3D data-entry text box (X, Y and Z columns).
exampleText_3D = """\
Example 3D data for testing
Paste your own 3D data here
X Y Z
3.017 2.175 0.0320
2.822 2.624 0.0629
1.784 3.144 6.570
2.0 2.6 4.0 ending text is ignored
1.712 3.153 6.721
2.972 2.106 0.0313
2.719 2.542 0.0643
1.479 2.957 6.583
1.387 2.963 6.744
2.843 1.984 0.0315
2.485 2.320 0.0639
0.742 2.568 6.581
0.607 2.571 6.753
"""
# Fitting-target choices shown as radio buttons in the GUI; the code in
# parentheses (e.g. SSQABS) is extracted from the string and passed to pyeq3.
fittingTargetList = ['Lowest Sum Of Squared Absolute Error (SSQABS)',
                     'Lowest Sum Of Squared Log[Pred/Actual] (LNQREL)',
                     'Lowest Sum Of Squared Relative Error (SSQREL)',
                     'Lowest Sum Of Squared Orthogonal Distance (ODR)',
                     'Lowest Akaike Information Criterion (AIC)',
                     ]
# these require additional user input - available in zunzunsite3 but not this project
# pyeq3 submodules skipped when building the 2D equation dictionary
excludedModuleNames_2D = [
    'Polyfunctional',
    'Rational',
    'Spline',
    'UserDefinedFunction',
]
# pyeq3 submodules skipped when building the 3D equation dictionary
excludedModuleNames_3D = [
    'Polyfunctional',
    'Spline',
    'UserDefinedFunction',
]
def _BuildEquationDictionary(modelsModule, excludedModuleNames):
    """Scan a pyeq3 models package (Models_2D or Models_3D) for usable equations.

    Returns an OrderedDict mapping submodule name -> OrderedDict of
    equation display name -> [equation class name, extended version name].
    Submodules listed in excludedModuleNames and equations that require
    special user input are skipped.
    """
    equationDict = collections.OrderedDict()
    for submoduleName, submodule in inspect.getmembers(modelsModule):
        if not inspect.ismodule(submodule):
            continue
        if submoduleName in excludedModuleNames:
            continue
        equationDict[submoduleName] = collections.OrderedDict()
        for equationClassName, equationClass in inspect.getmembers(submodule):
            if not inspect.isclass(equationClass):
                continue
            for extendedVersionName in ['Default', 'Offset']:
                # if the equation *already* has an offset, do not add an offset version here
                if 'Offset' in extendedVersionName and equationClass.autoGenerateOffsetForm == False:
                    continue
                # if the equation requires special user input, exclude here
                if equationClass.userSelectablePolynomialFlag or \
                   equationClass.userCustomizablePolynomialFlag or \
                   equationClass.userSelectableRationalFlag:
                    continue
                equation = equationClass('SSQABS', extendedVersionName)
                equationName = equation.GetDisplayName()
                equationDict[submoduleName][equationName] = [equationClassName, extendedVersionName]
    return equationDict

# equation lookup dictionaries used by the GUI combo boxes
eq_od2D = _BuildEquationDictionary(pyeq3.Models_2D, excludedModuleNames_2D)
eq_od3D = _BuildEquationDictionary(pyeq3.Models_3D, excludedModuleNames_3D)
|
jstone-lucasfilm/tkInterFit | pdfCode.py | <reponame>jstone-lucasfilm/tkInterFit
import time, os, glob
import reportlab
import reportlab.platypus
from reportlab.pdfgen import canvas
from reportlab.lib.units import mm
import reportlab.lib.pagesizes
# from http://code.activestate.com/recipes/576832-improved-reportlab-recipe-for-page-x-of-y/
class NumberedCanvas(canvas.Canvas):
    """Canvas that buffers finished pages so each can be stamped with
    "Page x of y" once the total page count is known."""
    def __init__(self, *args, **kwargs):
        canvas.Canvas.__init__(self, *args, **kwargs)
        # one snapshot of the canvas state per finished page
        self._saved_page_states = []
    def showPage(self):
        # defer page emission: snapshot the state and start a fresh page
        self._saved_page_states.append(dict(self.__dict__))
        self._startPage()
    def save(self):
        """add page info to each page (page x of y)"""
        num_pages = len(self._saved_page_states)
        for state in self._saved_page_states:
            # restore the saved page, stamp the footer, then really emit it
            self.__dict__.update(state)
            self.draw_page_number(num_pages)
            canvas.Canvas.showPage(self)
        canvas.Canvas.save(self)
    def draw_page_number(self, page_count):
        # footer: page counter on the right, project URL near the left margin
        self.setFont("Helvetica", 7)
        self.drawRightString(200*mm, 20*mm, "Page %d of %d" % (self._pageNumber, page_count))
        self.drawCentredString(25*mm, 20*mm, 'https://github.com/zunzun/tkInterFit')
def CreatePDF(inFileAndPathName, inEquation, inGraphList, inTextList, inSourceCodeList):
pageElements = []
styles = reportlab.lib.styles.getSampleStyleSheet()
styles.add(reportlab.lib.styles.ParagraphStyle(name='CenteredBodyText', parent=styles['BodyText'], alignment=reportlab.lib.enums.TA_CENTER))
styles.add(reportlab.lib.styles.ParagraphStyle(name='SmallCode', parent=styles['Code'], fontSize=6, alignment=reportlab.lib.enums.TA_LEFT)) # 'Code' and wordwrap=CJK causes problems
myTableStyle = [('ALIGN', (1,1), (-1,-1), 'CENTER'),
('VALIGN', (1,1), (-1,-1), 'MIDDLE')]
tableRow = ['tkInterFit'] # originally included images that are now unused
table = reportlab.platypus.Table([tableRow], style=myTableStyle)
pageElements.append(table)
pageElements.append(reportlab.platypus.XPreformatted('<br/><br/><br/><br/>', styles['CenteredBodyText']))
pageElements.append(reportlab.platypus.Paragraph(inEquation.GetDisplayName(), styles['CenteredBodyText']))
pageElements.append(reportlab.platypus.XPreformatted('<br/><br/>', styles['CenteredBodyText']))
pageElements.append(reportlab.platypus.XPreformatted('<br/><br/>', styles['CenteredBodyText']))
pageElements.append(reportlab.platypus.Paragraph("Created " + time.asctime(time.localtime()), styles['CenteredBodyText']))
pageElements.append(reportlab.platypus.PageBreak())
# make a page for each report output, with report name as page header
filenamePrefix = 'temp_'
index = 1
for report in inGraphList:
pageElements.append(reportlab.platypus.XPreformatted('<br/><br/>', styles['CenteredBodyText']))
# I could not get io.BytesIO and ImageReader to work, using files instead
# each image file name must be unique for reportlab's use - delete when done
fname = filenamePrefix + str(index) + '.png'
report[0].savefig(fname, format='png')
im = reportlab.platypus.Image(fname)
im._restrictSize(600, 600) # if image is too large for one page
im.hAlign = 'CENTER'
pageElements.append(im)
index += 1
pageElements.append(reportlab.platypus.PageBreak())
for report in inTextList + inSourceCodeList:
pageElements.append(reportlab.platypus.Preformatted(report[1], styles['SmallCode']))
pageElements.append(reportlab.platypus.XPreformatted('<br/><br/><br/>', styles['CenteredBodyText']))
replacedText = report[0]
if -1 != report[1].find('Coefficients'):
reportText = reportText.replace('<sup>', '^')
reportText = reportText.replace('<SUP>', '^')
replacedText = replacedText.replace('\t', ' ') # convert tabs to four spaces
replacedText = replacedText.replace('\r\n', '\n')
rebuiltText = ''
for line in replacedText.split('\n'):
if line == '':
rebuiltText += '\n'
else:
if line[0] == '<':
splitLine = line.split('>')
if len(splitLine) > 1:
newLine = splitLine[len(splitLine)-1]
else:
newLine = ''
else:
newLine = line
# crude line wrapping
if len(newLine) > 500:
rebuiltText += newLine[:100] + '\n'
rebuiltText += newLine[100:200] + '\n'
rebuiltText += newLine[200:300] + '\n'
rebuiltText += newLine[300:400] + '\n'
rebuiltText += newLine[400:500] + '\n'
rebuiltText += newLine[500:] + '\n'
elif len(newLine) > 400:
rebuiltText += newLine[:100] + '\n'
rebuiltText += newLine[100:200] + '\n'
rebuiltText += newLine[200:300] + '\n'
rebuiltText += newLine[300:400] + '\n'
rebuiltText += newLine[400:] + '\n'
elif len(newLine) > 300:
rebuiltText += newLine[:100] + '\n'
rebuiltText += newLine[100:200] + '\n'
rebuiltText += newLine[200:300] + '\n'
rebuiltText += newLine[300:] + '\n'
elif len(newLine) > 200:
rebuiltText += newLine[:100] + '\n'
rebuiltText += newLine[100:200] + '\n'
rebuiltText += newLine[200:] + '\n'
elif len(newLine) > 100:
rebuiltText += newLine[:100] + '\n'
rebuiltText += newLine[100:] + '\n'
else:
rebuiltText += newLine + '\n'
pageElements.append(reportlab.platypus.Preformatted(rebuiltText, styles['SmallCode']))
pageElements.append(reportlab.platypus.PageBreak())
doc = reportlab.platypus.SimpleDocTemplate(inFileAndPathName, pagesize=reportlab.lib.pagesizes.letter)
doc.build(pageElements, canvasmaker=NumberedCanvas)
# Done, now delete the temporary image files
for fname in glob.glob(filenamePrefix + "*.png"):
os.remove(fname)
|
paopaofi/stingray | stingray/powerspectrum.py | <gh_stars>1-10
from __future__ import division
import numpy as np
import scipy
import scipy.stats
import scipy.fftpack
import scipy.optimize
import logging
import stingray.lightcurve as lightcurve
import stingray.utils as utils
from stingray.gti import bin_intervals_from_gtis, check_gtis
from stingray.utils import simon
from stingray.crossspectrum import Crossspectrum, AveragedCrossspectrum
__all__ = ["Powerspectrum", "AveragedPowerspectrum", "DynamicalPowerspectrum"]
def classical_pvalue(power, nspec):
    """
    Compute the probability of detecting the current power under
    the assumption that there is no periodic oscillation in the data.

    This is the *single-trial* p-value of the observed power under the
    null hypothesis that the data contain no signal.  The calculation is
    only valid when:

    1. the powers in the power spectrum follow a chi-square distribution
    2. the power spectrum is normalized according to [Leahy 1983]_, such
       that the powers have a mean of 2 and a variance of 4
    3. there is only white noise in the light curve, i.e. no aperiodic
       variability that would change the overall shape of the power
       spectrum.

    When more than one power or more than one power spectrum is tested,
    the resulting p-value must be corrected for the number of trials
    (Bonferroni correction).

    Mathematical formulation in [Groth 1975]_.
    Original implementation in IDL by <NAME>.

    Parameters
    ----------
    power : float
        The squared Fourier amplitude of a spectrum to be evaluated

    nspec : int
        The number of spectra or frequency bins averaged in ``power``.
        Averaging increases the signal-to-noise ratio, i.e. narrows the
        statistical distributions of the noise, so a smaller power can be
        significant in averaged spectra even though it would not be in a
        single power spectrum.

    Returns
    -------
    pval : float
        The classical p-value of the observed power being consistent with
        the null hypothesis of white noise

    References
    ----------
    * .. [Leahy 1983] https://ui.adsabs.harvard.edu/#abs/1983ApJ...266..160L/abstract
    * .. [Groth 1975] https://ui.adsabs.harvard.edu/#abs/1975ApJS...29..285G/abstract
    """
    # validate inputs: both must be finite, power non-negative and
    # nspec a positive integer
    if not np.isfinite(power):
        raise ValueError("power must be a finite floating point number!")

    if power < 0:
        raise ValueError("power must be a positive real number!")

    if not np.isfinite(nspec):
        raise ValueError("nspec must be a finite integer number")

    if nspec < 1:
        raise ValueError("nspec must be larger or equal to 1")

    if not np.isclose(nspec % 1, 0):
        raise ValueError("nspec must be an integer number!")

    # If the power is really big, it's safe to say it's significant,
    # and the p-value will be nearly zero
    if (power * nspec) > 30000:
        simon("Probability of no signal too miniscule to calculate.")
        return 0.0

    return _pavnosigfun(power, nspec)
def _pavnosigfun(power, nspec):
"""
Helper function doing the actual calculation of the p-value.
Parameters
----------
power : float
The measured candidate power
nspec : int
The number of power spectral bins that were averaged in `power`
(note: can be either through averaging spectra or neighbouring bins)
"""
sum = 0.0
m = nspec - 1
pn = power * nspec
while m >= 0:
s = 0.0
for i in range(int(m) - 1):
s += np.log(float(m - i))
logterm = m * np.log(pn / 2) - pn / 2 - s
term = np.exp(logterm)
ratio = sum / term
if ratio > 1.0e15:
return sum
sum += term
m -= 1
return sum
class Powerspectrum(Crossspectrum):
"""
Make a :class:`Powerspectrum` (also called periodogram) from a (binned) light curve.
Periodograms can be normalized by either Leahy normalization, fractional rms
normalizaation, absolute rms normalization, or not at all.
You can also make an empty :class:`Powerspectrum` object to populate with your
own fourier-transformed data (this can sometimes be useful when making
binned power spectra).
Parameters
----------
lc: :class:`stingray.Lightcurve` object, optional, default ``None``
The light curve data to be Fourier-transformed.
norm: {``leahy`` | ``frac`` | ``abs`` | ``none`` }, optional, default ``frac``
The normaliation of the power spectrum to be used. Options are
``leahy``, ``frac``, ``abs`` and ``none``, default is ``frac``.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
Attributes
----------
norm: {``leahy`` | ``frac`` | ``abs`` | ``none`` }
the normalization of the power spectrun
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of normalized squared absolute values of Fourier
amplitudes
power_err: numpy.ndarray
The uncertainties of ``power``.
An approximation for each bin given by ``power_err= power/sqrt(m)``.
Where ``m`` is the number of power averaged in each bin (by frequency
binning, or averaging power spectrum). Note that for a single
realization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged powers in each bin
n: int
The number of data points in the light curve
nphots: float
The total number of photons in the light curve
"""
    def __init__(self, lc=None, norm='frac', gti=None):
        """Build the power spectrum as the cross spectrum of ``lc`` with itself."""
        Crossspectrum.__init__(self, lc1=lc, lc2=lc, norm=norm, gti=gti)
        # both "light curves" are the same, so the photon count is nphots1
        self.nphots = self.nphots1
def rebin(self, df=None, f=None, method="mean"):
"""
Rebin the power spectrum.
Parameters
----------
df: float
The new frequency resolution
Other Parameters
----------------
f: float
the rebin factor. If specified, it substitutes ``df`` with ``f*self.df``
Returns
-------
bin_cs = :class:`Powerspectrum` object
The newly binned power spectrum.
"""
bin_ps = Crossspectrum.rebin(self, df=df, f=f, method=method)
bin_ps.nphots = bin_ps.nphots1
return bin_ps
    def compute_rms(self, min_freq, max_freq, white_noise_offset=0.):
        """
        Compute the fractional rms amplitude in the power spectrum
        between two frequencies.

        Parameters
        ----------
        min_freq: float
            The lower frequency bound for the calculation

        max_freq: float
            The upper frequency bound for the calculation

        Other parameters
        ----------------
        white_noise_offset : float, default 0
            This is the white noise level, in Leahy normalization. In the ideal
            case, this is 2. Dead time and other instrumental effects can alter
            it. The user can fit the white noise level outside this function
            and it will get subtracted from powers here.

        Returns
        -------
        rms: float
            The fractional rms amplitude contained between ``min_freq`` and
            ``max_freq``

        rms_err: float
            The error on the fractional rms amplitude

        Raises
        ------
        TypeError
            If the spectrum's normalization is neither ``leahy`` nor ``frac``.
        """
        minind = self.freq.searchsorted(min_freq)
        maxind = self.freq.searchsorted(max_freq)
        powers = self.power[minind:maxind]
        nphots = self.nphots

        if self.norm.lower() == 'leahy':
            # powers are already Leahy-normalized
            powers_leahy = powers.copy()
        elif self.norm.lower() == "frac":
            # convert fractional-rms powers back to Leahy normalization
            # from the unnormalized powers
            powers_leahy = \
                self.unnorm_power[minind:maxind].real * 2 / nphots
        else:
            raise TypeError("Normalization not recognized!")

        rms = np.sqrt(np.sum(powers_leahy - white_noise_offset) / nphots)
        rms_err = self._rms_error(powers_leahy)

        return rms, rms_err
def _rms_error(self, powers):
"""
Compute the error on the fractional rms amplitude using error
propagation.
Note: this uses the actual measured powers, which is not
strictly correct. We should be using the underlying power spectrum,
but in the absence of an estimate of that, this will have to do.
.. math::
r = \sqrt{P}
.. math::
\delta r = \\frac{1}{2 * \sqrt{P}} \delta P
Parameters
----------
powers: iterable
The list of powers used to compute the fractional rms amplitude.
Returns
-------
delta_rms: float
the error on the fractional rms amplitude
"""
nphots = self.nphots
p_err = scipy.stats.chi2(2.0 * self.m).var() * powers / self.m / nphots
rms = np.sum(powers) / nphots
pow = np.sqrt(rms)
drms_dp = 1 / (2 * pow)
sq_sum_err = np.sqrt(np.sum(p_err**2))
delta_rms = sq_sum_err * drms_dp
return delta_rms
def classical_significances(self, threshold=1, trial_correction=False):
"""
Compute the classical significances for the powers in the power
spectrum, assuming an underlying noise distribution that follows a
chi-square distributions with 2M degrees of freedom, where M is the
number of powers averaged in each bin.
Note that this function will *only* produce correct results when the
following underlying assumptions are fulfilled:
1. The power spectrum is Leahy-normalized
2. There is no source of variability in the data other than the
periodic signal to be determined with this method. This is important!
If there are other sources of (aperiodic) variability in the data, this
method will *not* produce correct results, but instead produce a large
number of spurious false positive detections!
3. There are no significant instrumental effects changing the
statistical distribution of the powers (e.g. pile-up or dead time)
By default, the method produces ``(index,p-values)`` for all powers in
the power spectrum, where index is the numerical index of the power in
question. If a ``threshold`` is set, then only powers with p-values
*below* that threshold with their respective indices. If
``trial_correction`` is set to ``True``, then the threshold will be corrected
for the number of trials (frequencies) in the power spectrum before
being used.
Parameters
----------
threshold : float, optional, default ``1``
The threshold to be used when reporting p-values of potentially
significant powers. Must be between 0 and 1.
Default is ``1`` (all p-values will be reported).
trial_correction : bool, optional, default ``False``
A Boolean flag that sets whether the ``threshold`` will be corrected
by the number of frequencies before being applied. This decreases
the ``threshold`` (p-values need to be lower to count as significant).
Default is ``False`` (report all powers) though for any application
where `threshold`` is set to something meaningful, this should also
be applied!
Returns
-------
pvals : iterable
A list of ``(index, p-value)`` tuples for all powers that have p-values
lower than the threshold specified in ``threshold``.
"""
if not self.norm == "leahy":
raise ValueError("This method only works on "
"Leahy-normalized power spectra!")
if np.size(self.m) == 1:
# calculate p-values for all powers
# leave out zeroth power since it just encodes the number of photons!
pv = np.array([classical_pvalue(power, self.m)
for power in self.power])
else:
pv = np.array([classical_pvalue(power, m)
for power, m in zip(self.power, self.m)])
# if trial correction is used, then correct the threshold for
# the number of powers in the power spectrum
if trial_correction:
threshold /= self.power.shape[0]
# need to add 1 to the indices to make up for the fact that
# we left out the first power above!
indices = np.where(pv < threshold)[0]
pvals = np.vstack([pv[indices], indices])
return pvals
class AveragedPowerspectrum(AveragedCrossspectrum, Powerspectrum):
    """
    Make an averaged periodogram from a light curve by segmenting the light
    curve, Fourier-transforming each segment and then averaging the
    resulting periodograms.

    Parameters
    ----------
    lc: :class:`stingray.Lightcurve` object OR iterable of :class:`stingray.Lightcurve` objects
        The light curve data to be Fourier-transformed.

    segment_size: float
        The size of each segment to average. If the total duration of each
        :class:`Lightcurve` in ``lc`` is not an integer multiple of
        ``segment_size``, any fraction left over at the end is lost.

    norm: {``leahy`` | ``frac`` | ``abs`` | ``none``}, optional, default ``frac``
        The normalization of the periodogram to be used.

    Other Parameters
    ----------------
    gti: 2-d float array
        ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
        This choice overrides the GTIs in the single light curves. Use with
        care!

    Attributes
    ----------
    norm: {``leahy`` | ``frac`` | ``abs`` | ``none``}
        the normalization of the periodogram
    freq: numpy.ndarray
        The array of mid-bin frequencies that the Fourier transform samples
    power: numpy.ndarray
        The array of normalized squared absolute values of Fourier amplitudes
    power_err: numpy.ndarray
        The uncertainties of ``power``, approximately ``power / sqrt(m)``
        where ``m`` is the number of powers averaged in each bin. For a
        single realization (``m=1``) the error equals the power.
    df: float
        The frequency resolution
    m: int
        The number of averaged periodograms
    n: int
        The number of data points in the light curve
    nphots: float
        The total number of photons in the light curve
    """

    def __init__(self, lc=None, segment_size=None, norm="frac", gti=None):
        self.type = "powerspectrum"

        # A light curve without a segment size cannot be averaged.
        if lc is not None and segment_size is None:
            raise ValueError("segment_size must be specified")
        if segment_size is not None and not np.isfinite(segment_size):
            raise ValueError("segment_size must be finite!")

        self.segment_size = segment_size
        Powerspectrum.__init__(self, lc, norm, gti=gti)

    def _make_segment_spectrum(self, lc, segment_size):
        """
        Split the light curve into segments of size ``segment_size`` and
        calculate a power spectrum for each.

        Parameters
        ----------
        lc : :class:`stingray.Lightcurve` object
            The input light curve

        segment_size : ``numpy.float``
            Size of each light curve segment to use for averaging.

        Returns
        -------
        power_all : list of :class:`Powerspectrum` objects
            Power spectra calculated independently from each segment
        nphots_all : list
            The number of photons in each segment
        """
        if not isinstance(lc, lightcurve.Lightcurve):
            raise TypeError("lc must be a lightcurve.Lightcurve object")

        # Fall back on the light curve's own GTIs when none were supplied.
        if self.gti is None:
            self.gti = lc.gti
        check_gtis(self.gti)

        start_inds, end_inds = bin_intervals_from_gtis(
            self.gti, segment_size, lc.time, dt=lc.dt)

        power_all = []
        nphots_all = []
        for start, stop in zip(start_inds, end_inds):
            segment = lightcurve.Lightcurve(
                lc.time[start:stop],
                lc.counts[start:stop],
                err=lc.counts_err[start:stop],
                err_dist=lc.err_dist.lower())
            power_all.append(Powerspectrum(segment, norm=self.norm))
            nphots_all.append(np.sum(segment.counts))

        return power_all, nphots_all
class DynamicalPowerspectrum(AveragedPowerspectrum):
    """
    Create a dynamical power spectrum, also often called a *spectrogram*.

    This class will divide a :class:`Lightcurve` object into segments of
    length ``segment_size``, create a power spectrum for each segment and
    store all powers in a matrix as a function of both time (using the
    mid-point of each segment) and frequency.

    This is often used to trace changes in period of a (quasi-)periodic
    signal over time.

    Parameters
    ----------
    lc : :class:`stingray.Lightcurve` object
        The time series of which the dynamical power spectrum is
        to be calculated.

    segment_size : float, default 1
        Length of the segment of light curve, default value is 1 (in whatever
        units the ``time`` array in the :class:`Lightcurve` object uses).

    norm: {``leahy`` | ``frac`` | ``abs`` | ``none``}, optional, default ``frac``
        The normalization of the periodogram to be used.

    Other Parameters
    ----------------
    gti: 2-d float array
        ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
        This choice overrides the GTIs in the single light curves. Use with
        care!

    Attributes
    ----------
    segment_size: float
        The size of each segment to average. If the total duration of the
        light curve is not an integer multiple of ``segment_size``, any
        fraction left over at the end is lost.
    dyn_ps : np.ndarray
        The matrix of normalized squared absolute values of Fourier
        amplitudes. The axes are given by the ``freq`` and ``time`` attributes.
    norm: {``leahy`` | ``frac`` | ``abs`` | ``none``}
        the normalization of the periodogram
    freq: numpy.ndarray
        The array of mid-bin frequencies that the Fourier transform samples
    df: float
        The frequency resolution
    dt: float
        The time resolution
    """

    def __init__(self, lc, segment_size, norm="frac", gti=None):
        # A segment must hold at least two time bins to form a spectrum,
        # and must fit inside the light curve at least once.
        if segment_size < 2 * lc.dt:
            raise ValueError("Length of the segment is too short to form a "
                             "light curve!")
        elif segment_size > lc.tseg:
            raise ValueError("Length of the segment is too long to create "
                             "any segments of the light curve!")
        AveragedPowerspectrum.__init__(self, lc=lc,
                                       segment_size=segment_size, norm=norm,
                                       gti=gti)
        self._make_matrix(lc)

    def _make_matrix(self, lc):
        """
        Create a matrix of powers for each time step (rows) and each
        frequency step (columns).

        Parameters
        ----------
        lc : :class:`Lightcurve` object
            The :class:`Lightcurve` object from which to generate the
            dynamical power spectrum.
        """
        ps_all, _ = AveragedPowerspectrum._make_segment_spectrum(
            self, lc, self.segment_size)
        self.dyn_ps = np.array([ps.power for ps in ps_all]).T

        self.freq = ps_all[0].freq
        start_inds, end_inds = \
            bin_intervals_from_gtis(self.gti, self.segment_size, lc.time,
                                    dt=lc.dt)

        # Time stamp of each column is the mid-point of its segment.
        tstart = lc.time[start_inds]
        tend = lc.time[end_inds]
        self.time = tstart + 0.5 * (tend - tstart)

        # Assign length of lightcurve as time resolution if only one value
        if len(self.time) > 1:
            self.dt = self.time[1] - self.time[0]
        else:
            # NOTE(review): lc.n is the *number of bins*, not a duration —
            # this looks suspicious as a time resolution; confirm intent.
            self.dt = lc.n

        # Assign biggest freq. resolution if only one value
        if len(self.freq) > 1:
            self.df = self.freq[1] - self.freq[0]
        else:
            self.df = 1 / lc.n

    def rebin_frequency(self, df_new, method="sum"):
        """
        Rebin the dynamical power spectrum to a new frequency resolution.
        Rebinning is an in-place operation, i.e. will replace the existing
        ``dyn_ps`` attribute.

        While the new resolution need not be an integer multiple of the
        previous frequency resolution, be aware that if it is not, the last
        bin will be cut off by the fraction left over by the integer division.

        Parameters
        ----------
        df_new: float
            The new frequency resolution of the dynamical power spectrum.
            Must be larger than the frequency resolution of the old one!

        method: {``sum`` | ``mean`` | ``average``}, optional, default ``sum``
            Whether the counts in the new bins should be summed or averaged.
        """
        dynspec_new = []
        for data in self.dyn_ps.T:
            freq_new, bin_counts, bin_err, _ = \
                utils.rebin_data(self.freq, data, dx_new=df_new,
                                 method=method)
            dynspec_new.append(bin_counts)

        self.freq = freq_new
        self.dyn_ps = np.array(dynspec_new).T
        self.df = df_new

    def trace_maximum(self, min_freq=None, max_freq=None, sigmaclip=False):
        """
        Return the indices of the maximum powers in each segment
        :class:`Powerspectrum` between specified frequencies.

        Parameters
        ----------
        min_freq: float, default ``None``
            The lower frequency bound.

        max_freq: float, default ``None``
            The upper frequency bound.

        Returns
        -------
        max_positions : np.array
            The array of indices (into ``freq``) of the maximum power in each
            segment having frequency between ``min_freq`` and ``max_freq``.
        """
        if min_freq is None:
            min_freq = np.min(self.freq)
        if max_freq is None:
            max_freq = np.max(self.freq)

        # The frequency mask is segment-independent: build it once, outside
        # the loop.
        band = np.flatnonzero((min_freq <= self.freq) &
                              (self.freq <= max_freq))

        max_positions = []
        for ps in self.dyn_ps.T:
            # Fix: take the argmax *within* the band and map it back to the
            # full frequency grid. The previous implementation searched the
            # whole array for a power equal to the in-band maximum
            # (np.where(ps == max_power)[0][0]), which could return an index
            # outside [min_freq, max_freq] when the same value occurs there.
            max_positions.append(band[np.argmax(ps[band])])
        return np.array(max_positions)

    def rebin_time(self, dt_new, method='sum'):
        """
        Rebin the dynamical power spectrum to a new time resolution.
        Rebinning is an in-place operation, replacing the ``time``, ``dyn_ps``
        and ``dt`` attributes.

        While the new resolution need not be an integer multiple of the
        previous time resolution, be aware that if it is not, the last bin
        will be cut off by the fraction left over by the integer division.

        Parameters
        ----------
        dt_new: float
            The new time resolution of the dynamical power spectrum.
            Must be larger than the time resolution of the old one!

        method: {"sum" | "mean" | "average"}, optional, default "sum"
            Whether the counts in the new bins should be summed or averaged.

        Raises
        ------
        ValueError
            If ``dt_new`` is smaller than the current time resolution.
        """
        if dt_new < self.dt:
            raise ValueError("New time resolution must be larger than "
                             "old time resolution!")

        dynspec_new = []
        for data in self.dyn_ps:
            time_new, bin_counts, bin_err, _ = \
                utils.rebin_data(self.time, data, dt_new,
                                 method=method)
            dynspec_new.append(bin_counts)

        self.time = time_new
        self.dyn_ps = np.array(dynspec_new)
        self.dt = dt_new
|
Gambellator/Bulk-DHCP-Client-Tester | samples/test-no-renew.py | import sys
import time
import logging
import random
import progressbar
import dhcpclient.dhcpclient as dhcp
from dhcpclient.utilities import RandomMac
# Test configuration: one DHCP client with renewals disabled.
AVC = 'AVC999904444401'  # option-82 subscriber identifier inserted in requests
VLAN = [2001]            # 802.1Q tag(s) pushed onto every frame
INTERFACE = 'em4'        # network interface the clients bind to
SESSIONS = 1             # number of concurrent DHCP clients to simulate

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(__name__)
    rando_mac = RandomMac()
    subscriber_list = []
    try:
        # Create all clients up front (each with a unique random MAC) before
        # starting any of them, so startup time is not counted against CPS.
        logger.info("Starting DHCP clients.")
        bar = progressbar.ProgressBar()
        for i in bar(range(1, SESSIONS + 1)):
            a = dhcp.DHCPClient(INTERFACE, rando_mac.get_mac(), vlan_tags=VLAN, option82=AVC, dsl_sub_options=[12000, 450000], no_renew=True)
            subscriber_list.append(a)
            # while True:
            #     if a.threadid:
            #         continue
        logger.info("Services loaded starting clients.")
        for a in subscriber_list:
            a.start_server()
            # Small stagger between client starts to avoid a thundering herd.
            time.sleep(0.01)
        # Poll once per second, counting bound clients; the running counts
        # accumulate in cps until every client is bound.
        cps = []
        while True:
            time.sleep(1)
            i = 0
            for a in subscriber_list:
                #print a.hostname, a.server_status(), a.ciaddr, a.state.state, a.threadid
                if a.server_status() == "Bound":
                    i += 1
            if i < len(subscriber_list):
                cps.append(i)
            else:
                logger.info('CPS list = %s', cps)
                logger.info('!!!!!!!!!!!! ---- We have %s sessions up', i)
    except KeyboardInterrupt:
        # Ctrl-C: ask every client to release its lease and stop, then wait
        # (newest first) until each reports "Stopped".
        for a in subscriber_list:
            print a.server_status()
            a.stop_server()
        last_number = len(subscriber_list)
        print "Closing sessions: {0}".format(last_number)
        while True:
            if len(subscriber_list) == 0:
                sys.exit()
            if subscriber_list[-1].server_status() == "Stopped":
                subscriber_list.pop()
|
Gambellator/Bulk-DHCP-Client-Tester | dhcpclient/dhcpclient.py | <gh_stars>0
"""dhcp.py"""
import logging
import string
import time
import binascii
from random import randint, choice
from scapy.config import conf
#conf.use_pcap = True
conf.verb = 0
from scapy.arch import linux, pcapdnet
from scapy.arch.pcapdnet import *
#conf.L3socket = linux.L2Socket
from scapy.all import Ether, Dot1Q, IP, UDP, BOOTP, DHCP
from scapy.automaton import *
import packetqueue
# NOTE(review): 'global' at module level is a no-op; kept for byte-compat.
global PACKET_QUEUE
# Shared packet dispatcher, lazily created by the first DHCPClient.
PACKET_QUEUE = None
# Level 1 is below DEBUG: effectively enables all scapy log records.
logging.getLogger("scapy").setLevel(1)
logger = logging.getLogger(__name__)
class DHCPClient(Automaton):
    '''Scapy Automaton implementing a DHCP client state machine:
    Init -> Selecting -> Requesting -> Bound -> Renewing/Rebinding,
    with optional 802.1Q VLAN tagging and DHCP option 82 insertion.
    '''
    BROADCAST_MAC = 'ff:ff:ff:ff:ff:ff'
    BROADCAST_IP = '255.255.255.255'
    DEFAULT_IP = '0.0.0.0'
    BOOTCLIENT = 68  # UDP source port for client messages
    BOOTSERVER = 67  # UDP destination port for server messages
    # Upper bound (seconds) for the random renew/rebind back-off multiplier.
    # NOTE(review): name kept (typo included) for backward compatibility.
    DEBUF_RENEW_TIME = 30

    def __setattr__(self, name, value):
        # Trace every attribute change; useful when debugging the automaton.
        logger.debug("Value: %s updated to: %s", name, value)
        super(DHCPClient, self).__setattr__(name, value)

    @staticmethod
    def mac_decode(mac_address):
        '''Strip . and : from a MAC address and return its raw bytes.'''
        new_mac = mac_address.replace(":", "").replace(".", "")
        logger.debug("Stripped mac_address, old: %s new: %s", mac_address, new_mac)
        return new_mac.decode('hex')

    @staticmethod
    def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
        '''Return a random alphanumeric identifier (used as a hostname).'''
        return ''.join(choice(chars) for _ in range(size))

    @staticmethod
    def random_int():
        '''Random 32-bit unsigned integer, used as the DHCP xid.'''
        return randint(0, 2**32-1)

    @staticmethod
    def pad_zero(data):
        '''Left-pad a hex string to two characters.'''
        if len(data) < 2:
            data = '0' + data
        return data

    @staticmethod
    def encode_string(string_data):
        '''Encode a string as length-prefixed hex (TLV-style payload).'''
        temp = []
        for char in string_data:
            new_hex = '{:x}'.format(ord(char))
            temp.append(DHCPClient.pad_zero(new_hex))
        length = DHCPClient.pad_zero('{:x}'.format(len(temp)))
        return length + ''.join(temp)

    def start_server(self):
        '''Run the automaton in a background thread.'''
        self.runbg()

    def stop_server(self):
        '''Request a graceful stop (release the lease if bound).'''
        self.stop_state = True
        # NOTE(review): PACKET_QUEUE is shared by all clients; stopping it
        # here stops packet dispatch for every client — confirm intent.
        PACKET_QUEUE.stop()

    def server_status(self):
        '''Return the current client state string (e.g. "Bound").'''
        return self.server_state

    def _server_update_state(self, server_state):
        self._state_update_parent(server_state)
        self.server_state = server_state

    def _state_update_parent(self, server_state):
        ''' Override with parent class method to update state'''
        pass

    def parse_args(self, interface, mac_address, hostname=None, broadcast=False,
                   early_renew=0, early_rebind=0,
                   no_renew=False, quick_start=False, dhcp_options=None,
                   vlan_tags=[], option82=None, dsl_sub_options=None, debug=100, **kargs):
        # Fix: the 'global' declaration must come before PACKET_QUEUE is
        # referenced or assigned; the old placement (after the assignment)
        # is a SyntaxWarning on Python 2 and a SyntaxError on Python 3.
        global PACKET_QUEUE
        self.send_socket_kargs = {}
        Automaton.parse_args(self, **kargs)
        self.debug_level = 2
        #self.socket_kargs["ll"] = conf.L2socket
        self.interface = interface
        conf.iface = self.interface
        # Lazily create the shared packet dispatcher on first client creation.
        if not PACKET_QUEUE:
            PACKET_QUEUE = packetqueue.PacketQueue(iface=conf.iface)
        self.send_sock_class = conf.L2socket
        self.recv_sock_class = packetqueue.DHCPListenSocket
        #self.send_sock_class = pcapdnet.L2pcapSocket
        #self.recv_sock_class = pcapdnet.L2pcapListenSocket
        self.send_socket_kargs['iface'] = self.interface
        self.mac_address = mac_address
        self.hostname = hostname
        self.broadcast = broadcast
        self.early_renew = early_renew
        self.early_rebind = early_rebind
        self.no_renew = no_renew
        self.quick_start = quick_start
        # Fix: avoid mutable default arguments shared across instances.
        self.dhcp_options = [] if dhcp_options is None else dhcp_options
        self.vlan_tags = [] if vlan_tags is None else vlan_tags
        self.option82 = option82
        self.dsl_sub_options = [] if dsl_sub_options is None else dsl_sub_options
        if not self.hostname:
            self.hostname = DHCPClient.id_generator()
        self.logger = logging.getLogger(self.hostname)
        self.xid = 0
        self.flags = 0
        self.t1 = 0
        self.t2 = 0
        self.siaddr = '0.0.0.0'
        self.yiaddr = '0.0.0.0'
        self.ciaddr = '0.0.0.0'
        self.renew_attempts = 0
        self.rebind_attempts = 0
        self.stop_state = False
        self.server_state = 'Stopped'
        if self.broadcast:
            self.flags = 32768  # BOOTP broadcast flag (bit 15)
        self.raw_mac = DHCPClient.mac_decode(self.mac_address)
        self.logger.debug("Timeout for states are: %s", self.timeout)

    def my_send(self, pkt):
        self.send_sock.send(pkt)

    def master_filter(self, pkt):
        '''Accept only BOOTP replies addressed to our transaction id.'''
        return ( Ether in pkt and pkt[Ether].src != self.mac_address and (BOOTP in pkt and pkt[BOOTP].xid == self.xid) )

    def get_dot1q(self, vlan):
        return Dot1Q(vlan=vlan)

    def get_option82(self):
        '''Build the DHCP option 82 (relay agent information) payload, if
        configured, including DSL sync-rate sub-options.'''
        send = False
        if self.option82:
            hex_subscriber_id = binascii.unhexlify('01' + DHCPClient.encode_string(self.option82))
            hex_remote_id = binascii.unhexlify('02' + DHCPClient.encode_string('BRASTEST'))
            send = True
        else:
            hex_subscriber_id = ''
            hex_remote_id = ''
        if len(self.dsl_sub_options) == 2:
            sup_option_header = binascii.unhexlify('0911' + '{0:08X}'.format(3561) + '0C')
            actual_up = binascii.unhexlify('8104' + '{0:08X}'.format(self.dsl_sub_options[0]))
            actual_down = binascii.unhexlify('8204' + '{0:08X}'.format(self.dsl_sub_options[1]))
            send = True
        else:
            sup_option_header = ''
            actual_up = ''
            actual_down = ''
        if send:
            return [('relay_agent_Information', hex_subscriber_id + hex_remote_id + sup_option_header + actual_up + actual_down)]
        return []

    def dhcp_add_options(self, header_options):
        '''Combine header options, configured extra options and option 82
        into a DHCP layer terminated by "end".'''
        self.logger.debug("dhcp options ")
        try:
            full_options = header_options + self.dhcp_options + self.get_option82() + ['end']
        except:
            self.logger.exception("dhcp_options what!")
        self.logger.debug("dhcp options %s", full_options)
        return DHCP(options=full_options)

    def get_l2_transport(self, src_mac, dst_mac):
        '''Ethernet header with the configured VLAN tag stack appended.'''
        ethernet = Ether(src=src_mac, dst=dst_mac)
        for vlan in self.vlan_tags:
            ethernet = ethernet / self.get_dot1q(vlan)
        return ethernet

    def get_transport(self, src_mac, dst_mac, src_ip, dst_ip):
        '''Full Ether/Dot1Q/IP/UDP transport for a BOOTP message.'''
        ethernet = self.get_l2_transport(src_mac, dst_mac)
        ip_header = IP(src=src_ip, dst=dst_ip)
        udp_header = UDP(sport=self.BOOTCLIENT, dport=self.BOOTSERVER)
        return ethernet/ip_header/udp_header

    # State machine.
    #INIT - Init
    @ATMT.state(initial=1)
    def Init(self):
        '''Send a DISCOVER (after an optional random desync delay).'''
        if self.stop_state: raise self.unbound_end()
        self._server_update_state("Unbound")
        self.logger.info("DHCP Client started for MAC %s", self.mac_address)
        l2_transport = self.get_transport(self.mac_address,
                                          self.BROADCAST_MAC,
                                          self.DEFAULT_IP,
                                          self.BROADCAST_IP)
        self.xid = DHCPClient.random_int()
        self.logger.info("XID set to: %s", self.xid)
        self.listen_sock = packetqueue.DHCPListenSocket(xid=self.xid, packet_queue_class=PACKET_QUEUE)
        if self.quick_start:
            logging.debug("Quick startup enabled, skipping random desync")
        else:
            desync_time = randint(1, 30)
            logging.debug("Waiting for desync time to expire in %ss", desync_time)
            time.sleep(desync_time)
            logging.debug("desync time expired, Sending Discover")
        bootp_header = BOOTP(flags=self.flags, chaddr=self.raw_mac, xid=self.xid)
        dhcp_header = self.dhcp_add_options([('message-type', 'discover')])
        packet = l2_transport/bootp_header/dhcp_header
        self.logger.info("Sending Discover: %s", packet.sprintf('%Ether.src% > %Ether.dst% %Dot1Q.vlan% %IP.src% > %IP.dst% %BOOTP.xid%'))
        self.logger.debug("Sending Discover: %s", packet.show(dump=True))
        self.send(packet)
        raise self.Selecting()

    @ATMT.state()
    def Rebooting(self):
        '''Reset addressing state and restart from Init.'''
        self.siaddr = '0.0.0.0'
        self.yiaddr = '0.0.0.0'
        self.ciaddr = '0.0.0.0'
        raise self.Init()

    #SELECTING - Selecting
    @ATMT.state()
    def Selecting(self):
        self.logger.info("Moved to state Selecting")

    @ATMT.timeout(Selecting, 15)
    def Selecting_timeout(self):
        self.logger.info("No repsonse back in 15 seconds heading back to Init state")
        raise self.Init()

    @ATMT.state()
    def Requesting(self):
        '''Send a REQUEST for the offered address.'''
        self.logger.info("Moved to state Requesting")
        l2_transport = self.get_transport(self.mac_address,
                                          self.BROADCAST_MAC,
                                          self.DEFAULT_IP,
                                          self.BROADCAST_IP)
        bootp_header = BOOTP(flags=self.flags, chaddr=self.raw_mac, xid=self.xid)
        dhcp_header = DHCP(options=[("message-type", "request"),
                                    ("server_id", self.siaddr),
                                    ("requested_addr", self.yiaddr),
                                    ("hostname", self.hostname),
                                    ("param_req_list", "pad"),
                                    "end"])
        for option in self.dhcp_options:
            dhcp_header.options.append(option)
        packet = l2_transport/bootp_header/dhcp_header
        self.logger.info("Requesting: %s", packet.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
        self.logger.debug("Requesting: %s", packet.show(dump=True))
        self.send(packet)

    @ATMT.state()
    def Bound(self):
        '''Hold the lease, polling once per second for renew/rebind/expiry.'''
        self._server_update_state("Bound")
        self.logger.info("Moved to state Bound with ip: %s", self.ciaddr)
        time_now = time.time()
        while time_now < self.lease_expire_time:
            if self.stop_state: raise self.bound_end()
            # NOTE(review): with no_renew=True and broadcast=False this
            # condition is still True (renewals happen) — confirm whether
            # 'not (self.broadcast or self.no_renew)' was intended.
            if not self.broadcast or not self.no_renew:
                if self.early_renew > 0 and self.early_renew < self.t1:
                    if time_now > self.early_renew_expire_time:
                        raise self.Renewing()
                if time_now > self.t1_expire_time:
                    raise self.Renewing()
                if time_now > self.t2_expire_time:
                    raise self.Rebinding()
            elif (self.early_rebind > 0 and self.early_rebind < self.t2) and time_now > self.early_rebind_expire_time:
                raise self.Rebinding()
            time.sleep(1)
            time_now = time.time()
        raise self.Rebooting()

    @ATMT.state()
    def Renewing(self):
        '''Unicast a renewal REQUEST to the serving DHCP server.'''
        self.logger.info("Moved to state Renewing")
        # Linear back-off: multiplier grows with each failed attempt.
        back_off_time = randint(1, self.DEBUF_RENEW_TIME) * self.renew_attempts
        self.logger.info("Backing off %ss", back_off_time)
        time.sleep(back_off_time)
        l2_transport = self.get_transport(self.mac_address,
                                          self.server_mac,
                                          self.yiaddr,
                                          self.siaddr)
        bootp_header = BOOTP(flags=self.flags, ciaddr=self.yiaddr, chaddr=self.raw_mac, xid=self.xid)
        dhcp_header = DHCP(options=[("message-type", "request"),
                                    ("hostname", self.hostname),
                                    "end"])
        packet = l2_transport/bootp_header/dhcp_header
        self.logger.info("Renewing: %s", packet.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
        self.logger.debug("Renewing: %s", packet.show(dump=True))
        self.send(packet)
        self.renew_attempts += 1

    @ATMT.state()
    def Rebinding(self):
        '''Broadcast a rebind REQUEST after T2 expires.'''
        self.logger.info("Moved to state Rebinding")
        back_off_time = randint(1, self.DEBUF_RENEW_TIME) * self.rebind_attempts
        self.logger.debug("Backing off %ss", back_off_time)
        time.sleep(back_off_time)
        l2_transport = self.get_transport(self.mac_address,
                                          self.BROADCAST_MAC,
                                          self.yiaddr,
                                          self.BROADCAST_IP)
        bootp_header = BOOTP(flags=self.flags, ciaddr=self.yiaddr, chaddr=self.raw_mac, xid=self.xid)
        dhcp_header = DHCP(options=[("message-type", "request"),
                                    ("hostname", self.hostname),
                                    "end"])
        packet = l2_transport/bootp_header/dhcp_header
        self.logger.info("Rebinding: %s", packet.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
        self.logger.debug("Rebinding: %s", packet.show(dump=True))
        self.send(packet)
        self.rebind_attempts += 1

    @ATMT.timeout(Requesting, 30)
    def Requesting_timeout(self):
        # Fix: the log message said "10 seconds" while the timeout is 30.
        self.logger.info("No response back in 30 seconds heading back to Init state")
        raise self.Init()

    @ATMT.timeout(Renewing, 5)
    def waiting_renewing_response_timeout(self):
        self.logger.info("No repsonse back in 5 seconds heading back to Bound state")
        raise self.Bound()

    @ATMT.timeout(Rebinding, 5)
    def waiting_rebinding_response_timeout(self):
        self.logger.info("No repsonse back in 5 seconds heading back to Bound state")
        raise self.Bound()

    # State conditions and actions.
    @ATMT.receive_condition(Selecting)
    def received_offer(self, pkt):
        self.last_pkt = pkt
        self.logger.debug("Selecting condition")
        raise self.Requesting()

    @ATMT.receive_condition(Requesting)
    def recieved_packet_request(self, pkt):
        self.last_pkt = pkt
        raise self.Bound()

    @ATMT.receive_condition(Bound)
    def recieved_packet_bound(self, pkt):
        self.last_pkt = pkt
        raise self.Bound()

    @ATMT.receive_condition(Renewing)
    def recieved_packet_renewing(self, pkt):
        self.last_pkt = pkt
        raise self.Bound()

    @ATMT.receive_condition(Rebinding)
    def recieved_packet_rebinding(self, pkt):
        self.last_pkt = pkt
        raise self.Bound()

    @ATMT.action(received_offer)
    @ATMT.action(recieved_packet_request)
    @ATMT.action(recieved_packet_bound)
    @ATMT.action(recieved_packet_renewing)
    @ATMT.action(recieved_packet_rebinding)
    def recieved_packet(self):
        '''Dispatch on the DHCP message type of the last received packet:
        OFFER -> Requesting, ACK -> Bound (timers refreshed), NAK -> Rebooting.'''
        pkt = self.last_pkt
        if (UDP in pkt and BOOTP in pkt):
            self.logger.info("recieved_packet: %s", pkt.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
            self.logger.debug("recieved_packet: %s", pkt.show(dump=True))
            if pkt[BOOTP].xid != self.xid:
                self.logger.warning("XID does not match! going to Init state, packet=%s, us=%s", pkt[BOOTP].xid, self.xid)
            elif ("message-type", 2) in pkt[DHCP].options: # OFFER
                self.siaddr = pkt[BOOTP].siaddr
                self.yiaddr = pkt[BOOTP].yiaddr
                self.server_mac = pkt[Ether].src
                # Prefer the explicit server_id option over the BOOTP siaddr.
                for opt in pkt[DHCP].options:
                    if opt[0] == 'server_id':
                        self.siaddr = opt[1]
                raise self.Requesting()
            elif ("message-type", 5) in pkt[DHCP].options: # ACK
                time_now = time.time()
                self.ciaddr = self.yiaddr
                for opt in pkt[DHCP].options:
                    if opt[0] == 'renewal_time':
                        self.t1 = int(opt[1])
                    elif opt[0] == 'rebinding_time':
                        self.t2 = int(opt[1])
                    elif opt[0] == 'lease_time':
                        self.lease_time = int(opt[1])
                self.t1_expire_time = time_now + self.t1
                self.early_renew_expire_time = time_now + self.early_renew
                self.t2_expire_time = time_now + self.t2
                self.lease_expire_time = time_now + self.lease_time
                self.early_rebind_expire_time = time_now + self.early_rebind
                self.rebind_attempts = 0
                self.renew_attempts = 0
                raise self.Bound()
            elif ("message-type", 6) in pkt[DHCP].options: # NACK
                self.logger.info("Got NACK Rebooting")
                # Fix: was self._update_state(), a method that does not
                # exist (AttributeError on every NAK).
                self._server_update_state("Unbound")
                raise self.Rebooting()
        self.logger.error("Packet was fucked")

    @ATMT.state()
    def bound_end(self):
        '''Release the lease (DHCPRELEASE) then terminate.'''
        self.logger.debug("Moved to state Bounded Ending")
        l2_transport = self.get_transport(self.mac_address,
                                          self.server_mac,
                                          self.yiaddr,
                                          self.siaddr)
        bootp_header = BOOTP(flags=self.flags, ciaddr=self.yiaddr, chaddr=self.raw_mac, xid=self.xid)
        dhcp_header = DHCP(options=[("message-type", "release"),
                                    ("hostname", self.hostname),
                                    "end"])
        packet = l2_transport/bootp_header/dhcp_header
        self.logger.info("Bound Ending: %s", packet.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
        self.logger.debug("Bound End: %s", packet.show(dump=True))
        self.send(packet)
        raise self.END()

    @ATMT.state()
    def unbound_end(self):
        raise self.END()

    @ATMT.state(final=1)
    def END(self):
        self._server_update_state("Stopped")
        self.logger.info("Client stopped")
if __name__ == "__main__":
    # Manual smoke test: run one client on the interface/MAC given on the
    # command line until interrupted with Ctrl-C.
    import sys
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    a = DHCPClient(sys.argv[1], sys.argv[2], quick_start=True, vlan_tags=[2001], option82='AVC999904444404')
    try:
        a.start_server()
        # NOTE(review): busy-wait pins a CPU core; a time.sleep() here
        # would behave the same without the spin.
        while True:
            pass
    except KeyboardInterrupt:
        a.stop_server()
        # Wait for the automaton thread to report a clean shutdown.
        while True:
            if a.server_status() == "Stopped":
                sys.exit()
|
Gambellator/Bulk-DHCP-Client-Tester | setup.py | <filename>setup.py
# -*- coding: utf-8 -*-
#
from setuptools import setup, find_packages

# Long description and license text are read from the repository files.
with open('README.md') as f:
    readme = f.read()

with open('LICENSE') as f:
    license_text = f.read()  # renamed: 'license' shadows the builtin

setup(
    name='dhcpclient',
    version='0.0.1',
    description='Bulk DHCP client tester',
    long_description=readme,
    author='Gambellator',
    author_email='<EMAIL>',
    url='https://github.com/Gambellator/Bulk-DHCP-Client-Tester',
    license=license_text,
    # Fix: exclude must be an iterable of patterns. ('samples') is just the
    # string 'samples', which find_packages would iterate character by
    # character, excluding nothing useful.
    packages=find_packages(exclude=('samples',))
)
|
Gambellator/Bulk-DHCP-Client-Tester | dhcpclient/utilities.py | '''
Some helpful utlity functions and classes.
'''
import random
class RandomMac(object):
    """Generate random, never-repeating MAC addresses in the 34:34:34 prefix."""

    def __init__(self):
        # Every address handed out so far, so duplicates are never returned.
        self.used_macs = set()

    def get_mac(self):
        """Return a random MAC address not previously returned by this object."""
        while True:
            candidate = self._random_mac()
            if candidate not in self.used_macs:
                self.used_macs.add(candidate)
                return candidate

    def _random_mac(self):
        # Three random lowercase, zero-padded hex octets after the fixed prefix.
        octets = ['{0:02x}'.format(random.randint(0, 255)) for _ in range(3)]
        return '34:34:34:{0}:{1}:{2}'.format(*octets)
|
Gambellator/Bulk-DHCP-Client-Tester | dhcpclient/packetqueue.py | import socket
import struct
import os
import array
import Queue
import threading
import logging
from scapy.all import ETH_P_ALL
from scapy.all import select
from scapy.all import MTU
from scapy.config import conf
from scapy.all import Ether, Dot1Q, IP, UDP, BOOTP, DHCP
class PacketQueue(object):
    '''
    PacketQueue class

    Listens to raw sockets; PacketQueue clients can join using their
    particular DHCP XID, which must be unique. Both the receiver
    and the disperser run as separate threads: the receiver drains the
    raw socket into an internal queue, and the disperser decodes each
    packet and hands it to the client registered for its BOOTP xid.
    '''
    def __init__(self, iface=None, queue_size=0):
        '''
        @iface=str        interface to bind the raw socket to (default: conf.iface)
        @queue_size=int   maximum queue depth; 0 means unbounded
        '''
        self.iface = conf.iface if iface is None else iface
        self.packet_queue = Queue.Queue(queue_size)
        self.pack_reciever_threadid = None
        self.pack_disperser_threadid = None
        self.stop_queuing = False
        # xid -> registered client object (must expose .write(pkt)).
        self.register_queue = {}
        # Raw L2 socket capturing every ethertype on the interface.
        self.ins = socket.socket(socket.AF_PACKET,
                                 socket.SOCK_RAW,
                                 socket.htons(ETH_P_ALL))
        # Large kernel receive buffer so bursts are not dropped.
        self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
        self.ins.bind((self.iface, ETH_P_ALL))
        self.start()

    def start(self):
        '''
        Starts the packet_receiver and packet_disperser threads.
        '''
        self.stop_queuing = False
        # Events ensure each thread has actually started before returning.
        ready = threading.Event()
        threading.Thread(target=self.packet_receiver, args=(ready,)).start()
        ready.wait()
        steady = threading.Event()
        threading.Thread(target=self.packet_disperser, args=(steady,)).start()
        steady.wait()

    def stop(self):
        # NOTE(review): the disperser blocks on packet_queue.get() with no
        # timeout, so it may not observe this flag until one more packet
        # arrives — confirm shutdown behavior.
        self.stop_queuing = True

    def packet_receiver(self, ready, *args, **kargs):
        '''Thread body: read raw frames and enqueue them for dispersal.'''
        self.pack_reciever_threadid = threading.currentThread().ident
        ready.set()
        while True:
            if self.stop_queuing:
                break
            pkt, sa_ll = self.ins.recvfrom(MTU)
            # Skip frames we transmitted ourselves.
            if sa_ll[2] == socket.PACKET_OUTGOING:
                continue
            self.packet_queue.put((pkt, sa_ll))
        self.pack_reciever_threadid = None

    def packet_disperser(self, steady, *args, **kargs):
        '''Thread body: decode queued frames with scapy and dispatch by xid.'''
        self.pack_disperser_threadid = threading.currentThread().ident
        steady.set()
        while True:
            if self.stop_queuing:
                break
            try:
                pkt, sa_ll = self.packet_queue.get()
            except:
                break
            # Pick the scapy dissector class from the link-layer metadata.
            if sa_ll[3] in conf.l2types:
                cls = conf.l2types[sa_ll[3]]
            elif sa_ll[1] in conf.l3types:
                cls = conf.l3types[sa_ll[1]]
            else:
                cls = conf.default_l2
            try:
                pkt = cls(pkt)
            except KeyboardInterrupt:
                raise
            except:
                # Dissection failure: fall back to an undecoded raw layer.
                if conf.debug_dissector:
                    raise
                pkt = conf.raw_layer(pkt)
            self.packet_dispersment(pkt)
        self.pack_disperser_threadid = None

    def packet_dispersment(self, pkt):
        '''Hand a decoded BOOTP packet to the client registered for its xid.'''
        if BOOTP in pkt:
            xid = pkt[BOOTP].xid
            # NOTE(review): dict.has_key is Python 2 only.
            if self.register_queue.has_key(xid):
                self.register_queue[xid].write(pkt)

    def register(self, register_key, register_class):
        '''Register a client object for a DHCP xid; raises KeyError if taken.'''
        xid = register_key
        if self.register_queue.has_key(xid):
            raise KeyError("xid in use")
        self.register_queue[xid] = register_class
        return self.register_queue[xid]
class DHCPListenSocket(object):
    '''
    Select()-able per-client receive socket. Packets pushed in by a
    PacketQueue (matched on DHCP xid) are buffered in an internal queue;
    a pipe carries one wake-up byte per packet so callers can select()
    on fileno() and then recv().
    '''
    def __init__(self, xid=None, packet_queue_class=None):
        self.rd, self.wr = os.pipe()
        self.queue = Queue.Queue()
        self.new_xid(xid=xid, packet_queue_class=packet_queue_class)

    def fileno(self):
        '''Expose the pipe's read end so select() can wait on this object.'''
        return self.rd

    def checkRecv(self):
        '''Return True if at least one packet is waiting.

        Fix: Queue.Queue has no len(), so the previous implementation
        (len(self.queue) > 0) raised TypeError whenever it was called.
        '''
        return not self.queue.empty()

    def write(self, obj):
        self.send(obj)

    def send(self, obj):
        '''Queue a packet and write one wake-up byte to the pipe.'''
        self.queue.put(obj)
        os.write(self.wr, "X")

    def new_xid(self, xid=None, packet_queue_class=None):
        '''Best-effort registration with the packet queue for this xid.'''
        try:
            packet_queue_class.register(xid, self)
        except Exception:
            # No queue supplied, or the xid is already taken: treated as
            # "not registered". (Narrowed from a bare except, which would
            # also have swallowed KeyboardInterrupt/SystemExit.)
            pass

    def recv(self, *args, **kargs):
        '''Consume one wake-up byte, then return the next packet (or None
        if nothing arrives within one second).'''
        os.read(self.rd, 1)
        try:
            return self.queue.get(timeout=1)
        except Queue.Empty:
            return None
if __name__ == "__main__":
    # Demo: sniff interface 'em4' and print every DHCP packet whose
    # BOOTP xid is 100.
    ip_sniff = PacketQueue(iface='em4')
    ip_sniff.start()
    client = DHCPListenSocket(xid=100, packet_queue_class=ip_sniff)
    try:
        while True:
            # NOTE(review): recv() may return None on timeout; .show()
            # then raises AttributeError, which the bare except below
            # treats like Ctrl-C and stops the sniffer -- confirm this
            # is intended.
            client.recv().show()
    except:
        ip_sniff.stop()
|
paw-lu/nbpreview | src/nbpreview/notebook.py | """Render the notebook."""
import dataclasses
import pathlib
import typing
from dataclasses import InitVar
from pathlib import Path
from typing import IO, Any, AnyStr, Iterator, List, Optional, Tuple, Type, Union
import nbformat
from click.utils import KeepOpenFile
from nbformat.notebooknode import NotebookNode
from rich import table
from rich.console import Console, ConsoleOptions
from rich.table import Table
from nbpreview import errors
from nbpreview.component import row
from nbpreview.component.content.output.result.drawing import ImageDrawing
# terminedia depends on fcntl, which is not present on Windows platforms
try:
import terminedia # noqa: F401
except ModuleNotFoundError:
pass
# Fake KeepOpenFile used to avoid non-subscriptable error
# https://github.com/python/mypy/issues/5264
if typing.TYPE_CHECKING:  # pragma: no cover
    # Under a type checker the real class is used directly.
    KeepOpenFileType = KeepOpenFile
else:

    class _KeepOpenFile:
        """Fake click's KeepOpenFile for type checking purposes."""

        def __getitem__(self, *args: Any) -> Type[KeepOpenFile]:
            """Make the fake class subscriptable."""
            # At runtime, KeepOpenFileType[AnyStr] simply resolves to the
            # real (non-generic) class so annotations evaluate cleanly.
            return KeepOpenFile

    KeepOpenFileType = _KeepOpenFile()
def pick_option(option: Optional[bool], detector: bool) -> bool:
    """Resolve a render option against its autodetected value.

    Args:
        option (Optional[bool]): Explicit user choice which overrides
            detection; ``None`` defers to ``detector``.
        detector (bool): Terminal-derived autodetection result.

    Returns:
        bool: ``option`` when it is a boolean, otherwise ``detector``.
    """
    return detector if option is None else option
def _get_output_pad(plain: bool) -> Tuple[int, int, int, int]:
"""Return the padding for outputs.
Args:
plain (bool): Only show plain style. No decorations such as
boxes or execution counts.
Returns:
Tuple[int, int, int, int]: The padding for outputs.
"""
if plain:
return (0, 0, 0, 0)
else:
return (0, 0, 0, 1)
def _pick_image_drawing(
    option: Union[ImageDrawing, None],
    unicode: bool,
    color: bool,
) -> ImageDrawing:
    """Pick an image render option.

    Args:
        option (Union[ImageDrawing, None]): The inputted option, which
            overrides the default when not None.
        unicode (bool): Whether unicode rendering is available (not
            currently consulted by the selection logic).
        color (bool): Whether color is available (not currently
            consulted by the selection logic).

    Returns:
        ImageDrawing: The image drawing mode to use.
    """
    if option is not None:
        return option
    # "block" is too slow to offer as a sensible default, and braille
    # cannot draw negative space -- most notebooks' primary images are
    # plots with light backgrounds -- so fall back to character drawing.
    default_drawing: ImageDrawing = "character"
    return default_drawing
def _render_notebook(
    cells: List[NotebookNode],
    plain: bool,
    unicode: bool,
    hyperlinks: bool,
    theme: str,
    nerd_font: bool,
    files: bool,
    hide_hyperlink_hints: bool,
    hide_output: bool,
    language: str,
    images: bool,
    image_drawing: ImageDrawing,
    color: bool,
    negative_space: bool,
    relative_dir: Path,
    characters: Optional[str] = None,
    line_numbers: bool = False,
    code_wrap: bool = False,
) -> Table:
    """Create a table representing a notebook.

    Renders each cell's input row and, unless ``hide_output`` is set,
    each of its output rows into a rich grid table.
    """
    grid = table.Table.grid(padding=(1, 1, 1, 0))
    pad = _get_output_pad(plain)
    if not plain:
        # Decorated mode adds a right-justified column for execution
        # counts ahead of the content column.
        grid.add_column(justify="right")
    grid.add_column()
    for cell in cells:
        cell_row = row.render_input_row(
            cell,
            plain=plain,
            pad=pad,
            language=language,
            theme=theme,
            unicode_border=unicode,
            nerd_font=nerd_font,
            unicode=unicode,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            hyperlinks=hyperlinks,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
            characters=characters,
            relative_dir=relative_dir,
            line_numbers=line_numbers,
            code_wrap=code_wrap,
        )
        # render_input_row may return None for cells with nothing to
        # show; only non-None rows are added to the grid.
        if cell_row is not None:
            grid.add_row(*cell_row.to_table_row())
        outputs = cell.get("outputs")
        if not hide_output and outputs is not None:
            rendered_outputs = row.render_output_row(
                outputs,
                plain=plain,
                pad=pad,
                unicode=unicode,
                hyperlinks=hyperlinks,
                nerd_font=nerd_font,
                files=files,
                hide_hyperlink_hints=hide_hyperlink_hints,
                theme=theme,
                images=images,
                image_drawing=image_drawing,
                color=color,
                negative_space=negative_space,
                relative_dir=relative_dir,
            )
            for rendered_output in rendered_outputs:
                grid.add_row(*rendered_output.to_table_row())
    return grid
@dataclasses.dataclass()
class Notebook:
    """Construct a Notebook object to render Jupyter Notebooks.

    Args:
        notebook_node (NotebookNode): A NotebookNode of the notebook to
            render.
        theme (Optional[str]): The theme to use for syntax highlighting.
            May be "ansi_light", "ansi_dark", or any Pygments theme. By
            default "ansi_dark".
        plain (bool): Only show plain style. No decorations such as
            boxes or execution counts. By default will autodetect.
        unicode (Optional[bool]): Whether to use unicode characters to
            render the notebook. By default will autodetect.
        hide_output (bool): Do not render the notebook outputs. By
            default False.
        nerd_font (bool): Use nerd fonts when appropriate. By default
            False.
        files (bool): Create files when needed to render HTML content.
        hyperlinks (bool): Whether to use hyperlinks. If false will
            explicitly print out path.
        hide_hyperlink_hints (bool): Hide text hints of when content is
            clickable.
        images (Optional[str]): Whether to render images. If None will
            attempt to autodetect. By default None.
        image_drawing (Optional[str]): How to render images. Options are
            "block" or None. If None will attempt to autodetect. By
            default None.
        color (Optional[bool]): Whether to use color. If None will
            attempt to autodetect. By default None.
        relative_dir (Optional[Path]): The directory to prefix relative
            paths to convert them to absolute. If None will assume
            current directory is relative prefix.
        line_numbers (bool): Whether to render line numbers in code
            cells. By default False.
        code_wrap (bool): Whether to wrap code if it does not fit. By
            default False.
    """

    notebook_node: NotebookNode
    theme: str = "ansi_dark"
    plain: Optional[bool] = None
    unicode: Optional[bool] = None
    hide_output: bool = False
    nerd_font: bool = False
    files: bool = True
    negative_space: bool = True
    hyperlinks: Optional[bool] = None
    hide_hyperlink_hints: bool = False
    images: Optional[bool] = None
    image_drawing: Optional[ImageDrawing] = None
    color: Optional[bool] = None
    # InitVar: consumed by __post_init__ and not stored as a dataclass field.
    relative_dir: InitVar[Optional[Path]] = None
    line_numbers: bool = False
    code_wrap: bool = False

    def __post_init__(self, relative_dir: Optional[Path]) -> None:
        """Constructor."""
        self.cells = self.notebook_node.get("cells", nbformat.from_dict([]))
        self.relative_dir = (
            pathlib.Path().resolve() if relative_dir is None else relative_dir
        )
        try:
            self.language = self.notebook_node.metadata.kernelspec.language
        except AttributeError:
            # Notebooks without kernelspec metadata fall back to Python
            # highlighting.
            self.language = "python"

    @classmethod
    def from_file(
        cls,
        file: Union[Path, IO[AnyStr], KeepOpenFileType[AnyStr]],
        # NOTE(review): "dark" differs from the class default
        # "ansi_dark" documented above -- confirm whether this is
        # intentional before changing it.
        theme: str = "dark",
        plain: Optional[bool] = None,
        unicode: Optional[bool] = None,
        hide_output: bool = False,
        nerd_font: bool = False,
        files: bool = True,
        negative_space: bool = True,
        hyperlinks: Optional[bool] = None,
        hide_hyperlink_hints: bool = False,
        images: Optional[bool] = None,
        image_drawing: Optional[ImageDrawing] = None,
        color: Optional[bool] = None,
        line_numbers: bool = False,
        code_wrap: bool = False,
    ) -> "Notebook":
        """Create Notebook from notebook file."""
        try:
            notebook_node = nbformat.read(file, as_version=4)
        except (
            AttributeError,
            UnicodeDecodeError,  # Windows failures when reading invalid files
        ) as exception:
            raise errors.InvalidNotebookError from exception
        # Relative paths inside the notebook resolve against its parent
        # directory, or the cwd when the notebook came from stdin.
        relative_dir = (
            pathlib.Path.cwd()
            if (file_name := file.name) == "<stdin>"
            else pathlib.Path(file_name).parent
        ).resolve()
        return cls(
            notebook_node,
            theme=theme,
            plain=plain,
            unicode=unicode,
            hide_output=hide_output,
            nerd_font=nerd_font,
            files=files,
            negative_space=negative_space,
            hyperlinks=hyperlinks,
            hide_hyperlink_hints=hide_hyperlink_hints,
            images=images,
            image_drawing=image_drawing,
            color=color,
            relative_dir=relative_dir,
            line_numbers=line_numbers,
            code_wrap=code_wrap,
        )

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> Iterator[Table]:
        """Render the Notebook to the terminal.

        Args:
            console (Console): The Rich Console object.
            options (ConsoleOptions): The Rich Console options.

        Yields:
            Iterator[Table]: The rendered notebook grid.
        """
        # Resolve every None option from the console's capabilities.
        plain = pick_option(self.plain, detector=not options.is_terminal)
        unicode = pick_option(
            self.unicode, detector=not options.legacy_windows and not options.ascii_only
        )
        hyperlinks = pick_option(
            self.hyperlinks, detector=not options.legacy_windows and options.is_terminal
        )
        color = pick_option(self.color, detector=options.is_terminal)
        # Images require both an interactive terminal and color support.
        images = pick_option(self.images, detector=options.is_terminal and color)
        image_drawing = _pick_image_drawing(
            self.image_drawing, unicode=unicode, color=color
        )
        rendered_notebook = _render_notebook(
            self.cells,
            plain=plain,
            unicode=unicode,
            hyperlinks=hyperlinks,
            theme=self.theme,
            nerd_font=self.nerd_font,
            files=self.files,
            hide_hyperlink_hints=self.hide_hyperlink_hints,
            hide_output=self.hide_output,
            language=self.language,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=self.negative_space,
            relative_dir=self.relative_dir,
            line_numbers=self.line_numbers,
            code_wrap=self.code_wrap,
        )
        yield rendered_notebook
|
paw-lu/nbpreview | tests/unit/test_notebook.py | """Test cases for render."""
import dataclasses
import io
import json
import os
import pathlib
import re
import sys
import textwrap
from pathlib import Path
from typing import (
Any,
Callable,
ContextManager,
Dict,
Generator,
Optional,
Protocol,
Union,
)
from unittest.mock import Mock
import httpx
import nbformat
import pytest
from _pytest.config import _PluggyPlugin
from nbformat import NotebookNode
from pytest_mock import MockerFixture
from rich import console
from nbpreview import notebook
from nbpreview.component.content.output.result.drawing import ImageDrawing
SKIP_TERMINEDIA_REASON = (
"terminedia is used to draw the images using block characters, and"
" is not importable on some systems due to a dependency on fcntl."
)
class RichOutput(Protocol):
    """Typing protocol for _rich_notebook_output.

    Mirrors the keyword surface of the rendering fixture so test
    signatures can be annotated precisely.
    """

    def __call__(
        self,
        cell: Union[Dict[str, Any], None],
        plain: bool = False,
        theme: str = "material",
        no_wrap: bool = False,
        unicode: Optional[bool] = None,
        hide_output: bool = False,
        nerd_font: bool = False,
        files: bool = True,
        negative_space: bool = True,
        hyperlinks: bool = True,
        hide_hyperlink_hints: bool = False,
        images: Optional[bool] = None,
        image_drawing: Optional[ImageDrawing] = None,
        color: Optional[bool] = None,
        relative_dir: Optional[Path] = None,
        line_numbers: bool = False,
        code_wrap: bool = False,
    ) -> str:  # pragma: no cover
        """Callable types."""
        ...
@pytest.fixture
def adjust_for_fallback() -> Callable[[str, int], str]:
"""Fixture to automatically adjust expected outputs for fallback."""
def _adjust_for_fallback(rendered_output: str, newlines: int) -> str:
"""Add fallback text to end of output if import succeeds."""
fallback_text = newlines * f"{' ':>80}\n" + (
" \x1b[38;2;187;134"
";252mImage "
" \x1b"
"[0m\n"
)
adjusted_output = rendered_output + fallback_text
return adjusted_output
return _adjust_for_fallback
@dataclasses.dataclass
class LinkFilePathNotFoundError(Exception):
    """No hyperlink filepath found in output."""

    def __post_init__(
        self,
    ) -> None:  # pragma: no cover
        """Constructor."""
        # Give the exception a fixed human-readable message so callers
        # need not supply one.
        super().__init__("No hyperlink filepath found in output")
@pytest.fixture
def parse_link_filepath() -> Callable[[str], Path]:
    """Return a helper function for parsing filepaths from links."""

    def _parse_link_filepath(output: str) -> Path:
        """Extract the filepath of the first hyperlink in the output."""
        pattern = re.compile(r"(?:file://)(.+)(?:\x1b\\\x1b)")
        match = pattern.search(output)
        if match is None:  # pragma: no cover
            raise LinkFilePathNotFoundError()
        return pathlib.Path(match.group(1))

    return _parse_link_filepath
@pytest.fixture
def rich_notebook_output(
    rich_console: Callable[[Any, Union[bool, None]], str],
    make_notebook: Callable[[Optional[Dict[str, Any]]], NotebookNode],
) -> RichOutput:
    """Fixture returning a function that returns the rendered output.

    Args:
        rich_console (Callable[[Any, Union[bool, None]], str]): Pytest
            fixture that returns a rich console.
        make_notebook (Callable[[Optional[Dict[str, Any]]], NotebookNode]):
            A fixture that creates a notebook node.

    Returns:
        RichOutput: The output generating function.
    """

    def _rich_notebook_output(
        cell: Union[Dict[str, Any], None],
        plain: Optional[bool] = None,
        theme: str = "material",
        no_wrap: Optional[bool] = None,
        unicode: Optional[bool] = None,
        hide_output: bool = False,
        nerd_font: bool = False,
        files: bool = True,
        negative_space: bool = True,
        hyperlinks: bool = True,
        hide_hyperlink_hints: bool = False,
        images: Optional[bool] = None,
        image_drawing: Optional[Union[ImageDrawing, None]] = None,
        color: Optional[bool] = None,
        relative_dir: Optional[Path] = None,
        line_numbers: bool = False,
        code_wrap: bool = False,
    ) -> str:
        """Render the notebook containing the cell."""
        notebook_node = make_notebook(cell)
        rendered_notebook = notebook.Notebook(
            notebook_node,
            theme=theme,
            plain=plain,
            unicode=unicode,
            hide_output=hide_output,
            nerd_font=nerd_font,
            files=files,
            hyperlinks=hyperlinks,
            hide_hyperlink_hints=hide_hyperlink_hints,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            relative_dir=relative_dir,
            line_numbers=line_numbers,
            code_wrap=code_wrap,
        )
        # no_wrap is a console concern, so it goes to rich_console
        # rather than to the Notebook constructor.
        output = rich_console(rendered_notebook, no_wrap)
        return output

    return _rich_notebook_output
def test_automatic_plain(
make_notebook: Callable[[Optional[Dict[str, Any]]], NotebookNode]
) -> None:
"""It automatically renders in plain format when not a terminal."""
code_cell = {
"cell_type": "code",
"execution_count": 3,
"id": "emotional-amount",
"metadata": {},
"outputs": [],
"source": "%%bash\necho 'lorep'",
}
output_file = io.StringIO()
con = console.Console(
file=output_file,
width=80,
color_system="truecolor",
legacy_windows=False,
force_terminal=False,
)
notebook_node = make_notebook(code_cell)
rendered_notebook = notebook.Notebook(notebook_node, theme="material")
con.print(rendered_notebook)
output = output_file.getvalue()
expected_output = (
"\x1b[38;2;137;221;255;49m%%\x1b[0m\x1b[38;2;187;1"
"28;179;49mbash\x1b[0m "
" "
" \n\x1b[38;2;130;170;255;49mecho\x1b"
"[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;195"
";232;141;49m'lorep'\x1b[0m "
" "
" \n"
)
assert output == expected_output
def test_notebook_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "### Lorep ipsum\n\n**dolor** _sit_ `amet`",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" "
" "
"\n \x1b[1;38;5;37m### \x1b[0m\x1b[1;38;5;37mLorep"
" ipsum\x1b[0m\x1b[1;38;5;37m "
" "
" \x1b[0m\n "
" "
" \n \x1b[1mdolor\x1b[0m \x1b[3msit\x1b[0m \x1b"
"[97;40mamet\x1b[0m "
" \n"
)
assert output == expected_output
def test_notebook_latex_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with latex equations."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "### Lorep ipsum\nLorep ipsum doret $\\gamma$ su\n"
"\n\n$$\ny = \\alpha + \\beta x\n$$\n\nsu ro\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" "
" "
"\n \x1b[1;38;5;37m### \x1b[0m\x1b[1;38;5;37mLorep"
" ipsum\x1b[0m\x1b[1;38;5;37m "
" "
" \x1b[0m\n "
" "
" \n Lorep ipsum doret $\\gamma$ "
"su "
" \n "
" "
" \n y = α+ βx "
" "
" \n "
" "
" \n su ro "
" "
" \n"
)
assert output == expected_output
def test_notebook_latex_and_table_markdown_cell(
rich_notebook_output: RichOutput,
) -> None:
"""It renders a markdown cell with latex equations and tables."""
source = textwrap.dedent(
"""\
# Lorep ipsum
Hey
| a | b | c |
| --- | --- | --- |
| 1 | 2 | 3 |
$$
X \\sim \\mathcal{N}(\\mu,\\,\\sigma^{2})\
$$
Hear
| a | b | c |
| --- | --- | --- |
| 1 | 2 | 3 |
Ehse
$$
rmse = \\sqrt{(\frac{1}{n})\\sum_{i=1}^{n}(y_{i} - x_{i})^{2}}
$$
Fin
"""
)
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": source,
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[1;38;5;231;48;5;57m \x1b[0m\x1b[1;38;5;231"
";48;5;57mLorep ipsum\x1b[0m\x1b[1;38;5;231;48;"
"5;57m \x1b[0m\x1b[1;38;5;231;48;5;57m "
" "
" \x1b[0m\n \x1b[2;38;5;57m─────"
"────────────────────────────────────────"
"─────────────────────────────────\x1b[0m\n "
" "
" \n "
" Hey "
" \n"
" "
" "
"\n \x1b[1ma\x1b[0m \x1b["
"1mb\x1b[0m \x1b[1mc\x1b["
"0m \n ────────────"
"────────────────────────────────────────"
"──────────────────────────\n 1 "
" 2 "
" 3 \n "
" "
" \n "
" "
" \n X ∼𝒩(μ, "
"σ^2) "
" \n "
" "
" \n Hear "
" "
" \n "
" "
" \n \x1b[1"
"ma\x1b[0m \x1b[1mb\x1b[0m"
" \x1b[1mc\x1b[0m "
" \n ───────────────────"
"────────────────────────────────────────"
"───────────────────\n 1 "
" 2 3 "
" \n "
" "
" \n Ehse "
" "
" \n "
" "
" \n rmse = √(( rac"
"1n)∑_i=1^n(y_i - x_i)^2) "
" \n "
" "
" \n Fin "
" "
" \n"
)
assert output == expected_output
def test_image_link_markdown_cell_request_error(
rich_notebook_output: RichOutput,
mocker: MockerFixture,
remove_link_ids: Callable[[str], str],
) -> None:
"""It falls back to rendering a message if RequestError occurs."""
mock = mocker.patch("httpx.get", side_effect=httpx.RequestError("Mock"))
mock.return_value.content = (
pathlib.Path(__file__).parent
/ pathlib.Path("assets", "outline_article_white_48dp.png")
).read_bytes()
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "",
}
output = rich_notebook_output(markdown_cell, image_drawing="braille")
expected_output = (
" \x1b]8;id=724062;https://github.com/paw-l"
"u/nbpreview/tests/assets/outline_article_white_48dp.png"
"\x1b\\\x1b[94m🌐 Click "
"to view Azores\x1b[0m\x1b]8;;\x1b\\ "
" "
"\n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_image_link_markdown_cell(
rich_notebook_output: RichOutput,
mocker: MockerFixture,
remove_link_ids: Callable[[str], str],
expected_output: str,
) -> None:
"""It renders a markdown cell with an image."""
mock = mocker.patch("httpx.get")
mock.return_value.content = (
pathlib.Path(__file__).parent
/ pathlib.Path("assets", "outline_article_white_48dp.png")
).read_bytes()
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "",
}
output = rich_notebook_output(markdown_cell, image_drawing="character")
assert remove_link_ids(output) == expected_output
def test_image_markdown_cell(
rich_notebook_output: RichOutput,
mock_tempfile_file: Generator[Mock, None, None],
remove_link_ids: Callable[[str], str],
expected_output: str,
) -> None:
"""It renders a markdown cell with an image."""
image_path = os.fsdecode(
pathlib.Path(__file__).parent
/ pathlib.Path("assets", "outline_article_white_48dp.png")
)
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": f"",
}
output = rich_notebook_output(markdown_cell, image_drawing="braille")
assert remove_link_ids(output) == expected_output
def test_image_markdown_cell_no_drawing(
rich_notebook_output: RichOutput,
mock_tempfile_file: Generator[Mock, None, None],
remove_link_ids: Callable[[str], str],
tempfile_path: Path,
) -> None:
"""It renders a markdown cell with an image and skips drawing."""
image_path = os.fsdecode(
pathlib.Path(__file__).parent
/ pathlib.Path("assets", "outline_article_white_48dp.png")
)
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": f"",
}
output = rich_notebook_output(markdown_cell, image_drawing="braille", images=False)
expected_output = (
f" \x1b]8;id=378979;file://{image_path}\x1b\\\x1b[94m"
"🖼 Click to view Azores\x1b[0m\x1b]8;;\x1b\\ "
" "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_code_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with code."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "```python\nfor i in range(20):\n print(i)\n```",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;1"
"87;128;179;49mfor\x1b[0m\x1b[38;2;238;255;255;"
"49m \x1b[0m\x1b[38;2;238;255;255;49mi\x1b[0m\x1b[38;"
"2;238;255;255;49m \x1b[0m\x1b[3;38;2;137;221;2"
"55;49min\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
"[38;2;130;170;255;49mrange\x1b[0m\x1b[38;2;137"
";221;255;49m(\x1b[0m\x1b[38;2;247;140;108;49m2"
"0\x1b[0m\x1b[38;2;137;221;255;49m)\x1b[0m\x1b[38;2;1"
"37;221;255;49m:\x1b[0m "
" \n \x1b[3"
"8;2;238;255;255;49m \x1b[0m\x1b[38;2;13"
"0;170;255;49mprint\x1b[0m\x1b[38;2;137;221;255"
";49m(\x1b[0m\x1b[38;2;238;255;255;49mi\x1b[0m\x1b[38"
";2;137;221;255;49m)\x1b[0m "
" "
" \n"
)
assert output == expected_output
def test_table_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with tables."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": """# Hey buddy
*did you hear the news?*
```python
for i in range(20):
print(i)
```
| aaa | bbbb **ccc** |
| --- | --- |
| 111 **222** 333 | 222 |
| susu | lulu|
- so there you are
- words
| ddd | `eeee` fff |
| --- | --- |
| | |
--- | ---
sus | *spect*
rak
""",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[1;38;5;231;48;5;57m \x1b[0m\x1b[1;38;5;231"
";48;5;57mHey buddy\x1b[0m\x1b[1;38;5;231;48;5;"
"57m \x1b[0m\x1b[1;38;5;231;48;5;57m "
" "
" \x1b[0m\n \x1b[2;38;5;57m─────"
"────────────────────────────────────────"
"─────────────────────────────────\x1b[0m\n "
" "
" \n "
" \x1b[3mdid you hear the news?\x1b[0m "
" "
" \n "
" "
" \n \x1b[38;2;238;255;255;49m \x1b[0"
"m\x1b[38;2;187;128;179;49mfor\x1b[0m\x1b[38;2;238"
";255;255;49m \x1b[0m\x1b[38;2;238;255;255;49mi"
"\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[3;38;2;"
"137;221;255;49min\x1b[0m\x1b[38;2;238;255;255;"
"49m \x1b[0m\x1b[38;2;130;170;255;49mrange\x1b[0m\x1b"
"[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;247;140"
";108;49m20\x1b[0m\x1b[38;2;137;221;255;49m)\x1b[0"
"m\x1b[38;2;137;221;255;49m:\x1b[0m "
" "
" \n \x1b[38;2;238;255;255;49m \x1b[0m\x1b"
"[38;2;130;170;255;49mprint\x1b[0m\x1b[38;2;137"
";221;255;49m(\x1b[0m\x1b[38;2;238;255;255;49mi"
"\x1b[0m\x1b[38;2;137;221;255;49m)\x1b[0m "
" "
" \n "
" "
" \n \x1b[1maaa\x1b[0m "
" \x1b[1mbbbb \x1b[0m"
"\x1b[1mccc\x1b[0m "
"\n ─────────────────────────────────────"
"────────────────────────────────────────"
"─\n 111 \x1b[1m222\x1b[0m 333 "
" 222 "
" \n susu "
" lulu "
" \n "
" "
" \n "
" "
" \n • so there you are "
" "
" \n • words "
" "
" \n "
" "
" \n \x1b[1mddd\x1b[0m "
" \x1b[1;97;40mee"
"ee\x1b[0m\x1b[1m fff\x1b[0m "
" \n ──────────────────────────────"
"────────────────────────────────────────"
"────────\n "
" "
" \n "
" "
" \n "
" "
" \n "
" "
" \n ─────────────────────────"
"────────────────────────────────────────"
"─────────────\n sus "
" \x1b[3mspect\x1b[0m "
" \n "
" "
" \n rak "
" "
" \n"
)
assert output == expected_output
def test_heading_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with headings."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "# Heading 1\n## Heading 2\n### Heading 3\n#### Heading 4\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[1;38;5;231;48;5;57m \x1b[0m\x1b[1;38;5;231"
";48;5;57mHeading 1\x1b[0m\x1b[1;38;5;231;48;5;"
"57m \x1b[0m\x1b[1;38;5;231;48;5;57m "
" "
" \x1b[0m\n \x1b[2;38;5;57m─────"
"────────────────────────────────────────"
"─────────────────────────────────\x1b[0m\n "
" "
" \n "
" "
" \n"
" \x1b[1;38;5;37m## \x1b[0m\x1b[1;38;5;37mHeading"
" 2\x1b[0m\x1b[1;38;5;37m "
" "
" \x1b[0m\n \x1b[2;38;5;37m─────────────────"
"────────────────────────────────────────"
"─────────────────────\x1b[0m\n "
" "
" \n "
" "
" \n \x1b[1;38;5;3"
"7m### \x1b[0m\x1b[1;38;5;37mHeading 3\x1b[0m\x1b[1;3"
"8;5;37m "
" \x1b[0m\n "
" "
" \n "
"\x1b[1;38;5;37m#### \x1b[0m\x1b[1;38;5;37mHeading"
" 4\x1b[0m\x1b[1;38;5;37m "
" "
" \x1b[0m\n"
)
assert output == expected_output
def test_wide_heading_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It reduced the padding if the heading is long."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "# " + "A" * 80,
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[1;38;5;231;48;5;57mAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAA…\x1b[0m\n \x1b[2;38;5;57m"
"────────────────────────────────────────"
"──────────────────────────────────────\x1b["
"0m\n"
)
assert output == expected_output
def test_ruler_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with a ruler."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "Section 1\n\n---\n\nsection 2\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" Section 1 "
" "
"\n "
" "
" \n ────────────────────────────────────"
"────────────────────────────────────────"
"──\n section 2 "
" "
" \n"
)
assert output == expected_output
def test_bullet_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with bullets."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "- Item 1\n- Item 2\n - Item 3\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" "
" "
"\n • Item 1 "
" "
" \n • Item 2 "
" "
" \n • Item 3 "
" "
" \n"
)
assert output == expected_output
def test_number_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with numbers."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "1. Item 1\n2. Item 2\n3. Item 3\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" "
" "
"\n 1. Item 1 "
" "
" \n 2. Item 2 "
" "
" \n 3. Item 3 "
" "
" \n"
)
assert output == expected_output
def test_image_file_link_not_image_markdown_cell(
rich_notebook_output: RichOutput, remove_link_ids: Callable[[str], str]
) -> None:
"""It does not render an image link when file is not an image."""
bad_path = pathlib.Path(__file__).parent / pathlib.Path("assets", "bad_image.xyz")
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "![This is a weird file extension]" f"({bad_path})",
}
output = rich_notebook_output(markdown_cell, images=True)
expected_output = (
f" \x1b]8;id=228254;file://{bad_path}\x1b\\\x1b[94m🖼 Click to "
"view This is a weird file extension\x1b[0m\x1b]8;;\x1b\\ "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_image_file_link_bad_extension_markdown_cell(
rich_notebook_output: RichOutput, remove_link_ids: Callable[[str], str]
) -> None:
"""It does not render an image link when extension is unknown."""
bad_extension_path = __file__
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": f"",
}
output = rich_notebook_output(markdown_cell, images=True)
expected_output = (
f" \x1b]8;id=467471;file://{bad_extension_path}\x1b\\\x1b"
"[94m🖼 Click"
" to view This isn't even a image\x1b[0m\x1b]8;;\x1b\\"
" "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_image_file_link_not_exist_markdown_cell(
rich_notebook_output: RichOutput, remove_link_ids: Callable[[str], str]
) -> None:
"""It does not render an image link when the file does not exist."""
project_dir = pathlib.Path().resolve()
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b]8;"
f"id=179352;file://{project_dir / 'i_do_not_exists.xyz'}"
"\x1b\\\x1b[94m🖼 Click to view This image does not "
"exist\x1b[0m\x1b]8;;\x1b\\ "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_notebook_code_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a code cell."""
code_cell = {
"cell_type": "code",
"execution_count": 2,
"id": "emotional-amount",
"metadata": {},
"outputs": [],
"source": "def foo(x: float, y: float) -> float:\n return x + y",
}
output = rich_notebook_output(code_cell)
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[2]:\x1b[0m │ \x1b[38;2;187;128;17"
"9;49mdef\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
"[38;2;130;170;255;49mfoo\x1b[0m\x1b[38;2;137;2"
"21;255;49m(\x1b[0m\x1b[38;2;238;255;255;49mx\x1b["
"0m\x1b[38;2;137;221;255;49m:\x1b[0m\x1b[38;2;238;"
"255;255;49m \x1b[0m\x1b[38;2;130;170;255;49mfl"
"oat\x1b[0m\x1b[38;2;137;221;255;49m,\x1b[0m\x1b[38;2"
";238;255;255;49m \x1b[0m\x1b[38;2;238;255;255;"
"49my\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m\x1b[38;"
"2;238;255;255;49m \x1b[0m\x1b[38;2;130;170;255"
";49mfloat\x1b[0m\x1b[38;2;137;221;255;49m)\x1b[0m"
"\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;137;22"
"1;255;49m-\x1b[0m\x1b[38;2;137;221;255;49m>\x1b[0"
"m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;130;1"
"70;255;49mfloat\x1b[0m\x1b[38;2;137;221;255;49"
"m:\x1b[0m "
" │\n │ \x1b[38;2;238;255;255;49m \x1b[0m"
"\x1b[38;2;187;128;179;49mreturn\x1b[0m\x1b[38;2;2"
"38;255;255;49m \x1b[0m\x1b[38;2;238;255;255;49"
"mx\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
"137;221;255;49m+\x1b[0m\x1b[38;2;238;255;255;4"
"9m \x1b[0m\x1b[38;2;238;255;255;49my\x1b[0m "
" "
" │\n ╰──────────────────────"
"────────────────────────────────────────"
"───────────╯\n"
)
assert output == expected_output
def test_notebook_magic_code_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a code cell in a language specified by cell magic."""
code_cell = {
"cell_type": "code",
"execution_count": 3,
"id": "emotional-amount",
"metadata": {},
"outputs": [],
"source": "%%bash\necho 'lorep'",
}
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[3]:\x1b[0m │ \x1b[38;2;137;221;25"
"5;49m%%\x1b[0m\x1b[38;2;187;128;179;49mbash\x1b[0"
"m "
" │\n │ \x1b[38"
";2;130;170;255;49mecho\x1b[0m\x1b[38;2;238;255"
";255;49m \x1b[0m\x1b[38;2;195;232;141;49m'lore"
"p'\x1b[0m "
" │\n ╰──────"
"────────────────────────────────────────"
"───────────────────────────╯\n"
)
output = rich_notebook_output(code_cell)
assert output == expected_output
def test_notebook_raw_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a raw cell as plain text."""
code_cell = {
"cell_type": "raw",
"id": "emotional-amount",
"metadata": {},
"source": "Lorep ipsum",
}
expected_output = " ╭─────────────╮\n │ Lorep ipsum │\n ╰─────────────╯\n"
output = rich_notebook_output(code_cell)
assert output == expected_output
def test_notebook_non_syntax_magic_code_cell(rich_notebook_output: RichOutput) -> None:
    """It uses the default highlighting when magic is not a syntax."""
    # %%timeit is a cell magic that does not name a language, so the body
    # should fall back to the notebook's default (Python) highlighting.
    code_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "%%timeit\ndef foo(x: float, y: float) -> float:\n    return x + y",
    }
    # Exact ANSI rendering: the magic line itself plus Python-highlighted
    # function source inside the bordered cell.
    expected_output = (
        " ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │ \x1b[38;2;137;221;25"
        "5;49m%%time\x1b[0m\x1b[38;2;238;255;255;49mit\x1b"
        "[0m                                     "
        "                                    │\n     │ \x1b[38"
        ";2;187;128;179;49mdef\x1b[0m\x1b[38;2;238;255;"
        "255;49m \x1b[0m\x1b[38;2;130;170;255;49mfoo\x1b[0"
        "m\x1b[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;238;2"
        "55;255;49mx\x1b[0m\x1b[38;2;137;221;255;49m:\x1b["
        "0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;130;"
        "170;255;49mfloat\x1b[0m\x1b[38;2;137;221;255;4"
        "9m,\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2"
        ";238;255;255;49my\x1b[0m\x1b[38;2;137;221;255;"
        "49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;"
        "2;130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221"
        ";255;49m)\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m"
        "\x1b[38;2;137;221;255;49m-\x1b[0m\x1b[38;2;137;22"
        "1;255;49m>\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0"
        "m\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;1"
        "37;221;255;49m:\x1b[0m                     "
        "         │\n     │ \x1b[38;2;238;255;25"
        "5;49m    \x1b[0m\x1b[38;2;187;128;179;49mretur"
        "n\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;2"
        "38;255;255;49mx\x1b[0m\x1b[38;2;238;255;255;49"
        "m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b[0m\x1b[38;2;"
        "238;255;255;49m \x1b[0m\x1b[38;2;238;255;255;4"
        "9my\x1b[0m                                 "
        "                      │\n     ╰─────────"
        "────────────────────────────────────────"
        "────────────────────────╯\n"
    )
    output = rich_notebook_output(code_cell)
    assert output == expected_output
def test_notebook_plain_code_cell(rich_notebook_output: RichOutput) -> None:
    """It renders a code cell with plain formatting."""
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n    return x + y",
    }
    # plain=True drops the border and execution-count gutter; syntax
    # highlighting of the source is still applied.
    output = rich_notebook_output(code_cell, plain=True)
    expected_output = (
        "\x1b[38;2;187;128;179;49mdef\x1b[0m\x1b[38;2;238;"
        "255;255;49m \x1b[0m\x1b[38;2;130;170;255;49mfo"
        "o\x1b[0m\x1b[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;2"
        "38;255;255;49mx\x1b[0m\x1b[38;2;137;221;255;49"
        "m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
        "130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221;2"
        "55;49m,\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b["
        "38;2;238;255;255;49my\x1b[0m\x1b[38;2;137;221;"
        "255;49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
        "[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;137"
        ";221;255;49m)\x1b[0m\x1b[38;2;238;255;255;49m "
        "\x1b[0m\x1b[38;2;137;221;255;49m-\x1b[0m\x1b[38;2;13"
        "7;221;255;49m>\x1b[0m\x1b[38;2;238;255;255;49m"
        " \x1b[0m\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38"
        ";2;137;221;255;49m:\x1b[0m                 "
        "                       \n\x1b[38;2;238;25"
        "5;255;49m    \x1b[0m\x1b[38;2;187;128;179;49mr"
        "eturn\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38"
        ";2;238;255;255;49mx\x1b[0m\x1b[38;2;238;255;25"
        "5;49m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b[0m\x1b[3"
        "8;2;238;255;255;49m \x1b[0m\x1b[38;2;238;255;2"
        "55;49my\x1b[0m                             "
        "                              \n"
    )
    assert output == expected_output
def test_render_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame."""
    # A pandas-style HTML table (with multi-level row and column headers)
    # should be parsed from text/html and rendered as a rich table; the
    # text/plain fallback is also included in the output bundle.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th></th>\n      <th></th>"
                        "\n      <th>lorep</th>\n      <th colspan="
                        '"2" halign="left">hey</th>\n      <th>bye'
                        "</th>\n    </tr>\n    <tr>\n      <th></th>"
                        "\n      <th></th>\n      <th>ipsum</th>\n  "
                        "    <th>hi</th>\n      <th>very_long_word"
                        "</th>\n      <th>hi</th>\n    </tr>\n    <t"
                        "r>\n      <th>first</th>\n      <th>second"
                        "</th>\n      <th>third</th>\n      <th></t"
                        "h>\n      <th></th>\n      <th></th>\n    <"
                        "/tr>\n  </thead>\n  <tbody>\n    <tr>\n     "
                        ' <th rowspan="3" valign="top">bar</th>\n '
                        '     <th rowspan="2" valign="top">one</t'
                        "h>\n      <th>1</th>\n      <td>1</td>\n   "
                        "   <td>2</td>\n      <td>4</td>\n    </tr>"
                        "\n    <tr>\n      <th>10</th>\n      <td>3<"
                        "/td>\n      <td>4</td>\n      <td>-1</td>\n"
                        "    </tr>\n    <tr>\n      <th>three</th>\n"
                        "      <th>3</th>\n      <td>3</td>\n      "
                        "<td>4</td>\n      <td>-1</td>\n    </tr>\n "
                        "   <tr>\n      <th>foo</th>\n      <th>one"
                        "</th>\n      <th>1</th>\n      <td>3</td>\n"
                        "      <td>4</td>\n      <td>-1</td>\n    <"
                        "/tr>\n  </tbody>\n</table>\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: empty source cell, then a hyperlink to the saved
    # HTML (mock_tempfile_file pins the temp path so the target is
    # predictable), then the table itself.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        "\x1b]8;id=1627258210.84976-39532;"
        f"file://{tempfile_path}0.html\x1b\\\x1b[94"
        "m🌐 Click to view HTML\x1b[0m\x1b]8;;\x1b\\      "
        "                                        "
        "          \n                             "
        "                                        "
        "            \n\x1b[38;5;247m[2]:\x1b[0m   \x1b[1m \x1b["
        "0m   \x1b[1m      \x1b[0m   \x1b[1mlorep\x1b[0m   "
        " \x1b[1m           hey\x1b[0m   \x1b[1mbye\x1b[0m "
        "                       \n        \x1b[1m \x1b"
        "[0m   \x1b[1m      \x1b[0m   \x1b[1mipsum\x1b[0m   \x1b"
        "[1mhi\x1b[0m   \x1b[1mvery_long_word\x1b[0m   \x1b[1"
        "m hi\x1b[0m                       \n        \x1b"
        "[1mfirst\x1b[0m   \x1b[1msecond\x1b[0m   \x1b[1mthir"
        "d\x1b[0m   \x1b[1m  \x1b[0m   \x1b[1m              \x1b"
        "[0m   \x1b[1m   \x1b[0m                       "
        "\n       ─────────────────────────────────"
        "───────────────────                     "
        " \n       \x1b[1m  bar\x1b[0m   \x1b[1m   one\x1b[0m "
        "  \x1b[1m    1\x1b[0m    1               2    "
        "  4                        \n             "
        "       \x1b[1m    10\x1b[0m    3              "
        " 4     -1                        \n       "
        "       \x1b[1m three\x1b[0m   \x1b[1m    3\x1b[0"
        "m    3               4     -1            "
        "            \n       \x1b[1m  foo\x1b[0m   \x1b[1m"
        "   one\x1b[0m   \x1b[1m    1\x1b[0m    3         "
        "      4     -1                        \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_only_header_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with only headers."""
    # The HTML table has a populated <thead> but an empty <tbody>; the
    # renderer should still draw the (wide, truncated) header rows.
    # NOTE(review): this HTML uses literal backslash-n ("\\n") rather than
    # real newlines — presumably intentional to exercise the parser.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\\n<style scoped>\\n    .dataframe tb"
                        "ody tr th:only-of-type {\\n        vertic"
                        "al-align: middle;\\n    }\\n\\n    .datafra"
                        "me tbody tr th {\\n        vertical-align"
                        ": top;\\n    }\\n\\n    .dataframe thead tr"
                        " th {\\n        text-align: left;\\n    }\\"
                        "n\\n    .dataframe thead tr:last-of-type "
                        "th {\\n        text-align: right;\\n    }\\"
                        'n</style>\\n<table border="1" class="data'
                        'frame">\\n  <thead>\\n    <tr>\\n      <th>'
                        'Model:</th>\\n      <th colspan="2" halig'
                        'n="left">Decision Tree</th>\\n      <th c'
                        'olspan="2" halign="left">Regression</th>'
                        '\\n      <th colspan="2" halign="left">Ra'
                        "ndom</th>\\n    </tr>\\n    <tr>\\n      <t"
                        "h>Predicted:</th>\\n      <th>Tumour</th>"
                        "\\n      <th>Non-Tumour</th>\\n      <th>T"
                        "umour</th>\\n      <th>Non-Tumour</th>\\n "
                        "     <th>Tumour</th>\\n      <th>Non-Tumo"
                        "ur</th>\\n    </tr>\\n    <tr>\\n      <th>"
                        "Actual Label:</th>\\n      <th></th>\\n   "
                        "   <th></th>\\n      <th></th>\\n      <th"
                        "></th>\\n      <th></th>\\n      <th></th>"
                        "\\n    </tr>\\n  </thead>\\n  <tbody>\\n  </"
                        "tbody>\\n</table>\\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the saved HTML, then header-only
    # table with long header names truncated with an ellipsis.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=360825;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                \n       "
        "                                        "
        "                                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m    Model:\x1b[0m       "
        "     \x1b[1m  Decision\x1b[0m   \x1b[1mRegre"
        "ssi…\x1b[0m   \x1b[1m   Random\x1b[0m  \n        "
        "                 \x1b[1m      Tree"
        "\x1b[0m                               "
        "       \n       \x1b[1mPredicte…\x1b[0m   \x1b[1mT"
        "umour\x1b[0m   \x1b[1mNon-Tumo…\x1b[0m   \x1b[1mTumo"
        "ur\x1b[0m   \x1b[1mNon-Tumo…\x1b[0m   \x1b[1mTumour\x1b"
        "[0m   \x1b[1mNon-Tumo…\x1b[0m  \n       \x1b[1m  A"
        "ctual\x1b[0m   \x1b[1m      \x1b[0m   \x1b[1m       "
        "  \x1b[0m   \x1b[1m      \x1b[0m   \x1b[1m         \x1b"
        "[0m   \x1b[1m      \x1b[0m   \x1b[1m         \x1b[0m"
        "  \n       \x1b[1m  Label:\x1b[0m              "
        "                                        "
        "   \n      ───────────────────────"
        "────────────────────────────────────────"
        "───────────\n                             "
        "                                        "
        "            \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_mistagged_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It doesn't detect a DataFrame when it is not a table."""
    # The HTML wraps the rows in a <not-a-table> element instead of
    # <table>, so the renderer should fall back to plain HTML-to-text
    # conversion rather than drawing a rich table.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<not-a-table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th>Model:</th>\n      <th"
                        ' colspan="2" halign="left">Decision Tree'
                        '</th>\n      <th colspan="2" halign="left'
                        '">Regression</th>\n      <th colspan="2" '
                        'halign="left">Random</th>\n    </tr>\n    '
                        "<tr>\n      <th>Predicted:</th>\n      <th"
                        ">Tumour</th>\n      <th>Non-Tumour</th>\n "
                        "     <th>Tumour</th>\n      <th>Non-Tumou"
                        "r</th>\n      <th>Tumour</th>\n      <th>N"
                        "on-Tumour</th>\n    </tr>\n    <tr>\n      "
                        "<th>Actual Label:</th>\n      <th></th>\n "
                        "     <th></th>\n      <th></th>\n      <th"
                        "></th>\n      <th></th>\n      <th></th>\n "
                        "   </tr>\n  </thead>\n  <tbody>\n    <tr>\n "
                        "     <th>Tumour (Positive)</th>\n      <t"
                        "d>38.0</td>\n      <td>2.0</td>\n      <td"
                        ">18.0</td>\n      <td>22.0</td>\n      <td"
                        ">21</td>\n      <td>NaN</td>\n    </tr>\n  "
                        "  <tr>\n      <th>Non-Tumour (Negative)</"
                        "th>\n      <td>19.0</td>\n      <td>439.0<"
                        "/td>\n      <td>6.0</td>\n      <td>452.0<"
                        "/td>\n      <td>226</td>\n      <td>232.0<"
                        "/td>\n    </tr>\n  </tbody>\n</not-a-table>\n</div"
                        ">"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the saved HTML, then a pipe-separated
    # plain-text dump (no bold styling, no table rules).
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=968899;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                \n       "
        "                                        "
        "                                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m Model: | Decision Tree | Reg"
        "ression | Random                        "
        "       \n      Predicted: | Tumour | Non-T"
        "umour | Tumour | Non-Tumour | Tumour |  "
        "            \n      Non-Tumour           "
        "                                        "
        "        \n      Actual Label: |  |  |  | "
        " |  |                                   "
        "       \n      Tumour (Positive) | 38.0"
        " | 2.0 | 18.0 | 22.0 | 21 | NaN         "
        "            \n      Non-Tumour (Negative) |"
        " 19.0 | 439.0 | 6.0 | 452.0 | 226 | 232."
        "0      \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_multiindex_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a multiindex DataFrame."""
    # Same HTML as the mistagged test but with a proper <table> element,
    # so the renderer should draw a rich table with multi-level column
    # headers (colspan="2" groups) and body rows.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th>Model:</th>\n      <th"
                        ' colspan="2" halign="left">Decision Tree'
                        '</th>\n      <th colspan="2" halign="left'
                        '">Regression</th>\n      <th colspan="2" '
                        'halign="left">Random</th>\n    </tr>\n    '
                        "<tr>\n      <th>Predicted:</th>\n      <th"
                        ">Tumour</th>\n      <th>Non-Tumour</th>\n "
                        "     <th>Tumour</th>\n      <th>Non-Tumou"
                        "r</th>\n      <th>Tumour</th>\n      <th>N"
                        "on-Tumour</th>\n    </tr>\n    <tr>\n      "
                        "<th>Actual Label:</th>\n      <th></th>\n "
                        "     <th></th>\n      <th></th>\n      <th"
                        "></th>\n      <th></th>\n      <th></th>\n "
                        "   </tr>\n  </thead>\n  <tbody>\n    <tr>\n "
                        "     <th>Tumour (Positive)</th>\n      <t"
                        "d>38.0</td>\n      <td>2.0</td>\n      <td"
                        ">18.0</td>\n      <td>22.0</td>\n      <td"
                        ">21</td>\n      <td>NaN</td>\n    </tr>\n  "
                        "  <tr>\n      <th>Non-Tumour (Negative)</"
                        "th>\n      <td>19.0</td>\n      <td>439.0<"
                        "/td>\n      <td>6.0</td>\n      <td>452.0<"
                        "/td>\n      <td>226</td>\n      <td>232.0<"
                        "/td>\n    </tr>\n  </tbody>\n</table>\n</div"
                        ">"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the saved HTML, then the table with
    # grouped headers and long labels wrapped/truncated with an ellipsis.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=888128;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                \n       "
        "                                        "
        "                                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m    Model:\x1b[0m       "
        "     \x1b[1m  Decision\x1b[0m   \x1b[1mRegre"
        "ssi…\x1b[0m   \x1b[1m   Random\x1b[0m  \n        "
        "                 \x1b[1m      Tree"
        "\x1b[0m                               "
        "       \n       \x1b[1mPredicte…\x1b[0m   \x1b[1mT"
        "umour\x1b[0m   \x1b[1mNon-Tumo…\x1b[0m   \x1b[1mTumo"
        "ur\x1b[0m   \x1b[1mNon-Tumo…\x1b[0m   \x1b[1mTumour\x1b"
        "[0m   \x1b[1mNon-Tumo…\x1b[0m  \n       \x1b[1m  A"
        "ctual\x1b[0m   \x1b[1m      \x1b[0m   \x1b[1m       "
        "  \x1b[0m   \x1b[1m      \x1b[0m   \x1b[1m         \x1b"
        "[0m   \x1b[1m      \x1b[0m   \x1b[1m         \x1b[0m"
        "  \n       \x1b[1m  Label:\x1b[0m              "
        "                                        "
        "   \n      ───────────────────────"
        "────────────────────────────────────────"
        "───────────\n       \x1b[1m    Tumour\x1b[0m   "
        "  38.0         2.0     18.0        22.0 "
        "      21        NaN  \n       \x1b[1m(Positiv"
        "…\x1b[0m                                   "
        "                                  \n       \x1b[1"
        "mNon-Tumo…\x1b[0m     19.0       439.0     "
        " 6.0       452.0      226      232.0  \n  "
        "     \x1b[1m(Negativ…\x1b[0m                  "
        "                                        "
        "           \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_styled_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a styled DataFrame."""
    # HTML produced by pandas' Styler (id="T_7cafb_", per-cell CSS
    # classes, &nbsp; blanks) should still be detected and rendered as a
    # rich table; the CSS itself is ignored in terminal output.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        '<style type="text/css">\n#T_7cafb_ td:hov'
                        "er {\n  background-color: #ffffb3;\n}\n#T_7"
                        "cafb_ .index_name {\n  font-style: italic"
                        ";\n  color: darkgrey;\n  font-weight: norm"
                        "al;\n}\n#T_7cafb_ th:not(.index_name) {\n  "
                        "background-color: #000066;\n  color: whit"
                        "e;\n}\n#T_7cafb_ .true {\n  background-colo"
                        "r: #e6ffe6;\n}\n#T_7cafb_ .false {\n  backg"
                        "round-color: #ffe6e6;\n}\n</style>\n<table "
                        'id="T_7cafb_">\n  <thead>\n    <tr>\n      '
                        '<th class="index_name level0" >Model:</t'
                        'h>\n      <th class="col_heading level0 c'
                        'ol0" colspan="2">Decision Tree</th>\n    '
                        '  <th class="col_heading level0 col2" co'
                        'lspan="2">Regression</th>\n    </tr>\n    '
                        '<tr>\n      <th class="index_name level1"'
                        ' >Predicted:</th>\n      <th class="col_h'
                        'eading level1 col0" >Tumour</th>\n      <'
                        'th class="col_heading level1 col1" >Non-'
                        'Tumour</th>\n      <th class="col_heading'
                        ' level1 col2" >Tumour</th>\n      <th cla'
                        'ss="col_heading level1 col3" >Non-Tumour'
                        "</th>\n    </tr>\n    <tr>\n      <th class"
                        '="index_name level0" >Actual Label:</th>'
                        '\n      <th class="blank col0" >&nbsp;</t'
                        'h>\n      <th class="blank col1" >&nbsp;<'
                        '/th>\n      <th class="blank col2" >&nbsp'
                        ';</th>\n      <th class="blank col3" >&nb'
                        "sp;</th>\n    </tr>\n  </thead>\n  <tbody>\n"
                        '    <tr>\n      <th id="T_7cafb_level0_ro'
                        'w0" class="row_heading level0 row0" >Tum'
                        'our (Positive)</th>\n      <td id="T_7caf'
                        'b_row0_col0" class="data row0 col0 true '
                        '" >38</td>\n      <td id="T_7cafb_row0_co'
                        'l1" class="data row0 col1 false " >2</td'
                        '>\n      <td id="T_7cafb_row0_col2" class'
                        '="data row0 col2 true " >18</td>\n      <'
                        'td id="T_7cafb_row0_col3" class="data ro'
                        'w0 col3 false " >22</td>\n    </tr>\n    <'
                        'tr>\n      <th id="T_7cafb_level0_row1" c'
                        'lass="row_heading level0 row1" >Non-Tumo'
                        'ur (Negative)</th>\n      <td id="T_7cafb'
                        '_row1_col0" class="data row1 col0 false '
                        '" >19</td>\n      <td id="T_7cafb_row1_co'
                        'l1" class="data row1 col1 true " >439</t'
                        'd>\n      <td id="T_7cafb_row1_col2" clas'
                        's="data row1 col2 false " >6</td>\n      '
                        '<td id="T_7cafb_row1_col3" class="data r'
                        'ow1 col3 true " >452</td>\n    </tr>\n  </'
                        "tbody>\n</table>\n"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the saved HTML, then the table with
    # grouped column headers and the two data rows.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=698065;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                \n       "
        "                                        "
        "                                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m        Model:\x1b["
        "0m   \x1b[1mDecision Tree\x1b[0m           "
        "   \x1b[1mRegression\x1b[0m             \n       \x1b["
        "1m    Predicted:\x1b[0m          \x1b[1mTumour"
        "\x1b[0m   \x1b[1m Non-Tumour\x1b[0m   \x1b[1mTumou"
        "r\x1b[0m   \x1b[1mNon-Tumour\x1b[0m  \n       \x1b"
        "[1m Actual Label:\x1b[0m        \x1b[1m      "
        " \x1b[0m   \x1b[1m           \x1b[0m   \x1b[1m      "
        " \x1b[0m   \x1b[1m          \x1b[0m  \n      ─"
        "────────────────────────────────────────"
        "───────────────────────────── \n       "
        " \x1b[1m   Tumour (Positive)\x1b[0m     38  "
        "           2       18              22   "
        "  \n       \x1b[1mNon-Tumour (Negative)\x1b[0m "
        "    19           439        6           "
        "  452  \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_missing_column_name_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with a missing column index name."""
    # The first header row has an empty leading <th>; the renderer should
    # still align the remaining column labels correctly.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th></th>\n      <th>lorep"
                        "</th>\n      <th>hey</th>\n      <th>sup</"
                        "th>\n      <th>bye</th>\n    </tr>\n    <tr"
                        ">\n      <th>hey</th>\n      <th></th>\n   "
                        "   <th></th>\n      <th></th>\n      <th><"
                        "/th>\n    </tr>\n  </thead>\n  <tbody>\n    "
                        "<tr>\n      <th>3</th>\n      <th>1</th>\n "
                        "     <td>1</td>\n      <td>4</td>\n      <"
                        "td>6</td>\n    </tr>\n    <tr>\n      <th>4"
                        "</th>\n      <th>1</th>\n      <td>2</td>\n"
                        "      <td>5</td>\n      <td>7</td>\n    </"
                        "tr>\n  </tbody>\n</table>\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the saved HTML, then a small table
    # whose first header cell is blank.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=337911;file://{tempfile_path}0.html\x1b\\\x1b[94m🌐 "
        "Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                \n       "
        "                                        "
        "                                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m   \x1b[0m   \x1b[1mlorep\x1b[0m"
        "   \x1b[1mhey\x1b[0m   \x1b[1msup\x1b[0m   \x1b[1mbye\x1b["
        "0m                                      "
        "           \n       \x1b[1mhey\x1b[0m   \x1b[1m     \x1b[0"
        "m   \x1b[1m   \x1b[0m   \x1b[1m   \x1b[0m   \x1b[1m   \x1b"
        "[0m                                     "
        "            \n      ──────────────────────────"
        "─────                                   "
        "             \n       \x1b[1m  3\x1b[0m   \x1b[1m    1\x1b"
        "[0m     1     4     6                   "
        "                             \n       \x1b[1m  4"
        "\x1b[0m   \x1b[1m    1\x1b[0m     2     5     7  "
        "                                        "
        "      \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_missing_index_name_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with a missing index name."""
    # The second header row starts with an empty <th> before "hey"; the
    # renderer should keep the index columns aligned regardless.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead th {\n       "
                        " text-align: right;\n    }\n</style>\n<tabl"
                        'e border="1" class="dataframe">\n  <thead'
                        '>\n    <tr style="text-align: right;">\n  '
                        "    <th></th>\n      <th></th>\n      <th>"
                        "a</th>\n      <th>b</th>\n      <th>c</th>"
                        "\n    </tr>\n    <tr>\n      <th></th>\n    "
                        "  <th>hey</th>\n      <th></th>\n      <th"
                        "></th>\n      <th></th>\n    </tr>\n  </the"
                        "ad>\n  <tbody>\n    <tr>\n      <th>3</th>\n"
                        "      <th>1</th>\n      <td>1</td>\n      "
                        "<td>4</td>\n      <td>6</td>\n    </tr>\n  "
                        "  <tr>\n      <th>4</th>\n      <th>1</th>"
                        "\n      <td>2</td>\n      <td>5</td>\n     "
                        " <td>7</td>\n    </tr>\n  </tbody>\n</table"
                        ">\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the saved HTML, then the table with
    # a blank cell where the index name would be.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=308498;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                \n       "
        "                                        "
        "                                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m \x1b[0m   \x1b[1m   \x1b[0m   \x1b"
        "[1ma\x1b[0m   \x1b[1mb\x1b[0m   \x1b[1mc\x1b[0m       "
        "                                        "
        "     \n       \x1b[1m \x1b[0m   \x1b[1mhey\x1b[0m   "
        "\x1b[1m \x1b[0m   \x1b[1m \x1b[0m   \x1b[1m \x1b[0m      "
        "                                        "
        "      \n      ─────────────────────      "
        "                                        "
        "       \n       \x1b[1m3\x1b[0m   \x1b[1m  1\x1b[0m  "
        "  1     4     6                         "
        "                      \n       \x1b[1m4\x1b["
        "0m   \x1b[1m  1\x1b[0m    2     5     7       "
        "                                        "
        "        \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_missing_last_index_name_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with a missing last index name."""
    # Here the second header row leads with "hey" and the trailing index
    # name is absent; alignment should still hold.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead th {\n       "
                        " text-align: right;\n    }\n</style>\n<tabl"
                        'e border="1" class="dataframe">\n  <thead'
                        '>\n    <tr style="text-align: right;">\n  '
                        "    <th></th>\n      <th></th>\n      <th>"
                        "a</th>\n      <th>b</th>\n      <th>c</th>"
                        "\n    </tr>\n    <tr>\n      <th>hey</th>\n "
                        "     <th></th>\n      <th></th>\n      <th"
                        "></th>\n      <th></th>\n    </tr>\n  </the"
                        "ad>\n  <tbody>\n    <tr>\n      <th>3</th>\n"
                        "      <th>1</th>\n      <td>1</td>\n      "
                        "<td>4</td>\n      <td>6</td>\n    </tr>\n  "
                        "  <tr>\n      <th>4</th>\n      <th>1</th>"
                        "\n      <td>2</td>\n      <td>5</td>\n     "
                        " <td>7</td>\n    </tr>\n  </tbody>\n</table"
                        ">\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the saved HTML, then the table with
    # "hey" occupying the leading index-name position.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=59302;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view "
        "HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                               \n        "
        "                                        "
        "                                 \n\x1b[38;5;247"
        "m[2]:\x1b[0m   \x1b[1m   \x1b[0m   \x1b[1m \x1b[0m   \x1b["
        "1ma\x1b[0m   \x1b[1mb\x1b[0m   \x1b[1mc\x1b[0m        "
        "                                        "
        "    \n       \x1b[1mhey\x1b[0m   \x1b[1m \x1b[0m   \x1b"
        "[1m \x1b[0m   \x1b[1m \x1b[0m   \x1b[1m \x1b[0m       "
        "                                        "
        "     \n      ─────────────────────       "
        "                                        "
        "      \n       \x1b[1m  3\x1b[0m   \x1b[1m1\x1b[0m   "
        " 1     4     6                          "
        "                     \n       \x1b[1m  4\x1b"
        "[0m   \x1b[1m1\x1b[0m    2     5     7        "
        "                                        "
        "       \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_plain_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame in a plain style."""
    # With plain=True the DataFrame should come out as the text/plain
    # representation (no box drawing, no bold headers), though the HTML
    # hyperlink is still emitted.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th></th>\n      <th></th>"
                        "\n      <th>lorep</th>\n      <th colspan="
                        '"2" halign="left">hey</th>\n      <th>bye'
                        "</th>\n    </tr>\n    <tr>\n      <th></th>"
                        "\n      <th></th>\n      <th>ipsum</th>\n  "
                        "    <th>hi</th>\n      <th>very_long_word"
                        "</th>\n      <th>hi</th>\n    </tr>\n    <t"
                        "r>\n      <th>first</th>\n      <th>second"
                        "</th>\n      <th>third</th>\n      <th></t"
                        "h>\n      <th></th>\n      <th></th>\n    <"
                        "/tr>\n  </thead>\n  <tbody>\n    <tr>\n     "
                        ' <th rowspan="3" valign="top">bar</th>\n '
                        '     <th rowspan="2" valign="top">one</t'
                        "h>\n      <th>1</th>\n      <td>1</td>\n   "
                        "   <td>2</td>\n      <td>4</td>\n    </tr>"
                        "\n    <tr>\n      <th>10</th>\n      <td>3<"
                        "/td>\n      <td>4</td>\n      <td>-1</td>\n"
                        "    </tr>\n    <tr>\n      <th>three</th>\n"
                        "      <th>3</th>\n      <td>3</td>\n      "
                        "<td>4</td>\n      <td>-1</td>\n    </tr>\n "
                        "   <tr>\n      <th>foo</th>\n      <th>one"
                        "</th>\n      <th>1</th>\n      <td>3</td>\n"
                        "      <td>4</td>\n      <td>-1</td>\n    <"
                        "/tr>\n  </tbody>\n</table>\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    expected_output = (
        "                                        "
        "                                        "
        "\n                                       "
        "                                        "
        " \n\x1b]8;id=1627258290.675266-113809;file:/"
        f"/{tempfile_path}0.html\x1b\\"
        "\x1b[94m🌐 Click to view HTML\x1b[0m\x1b]8;;\x1b\\  "
        "                                        "
        "              \n                         "
        "                                        "
        "                \nlorep              hey  "
        "              bye                       "
        "                 \nipsum               hi"
        " very_long_word  hi                     "
        "   \nfirst second third                  "
        "                                        "
        "    \nbar   one    1       1            2"
        "   4                                    "
        "     \n             10      3            "
        "4  -1                                   "
        "      \n      three  3       3           "
        " 4  -1                                  "
        "       \nfoo   one    1       3          "
        "  4  -1                                 "
        "        \n"
    )
    output = rich_notebook_output(code_cell, plain=True)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_uneven_columns_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with uneven columns."""
    # The styled table's second header row has fewer cells than the first
    # (no leading index-name <th>); the renderer should pad rather than
    # misalign the columns.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
<style type="text/css">
\n</style
>\n
<table id="T_aba0a_">
\n
<thead>
\n
<tr>
\n
<th class="index_name level0">Model:</th>
\n
<th class="col_heading level0 col0" colspan="2">Decision Tree</th>
\n
<th class="col_heading level0 col2" colspan="2">Regression</th>
\n
</tr>
\n
<tr>
\n
<th class="col_heading level1 col0">Tumour</th>
\n
<th class="col_heading level1 col1">Non-Tumour</th>
\n
<th class="col_heading level1 col2">Tumour</th>
\n
<th class="col_heading level1 col3">Non-Tumour</th>
\n
</tr>
\n
<tr>
\n
<th class="index_name level0">Actual Label:</th>
\n
<th class="blank col0"> </th>
\n
<th class="blank col1"> </th>
\n
<th class="blank col2"> </th>
\n
<th class="blank col3"> </th>
\n
</tr>
\n
</thead>
\n
<tbody>
\n
<tr>
\n
<th id="T_aba0a_level0_row0" class="row_heading level0 row0">
Tumour (Positive)
</th>
\n
<td id="T_aba0a_row0_col0" class="data row0 col0">38</td>
\n
<td id="T_aba0a_row0_col1" class="data row0 col1">2</td>
\n
<td id="T_aba0a_row0_col2" class="data row0 col2">18</td>
\n
<td id="T_aba0a_row0_col3" class="data row0 col3">22</td>
\n
</tr>
\n
<tr>
\n
<th id="T_aba0a_level0_row1" class="row_heading level0 row1">
Non-Tumour (Negative)
</th>
\n
<td id="T_aba0a_row1_col0" class="data row1 col0">19</td>
\n
<td id="T_aba0a_row1_col1" class="data row1 col1">439</td>
\n
<td id="T_aba0a_row1_col2" class="data row1 col2">6</td>
\n
<td id="T_aba0a_row1_col3" class="data row1 col3">452</td>
\n
</tr>
\n
</tbody>
\n
</table>
\n
"""
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the saved HTML, then the table with
    # the uneven header rows padded into alignment.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=635975;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                \n       "
        "                                        "
        "                                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m       Model:\x1b[0m      "
        "       \x1b[1mDecision Tree\x1b[0m            "
        "   \x1b[1mRegression\x1b[0m            \n       \x1b["
        "1m             Tumour\x1b[0m   \x1b[1mNon-Tumour"
        "\x1b[0m   \x1b[1m    Tumour\x1b[0m   \x1b[1mNon-T"
        "umour\x1b[0m            \n       \x1b[1m        A"
        "ctual Label:\x1b[0m   \x1b[1m      \x1b[0m       "
        "\x1b[1m          \x1b[0m       \x1b[1m        \x1b["
        "0m   \x1b[1m          \x1b[0m  \n      ─────────"
        "────────────────────────────────────────"
        "─────────────────────────\n       \x1b[1mTum"
        "our (Positive)\x1b[0m       38             "
        " 2         18           22  \n           "
        "    \x1b[1m     Non-Tumour\x1b[0m       19"
        "          439          6          4"
        "52  \n       \x1b[1m     (Negative)\x1b[0m      "
        "                                        "
        "               \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_no_columns_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with missing columns."""
    # The <thead> is completely empty; the renderer should still draw the
    # body rows without any header band.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
<style type="text/css">
\n</style
>\n
<table id="T_aba0a_">
\n
<thead>
</thead>
\n
<tbody>
\n
<tr>
\n
<th id="T_aba0a_level0_row0" class="row_heading level0 row0">
Tumour (Positive)
</th>
\n
<td id="T_aba0a_row0_col0" class="data row0 col0">38</td>
\n
<td id="T_aba0a_row0_col1" class="data row0 col1">2</td>
\n
<td id="T_aba0a_row0_col2" class="data row0 col2">18</td>
\n
<td id="T_aba0a_row0_col3" class="data row0 col3">22</td>
\n
</tr>
\n
<tr>
\n
<th id="T_aba0a_level0_row1" class="row_heading level0 row1">
Non-Tumour (Negative)
</th>
\n
<td id="T_aba0a_row1_col0" class="data row1 col0">19</td>
\n
<td id="T_aba0a_row1_col1" class="data row1 col1">439</td>
\n
<td id="T_aba0a_row1_col2" class="data row1 col2">6</td>
\n
<td id="T_aba0a_row1_col3" class="data row1 col3">452</td>
\n
</tr>
\n
</tbody>
\n
</table>
\n
"""
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1       1            2"
                        "   4\n             10      3            4"
                        "  -1\n      three  3       3            4"
                        "  -1\nfoo   one    1       3            4"
                        "  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the saved HTML, then just the two
    # body rows (no header band).
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=380451;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                \n       "
        "                                        "
        "                                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1mTumour (Positive)    \x1b["
        "0m   38    2     18   22                "
        "             \n       \x1b[1mNon-Tumour (Ne"
        "gative)\x1b[0m   19   439   6    452       "
        "                      \n"
    )
    output = rich_notebook_output(code_cell)
    # OSC 8 hyperlink ids vary per run, so strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_uneven_data_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame whose first row is missing a cell.

    The HTML table's first row has only three <td> cells (col1-col3),
    exercising rendering of non-square table data.
    """
    # Notebook code cell whose execute_result carries both an HTML table
    # and a text/plain fallback representation.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
 <style type="text/css">
 \n</style
 >\n
 <table id="T_aba0a_">
 \n
 <thead>
 </thead>
 \n
 <tbody>
 \n
 <tr>
 \n
 <th id="T_aba0a_level0_row0" class="row_heading level0 row0">
 Tumour (Positive)
 </th>
 \n
 <td id="T_aba0a_row0_col1" class="data row0 col1">2</td>
 \n
 <td id="T_aba0a_row0_col2" class="data row0 col2">18</td>
 \n
 <td id="T_aba0a_row0_col3" class="data row0 col3">22</td>
 \n
 </tr>
 \n
 <tr>
 \n
 <th id="T_aba0a_level0_row1" class="row_heading level0 row1">
 Non-Tumour (Negative)
 </th>
 \n
 <td id="T_aba0a_row1_col0" class="data row1 col0">19</td>
 \n
 <td id="T_aba0a_row1_col1" class="data row1 col1">439</td>
 \n
 <td id="T_aba0a_row1_col2" class="data row1 col2">6</td>
 \n
 <td id="T_aba0a_row1_col3" class="data row1 col3">452</td>
 \n
 </tr>
 \n
 </tbody>
 \n
 </table>
 \n
 """
                    ),
                    "text/plain": (
                        "lorep              hey                 by"
                        "e\nipsum               hi very_long_word  "
                        "  hi\nfirst   second                      "
                        "    \nbar     one      1               1  "
                        "        2    4\n        10       3        "
                        "       4          -1\n        three    3  "
                        "             4          -1\nfoo     one   "
                        "   1               3          4   -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected terminal rendering: ANSI escape sequences, an OSC-8
    # hyperlink to the dumped HTML file (tempfile_path is interpolated
    # into the link target), and the extracted table rows.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=330589;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                                   \n    "
        "                                        "
        "                                    \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1mTumour (Positive)    \x1b["
        "0m   2     18    22                     "
        "                 \n       \x1b[1mNon-Tumour (Ne"
        "gative)\x1b[0m   19   439   6     452      "
        "                          \n"
    )
    # Link ids are randomly generated, so both strings are normalized
    # before comparison.
    output = rich_notebook_output(code_cell)
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_uneven_index_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with uneven index names.

    The HTML table's first row has no <th> row-heading cell while the
    second row does, exercising rendering with a partially missing index.
    """
    # Notebook code cell with an HTML table plus a text/plain fallback.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
 <style type="text/css">
 \n</style
 >\n
 <table id="T_aba0a_">
 \n
 <thead>
 </thead>
 \n
 <tbody>
 \n
 <tr>
 \n
 <td id="T_aba0a_row0_col1" class="data row0 col1">2</td>
 \n
 <td id="T_aba0a_row0_col2" class="data row0 col2">18</td>
 \n
 <td id="T_aba0a_row0_col3" class="data row0 col3">22</td>
 \n
 </tr>
 \n
 <tr>
 \n
 <th id="T_aba0a_level0_row1" class="row_heading level0 row1">
 Non-Tumour (Negative)
 </th>
 \n
 <td id="T_aba0a_row1_col0" class="data row1 col0">19</td>
 \n
 <td id="T_aba0a_row1_col1" class="data row1 col1">439</td>
 \n
 <td id="T_aba0a_row1_col2" class="data row1 col2">6</td>
 \n
 <td id="T_aba0a_row1_col3" class="data row1 col3">452</td>
 \n
 </tr>
 \n
 </tbody>
 \n
 </table>
 \n
 """
                    ),
                    "text/plain": (
                        "lorep              hey                 by"
                        "e\nipsum               hi very_long_word  "
                        "  hi\nfirst   second                      "
                        "    \nbar     one      1               1  "
                        "        2    4\n        10       3        "
                        "       4          -1\n        three    3  "
                        "             4          -1\nfoo     one   "
                        "   1               3          4   -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the dumped HTML file followed by
    # the table body; only the second row carries a bold index label.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=487619;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                                   \n    "
        "                                        "
        "                                    \n\x1b[38;5;24"
        "7m[2]:\x1b[0m                           2 "
        "    18    22                            "
        "          \n       \x1b[1mNon-Tumour (Negative)\x1b"
        "[0m   19   439   6     452              "
        "                  \n"
    )
    # Normalize the randomly generated OSC-8 link ids before comparing.
    output = rich_notebook_output(code_cell)
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_empty_html_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a blank output when given an empty table.

    The HTML table has an empty <thead> and <tbody>, so only the
    hyperlink to the dumped HTML file and blank padding are rendered.
    """
    # Notebook code cell with an empty HTML table and a text/plain fallback.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
 <style type="text/css">
 \n</style
 >\n
 <table id="T_aba0a_">
 \n
 <thead>
 </thead>
 \n
 <tbody>
 </tbody>
 \n
 </table>
 \n
 """
                    ),
                    "text/plain": (
                        "lorep              hey                 by"
                        "e\nipsum               hi very_long_word  "
                        "  hi\nfirst   second                      "
                        "    \nbar     one      1               1  "
                        "        2    4\n        10       3        "
                        "       4          -1\n        three    3  "
                        "             4          -1\nfoo     one   "
                        "   1               3          4   -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink line followed by blank rows only.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=316923;file://{tempfile_path}0.html"
        "\x1b\\\x1b[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                                   \n    "
        "                                        "
        "                                    \n\x1b[38;5;24"
        "7m[2]:\x1b[0m                              "
        "                                        "
        "       \n"
    )
    output = rich_notebook_output(code_cell)
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_stderr_stream(rich_notebook_output: RichOutput) -> None:
    """It renders the stderr stream.

    A stderr stream output is rendered on a highlighted (background
    color 174) panel to distinguish it from stdout.
    """
    # Code cell emitting a two-line UserWarning on the stderr stream.
    stderr_cell = {
        "cell_type": "code",
        "execution_count": 5,
        "id": "impressed-canadian",
        "metadata": {},
        "outputs": [
            {
                "name": "stderr",
                "output_type": "stream",
                "text": "<ipython-input-5-bc08279b5148>:2: UserWarning: Lorep\n"
                '  warnings.warn("Lorep")\n',
            }
        ],
        "source": "",
    }
    # Expected rendering: the warning text inside a colored stderr block.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[5]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b[48;5;174m    "
        "                                        "
        "                            \x1b[0m\n      "
        "      \x1b[48;5;174m \x1b[0m\x1b[38;5;237;48;5;174m<ip"
        "ython-input-5-bc08279b5148>:2: UserWarni"
        "ng: Lorep                  \x1b[0m\x1b[48;5;"
        "174m \x1b[0m\n      \x1b[48;5;174m \x1b[0m\x1b[38;5;2"
        '37;48;5;174m  warnings.warn("Lorep")    '
        "                                        "
        "  \x1b[0m\x1b[48;5;174m \x1b[0m\n      \x1b[48;5;17"
        "4m \x1b[0m\x1b[38;5;237;48;5;174m             "
        "                                        "
        "              \x1b[0m\x1b[48;5;174m \x1b[0m\n"
    )
    output = rich_notebook_output(stderr_cell)
    assert output == expected_output
def test_render_stream_stdout(rich_notebook_output: RichOutput) -> None:
    """It renders stdout.

    Unlike stderr, a stdout stream is rendered as plain text with no
    background highlighting.
    """
    # Code cell emitting a single line on the stdout stream.
    stdout_cell = {
        "cell_type": "code",
        "execution_count": 6,
        "id": "underlying-merit",
        "metadata": {},
        "outputs": [{"name": "stdout", "output_type": "stream", "text": "Lorep\n"}],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[6]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      Lorep          "
        "                                        "
        "                  \n"
    )
    output = rich_notebook_output(stdout_cell)
    assert output == expected_output
def test_render_error_traceback(rich_notebook_output: RichOutput) -> None:
    """It renders the traceback from an error.

    The cell's error output contains ANSI-colored traceback lines for a
    ZeroDivisionError; they are re-wrapped to the terminal width.
    """
    # Code cell with an error output carrying a pre-colored traceback.
    traceback_cell = {
        "cell_type": "code",
        "execution_count": 7,
        "id": "brave-sheep",
        "metadata": {},
        "outputs": [
            {
                "ename": "ZeroDivisionError",
                "evalue": "division by zero",
                "output_type": "error",
                "traceback": [
                    "\x1b[1;31m----------------------------------------"
                    "-----------------------------------\x1b[0m",
                    "\x1b[1;31mZeroDivisionError\x1b[0m                "
                    "         Traceback (most recent call last)",
                    "\x1b[1;32m<ipython-input-7-9e1622b385b6>\x1b[0m in"
                    " \x1b[0;36m<module>\x1b[1;34m\x1b[0m\n\x1b[1;32m--"
                    "--> 1\x1b[1;33m \x1b[1;36m1\x1b[0m\x1b[1;33m/\x1b["
                    "0m\x1b[1;36m0\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m"
                    "\x1b[0m\x1b[0m\n\x1b[0m",
                    "\x1b[1;31mZeroDivisionError\x1b[0m: division by zero",
                ],
            }
        ],
        "source": "",
    }
    # Expected rendering: the traceback wrapped and re-styled for the
    # terminal, ending with the exception name and message.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[7]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b[1;31m--------"
        "----------------------------------------"
        "-------------------------…\x1b[0m\n      \x1b[1"
        ";31mZeroDivisionError\x1b[0m               "
        "          Traceback (most recent call   "
        "  \n      last)                          "
        "                                        "
        "  \n      \x1b[1;32m<ipython-input-7-9e1622"
        "b385b6>\x1b[0m in \x1b[36m<module>\x1b[0m      "
        "                     \n      \x1b[1;32m--"
        "--> 1\x1b[0m\x1b[1;33m \x1b[0m\x1b[1;36m1\x1b[0m\x1b[1;33m"
        "/\x1b[0m\x1b[1;36m0\x1b[0m                      "
        "                                        "
        "\n                                       "
        "                                        "
        " \n      \x1b[1;31mZeroDivisionError\x1b[0m: di"
        "vision by zero                          "
        "               \n"
    )
    output = rich_notebook_output(traceback_cell)
    assert output == expected_output
def test_render_error_traceback_no_hang(
    rich_notebook_output: RichOutput, expected_output: str
) -> None:
    """It renders the traceback from an error without hanging.

    Regression test: a long, heavily ANSI-escaped CalledProcessError
    traceback (from a failed %%bash cell) must render without the
    renderer hanging. The expected string is supplied by the
    ``expected_output`` fixture rather than inlined here.
    """
    # Cell output: a stderr stream line followed by the full
    # pre-colored CalledProcessError traceback from IPython.
    traceback_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "id": "allied-contrary",
        "metadata": {},
        "outputs": [
            {
                "name": "stderr",
                "output_type": "stream",
                "text": "bash: line 1: ech: command not found\n",
            },
            {
                "ename": "CalledProcessError",
                "evalue": "Command 'b'ech\\n'' returned non-zero exit status 127.",
                "output_type": "error",
                "traceback": [
                    "\x1b[1;31m----------------------------------------"
                    "-----------------------------------\x1b[0m",
                    "\x1b[1;31mCalledProcessError\x1b[0m              "
                    "            Traceback (most recent call last)",
                    "\x1b[1;32m<ipython-input-4-4fb31ecfb364>\x1b[0m in"
                    " \x1b[0;36m<module>\x1b[1;34m\x1b[0m\n\x1b[1;32m--"
                    "--> 1\x1b[1;33m \x1b[0mget_ipython\x1b[0m\x1b[1;33"
                    "m(\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m.\x1b[0m\x1b["
                    "0mrun_cell_magic\x1b[0m\x1b[1;33m(\x1b[0m\x1b[1;34"
                    "m'bash'\x1b[0m\x1b[1;33m,\x1b[0m \x1b[1;34m''\x1b["
                    "0m\x1b[1;33m,\x1b[0m \x1b[1;34m'ech\\n'\x1b[0m\x1b"
                    "[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[0m\n\x1b[0m",
                    "\x1b[1;32m~/.pyenv/versions/scratch/lib/python3.8/"
                    "site-packages/IPython/core/interactiveshell.py\x1b"
                    "[0m in \x1b[0;36mrun_cell_magic\x1b[1;34m(self, "
                    "magic_name, line, cell)\x1b[0m\n\x1b[0;32m   2389"
                    "\x1b[0m                 \x1b[1;32mwith\x1b[0m \x1b"
                    "[0mself\x1b[0m\x1b[1;33m.\x1b[0m\x1b[0mbuiltin_tra"
                    "p\x1b[0m\x1b[1;33m:\x1b[0m\x1b[1;33m\x1b[0m\x1b"
                    "[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m   2390\x1b[0m "
                    "                    \x1b[0margs\x1b[0m \x1b[1;33m=\x1b[0m"
                    " \x1b[1;33m(\x1b[0m\x1b[0mmagic_arg_s\x1b[0m\x1b"
                    "[1;33m,\x1b[0m \x1b[0mcell\x1b[0m\x1b[1;33m)\x1b"
                    "[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b"
                    "[1;32m-> 2391\x1b[1;33m                 \x1b"
                    "[0mresult\x1b[0m \x1b[1;33m=\x1b[0m \x1b[0mfn\x1b"
                    "[0m\x1b[1;33m(\x1b[0m\x1b[1;33m*\x1b[0m\x1b[0margs"
                    "\x1b[0m\x1b[1;33m,\x1b[0m \x1b[1;33m**\x1b[0m\x1b"
                    "[0mkwargs\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m\x1b"
                    "[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[0m\x1b[0;32m   "
                    "2392\x1b[0m                 \x1b[1;32mreturn\x1b[0m "
                    "\x1b[0mresult\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m"
                    "\x1b[0m\x1b[0m\n\x1b[0;32m   2393\x1b[0m \x1b"
                    "[1;33m\x1b[0m\x1b[0m\n",
                    "\x1b[1;32m~/.pyenv/versions/scratch/lib/python3.8/"
                    "site-packages/IPython/core/magics/script.py\x1b[0m"
                    " in \x1b[0;36mnamed_script_magic\x1b[1;34m(line,"
                    " cell)\x1b[0m\n\x1b[0;32m    140\x1b[0m            "
                    " \x1b[1;32melse\x1b[0m\x1b[1;33m:\x1b[0m\x1b"
                    "[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m"
                    "    141\x1b[0m                 \x1b[0mline\x1b[0m"
                    " \x1b[1;33m=\x1b[0m \x1b[0mscript\x1b[0m\x1b[1;33m"
                    "\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[1;32m--> 142"
                    "\x1b[1;33m             \x1b[1;32mreturn\x1b[0m"
                    " \x1b[0mself\x1b[0m\x1b[1;33m.\x1b[0m\x1b"
                    "[0mshebang\x1b[0m\x1b[1;33m(\x1b[0m\x1b[0mline\x1b"
                    "[0m\x1b[1;33m,\x1b[0m \x1b[0mcell\x1b[0m\x1b"
                    "[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[0m\n\x1b[0m\x1b[0;32m    143\x1b[0m         \x1b"
                    "[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m    144\x1b[0m "
                    "    \x1b[1;31m# write a basic docstring:\x1b[0m"
                    "\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b"
                    "[0m\x1b[0m\n",
                    "\x1b[1;32m<decorator-gen-103>\x1b[0m in \x1b[0;36m"
                    "shebang\x1b[1;34m(self, line, cell)\x1b[0m\n",
                    "\x1b[1;32m~/.pyenv/versions/scratch/lib/python3.8"
                    "/site-packages/IPython/core/magic.py\x1b[0m in "
                    "\x1b[0;36m<lambda>\x1b[1;34m(f, *a, **k)\x1b[0m\n"
                    "\x1b[0;32m    185\x1b[0m     \x1b[1;31m# but it's"
                    " overkill for just that one bit of state.\x1b[0m"
                    "\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b"
                    "[0m\x1b[0m\n\x1b[0;32m    186\x1b[0m     \x1b[1;32"
                    "mdef\x1b[0m \x1b[0mmagic_deco\x1b[0m\x1b[1;33m("
                    "\x1b[0m\x1b[0marg\x1b[0m\x1b[1;33m)\x1b[0m\x1b"
                    "[1;33m:\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[0m\n\x1b[1;32m--> 187\x1b[1;33m         \x1b"
                    "[0mcall\x1b[0m \x1b[1;33m=\x1b[0m \x1b[1;32mlambda"
                    "\x1b[0m \x1b[0mf\x1b[0m\x1b[1;33m,\x1b[0m \x1b"
                    "[1;33m*\x1b[0m\x1b[0ma\x1b[0m\x1b[1;33m,\x1b[0m "
                    "\x1b[1;33m**\x1b[0m\x1b[0mk\x1b[0m\x1b[1;33m:"
                    "\x1b[0m \x1b[0mf\x1b[0m\x1b[1;33m(\x1b[0m\x1b"
                    "[1;33m*\x1b[0m\x1b[0ma\x1b[0m\x1b[1;33m,\x1b[0m "
                    "\x1b[1;33m**\x1b[0m\x1b[0mk\x1b[0m\x1b[1;33m)\x1b"
                    "[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b"
                    "[0m\x1b[0;32m    188\x1b[0m \x1b[1;33m\x1b[0m\x1b"
                    "[0m\n\x1b[0;32m    189\x1b[0m     \x1b[1;32mif"
                    "\x1b[0m \x1b[0mcallable\x1b[0m\x1b[1;33m(\x1b[0m"
                    "\x1b[0marg\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m:\x1b"
                    "[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n",
                    "\x1b[1;32m~/.pyenv/versions/scratch/lib/python3.8"
                    "/site-packages/IPython/core/magics/script.py\x1b"
                    "[0m in \x1b[0;36mshebang\x1b[1;34m(self, line, "
                    "cell)\x1b[0m\n\x1b[0;32m    243\x1b[0m            "
                    " \x1b[0msys\x1b[0m\x1b[1;33m.\x1b[0m\x1b[0mstderr"
                    "\x1b[0m\x1b[1;33m.\x1b[0m\x1b[0mflush\x1b[0m\x1b"
                    "[1;33m(\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m    244\x1b[0m"
                    "         \x1b[1;32mif\x1b[0m \x1b[0margs\x1b[0m"
                    "\x1b[1;33m.\x1b[0m\x1b[0mraise_error\x1b[0m \x1b"
                    "[1;32mand\x1b[0m \x1b[0mp\x1b[0m\x1b[1;33m.\x1b[0m"
                    "\x1b[0mreturncode\x1b[0m\x1b[1;33m!=\x1b[0m\x1b"
                    "[1;36m0\x1b[0m\x1b[1;33m:\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[1;32m--> 245\x1b"
                    "[1;33m             \x1b[1;32mraise\x1b[0m \x1b[0m"
                    "CalledProcessError\x1b[0m\x1b[1;33m(\x1b[0m\x1b"
                    "[0mp\x1b[0m\x1b[1;33m.\x1b[0m\x1b[0mreturncode\x1b"
                    "[0m\x1b[1;33m,\x1b[0m \x1b[0mcell\x1b[0m\x1b[1;33m"
                    ",\x1b[0m \x1b[0moutput\x1b[0m\x1b[1;33m=\x1b[0m"
                    "\x1b[0mout\x1b[0m\x1b[1;33m,\x1b[0m \x1b[0mstderr"
                    "\x1b[0m\x1b[1;33m=\x1b[0m\x1b[0merr\x1b[0m\x1b"
                    "[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[0m\n\x1b[0m\x1b[0;32m    246\x1b[0m         \x1b"
                    "[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m    247\x1b[0m "
                    "    \x1b[1;32mdef\x1b[0m \x1b[0m_run_script\x1b[0m"
                    "\x1b[1;33m(\x1b[0m\x1b[0mself\x1b[0m\x1b[1;33m,"
                    "\x1b[0m \x1b[0mp\x1b[0m\x1b[1;33m,\x1b[0m \x1b"
                    "[0mcell\x1b[0m\x1b[1;33m,\x1b[0m \x1b[0mto_close"
                    "\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m:\x1b[0m\x1b"
                    "[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n",
                    "\x1b[1;31mCalledProcessError\x1b[0m: Command "
                    "'b'ech\\n'' returned non-zero exit status 127.",
                ],
            },
        ],
        "source": "%%bash\nech",
    }
    output = rich_notebook_output(traceback_cell)
    assert output == expected_output
def test_render_debugger_output(
    rich_notebook_output: RichOutput, expected_output: str
) -> None:
    """It renders the output from the debugger.

    The cell interleaves stdout stream outputs (ipdb frames and
    messages) with stdin stream outputs (the user's ipdb commands).
    The expected string is supplied by the ``expected_output`` fixture.
    """
    # A %debug session: ipdb prompt output, then ll / sticky / q input.
    debugger_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "id": "fa534da6-88ac-43bc-b00f-cc68ace69fb7",
        "metadata": {},
        "outputs": [
            {
                "name": "stdout",
                "output_type": "stream",
                "text": "> \x1b[1;32m<ipython-input-4-a2d401806d89>\x1b"
                "[0m(1)\x1b[0;36m<module>\x1b[1;34m()\x1b[0m\n\x1b"
                "[1;32m----> 1 \x1b[1;33m\x1b[0m_jupyterlab_variable"
                "inspector_dict_list\x1b[0m\x1b[1;33m(\x1b[0m\x1b"
                "[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b"
                "[0m\n\x1b[0m\n",
            },
            {"name": "stdin", "output_type": "stream", "text": "ipdb> ll\n"},
            {
                "name": "stdout",
                "output_type": "stream",
                "text": "\x1b[1;32m----> 1 \x1b[1;33m\x1b[0m_"
                "jupyterlab_variableinspector_dict_list\x1b[0m\x1b"
                "[1;33m(\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b"
                "[1;33m\x1b[0m\x1b[0m\n\x1b[0m\n",
            },
            {"name": "stdin", "output_type": "stream", "text": "ipdb> sticky\n"},
            {
                "name": "stdout",
                "output_type": "stream",
                "text": "*** NameError: name 'sticky' is not defined\n",
            },
            {"name": "stdin", "output_type": "stream", "text": "ipdb> q\n"},
        ],
        "source": "%debug",
    }
    output = rich_notebook_output(debugger_output_cell)
    assert output == expected_output
def test_render_result(rich_notebook_output: RichOutput) -> None:
    """It renders a result.

    A plain text/plain execute_result is rendered next to its
    execution-count label.
    """
    # Code cell whose execute_result is the plain text "3".
    output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "intense-middle",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "3"},
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[2]:\x1b[0m  "
        "3                                       "
        "                                  \n"
    )
    output = rich_notebook_output(output_cell)
    assert output == expected_output
def test_render_unknown_data_format(rich_notebook_output: RichOutput) -> None:
    """It passes on rendering an unknown data format.

    Only the (empty) source panel is rendered; the unrecognized
    ``unknown_format`` data key produces no output rows.
    """
    # Execute_result whose only data key is an unrecognized MIME type.
    output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "intense-middle",
        "metadata": {},
        "outputs": [
            {
                "data": {"unknown_format": "3"},
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n"
    )
    output = rich_notebook_output(output_cell)
    assert output == expected_output
def test_render_error_no_traceback(rich_notebook_output: RichOutput) -> None:
    """It skips rendering an error with no traceback.

    An error output with an empty ``traceback`` list renders only
    blank padding after the source panel.
    """
    # Error output with an empty traceback list.
    traceback_cell = {
        "cell_type": "code",
        "execution_count": 7,
        "id": "brave-sheep",
        "metadata": {},
        "outputs": [
            {
                "ename": "ZeroDivisionError",
                "evalue": "division by zero",
                "output_type": "error",
                "traceback": [],
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[7]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n                     "
        "                                        "
        "                   \n"
    )
    output = rich_notebook_output(traceback_cell)
    assert output == expected_output
def test_render_markdown_output(rich_notebook_output: RichOutput) -> None:
    """It renders a markdown output.

    The text/markdown data is rendered with bold/italic terminal
    styling, and the %%markdown cell source is syntax highlighted.
    """
    # Display_data output with markdown content and a text/plain fallback.
    markdown_output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/markdown": "**Lorep** _ipsum_\n",
                    "text/plain": "<IPython.core.display.Markdown object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "%%markdown\n**Lorep** _ipsum_",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │ \x1b[38;2;137;221;25"
        "5;49m%%\x1b[0m\x1b[38;2;187;128;179;49mmarkdow"
        "n\x1b[0m                                   "
        "                                │\n     │ \x1b[38"
        ";2;255;83;112;49m**Lorep**\x1b[0m\x1b[38;2;238"
        ";255;255;49m \x1b[0m\x1b[38;2;137;221;255;49m_"
        "ipsum_\x1b[0m                              "
        "                         │\n     ╰───────"
        "────────────────────────────────────────"
        "──────────────────────────╯\n            "
        "                                        "
        "                            \n      \x1b[1mL"
        "orep\x1b[0m \x1b[3mipsum\x1b[0m                  "
        "                                        "
        " \n"
    )
    output = rich_notebook_output(markdown_output_cell)
    assert output == expected_output
def test_render_unknown_display_data(rich_notebook_output: RichOutput) -> None:
    """It skips rendering an unknown data display type.

    Only the (empty) source panel appears; the unrecognized
    ``unknown_data_type`` key in the display_data produces nothing.
    """
    # Display_data output whose only data key is an unknown MIME type.
    unknown_display_data_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "unknown_data_type": "**Lorep** _ipsum_\n",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n"
    )
    output = rich_notebook_output(unknown_display_data_cell)
    assert output == expected_output
def test_render_json_output(rich_notebook_output: RichOutput) -> None:
    """It renders a JSON output.

    The application/json data is rendered as a syntax-highlighted,
    single-line JSON string with sorted keys.
    """
    # Execute_result with structured application/json data and a
    # text/plain fallback.
    json_output_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "behind-authentication",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "application/json": {"one": 1, "three": {"a": "b"}, "two": 2},
                    "text/plain": "<IPython.core.display.JSON object>",
                },
                "execution_count": 1,
                "metadata": {"application/json": {"expanded": False, "root": "root"}},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[1]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[1]:\x1b[0m  "
        "\x1b[38;2;137;221;255;49m{\x1b[0m\x1b[38;2;255;83"
        ';112;49m"one"\x1b[0m\x1b[38;2;137;221;255;49m:'
        "\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;24"
        "7;140;108;49m1\x1b[0m\x1b[38;2;137;221;255;49m"
        ",\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;2"
        '55;83;112;49m"three"\x1b[0m\x1b[38;2;137;221;2'
        "55;49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b["
        "38;2;137;221;255;49m{\x1b[0m\x1b[38;2;255;83;1"
        '12;49m"a"\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m'
        "\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;195;23"
        '2;141;49m"b"\x1b[0m\x1b[38;2;137;221;255;49m},'
        "\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;25"
        '5;83;112;49m"two"\x1b[0m\x1b[38;2;137;221;255;'
        "49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;"
        "2;247;140;108;49m2\x1b[0m\x1b[38;2;137;221;255"
        ";49m}\x1b[0m                               "
        "   \n"
    )
    output = rich_notebook_output(json_output_cell)
    assert output == expected_output
def test_render_latex_output(rich_notebook_output: RichOutput) -> None:
    """It renders LaTeX output.

    The text/latex display_data is converted to unicode math symbols
    (α, β, ϵ, μ, ∼) rather than shown as raw LaTeX source.
    """
    # Display_data with a multi-line LaTeX block and a text/plain fallback.
    latex_output_cell = {
        "cell_type": "code",
        "execution_count": 15,
        "id": "sapphire-harmony",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/latex": "$$\n\\alpha \\sim \\text{Normal}"
                    " \\\\\n\\beta \\sim \\text{Normal} \\\\\n\\epsilon"
                    " \\sim \\text{Half-Cauchy} \\\\\n\\mu = \\alpha +"
                    " X\\beta \\\\\ny \\sim \\text{Normal}(\\mu, \\epsilon)\n$$\n",
                    "text/plain": "<IPython.core.display.Latex object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "      ╭─────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[15]:\x1b[0m │               "
        "                                        "
        "               │\n      ╰────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n                     "
        "                                        "
        "                   \n                    "
        "                                        "
        "                    \n       α∼Normal    "
        "                                        "
        "                     \n       β∼Norma"
        "l                                       "
        "                      \n       ϵ∼Half"
        "-Cauchy                                 "
        "                       \n       μ = α"
        " + Xβ                                   "
        "                        \n       y ∼N"
        "ormal(μ, ϵ)                             "
        "                         \n           "
        "                                        "
        "                             \n        "
        "                                        "
        "                                \n"
    )
    output = rich_notebook_output(latex_output_cell)
    assert expected_output == output
def test_render_invalid_latex_output(rich_notebook_output: RichOutput) -> None:
    """It renders invalid LaTeX output.

    LaTeX that cannot be converted (an unknown ``\\sef`` command) is
    rendered with the plain-text portion kept and the bad command
    dropped rather than raising.
    """
    # Display_data whose text/latex uses an unknown LaTeX command.
    latex_output_cell = {
        "cell_type": "code",
        "execution_count": 15,
        "id": "sapphire-harmony",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/latex": r"garbledmess \sef{}",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "      ╭─────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[15]:\x1b[0m │               "
        "                                        "
        "               │\n      ╰────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n       garbledmess   "
        "                                        "
        "                   \n"
    )
    output = rich_notebook_output(latex_output_cell)
    assert expected_output == output
def test_render_latex_output_no_unicode(rich_notebook_output: RichOutput) -> None:
    """It does not render LaTeX output if unicode is False.

    With ``unicode=False`` the renderer falls back to the cell's
    text/plain representation instead of converting the LaTeX.
    """
    # Same LaTeX cell as the unicode test; only the renderer flag differs.
    latex_output_cell = {
        "cell_type": "code",
        "execution_count": 15,
        "id": "sapphire-harmony",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/latex": "$$\n\\alpha \\sim \\text{Normal}"
                    " \\\\\n\\beta \\sim \\text{Normal} \\\\\n\\epsilon"
                    " \\sim \\text{Half-Cauchy} \\\\\n\\mu = \\alpha +"
                    " X\\beta \\\\\ny \\sim \\text{Normal}(\\mu, \\epsilon)\n$$\n",
                    "text/plain": "<IPython.core.display.Latex object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "      ╭─────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[15]:\x1b[0m │               "
        "                                        "
        "               │\n      ╰────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n       <IPython.core."
        "display.Latex object>                   "
        "                   \n"
    )
    output = rich_notebook_output(latex_output_cell, unicode=False)
    assert expected_output == output
def test_render_text_display_data(rich_notebook_output: RichOutput) -> None:
    """It renders text display data.

    A display_data output with only a text/plain key is rendered as
    plain text with no execution-count label.
    """
    # Display_data carrying only plain text.
    text_display_data_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/plain": "Lorep ipsum",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      Lorep ipsum    "
        "                                        "
        "                  \n"
    )
    output = rich_notebook_output(text_display_data_cell)
    assert output == expected_output
def test_pdf_emoji_output(rich_notebook_output: RichOutput) -> None:
    """It renders an emoji for PDF output.

    With ``unicode=True`` (and no nerd font) an application/pdf output
    is represented by the 📄 emoji placeholder.
    """
    # Display_data carrying an (empty) application/pdf payload.
    pdf_output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "application/pdf": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      📄              "
        "                                        "
        "                 \n"
    )
    output = rich_notebook_output(pdf_output_cell, unicode=True)
    assert output == expected_output
def test_pdf_nerd_output(rich_notebook_output: RichOutput) -> None:
    """It renders a nerd font icon for PDF output.

    With ``nerd_font=True`` an application/pdf output is represented
    by the nerd-font PDF glyph (U+F1C1) placeholder.
    """
    # Display_data carrying an (empty) application/pdf payload.
    pdf_output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "application/pdf": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \uf1c1              "
        "                                        "
        "                  \n"
    )
    output = rich_notebook_output(pdf_output_cell, nerd_font=True)
    assert output == expected_output
def test_pdf_no_unicode_no_nerd(rich_notebook_output: RichOutput) -> None:
    """It does not render a PDF icon if no nerd font or unicode.

    With both ``nerd_font=False`` and ``unicode=False`` the PDF output
    is skipped entirely; only the source panel is rendered.
    """
    # Display_data carrying an (empty) application/pdf payload.
    pdf_output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "application/pdf": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n"
    )
    output = rich_notebook_output(pdf_output_cell, nerd_font=False, unicode=False)
    assert output == expected_output
def test_vega_output(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a hyperlink to a rendered Vega plot.

    A full Vega v5 bar-chart spec is written to a temp HTML file and
    represented in the terminal as a clickable OSC-8 hyperlink with a
    nerd-font icon and hint text.
    """
    # Display_data with a complete Vega v5 bar chart specification.
    vega_output_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vega.v5+json": {
                        "$schema": "https://vega.github.io/schema/vega/v5.0.json",
                        "axes": [
                            {"orient": "bottom", "scale": "xscale"},
                            {"orient": "left", "scale": "yscale"},
                        ],
                        "data": [
                            {
                                "name": "table",
                                "values": [
                                    {"amount": 28, "category": "A"},
                                    {"amount": 55, "category": "B"},
                                    {"amount": 43, "category": "C"},
                                    {"amount": 91, "category": "D"},
                                    {"amount": 81, "category": "E"},
                                    {"amount": 53, "category": "F"},
                                    {"amount": 19, "category": "G"},
                                    {"amount": 87, "category": "H"},
                                ],
                            }
                        ],
                        "height": 200,
                        "marks": [
                            {
                                "encode": {
                                    "enter": {
                                        "width": {"band": 1, "scale": "xscale"},
                                        "x": {"field": "category", "scale": "xscale"},
                                        "y": {"field": "amount", "scale": "yscale"},
                                        "y2": {"scale": "yscale", "value": 0},
                                    },
                                    "hover": {"fill": {"value": "red"}},
                                    "update": {"fill": {"value": "steelblue"}},
                                },
                                "from": {"data": "table"},
                                "type": "rect",
                            },
                            {
                                "encode": {
                                    "enter": {
                                        "align": {"value": "center"},
                                        "baseline": {"value": "bottom"},
                                        "fill": {"value": "#333"},
                                    },
                                    "update": {
                                        "fillOpacity": [
                                            {"test": "datum === tooltip", "value": 0},
                                            {"value": 1},
                                        ],
                                        "text": {"signal": "tooltip.amount"},
                                        "x": {
                                            "band": 0.5,
                                            "scale": "xscale",
                                            "signal": "tooltip.category",
                                        },
                                        "y": {
                                            "offset": -2,
                                            "scale": "yscale",
                                            "signal": "tooltip.amount",
                                        },
                                    },
                                },
                                "type": "text",
                            },
                        ],
                        "padding": 5,
                        "scales": [
                            {
                                "domain": {"data": "table", "field": "category"},
                                "name": "xscale",
                                "padding": 0.05,
                                "range": "width",
                                "round": True,
                                "type": "band",
                            },
                            {
                                "domain": {"data": "table", "field": "amount"},
                                "name": "yscale",
                                "nice": True,
                                "range": "height",
                            },
                        ],
                        "signals": [
                            {
                                "name": "tooltip",
                                "on": [
                                    {"events": "rect:mouseover", "update": "datum"},
                                    {"events": "rect:mouseout", "update": "{}"},
                                ],
                                "value": {},
                            }
                        ],
                        "width": 400,
                    },
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink to the generated chart HTML file;
    # link ids are stripped before comparison.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=16281369"
        f"58.012196-350876;file://{tempfile_path}0.html\x1b\\\x1b[94m\uf080"
        " Click to v"
        "iew Vega chart\x1b[0m\x1b]8;;\x1b\\              "
        "                                        "
        "   \n"
    )
    output = rich_notebook_output(
        vega_output_cell,
        nerd_font=True,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_invalid_vega_output(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a hyperlink to an invalid Vega plot.

    Even when the Vega JSON is not a valid spec, the renderer still
    writes it to an HTML file and emits the same clickable hyperlink.
    """
    # Display_data whose Vega payload is not a valid chart spec.
    vega_output_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vega.v5+json": {
                        "invalid": "no",
                    },
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=16281369"
        f"58.012196-350876;file://{tempfile_path}0.html\x1b\\\x1b[94m\uf080"
        " Click to v"
        "iew Vega chart\x1b[0m\x1b]8;;\x1b\\              "
        "                                        "
        "   \n"
    )
    output = rich_notebook_output(
        vega_output_cell,
        nerd_font=True,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_vegalite_output(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders a hyperlink to a rendered Vega-Lite plot.

    The output also carries an (empty) image/png fallback, so the
    expected string is adjusted via the ``adjust_for_fallback`` fixture.
    """
    # Display_data with a Vega-Lite v4 bar-chart spec plus a PNG fallback.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=304082;f"
        f"ile://{tempfile_path}0.h"
        "tml\x1b\\\x1b[94m\uf080 Click to view Vega chart\x1b[0m"
        "\x1b]8;;\x1b\\                                 "
        "                                     \n"
    )
    # Account for the extra line produced by the image/png fallback.
    adjusted_expected_output = adjust_for_fallback(expected_output, 1)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=True,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
    )
    assert remove_link_ids(output) == remove_link_ids(adjusted_expected_output)
def test_vegalite_output_no_hints(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders a hyperlink to a Vega plot without hints."""
    # Cell fixture: same Vega-Lite v4 bar chart used by the other Vega tests.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # With hide_hyperlink_hints=True the link carries only the nerd-font
    # icon — no "Click to view Vega chart" hint text after it.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=90200;fi"
        f"le://{tempfile_path}0.ht"
        "ml\x1b\\\x1b[94m\uf080 \x1b[0m\x1b]8;;\x1b\\                  "
        "                                        "
        "              \n"
    )
    # Splice in the image-fallback line the renderer appends after the link.
    adjusted_expected_output = adjust_for_fallback(expected_output, 1)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=True,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=True,
    )
    assert remove_link_ids(output) == remove_link_ids(adjusted_expected_output)
def test_vegalite_output_no_nerd_font(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders a hyperlink to a Vega plot without nerd fonts."""
    # Cell fixture: same Vega-Lite v4 bar chart used by the other Vega tests.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # With nerd_font=False the link falls back to the 📊 emoji while keeping
    # the hint text.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=2129;fil"
        f"e://{tempfile_path}0.htm"
        "l\x1b\\\x1b[94m📊 Click to view Vega chart\x1b[0m\x1b]"
        "8;;\x1b\\                                   "
        "            \n"
    )
    # Splice in the image-fallback line the renderer appends after the link.
    adjusted_expected_output = adjust_for_fallback(expected_output, 1)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
    )
    assert remove_link_ids(output) == remove_link_ids(adjusted_expected_output)
def test_vegalite_output_no_nerd_font_no_unicode(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a hyperlink to plot without nerd fonts or unicode."""
    # Cell fixture: same Vega-Lite v4 bar chart used by the other Vega tests.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # With both nerd fonts and unicode disabled the link text has no icon,
    # and the inline image-fallback line renders plain "Image" text (no
    # adjust_for_fallback fixture needed — it is written out literally here).
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=16281372"
        f"55.127551-234092;file://{tempfile_path}0.html\x1b\\\x1b[94mClick to vie"
        "w Vega chart\x1b[0m\x1b]8;;\x1b\\                 "
        "                                 \n"
        "                                        "
        "                                        \n      \x1b[38;2;187;134;252mImage                             "
        "                                        "
        "\x1b[0m\n"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=False,
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_vegalite_output_no_files(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders a message representing a Vega plot."""
    # Minimal fixture: an empty Vega-Lite payload is enough to trigger the
    # "Vega chart" placeholder when file writing is disabled.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {},
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # With files=False no hyperlink target exists, so only the 📊 emoji and
    # the plain "Vega chart" message appear.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      📊 Vega chart   "
        "                                        "
        "                  \n"
    )
    # Splice in the image-fallback line the renderer appends after the message.
    adjusted_expected_output = adjust_for_fallback(expected_output, 1)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=False,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=True,
    )
    # Verify no chart HTML was written: glob only yields existing files, so
    # the loop body (excluded from coverage) fails the test if any matched.
    tempfile_directory = tempfile_path.parent
    for file in tempfile_directory.glob(
        f"{tempfile_path.stem}*.html"
    ):  # pragma: no cover
        assert not file.exists()
    assert remove_link_ids(output) == remove_link_ids(adjusted_expected_output)
def test_write_vega_output(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    parse_link_filepath: Callable[[str], Path],
) -> None:
    """It writes the Vega plot to a file."""
    # Cell fixture: same Vega-Lite v4 bar chart used by the other Vega tests.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # The HTML page that should be written: CDN script tags for vega,
    # vega-lite, vega-embed, and justcharts, with the spec JSON inlined
    # inside a <vegachart> element.
    expected_contents = (
        '<html>\n<head>\n    <script src="https://c'
        'dn.jsdelivr.net/npm/vega@5"></script>\n  '
        '  <script src="https://cdn.jsdelivr.net/'
        'npm/vega-lite@5"></script>\n    <script s'
        'rc="https://cdn.jsdelivr.net/npm/vega-em'
        'bed@6"></script>\n    <script src="https:'
        "//cdn.jsdelivr.net/gh/koaning/justcharts"
        '/justcharts.js"></script>\n    <title>Veg'
        "a chart</title>\n</head>\n<body>\n    <vega"
        'chart style="width: 100%">\n        {"$sc'
        'hema": "https://vega.github.io/schema/ve'
        'ga-lite/v4.json", "data": {"values": [{"'
        'a": "A", "b": 28}, {"a": "B", "b": 55}, '
        '{"a": "C", "b": 43}, {"a": "D", "b": 91}'
        ', {"a": "E", "b": 81}, {"a": "F", "b": 5'
        '3}, {"a": "G", "b": 19}, {"a": "H", "b":'
        ' 87}, {"a": "I", "b": 52}]}, "descriptio'
        'n": "A simple bar chart with embedded da'
        'ta.", "encoding": {"x": {"field": "a", "'
        'type": "ordinal"}, "y": {"field": "b", "'
        'type": "quantitative"}}, "mark": "bar"}\n'
        "    </vegachart>\n</body>\n<html></html>\n<"
        "/html>"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=False,
    )
    # Recover the temp file's path from the hyperlink embedded in the
    # rendered output, then compare the file's contents on disk.
    tempfile_path = parse_link_filepath(output)
    file_contents = tempfile_path.read_text()
    assert file_contents == expected_contents
def test_vega_no_icon_no_message(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders subject text when no icons or messages are used."""
    # Cell fixture: same Vega-Lite v4 bar chart used by the other Vega tests.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # No icon (unicode/nerd_font off) and no hint text (hints hidden): the
    # hyperlink is labeled with just the subject, "Vega chart"; the plain
    # "Image" fallback line follows.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=16281373"
        f"35.10625-550844;file://{tempfile_path}0.html\x1b\\\x1b[94mVega"
        " chart\x1b[0"
        "m\x1b]8;;\x1b\\                                "
        "                                \n"
        "                                        "
        "                                        \n      \x1b[38;2;187;134;252mImage                             "
        "                                        "
        "\x1b[0m\n"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=True,
        unicode=False,
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_vega_no_hyperlink(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders the file path when no hyperlinks are allowed."""
    # Cell fixture: same Vega-Lite v4 bar chart used by the other Vega tests.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # With hyperlinks disabled the raw file URI is printed; pre-compute how
    # the renderer wraps it. The temp path length varies per platform/run,
    # so the wrapping must be derived rather than hard-coded.
    tempfile_text = f"📊 file://{tempfile_path}0.html"
    # 80-column output minus the 6-column cell indent.
    line_width = 80 - 6
    if line_width - 1 < len(tempfile_text) < line_width + 2:
        # Near-exact fit: break once at the first space (after the emoji)
        # so the URI itself stays on one line.
        first_line, second_line = tempfile_text.split(maxsplit=1)
        wrapped_file_path = "\n".join(
            (f"{'':>6}{first_line:<73}", f"{'':>6}{second_line:<74}")
        )
    else:
        # Otherwise hard-wrap into fixed-width slices; the first slice is one
        # column narrower because the 📊 emoji occupies two terminal cells.
        wrapped_file_path = "\n".join(
            [f"{'':>6}{tempfile_text[:line_width - 1]:<73}"]
            + [
                f"{'':>6}{tempfile_text[i: i + line_width]:<74}"
                for i in range(line_width - 1, len(tempfile_text), line_width)
            ]
        )
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        f"                  \n{wrapped_file_path}\n"
        f"{'':<80}\n"
    )
    # No extra fallback line is expected here (count 0).
    adjusted_expected_output = adjust_for_fallback(expected_output, 0)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=False,
        hide_hyperlink_hints=True,
        unicode=True,
    )
    # Trailing padding may differ; compare with trailing whitespace stripped.
    assert output.rstrip() == adjusted_expected_output.rstrip()
def test_vega_url(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    mocker: MockerFixture,
    parse_link_filepath: Callable[[str], Path],
) -> None:
    """It pulls the JSON data from the URL and writes to file."""
    # Stub out the network: httpx.get returns a canned Vega-Lite v5 spec
    # as its response text.
    mock = mocker.patch("httpx.get")
    mock.return_value.text = json.dumps(
        {
            "$schema": "https://vega.github.io/schema/vega-lite/v5.json",
            "description": "A simple bar chart with embedded data.",
            "data": {
                "values": [
                    {"a": "A", "b": 28},
                    {"a": "B", "b": 55},
                    {"a": "C", "b": 43},
                    {"a": "D", "b": 91},
                    {"a": "E", "b": 81},
                    {"a": "F", "b": 53},
                    {"a": "G", "b": 19},
                    {"a": "H", "b": 87},
                    {"a": "I", "b": 52},
                ]
            },
            "mark": "bar",
            "encoding": {
                "x": {"field": "a", "type": "nominal", "axis": {"labelAngle": 0}},
                "y": {"field": "b", "type": "quantitative"},
            },
        }
    )
    # Cell fixture: the Vega output is a URL string rather than an inline
    # spec, so the renderer must fetch it.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vega.v5+json": "https://raw.githubusercontent.com/"
                    "vega/vega/master/docs/examples/bar-chart.vg.json",
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected file contents: the standard chart HTML scaffold with the
    # fetched v5 spec inlined.
    expected_contents = (
        '<html>\n<head>\n    <script src="https://c'
        'dn.jsdelivr.net/npm/vega@5"></script>\n  '
        '  <script src="https://cdn.jsdelivr.net/'
        'npm/vega-lite@5"></script>\n    <script s'
        'rc="https://cdn.jsdelivr.net/npm/vega-em'
        'bed@6"></script>\n    <script src="https:'
        "//cdn.jsdelivr.net/gh/koaning/justcharts"
        '/justcharts.js"></script>\n    <title>Veg'
        "a chart</title>\n</head>\n<body>\n    <vega"
        'chart style="width: 100%">\n        {"$sc'
        'hema": "https://vega.github.io/schema/ve'
        'ga-lite/v5.json", "description": "A simp'
        'le bar chart with embedded data.", "data'
        '": {"values": [{"a": "A", "b": 28}, {"a"'
        ': "B", "b": 55}, {"a": "C", "b": 43}, {"'
        'a": "D", "b": 91}, {"a": "E", "b": 81}, '
        '{"a": "F", "b": 53}, {"a": "G", "b": 19}'
        ', {"a": "H", "b": 87}, {"a": "I", "b": 5'
        '2}]}, "mark": "bar", "encoding": {"x": {'
        '"field": "a", "type": "nominal", "axis":'
        ' {"labelAngle": 0}}, "y": {"field": "b",'
        ' "type": "quantitative"}}}\n    </vegacha'
        "rt>\n</body>\n<html></html>\n</html>"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=False,
    )
    # Recover the written temp file from the hyperlink in the output and
    # verify both the fetch target and the file contents.
    tempfile_path = parse_link_filepath(output)
    file_contents = tempfile_path.read_text()
    mock.assert_called_with(
        url="https://raw.githubusercontent.com"
        "/vega/vega/master/docs/examples/bar-chart.vg.json"
    )
    assert file_contents == expected_contents
def test_vega_url_request_error(
    rich_notebook_output: RichOutput,
    mocker: MockerFixture,
) -> None:
    """It falls back to rendering a message if there is a RequestError."""
    # Simulate a network failure: every httpx.get call raises RequestError.
    mocker.patch("httpx.get", side_effect=httpx.RequestError("Mock"))
    # Cell fixture: a Vega v5 output referencing a remote spec URL.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vega.v5+json": "https://raw.githubusercontent.com/"
                    "vega/vega/master/docs/examples/bar-chart.vg.json",
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # When the fetch fails no chart file can be written, so a plain
    # "Vega chart" message (no hyperlink) plus the "Image" fallback appears.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      Vega chart     "
        "                                        "
        "                   \n"
        "                                        "
        "                                        \n      \x1b[38;2;187;134;252mImage                             "
        "                                        "
        "\x1b[0m\n"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=False,
    )
    assert output == expected_output
def test_render_html(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders HTML output."""
    # Cell fixture: a display_data output with a small HTML document
    # containing bold ("Lorep") and italic ("Ipsum") markup.
    html_cell = {
        "cell_type": "code",
        "execution_count": 7,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": " <head>\n"
                    " <title>Example</title>\n </head>\n "
                    "<body>\n <p><strong>Lorep</strong> "
                    "<em>Ipsum</em> </p>\n </body>\n",
                    "text/plain": "<IPython.core.display.HTML object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected render: a hyperlink to the written HTML temp file, then the
    # HTML converted to terminal styling (bold Lorep, italic Ipsum).
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[7]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=16281375"
        f"06.111208-917276;file://{tempfile_path}0.html\x1b\\\x1b[94m🌐 Click to v"
        "iew HTML\x1b[0m\x1b]8;;\x1b\\                     "
        "                                \n       "
        "                                        "
        "                                 \n      "
        "\x1b[1mLorep\x1b[0m \x1b[3mIpsum\x1b[0m             "
        "                                        "
        "          \n"
    )
    output = rich_notebook_output(html_cell)
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_html_table(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders an HTML table."""
    # Cell fixture: an HTML <table> with a header row and two data rows.
    # NOTE(review): the <NAME> placeholders look like redacted sample names
    # (the classic company/contact demo table) — the expected text below
    # mirrors the same placeholders.
    html_cell = {
        "cell_type": "code",
        "execution_count": 7,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": """\
                        <table>
                            <tr>
                                <th>Company</th>
                                <th>Contact</th>
                                <th>Country</th>
                            </tr>
                            <tr>
                                <td><NAME></td>
                                <td><NAME></td>
                                <td>Germany</td>
                            </tr>
                            <tr>
                                <td>Centro comercial Moctezuma</td>
                                <td>Francisco Chang</td>
                                <td>Mexico</td>
                            </tr>
                        </table>
                """,
                    "text/plain": "<IPython.core.display.HTML object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected render: hyperlink to the HTML temp file, then the table drawn
    # with a bold header row, a rule, and word-wrapped cells ("Centro
    # comercial Moctezuma" wraps onto a second line).
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[7]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=58222;fi"
        f"le://{tempfile_path}0.ht"
        "ml\x1b\\\x1b[94m🌐 Click to view HTML\x1b[0m\x1b]8;;\x1b\\"
        "                                        "
        "             \n                          "
        "                                        "
        "              \n                         "
        "                                        "
        "               \n      \x1b[1mCompany\x1b[0m   "
        "                \x1b[1mContact\x1b[0m         "
        " \x1b[1mCountry\x1b[0m                        "
        "\n      ─────────────────────────────────"
        "────────────────────────────────────────"
        "─\n      <NAME>       Maria "
        "Anders     Germany                      "
        "  \n      Centro comercial          Franc"
        "isco Chang  Mexico                      "
        "  \n      Moctezuma                      "
        "                                        "
        "   \n                                    "
        "                                        "
        "    \n"
    )
    output = rich_notebook_output(html_cell)
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_unknown_data_type(rich_notebook_output: RichOutput) -> None:
    """It skips rendering an unknown output type."""
    # An execute_result whose only data key is not a recognized mime type;
    # the renderer should emit nothing for it beyond the empty source box.
    unrecognized_result = {
        "data": {"unkown_data_type": "3"},
        "execution_count": 2,
        "metadata": {},
        "output_type": "execute_result",
    }
    cell = {
        "cell_type": "code",
        "execution_count": 11,
        "id": "intense-middle",
        "metadata": {},
        "outputs": [unrecognized_result],
        "source": "",
    }
    # Only the bordered (empty) source box should appear in the output.
    expected_output = (
        "      ╭─────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[11]:\x1b[0m │                 "
        "                                        "
        "               │\n      ╰────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n"
    )
    rendered = rich_notebook_output(cell)
    assert rendered == expected_output
@pytest.mark.skipif(
    "terminedia" not in sys.modules,
    reason=SKIP_TERMINEDIA_REASON,
)
def test_render_block_image(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    disable_capture: ContextManager[_PluggyPlugin],
    expected_output: str,
) -> None:
    """It renders a block drawing of an image."""
    # Cell fixture: a matplotlib result — a text/plain execute_result plus a
    # display_data output carrying a base64-encoded PNG of the figure.
    # The full expected block-art rendering is supplied by the
    # expected_output fixture rather than inlined here.
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "<KEY>"
                    "<KEY>"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wD<KEY>lSBm3P+<KEY>"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "<KEY>"
                    "<KEY>"
                    "<KEY>"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51lz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # terminedia draws directly to the terminal, so pytest's output capture
    # must be disabled while rendering.
    with disable_capture:
        output = rich_notebook_output(image_cell, images=True, image_drawing="block")
    assert remove_link_ids(output) == expected_output
@pytest.mark.skipif(
    "terminedia" not in sys.modules,
    reason=SKIP_TERMINEDIA_REASON,
)
def test_render_invalid_block_image(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    disable_capture: ContextManager[_PluggyPlugin],
    tempfile_path: Path,
) -> None:
    """It renders a fallback when image is invalid."""
    # Cell fixture: the image/png payload is not valid base64/PNG data, so
    # block drawing must fail and fall back to the hyperlink + text form.
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    "image/png": "bad_image_data\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # Output capture must be off while terminedia attempts the drawing.
    with disable_capture:
        output = rich_notebook_output(image_cell, images=True, image_drawing="block")
    # Expected fallback: the execute_result text, a hyperlink to the saved
    # PNG, and the figure's text/plain caption — no block art.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[1]:\x1b[0m │                  "
        "                                        "
        "               │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[1]:\x1b[0m "
        "<AxesSubplot:>                          "
        "                                   \n    "
        "                                        "
        "                                    \n   "
        f"   \x1b]8;id=45753;file://{tempfile_path}0.png"
        "\x1b\\\x1b[94m🖼 Click to view"
        " Image\x1b[0m\x1b]8;;\x1b\\                       "
        "                             \n          "
        "                                        "
        "                              \n       \x1b["
        "38;2;187;134;252m<Figure size 432x288 wi"
        "th 1 Axes>                              "
        "          \x1b[0m\n"
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
@pytest.mark.skipif(
    "terminedia" not in sys.modules,
    reason=SKIP_TERMINEDIA_REASON,
)
def test_render_height_constrained_block_image(
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    make_notebook: Callable[[Optional[Dict[str, Any]]], NotebookNode],
    disable_capture: ContextManager[_PluggyPlugin],
    expected_output: str,
) -> None:
    """It renders a height constrained block drawing of an image.

    The notebook is printed to a console with a fixed 20-row height so
    the block drawing must be scaled to fit; the result is compared
    against the snapshot supplied by the ``expected_output`` fixture.
    """
    # A code cell with a plain-text execute_result followed by a PNG
    # display_data output (a base64-encoded Matplotlib figure).
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    # NOTE(review): the "<KEY>" tokens in this payload look
                    # like redacted base64 — the PNG may not decode; confirm
                    # against the original fixture data.
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "<KEY>"
                    "<KEY>"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "<KEY>"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51lz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    notebook_node = make_notebook(image_cell)
    # Render with images enabled using the block (half-cell) drawer.
    rendered_notebook = notebook.Notebook(
        notebook_node,
        images=True,
        image_drawing="block",
    )
    with disable_capture:
        # A fixed 80x20 truecolor terminal forces the height constraint.
        con = console.Console(
            file=io.StringIO(),
            width=80,
            height=20,
            color_system="truecolor",
            legacy_windows=False,
            force_terminal=True,
        )
        con.print(rendered_notebook)
        output = con.file.getvalue()  # type: ignore[attr-defined]
    assert remove_link_ids(output) == expected_output
def test_render_image_link(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    disable_capture: ContextManager[_PluggyPlugin],
) -> None:
    """It renders a link to an image.

    With ``images=False`` the PNG is not drawn inline; it is written to
    a temp file and only a clickable hyperlink placeholder is rendered.
    """
    # A code cell with a plain-text execute_result followed by a PNG
    # display_data output (a base64-encoded Matplotlib figure).
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    # NOTE(review): the "<KEY>" token in this payload looks
                    # like redacted base64 — the PNG may not decode; confirm
                    # against the original fixture data.
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "<KEY>iLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYuOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51nz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # images=False: the drawing is skipped and a hyperlink is emitted.
    with disable_capture:
        output = rich_notebook_output(image_cell, images=False)
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[1]:\x1b[0m │        "
        "                                        "
        "                         │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[1]:\x1b[0m  "
        "<AxesSubplot:>                          "
        "                              \n         "
        "                                        "
        "                               \n        "
        f"      \x1b]8;id=42532;file://{tempfile_path}0.png"
        "\x1b\\\x1b[94m🖼 Click to view"
        " Image\x1b[0m\x1b]8;;\x1b\\                      "
        "                        \n               "
        "                                        "
        "                         \n          <F"
        "igure size 432x288 with 1 Axes>         "
        "                                       \n"
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
# TODO(review): function name has a typo ("charater" → "character");
# renaming is safe for pytest discovery but changes the reported test id.
def test_charater_drawing(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    expected_output: str,
) -> None:
    """It renders a character drawing of an image.

    The rendered output is compared against the snapshot supplied by
    the ``expected_output`` fixture.
    """
    # A code cell with a plain-text execute_result followed by a PNG
    # display_data output (a base64-encoded Matplotlib figure).
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    # NOTE(review): the "<KEY>" tokens in this payload look
                    # like redacted base64 — the PNG may not decode; confirm
                    # against the original fixture data.
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "<KEY>TPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "<KEY>"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51nz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # files=False: no temp file link; the image is drawn with characters.
    output = rich_notebook_output(
        image_cell, images=True, image_drawing="character", files=False
    )
    assert output == expected_output
def test_braille_drawing(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    expected_output: str,
) -> None:
    """It renders a braille drawing of an image.

    (Docstring previously said "block drawing"; this test exercises
    ``image_drawing="braille"``.)  The rendered output is compared
    against the snapshot supplied by the ``expected_output`` fixture.
    """
    # A code cell with a plain-text execute_result followed by a PNG
    # display_data output (a base64-encoded Matplotlib figure).
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    # NOTE(review): the "<KEY>" tokens in this payload look
                    # like redacted base64 — the PNG may not decode; confirm
                    # against the original fixture data.
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "<KEY>"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51nz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "<KEY>sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "<KEY>mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/<KEY>c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # files=False: no temp file link; the image is drawn in braille dots.
    output = rich_notebook_output(
        image_cell, images=True, image_drawing="braille", files=False
    )
    assert output == expected_output
def test_invalid_image_drawing(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
) -> None:
    """It falls back to text when failing to draw image."""
    # "ib45" is not decodable image data, so the "character" drawer
    # cannot render it and the output should fall back to the
    # text/plain representation plus an image placeholder.
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    "image/png": "ib45",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    output = rich_notebook_output(
        image_cell, images=True, image_drawing="character", files=False
    )
    # Expected: cell frame, text repr, "🖼 Image" placeholder, and the
    # styled figure caption — no drawn image.
    expected_output = (
        " ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[1]:\x1b[0m │ "
        " "
        " │\n ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n "
        " "
        " \n\x1b[38;5;247m[1]:\x1b[0m "
        "<AxesSubplot:> "
        " \n "
        " "
        " \n "
        " 🖼 Image "
        " \n "
        " "
        " \n "
        " \x1b[38;2;187;134;252m<Figure size 432x"
        "288 with 1 Axes> "
        " \x1b[0m\n"
    )
    assert output == expected_output
def test_render_image_link_no_image(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    disable_capture: ContextManager[_PluggyPlugin],
) -> None:
    """It renders a link to an image."""
    # With images=False the PNG payload is written to a temp file and
    # only a clickable hyperlink is rendered, not the image itself.
    # NOTE(review): "<KEY>" fragments below are dataset redactions of
    # the original base64 payload — confirm against upstream before use.
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "<KEY>kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "<KEY>"
                    "<KEY>"
                    "<KEY>"
                    "<KEY>kFLaHRHb51lz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/<KEY>m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # disable_capture releases pytest's capture so the temp file write
    # and link rendering behave as in a real terminal session.
    with disable_capture:
        output = rich_notebook_output(image_cell, images=False)
    expected_output = (
        " ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[1]:\x1b[0m │ "
        " "
        " │\n ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n "
        " "
        " \n\x1b[38;5;247m[1]:\x1b[0m "
        "<AxesSubplot:> "
        " \n "
        " "
        " \n "
        f" \x1b]8;id=236660;file://{tempfile_path}0.png"
        "\x1b\\\x1b[94m🖼 Click to vie"
        "w Image\x1b[0m\x1b]8;;\x1b\\ "
        " \n "
        " "
        " \n <"
        "Figure size 432x288 with 1 Axes> "
        " \n"
    )
    # Hyperlink ids are random per run; strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_svg_link(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a link to an image."""
    # An SVG output is written to a temp file and rendered as a
    # clickable hyperlink followed by the text/plain representation.
    svg_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "1a2e22b6-ae2b-4c0c-a8db-ec0c0ea1227b",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "image/svg+xml": (
                        '<?xml version="1.0" encoding="UTF-8" sta'
                        'ndalone="no"?>\n<!DOCTYPE svg PUBLIC "-//'
                        'W3C//DTD SVG 1.1//EN"\n "http://www.w3.or'
                        'g/Graphics/SVG/1.1/DTD/svg11.dtd">\n<!-- '
                        "Generated by graphviz version 2.47.2 (20"
                        "210527.0053)\n -->\n<!-- Pages: 1 -->\n<svg"
                        ' width="514pt" height="44pt"\n viewBox="0'
                        '.00 0.00 513.94 44.00" xmlns="http://www'
                        '.w3.org/2000/svg" xmlns:xlink="http://ww'
                        'w.w3.org/1999/xlink">\n<g id="graph0" cla'
                        'ss="graph" transform="scale(1 1) rotate('
                        '0) translate(4 40)">\n<polygon fill="whit'
                        'e" stroke="transparent" points="-4,4 -4,'
                        '-40 509.94,-40 509.94,4 -4,4"/>\n<!-- A -'
                        '->\n<g id="node1" class="node">\n<title>A<'
                        '/title>\n<ellipse fill="none" stroke="bla'
                        'ck" cx="53.95" cy="-18" rx="53.89" ry="1'
                        '8"/>\n<text text-anchor="middle" x="53.95'
                        '" y="-14.3" font-family="Times,serif" fo'
                        'nt-size="14.00">King Arthur</text>\n</g>\n'
                        '<!-- B -->\n<g id="node2" class="node">\n<'
                        'title>B</title>\n<ellipse fill="none" str'
                        'oke="black" cx="215.95" cy="-18" rx="90.'
                        '18" ry="18"/>\n<text text-anchor="middle"'
                        ' x="215.95" y="-14.3" font-family="Times'
                        ',serif" font-size="14.00">Sir Bedevere t'
                        'he Wise</text>\n</g>\n<!-- L -->\n<g id="no'
                        'de3" class="node">\n<title>L</title>\n<ell'
                        'ipse fill="none" stroke="black" cx="414.'
                        '95" cy="-18" rx="90.98" ry="18"/>\n<text '
                        'text-anchor="middle" x="414.95" y="-14.3'
                        '" font-family="Times,serif" font-size="1'
                        '4.00">Sir Lancelot the Brave</text>\n</g>'
                        "\n</g>\n</svg>\n"
                    ),
                    "text/plain": "<graphviz.dot.Digraph at 0x108eb9430>",
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    output = rich_notebook_output(svg_cell)
    expected_output = (
        " ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │ "
        " "
        " │\n ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n "
        " "
        " \n\x1b[38;5;247m[2]:\x1b[0m "
        f"\x1b]8;id=1627259094.976956-618609;file://{tempfile_path}0.svg"
        "\x1b\\\x1b[9"
        "4m🖼 Click to view Image\x1b[0m\x1b]8;;\x1b\\ "
        " "
        " \n "
        " "
        " \n\x1b[38;5;247m[2]:\x1b[0m <graphviz."
        "dot.Digraph at 0x108eb9430> "
        " \n"
    )
    # Hyperlink ids are random per run; strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_unknown_language() -> None:
    """It sets the language to Python when it cannot be parsed."""
    # A notebook with empty metadata carries no language_info, so the
    # renderer must fall back to its default of "python".
    notebook_node = nbformat.from_dict(
        {
            "cells": [],
            "metadata": {},
            "nbformat": 4,
            "nbformat_minor": 5,
        }
    )
    rendered_notebook = notebook.Notebook(notebook_node)
    expected_output = "python"
    # Fix: local variable was previously misspelled as "acutal_output".
    actual_output = rendered_notebook.language
    assert actual_output == expected_output
def test_skip_unknown_cell_type(rich_notebook_output: RichOutput) -> None:
    """It skips rendering a cell of an unrecognized type."""
    # A cell whose type the renderer does not know should produce no
    # output at all.
    unknown_cell = {
        "cell_type": "unknown",
        "id": "academic-bride",
        "metadata": {},
        "source": "### Lorep ipsum\n\n**dolor** _sit_ `amet`",
    }
    assert rich_notebook_output(unknown_cell) == ""
def test_skip_no_cell_type(rich_notebook_output: RichOutput) -> None:
    """It skips rendering a cell if there is no cell type."""
    # NOTE(review): "metadata" below is a set literal, not a dict —
    # presumably intentional malformed input; confirm before changing.
    typeless_cell = {
        "metadata": {"no"},
        "source": "### Lorep ipsum\n\n**dolor** _sit_ `amet`",
    }
    assert rich_notebook_output(typeless_cell) == ""
def test_image_link_not_image(
    rich_notebook_output: RichOutput,
    mocker: MockerFixture,
    remove_link_ids: Callable[[str], str],
) -> None:
    """It falls back to skipping drawing if content is not an image."""
    # Patch the HTTP fetch so the "image" URL returns non-image bytes;
    # the renderer should then emit only the hyperlink, not a drawing.
    mock = mocker.patch("httpx.get")
    mock.return_value.content = "Bad image"
    markdown_cell = {
        "cell_type": "markdown",
        "id": "academic-bride",
        "metadata": {},
        "source": "",
    }
    output = rich_notebook_output(markdown_cell, image_drawing="character")
    expected_output = (
        " \x1b]8;id=246597;https://github.com/paw-l"
        "u/nbpreview/tests/assets/outline_article"
        "_white_48dp.png\x1b\\\x1b[94m🌐 Click to view Az"
        "ores\x1b[0m\x1b]8;;\x1b\\ "
        " \n "
        " "
        " \n"
    )
    # Hyperlink ids are random per run; strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_relative_dir_markdown_link(
    rich_notebook_output: RichOutput,
    remove_link_ids: Callable[[str], str],
) -> None:
    """It adds a path prefix to the image hyperlink."""
    markdown_cell = {
        "cell_type": "markdown",
        "id": "academic-bride",
        "metadata": {},
        "source": "",
    }
    # A relative image reference should be resolved against this
    # directory when constructing the file:// hyperlink.
    relative_dir = pathlib.Path("/", "Users", "test")
    output = rich_notebook_output(
        markdown_cell, relative_dir=relative_dir, hyperlinks=True
    )
    expected_output = (
        " \x1b]8;id=835649;"
        f"file://{relative_dir.resolve() / 'image.png'}\x1b\\\x1b"
        "[94m🖼 Click to view Test image\x1b[0m\x1b]8;;\x1b"
        "\\ "
        " \n "
        " "
        " \n"
    )
    # Hyperlink ids are random per run; strip them before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_notebook_code_line_numbers(rich_notebook_output: RichOutput) -> None:
    """It renders a code cell with line numbers."""
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n return x + y",
    }
    output = rich_notebook_output(code_cell, line_numbers=True)
    # Expected: a two-line source rendered with dim gutter numbers
    # (\x1b[2m1 \x1b[0m, \x1b[2m2 \x1b[0m) inside the cell frame.
    expected_output = (
        " ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │ \x1b[2m1 \x1b[0m\x1b[38;"
        "2;187;128;179;49mdef\x1b[0m\x1b[38;2;238;255;2"
        "55;49m \x1b[0m\x1b[38;2;130;170;255;49mfoo\x1b[0m"
        "\x1b[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;238;25"
        "5;255;49mx\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0"
        "m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;130;1"
        "70;255;49mfloat\x1b[0m\x1b[38;2;137;221;255;49"
        "m,\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
        "238;255;255;49my\x1b[0m\x1b[38;2;137;221;255;4"
        "9m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2"
        ";130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221;"
        "255;49m)\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
        "[38;2;137;221;255;49m-\x1b[0m\x1b[38;2;137;221"
        ";255;49m>\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m"
        "\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;13"
        "7;221;255;49m:\x1b[0m "
        " │\n │ \x1b[2m2 \x1b[0m\x1b[38;2;238"
        ";255;255;49m \x1b[0m\x1b[38;2;187;128;179;4"
        "9mreturn\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
        "[38;2;238;255;255;49mx\x1b[0m\x1b[38;2;238;255"
        ";255;49m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b[0m"
        "\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;238;25"
        "5;255;49my\x1b[0m "
        " │\n ╰──────"
        "────────────────────────────────────────"
        "───────────────────────────╯\n"
    )
    assert output == expected_output
def test_notebook_line_numbers_magic_code_cell(
    rich_notebook_output: RichOutput,
) -> None:
    """It renders line numbers in a code cell with language magic."""
    # The %%bash magic switches highlighting to bash while the line
    # number gutter is still rendered.
    code_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "%%bash\necho 'lorep'",
    }
    expected_output = (
        " ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │ \x1b[2m1 \x1b[0m\x1b[38;"
        "2;137;221;255;49m%%\x1b[0m\x1b[38;2;187;128;17"
        "9;49mbash\x1b[0m "
        " │\n "
        " │ \x1b[2m2 \x1b[0m\x1b[38;2;130;170;255;49mec"
        "ho\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
        "195;232;141;49m'lorep'\x1b[0m "
        " "
        " │\n ╰──────────────────────────────"
        "────────────────────────────────────────"
        "───╯\n"
    )
    output = rich_notebook_output(code_cell, line_numbers=True)
    assert output == expected_output
def test_code_wrap(rich_notebook_output: RichOutput) -> None:
    """It wraps code when narrow."""
    # A single long source line must wrap onto a second rendered line
    # inside the cell frame when code_wrap is enabled.
    code_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "non_monkeys ="
        ' [animal for animal in get_animals("mamals") if animal != "monkey"]',
    }
    expected_output = (
        " ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │ \x1b[38;2;238;255;25"
        "5;49mnon_monkeys\x1b[0m\x1b[38;2;238;255;255;4"
        "9m \x1b[0m\x1b[38;2;137;221;255;49m=\x1b[0m\x1b[38;2"
        ";238;255;255;49m \x1b[0m\x1b[38;2;137;221;255;"
        "49m[\x1b[0m\x1b[38;2;238;255;255;49manimal\x1b[0m"
        "\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;187;12"
        "8;179;49mfor\x1b[0m\x1b[38;2;238;255;255;49m \x1b"
        "[0m\x1b[38;2;238;255;255;49manimal\x1b[0m\x1b[38;"
        "2;238;255;255;49m \x1b[0m\x1b[3;38;2;137;221;2"
        "55;49min\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
        "[38;2;238;255;255;49mget_animals\x1b[0m\x1b[38"
        ";2;137;221;255;49m(\x1b[0m\x1b[38;2;195;232;14"
        '1;49m"\x1b[0m\x1b[38;2;195;232;141;49mmamals\x1b['
        '0m\x1b[38;2;195;232;141;49m"\x1b[0m\x1b[38;2;137;'
        "221;255;49m)\x1b[0m\x1b[38;2;238;255;255;49m \x1b"
        "[0m\x1b[38;2;187;128;179;49mif\x1b[0m\x1b[38;2;23"
        "8;255;255;49m \x1b[0m\x1b[38;2;238;255;255;49m"
        "animal\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[3"
        "8;2;137;221;255;49m!=\x1b[0m\x1b[38;2;238;255;"
        "255;49m \x1b[0m │\n │ \x1b[38;2;195;232;141"
        ';49m"\x1b[0m\x1b[38;2;195;232;141;49mmonkey\x1b[0'
        'm\x1b[38;2;195;232;141;49m"\x1b[0m\x1b[38;2;137;2'
        "21;255;49m]\x1b[0m "
        " │\n"
        " ╰──────────────────────────────────"
        "───────────────────────────────────────╯"
        "\n"
    )
    output = rich_notebook_output(code_cell, code_wrap=True)
    assert output == expected_output
|
paw-lu/nbpreview | tests/unit/test_main.py | <reponame>paw-lu/nbpreview
"""Test cases for the __main__ module."""
import collections
import functools
import io
import itertools
import json
import operator
import os
import pathlib
import platform
import shlex
import sys
import tempfile
import textwrap
from pathlib import Path
from typing import (
IO,
Any,
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
Mapping,
Optional,
Protocol,
Union,
)
from unittest.mock import Mock
import nbformat
import pytest
import rich
from _pytest.monkeypatch import MonkeyPatch
from click import testing
from click.testing import CliRunner, Result
from nbformat.notebooknode import NotebookNode
from pytest_mock import MockerFixture
from rich import box, console, panel, style, text
import nbpreview
from nbpreview import __main__
from tests.unit import test_notebook
class RunCli(Protocol):
    """Structural type of the callable yielded by the run_cli fixture."""

    def __call__(
        self,
        cell: Optional[Dict[str, Any]] = None,
        args: Optional[Union[str, Iterable[str]]] = None,
        input: Optional[Union[bytes, str, IO[Any]]] = None,
        env: Optional[Mapping[str, str]] = None,
        catch_exceptions: bool = True,
        color: bool = False,
        **extra: Any,
    ) -> Result:  # pragma: no cover
        """Run the CLI against a notebook built from *cell*.

        Mirrors the signature of _run_cli inside the run_cli fixture;
        the body is never executed (Protocol stub).
        """
        ...
@pytest.fixture
def notebook_path() -> Path:
    """Return the path of the example test notebook."""
    # The notebook ships in the assets directory next to this module.
    assets_parent = pathlib.Path(__file__).parent
    return assets_parent / pathlib.Path("assets", "notebook.ipynb")
@pytest.fixture(autouse=True)
def patch_env(monkeypatch: MonkeyPatch) -> None:
    """Patch environmental variables that affect tests."""
    # Clear terminal- and nbpreview-related settings so the host
    # environment cannot leak into rendered output.
    environment_variables = (
        "TERM",
        "NO_COLOR",
        "PAGER",
        "NBPREVIEW_PLAIN",
        "NBPREVIEW_THEME",
        "NBPREVIEW_UNICODE",
        "NBPREVIEW_WIDTH",
    )
    for variable_name in environment_variables:
        monkeypatch.delenv(variable_name, raising=False)
@pytest.fixture
def runner() -> CliRunner:
    """Provide a Click runner for invoking command-line interfaces."""
    cli_runner = testing.CliRunner()
    return cli_runner
@pytest.fixture
def temp_file() -> Generator[Callable[[Optional[str]], str], None, None]:
    """Fixture yielding a function that creates a temporary file.

    Used in place of NamedTemporaryFile as a context manager because an
    open file created on Windows cannot be read through other handles.

    Yields:
        Callable[[Optional[str]], str]: Function creating a tempfile
            that is deleted at teardown.
    """
    handle = tempfile.NamedTemporaryFile(delete=False)
    path = pathlib.Path(handle.name)

    def _named_temp_file(text: Optional[str] = None) -> str:
        """Write *text* (if given) to the temp file and return its path.

        Args:
            text (Optional[str], optional): The text to fill the file
                with. Defaults to None, which leaves the file blank.

        Returns:
            str: The path of the temporary file.
        """
        if text is not None:
            path.write_text(text)
        # Close so the file can be reopened for reading on Windows.
        handle.close()
        return handle.name

    yield _named_temp_file
    path.unlink()
@pytest.fixture
def write_notebook(
    make_notebook: Callable[[Optional[Dict[str, Any]]], NotebookNode],
    temp_file: Callable[[Optional[str]], str],
) -> Callable[[Union[Dict[str, Any], None]], str]:
    """Fixture for generating notebook files."""

    def _write_notebook(cell: Union[Dict[str, Any], None]) -> str:
        """Serialize a notebook containing *cell* to disk.

        Args:
            cell (Union[Dict[str, Any], None]): The cell of the
                notebook to render.

        Returns:
            str: The path of the notebook file.
        """
        serialized = nbformat.writes(make_notebook(cell))
        return temp_file(serialized)

    return _write_notebook
@pytest.fixture
def run_cli(
    runner: CliRunner,
    write_notebook: Callable[[Union[Dict[str, Any], None]], str],
) -> RunCli:
    """Fixture for running the cli against a notebook file."""

    def _run_cli(
        cell: Optional[Dict[str, Any]] = None,
        args: Optional[Union[str, Iterable[str]]] = None,
        input: Optional[Union[bytes, str, IO[Any]]] = None,
        env: Optional[Mapping[str, str]] = None,
        catch_exceptions: bool = True,
        color: bool = False,
        **extra: Any,
    ) -> Result:
        r"""Runs the CLI against a notebook file.

        Args:
            cell (Optional[Dict[str, Any]], optional): The cell to add
                to the notebook file. Defaults to None.
            args (Optional[Union[str, Iterable[str]]]): The extra
                arguments to invoke. By default --width=80 and
                --unicode are included.
            input (Optional[Union[bytes, Text, IO[Any]]]): The input
                data. By default None.
            env (Optional[Mapping[str, str]]): The environmental
                overrides. By default None.
            catch_exceptions (bool): Whether to catch exceptions.
            color (bool): Whether the output should contain color codes.
            **extra (Any): Extra arguments to pass.

        Returns:
            Result: The result from running the CLI command against the
                notebook.
        """
        notebook_path = write_notebook(cell)
        # A single string of arguments is tokenized like a shell line.
        if isinstance(args, str):
            args = shlex.split(args)
        # Defaults pin width/theme/decoration so output is deterministic
        # across environments; the notebook path goes last.
        default_args = [
            "--decorated",
            "--unicode",
            "--width=80",
            "--theme=material",
            notebook_path,
        ]
        # Caller-supplied args come first so they can precede the path.
        full_args = [*args, *default_args] if args is not None else default_args
        result = runner.invoke(
            __main__.typer_click_object,
            args=full_args,
            input=input,
            env=env,
            catch_exceptions=catch_exceptions,
            color=color,
            **extra,
        )
        return result

    return _run_cli
@pytest.fixture
def mock_stdin_tty(mocker: MockerFixture) -> Iterator[Mock]:
    """Yield a mock that makes stdin report it is a TTY."""
    yield mocker.patch("nbpreview.__main__.stdin.isatty", return_value=True)
@pytest.fixture
def mock_stdout_tty(mocker: MockerFixture) -> Iterator[Mock]:
    """Yield a mock that makes stdout report it is a TTY."""
    yield mocker.patch("nbpreview.__main__.stdout.isatty", return_value=True)
@pytest.fixture
def cli_arg(
    runner: CliRunner,
    notebook_path: Path,
    mock_terminal: Mock,
    remove_link_ids: Callable[[str], str],
    mock_tempfile_file: Mock,
    mock_stdin_tty: Mock,
    mock_stdout_tty: Mock,
) -> Callable[..., str]:
    """Return function that applies arguments to cli."""

    def _cli_arg(
        *args: Union[str, None],
        truecolor: bool = True,
        paging: Union[bool, None] = False,
        material_theme: bool = True,
        images: bool = True,
        **kwargs: Union[str, None],
    ) -> str:
        """Apply given arguments to cli.

        Args:
            *args (Union[str, None]): The extra arguments to pass to the
                command.
            truecolor (bool): Whether to pass
                '--color-system=truecolor' option. By default True.
            paging (Union[bool, None]): Whether to pass '--paging' or
                '--no-paging' option. By default False, which
                corresponds to '--no-paging'.
            material_theme (bool): Whether to set the theme to
                'material'. By default True.
            images (bool): Whether to pass '--images'. By default True.
            **kwargs (Union[str, None]): Environmental variables to set.
                Will be uppercased.

        Returns:
            str: The output of the invoked command.
        """
        # Drop None placeholders so parametrized tests can skip args.
        cleaned_args = [arg for arg in args if arg is not None]
        upper_kwargs = {
            name.upper(): value for name, value in kwargs.items() if value is not None
        }
        # Flag order below is part of the asserted output; keep stable.
        cli_args = [os.fsdecode(notebook_path), *cleaned_args]
        if images:
            cli_args.append("--images")
        if material_theme:
            cli_args.append("--theme=material")
        if truecolor:
            cli_args.append("--color-system=truecolor")
        if paging is True:
            cli_args.append("--paging")
        elif paging is False:
            cli_args.append("--no-paging")
        result = runner.invoke(
            __main__.typer_click_object,
            args=cli_args,
            color=True,
            env=upper_kwargs,
        )
        # Hyperlink ids are random per run; normalize them.
        output = remove_link_ids(result.output)
        return output

    return _cli_arg
@pytest.fixture
def test_cli(
    cli_arg: Callable[..., str],
    remove_link_ids: Callable[[str], str],
    expected_output: str,
) -> Callable[..., None]:
    """Return fixture that tests expected argument output."""

    def _test_cli(
        *args: Union[str, None],
        truecolor: bool = True,
        paging: Union[bool, None] = False,
        material_theme: bool = True,
        images: bool = True,
        **kwargs: Union[str, None],
    ) -> None:
        """Assert the CLI output for the given arguments matches
        the expected_output fixture.

        Args:
            *args (Union[str, None]): Extra arguments for the command.
            truecolor (bool): Pass '--color-system=truecolor'. By
                default True.
            paging (Union[bool, None]): Pass '--paging' or
                '--no-paging'. By default False ('--no-paging').
            material_theme (bool): Set the theme to 'material'. By
                default True.
            images (bool): Pass '--images'. By default True.
            **kwargs (Union[str, None]): Environmental variables to
                set; names will be uppercased.
        """
        actual_output = cli_arg(
            *args,
            truecolor=truecolor,
            paging=paging,
            material_theme=material_theme,
            images=images,
            **kwargs,
        )
        assert actual_output == remove_link_ids(expected_output)

    return _test_cli
def test_no_duplicate_parameter_names() -> None:
    """It has only unique parameter names."""
    cli_parameters = __main__.typer_click_object.params
    # Gather every primary and secondary option name across all
    # parameters, then count occurrences.
    all_options = (
        option
        for parameter in cli_parameters
        for option in (*parameter.opts, *parameter.secondary_opts)
    )
    option_count = collections.Counter(all_options)
    assert max(option_count.values()) == 1
def test_main_succeeds(run_cli: RunCli) -> None:
    """It exits with a status code of zero with a valid file."""
    assert run_cli().exit_code == 0
@pytest.mark.parametrize("option", ("--version", "-V"))
def test_version(runner: CliRunner, option: str) -> None:
"""It returns the version number."""
result = runner.invoke(__main__.typer_click_object, [option])
assert result.stdout == f"nbpreview {nbpreview.__version__}\n"
def test_exit_invalid_file_status(
    runner: CliRunner, temp_file: Callable[[Optional[str]], str]
) -> None:
    """It exits with a status code of 2 when fed an invalid file."""
    # An empty file is not a valid notebook.
    bad_path = temp_file(None)
    result = runner.invoke(__main__.typer_click_object, [bad_path])
    assert result.exit_code == 2
def test_exit_invalid_file_output(
    runner: CliRunner,
    temp_file: Callable[[Optional[str]], str],
) -> None:
    """It outputs a message when fed an invalid file."""
    invalid_path = temp_file(None)
    result = runner.invoke(__main__.typer_click_object, [invalid_path])
    expected_output = (
        "Usage: main [OPTIONS] [FILE]..."
        "\nTry 'main --help' for help."
        f"\n\nError: Invalid value for 'FILE...': {invalid_path}"
        " is not a valid Jupyter Notebook path.\n"
    )
    assert result.output == expected_output
def test_render_notebook(run_cli: RunCli) -> None:
    """It renders a notebook."""
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n return x + y",
    }
    result = run_cli(code_cell)
    # Plain (uncolored) frame around the two-line source.
    expected_output = textwrap.dedent(
        """\
 ╭─────────────────────────────────────────────────────────────────────────╮
[2]: │ def foo(x: float, y: float) -> float: │
│ return x + y │
╰─────────────────────────────────────────────────────────────────────────╯
"""
    )
    assert result.output == expected_output
def test_render_notebook_option(run_cli: RunCli) -> None:
    """It respects cli options."""
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n return x + y",
    }
    # --color-system=256 switches output to 256-color escape codes.
    result = run_cli(code_cell, args="--color --color-system=256")
    output = result.output
    expected_output = (
        " ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │ \x1b[38;5;182;49mdef"
        "\x1b[0m\x1b[38;5;231;49m \x1b[0m\x1b[38;5;147;49mfoo"
        "\x1b[0m\x1b[38;5;153;49m(\x1b[0m\x1b[38;5;231;49mx\x1b["
        "0m\x1b[38;5;153;49m:\x1b[0m\x1b[38;5;231;49m \x1b[0m"
        "\x1b[38;5;147;49mfloat\x1b[0m\x1b[38;5;153;49m,\x1b["
        "0m\x1b[38;5;231;49m \x1b[0m\x1b[38;5;231;49my\x1b[0m"
        "\x1b[38;5;153;49m:\x1b[0m\x1b[38;5;231;49m \x1b[0m\x1b["
        "38;5;147;49mfloat\x1b[0m\x1b[38;5;153;49m)\x1b[0m"
        "\x1b[38;5;231;49m \x1b[0m\x1b[38;5;153;49m-\x1b[0m\x1b["
        "38;5;153;49m>\x1b[0m\x1b[38;5;231;49m \x1b[0m\x1b[38"
        ";5;147;49mfloat\x1b[0m\x1b[38;5;153;49m:\x1b[0m "
        " │\n "
        "│ \x1b[38;5;231;49m \x1b[0m\x1b[38;5;182;49mre"
        "turn\x1b[0m\x1b[38;5;231;49m \x1b[0m\x1b[38;5;231;49"
        "mx\x1b[0m\x1b[38;5;231;49m \x1b[0m\x1b[38;5;153;49m+"
        "\x1b[0m\x1b[38;5;231;49m \x1b[0m\x1b[38;5;231;49my\x1b["
        "0m "
        " │\n ╰──────────────"
        "────────────────────────────────────────"
        "───────────────────╯\n"
    )
    assert output == expected_output
def test_render_markdown(run_cli: RunCli) -> None:
    """It renders a markdown cell."""
    markdown_cell = {
        "cell_type": "markdown",
        "id": "academic-bride",
        "metadata": {},
        "source": "Lorep",
    }
    result = run_cli(markdown_cell)
    # Markdown cells render as padded plain text (no frame).
    assert result.output == (
        " Lorep "
        " \n"
    )
@pytest.mark.parametrize(
    "arg, env",
    (("--plain", None), ("-p", None), (None, {"NBPREVIEW_PLAIN": "TRUE"})),
)
def test_force_plain(
    arg: Optional[str],
    env: Optional[Mapping[str, str]],
    runner: CliRunner,
    write_notebook: Callable[[Union[Dict[str, Any], None]], str],
) -> None:
    """It renders in plain format when flag or env is specified."""
    # Plain mode can be triggered by --plain, -p, or the
    # NBPREVIEW_PLAIN environment variable — all three cases are
    # parametrized above.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n return x + y",
    }
    notebook_path = write_notebook(code_cell)
    args = ["--unicode", "--width=80", notebook_path]
    if arg is not None:
        args = [arg] + args
    result = runner.invoke(__main__.typer_click_object, args=args, env=env)
    # Plain output: no frame, no execution-count gutter.
    expected_output = (
        "def foo(x: float, y: float) -> float: "
        " \n return x + y "
        " \n"
    )
    assert result.output == expected_output
def test_raise_no_source(
    runner: CliRunner,
    temp_file: Callable[[Optional[str]], str],
    make_notebook_dict: Callable[[Optional[Dict[str, Any]]], Dict[str, Any]],
) -> None:
    """It returns an error message if there is no source."""
    # A code cell missing the mandatory "source" key makes the notebook
    # fail schema validation.
    no_source_cell = {
        "cell_type": "code",
        "outputs": [],
    }
    notebook_dict = make_notebook_dict(no_source_cell)
    notebook_path = temp_file(json.dumps(notebook_dict))
    result = runner.invoke(__main__.typer_click_object, args=[notebook_path])
    output = result.output
    expected_output = (
        "Usage: main [OPTIONS] [FILE]..."
        "\nTry 'main --help' for help."
        "\n\nError: Invalid value for 'FILE...':"
        f" {notebook_path} is not a valid Jupyter Notebook path.\n"
    )
    assert output == expected_output
def test_raise_no_output(
    runner: CliRunner,
    temp_file: Callable[[Optional[str]], str],
    make_notebook_dict: Callable[[Optional[Dict[str, Any]]], Dict[str, Any]],
) -> None:
    """It returns an error message if no output in a code cell."""
    # A code cell missing the mandatory "outputs" key also fails validation.
    no_source_cell = {"cell_type": "code", "source": ["x = 1\n"]}
    notebook_dict = make_notebook_dict(no_source_cell)
    notebook_path = temp_file(json.dumps(notebook_dict))
    result = runner.invoke(__main__.typer_click_object, args=[notebook_path])
    output = result.output
    expected_output = (
        "Usage: main [OPTIONS] [FILE]...\nTry 'main -"
        "-help' for help.\n\nError: Invalid value f"
        f"or 'FILE...': {notebook_path} is not a v"
        "alid Jupyter Notebook path.\n"
    )
    assert output == expected_output
@pytest.fixture
def mock_pygment_styles(mocker: MockerFixture) -> Iterator[Mock]:
    """Mock pygment styles.
    Pin the advertised styles so the test suite does not break whenever
    pygments adds or removes a style.
    """
    style_names = ("material", "monokai", "zenburn")
    patched = mocker.patch(
        "nbpreview.option_values.styles.get_all_styles",
        # The real function returns a generator, so mimic that here.
        return_value=(name for name in style_names),
    )
    yield patched
@pytest.fixture
def mock_terminal(mocker: MockerFixture) -> Iterator[Mock]:
    """Mock a modern terminal."""
    # Pre-bake a Console factory that always reports a capable, 100-column
    # terminal, so rendering output is deterministic across environments.
    terminal_console = functools.partial(
        console.Console,
        color_system="truecolor",
        force_terminal=True,
        width=100,
        no_color=False,
        legacy_windows=False,
        force_jupyter=False,
    )
    mock = mocker.patch("nbpreview.__main__.console.Console", new=terminal_console)
    yield mock
def test_default_color_system_auto(
    runner: CliRunner,
    mocker: MockerFixture,
    notebook_path: Path,
) -> None:
    """Its default value is 'auto'."""
    mock = mocker.patch("nbpreview.__main__.console.Console")
    runner.invoke(
        __main__.typer_click_object, args=[os.fsdecode(notebook_path)], color=True
    )
    # console.Console is called multiple times, first time should be
    # console representing terminal
    assert mock.call_args_list[0].kwargs["color_system"] == "auto"
def test_list_themes(
    runner: CliRunner,
    mocker: MockerFixture,
    expected_output: str,
    mock_terminal: Mock,
    mock_pygment_styles: Mock,
) -> None:
    """It renders an example of all available themes."""
    # expected_output is a fixture holding the pre-rendered theme preview.
    result = runner.invoke(
        __main__.typer_click_object,
        args=["--list-themes"],
        color=True,
    )
    output = result.output
    assert output == expected_output
@pytest.mark.parametrize("option_name", ("--list-themes", "--lt"))
def test_list_themes_no_terminal(
    option_name: str, runner: CliRunner, mock_pygment_styles: Mock
) -> None:
    """It lists all themes with no preview when not a terminal."""
    # Without mock_terminal the output stream is not a TTY, so only the
    # bare theme names should be printed.
    result = runner.invoke(
        __main__.typer_click_object,
        args=[option_name],
        color=True,
    )
    output = result.output
    expected_output = (
        "material\nmonokai\nzenburn\nlight / ansi_li" "ght\ndark / ansi_dark\n"
    )
    assert output == expected_output
def test_render_notebook_file(test_cli: Callable[..., None]) -> None:
    """It renders a notebook file."""
    # test_cli compares CLI output against the stored expected rendering.
    test_cli()
@pytest.mark.parametrize(
    "option_name, theme, env",
    (
        ("--theme", "light", None),
        ("-t", "dark", None),
        ("-t", "monokai", None),
        (None, None, "default"),
    ),
)
def test_change_theme_notebook_file(
    option_name: Union[str, None],
    theme: Union[str, None],
    env: Union[str, None],
    test_cli: Callable[..., None],
) -> None:
    """It changes the theme of the notebook."""
    # Pass the option/value pair when both are given; otherwise a single
    # None argument exercises the env-variable path.
    args: List[Union[str, None]]
    if option_name is not None and theme is not None:
        args = [option_name, theme]
    else:
        args = [None]
    test_cli(*args, nbpreview_theme=env, material_theme=False)
@pytest.mark.parametrize(
    "option_name, env", (("--hide-output", None), ("-h", None), (None, "1"))
)
def test_hide_output_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It hides the output of a notebook file."""
    # Covers long flag, short flag, and the NBPREVIEW_HIDE_OUTPUT env var.
    test_cli(option_name, nbpreview_hide_output=env)
@pytest.mark.parametrize(
    "option_name, env", (("--plain", None), ("-p", None), (None, "1"))
)
def test_plain_output_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It renders a notebook in a plain format."""
    # Covers long flag, short flag, and the NBPREVIEW_PLAIN env var.
    test_cli(option_name, nbpreview_plain=env)
@pytest.mark.parametrize(
    "option_name, env",
    (
        ("--unicode", None),
        ("-u", None),
        ("--no-unicode", None),
        ("-x", None),
        (None, "0"),
    ),
)
def test_unicode_output_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It renders a notebook with and without unicode characters."""
    # Covers enable flags, disable flags, and the NBPREVIEW_UNICODE env var.
    test_cli(option_name, nbpreview_unicode=env)
@pytest.mark.parametrize(
    "option_name, env",
    (("--nerd-font", None), ("-n", None), (None, "1")),
)
def test_nerd_font_output_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It renders a notebook with nerd font characters."""
    # Covers long flag, short flag, and the NBPREVIEW_NERD_FONT env var.
    test_cli(option_name, nbpreview_nerd_font=env)
@pytest.mark.parametrize(
    "option_name, env",
    (("--no-files", None), ("-l", None), (None, "1")),
)
def test_files_output_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It does not write temporary files if options are specified."""
    # Covers long flag, short flag, and the NBPREVIEW_NO_FILES env var.
    test_cli(option_name, nbpreview_no_files=env)
@pytest.mark.parametrize(
    "option_name, env",
    (("--positive-space", None), ("-s", None), (None, "1")),
)
def test_positive_space_output_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It draws images in positive space if options are specified."""
    # Covers long flag, short flag, and the NBPREVIEW_POSITIVE_SPACE env var.
    test_cli(option_name, nbpreview_positive_space=env)
@pytest.mark.parametrize(
    "option_name, env",
    (
        ("--hyperlinks", None),
        ("-k", None),
        (None, "1"),
        ("--no-hyperlinks", None),
        ("-r", None),
        (None, "0"),
    ),
)
def test_hyperlinks_output_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It includes or excludes hyperlinks depending on options."""
    # Covers both directions of the flag plus the NBPREVIEW_HYPERLINKS env var.
    test_cli(option_name, nbpreview_hyperlinks=env)
@pytest.mark.parametrize(
    "option_name, env",
    (
        ("--hide-hyperlink-hints", None),
        ("-y", None),
        (None, "1"),
    ),
)
def test_hyperlink_hints_output_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It does not render hints to click the hyperlinks."""
    # Covers long flag, short flag, and the NBPREVIEW_HIDE_HYPERLINK_HINTS
    # env var.
    test_cli(option_name, nbpreview_hide_hyperlink_hints=env)
@pytest.mark.parametrize(
    "option_name, env",
    [
        ("--no-images", None),
        ("-e", None),
        ("--images", None),
        ("-i", None),
        (None, None),
        (None, "1"),
        (None, "0"),
    ],
)
def test_image_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It does not draw images when specified."""
    # images=False: expected output excludes drawn images in every case.
    test_cli(option_name, nbpreview_images=env, images=False)
def test_no_color_no_image(test_cli: Callable[..., None]) -> None:
    """By default images will not render if no color."""
    # No explicit image flag: disabling color alone should disable drawing.
    test_cli("--no-color", images=False)
@pytest.mark.parametrize(
    "option_name, drawing_type, env",
    (
        ("--image-drawing", "braille", None),
        ("--id", "character", None),
        (None, None, "braille"),
    ),
)
def test_image_drawing_notebook_file(
    option_name: Union[str, None],
    drawing_type: Union[str, None],
    env: Union[str, None],
    test_cli: Callable[..., None],
) -> None:
    """It draws images only when option is set."""
    # Build "--option=value" when both pieces are supplied; otherwise fall
    # back to None so only the NBPREVIEW_IMAGE_DRAWING env var applies.
    if option_name is None or drawing_type is None:
        drawing_arg = None
    else:
        drawing_arg = f"{option_name}={drawing_type}"
    test_cli(
        drawing_arg,
        nbpreview_image_drawing=env,
    )
@pytest.mark.xfail(
    "terminedia" in sys.modules,
    reason=test_notebook.SKIP_TERMINEDIA_REASON,
    strict=True,
)
def test_message_failed_terminedia_import(cli_arg: Callable[..., str]) -> None:
    """It raises a user-friendly warning message if import fails."""
    # "block" drawing needs terminedia; when it is importable this test is
    # expected to fail (xfail above), since no error message is produced.
    output = cli_arg("--image-drawing=block")
    expected_output = (
        "Usage: main [OPTIONS] [FILE]..."
        "\nTry 'main --help' for help."
        "\n\nError: Invalid value for '--image-drawing' / '--id':"
        " 'block' cannot be used on this system."
        " This might be because it is being run on Windows.\n"
    )
    assert output == expected_output
@pytest.mark.parametrize(
    "option_name, env_name, env_value",
    (
        ("--color", None, None),
        ("-c", None, None),
        ("--no-color", None, None),
        ("-o", None, None),
        (None, "NBPREVIEW_COLOR", "0"),
        (None, "NO_COLOR", "1"),
        (None, "NBPREVIEW_NO_COLOR", "true"),
        (None, "TERM", "dumb"),
    ),
)
def test_color_notebook_file(
    option_name: Union[str, None],
    env_name: Union[str, None],
    env_value: Union[str, None],
    test_cli: Callable[..., None],
) -> None:
    """It does not use color when specified."""
    # Forward the environment variable as a keyword argument only when one
    # is parametrized; flag-only cases pass no extra keywords.
    env_overrides = {} if env_name is None else {env_name: env_value}
    test_cli(option_name, **env_overrides)
@pytest.mark.parametrize(
    "option_name, color_system, env_value",
    (
        ("--color-system", "standard", None),
        ("--color-system", "none", None),
        ("--cs", "256", None),
        (None, None, "windows"),
    ),
)
def test_color_system_notebook_file(
    option_name: Union[str, None],
    color_system: Union[str, None],
    env_value: Union[str, None],
    test_cli: Callable[..., None],
) -> None:
    """It uses different color systems depending on option value."""
    # Combine option and value into a single "--option=value" argument, or
    # pass None so only the NBPREVIEW_COLOR_SYSTEM env var takes effect.
    if option_name is not None and color_system is not None:
        color_arg = f"{option_name}={color_system}"
    else:
        color_arg = None
    test_cli(color_arg, truecolor=False, nbpreview_color_system=env_value)
@pytest.mark.parametrize(
    "option_name, env",
    (("--line-numbers", None), ("-m", None), (None, "1")),
)
def test_line_numbers_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It renders a notebook file with line numbers."""
    # Covers long flag, short flag, and the NBPREVIEW_LINE_NUMBERS env var.
    test_cli(option_name, nbpreview_line_numbers=env)
@pytest.mark.parametrize(
    "option_name, env",
    (("--code-wrap", None), ("-q", None), (None, "1")),
)
def test_code_wrap_notebook_file(
    option_name: Union[str, None], env: Union[str, None], test_cli: Callable[..., None]
) -> None:
    """It renders a notebook file with code wrapping enabled."""
    # Covers long flag, short flag, and the NBPREVIEW_CODE_WRAP env var.
    test_cli(option_name, nbpreview_code_wrap=env)
@pytest.mark.parametrize("paging", [True, None])
def test_paging_notebook_stdout_file(
    paging: Union[bool, None], test_cli: Callable[..., None]
) -> None:
    """It simply prints the text when not in a terminal."""
    # Even with paging requested, a non-terminal stream should bypass the
    # pager and write directly to stdout.
    test_cli("--color", paging=paging)
@pytest.fixture
def echo_via_pager_mock(mocker: MockerFixture) -> Iterator[Mock]:
    """Return a mock for click.echo_via_pager."""
    # Patching the pager lets tests assert whether and how it was invoked
    # without actually spawning a pager process.
    echo_via_pager_mock = mocker.patch("nbpreview.__main__.click.echo_via_pager")
    yield echo_via_pager_mock
@pytest.mark.parametrize(
    "option_name, code_lines, is_expected_called",
    (
        ("--no-paging", 300, False),
        ("--paging", 1, True),
        ("-g", 50, True),
        ("-f", 400, False),
        ("", 500, True),
        ("", 2, False),
    ),
)
def test_automatic_paging_notebook(
    run_cli: RunCli,
    mock_terminal: Mock,
    echo_via_pager_mock: Mock,
    option_name: str,
    code_lines: int,
    is_expected_called: bool,
    mock_stdin_tty: Mock,
    mock_stdout_tty: Mock,
) -> None:
    """It uses the pager only when notebook is long or forced."""
    # code_lines scales the cell body so the rendered notebook is either
    # shorter or longer than the terminal, triggering automatic paging.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "[i for i in range(20)]\n" * code_lines,
    }
    run_cli(code_cell, option_name)
    assert echo_via_pager_mock.called is is_expected_called
@pytest.mark.parametrize(
    "option_name, color", (("--color", True), ("--no-color", False), (None, None))
)
def test_color_passed_to_pager(
    cli_arg: Callable[..., str],
    echo_via_pager_mock: Mock,
    mock_terminal: Mock,
    option_name: Union[str, None],
    color: Union[bool, None],
) -> None:
    """It passes the color arg value to the pager."""
    cli_arg(option_name, paging=True)
    # Inspect the keyword arguments of the (mocked) pager call.
    color_arg = echo_via_pager_mock.call_args[1]["color"]
    assert color_arg == color
@pytest.mark.parametrize("file_argument", [None, "-"])
def test_render_stdin(
    file_argument: Union[None, str],
    runner: CliRunner,
    notebook_path: Path,
    mock_tempfile_file: Mock,
    expected_output: str,
    mock_terminal: Mock,
    remove_link_ids: Callable[[str], str],
) -> None:
    """It treats stdin as a file's text and renders a notebook."""
    # Both an omitted FILE argument and an explicit "-" should read stdin.
    stdin = notebook_path.read_text()
    args = ["--color-system=truecolor", "--no-images", "--theme=material"]
    if file_argument is not None:
        args.append(file_argument)
    result = runner.invoke(__main__.typer_click_object, args=args, input=stdin)
    output = result.output
    # Hyperlink ids are random; strip them before comparing.
    assert remove_link_ids(output) == expected_output
def test_stdin_cwd_path(
    runner: CliRunner,
    make_notebook: Callable[[Optional[Dict[str, Any]]], NotebookNode],
    remove_link_ids: Callable[[str], str],
    mock_terminal: Mock,
) -> None:
    """It uses the current working the directory when using stdin."""
    # With no file path available, relative attachments should resolve
    # against the process's current working directory.
    markdown_cell = {
        "cell_type": "markdown",
        "id": "academic-bride",
        "metadata": {},
        "source": "",
    }
    notebook_nodes = make_notebook(markdown_cell)
    notebook_stdin = nbformat.writes(notebook_nodes)
    current_working_directory = pathlib.Path.cwd()
    result = runner.invoke(
        __main__.typer_click_object,
        args=["--color-system=truecolor", "--no-images", "--theme=material"],
        input=notebook_stdin,
    )
    output = result.output
    expected_output = (
        "  \x1b]8;id=835649;"
        f"file://{current_working_directory.resolve() / 'image.png'}\x1b\\\x1b"
        "[94m🖼 Click to view Test image\x1b[0m\x1b]8;;\x1b"
        "\\                                       "
        "                                        \n                                      "
        "                                        "
        "                      \n"
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_multiple_files(
    runner: CliRunner,
    write_notebook: Callable[[Union[Dict[str, Any], None]], str],
    notebook_path: Path,
    mock_terminal: Mock,
) -> None:
    """It renders multiple files."""
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n    return x + y",
    }
    code_notebook_path = write_notebook(code_cell)
    result = runner.invoke(
        __main__.typer_click_object,
        args=[
            code_notebook_path,
            code_notebook_path,
            "--color-system=truecolor",
            "--theme=material",
        ],
    )
    output = result.output
    # Titles are shortened to the file name when the full path would not
    # fit in the panel header (80 columns minus border overhead).
    path_width = 80 - 6
    tempfile_name = (
        os.fsdecode(pathlib.Path(code_notebook_path).name)
        if path_width < len(code_notebook_path)
        else code_notebook_path
    )
    tempfile_name = f"{tempfile_name} "
    # Each file is rendered inside its own heavy-bordered panel.
    expected_output = (
        f"┏━ {tempfile_name:━<{path_width + 1}}━┓"
        "\n┃                                      "
        "                                        "
        "┃\n┃     ╭─────────────────────────────"
        "──────────────────────────────────────╮ "
        " ┃\n┃  \x1b[38;5;247m[2]:\x1b[0m │ \x1b[38;2;187;1"
        "28;179;49mdef\x1b[0m\x1b[38;2;238;255;255;49m "
        "\x1b[0m\x1b[38;2;130;170;255;49mfoo\x1b[0m\x1b[38;2;"
        "137;221;255;49m(\x1b[0m\x1b[38;2;238;255;255;4"
        "9mx\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m\x1b[38;2"
        ";238;255;255;49m \x1b[0m\x1b[38;2;130;170;255;"
        "49mfloat\x1b[0m\x1b[38;2;137;221;255;49m,\x1b[0m\x1b"
        "[38;2;238;255;255;49m \x1b[0m\x1b[38;2;238;255"
        ";255;49my\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m"
        "\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;130;17"
        "0;255;49mfloat\x1b[0m\x1b[38;2;137;221;255;49m"
        ")\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;1"
        "37;221;255;49m-\x1b[0m\x1b[38;2;137;221;255;49"
        "m>\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
        "130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221;2"
        "55;49m:\x1b[0m                             "
        "│  ┃\n┃       │ \x1b[38;2;238;255;255;49m   "
        " \x1b[0m\x1b[38;2;187;128;179;49mreturn\x1b[0m\x1b[3"
        "8;2;238;255;255;49m \x1b[0m\x1b[38;2;238;255;2"
        "55;49mx\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b["
        "38;2;137;221;255;49m+\x1b[0m\x1b[38;2;238;255;"
        "255;49m \x1b[0m\x1b[38;2;238;255;255;49my\x1b[0m "
        "                                        "
        "          │  ┃\n┃     ╰─────────────────"
        "────────────────────────────────────────"
        "──────────╯  ┃\n┃                       "
        "                                        "
        "               ┃\n┗━━━━━━━━━━━━━━━━━━━━━━━"
        "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        f"━━━━━━━━━━━━━━━┛\n\n┏━ {tempfile_name:━<{path_width + 1}}━┓\n┃"
        "                                        "
        "                                        "
        "              ┃\n┃     ╭───────────"
        "────────────────────────────────────────"
        "────────────────╮  ┃\n┃  \x1b[38;5;247m[2]:\x1b"
        "[0m │ \x1b[38;2;187;128;179;49mdef\x1b[0m\x1b[38;"
        "2;238;255;255;49m \x1b[0m\x1b[38;2;130;170;255"
        ";49mfoo\x1b[0m\x1b[38;2;137;221;255;49m(\x1b[0m\x1b["
        "38;2;238;255;255;49mx\x1b[0m\x1b[38;2;137;221;"
        "255;49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
        "[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;137"
        ";221;255;49m,\x1b[0m\x1b[38;2;238;255;255;49m "
        "\x1b[0m\x1b[38;2;238;255;255;49my\x1b[0m\x1b[38;2;13"
        "7;221;255;49m:\x1b[0m\x1b[38;2;238;255;255;49m"
        " \x1b[0m\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38"
        ";2;137;221;255;49m)\x1b[0m\x1b[38;2;238;255;25"
        "5;49m \x1b[0m\x1b[38;2;137;221;255;49m-\x1b[0m\x1b[3"
        "8;2;137;221;255;49m>\x1b[0m\x1b[38;2;238;255;2"
        "55;49m \x1b[0m\x1b[38;2;130;170;255;49mfloat\x1b["
        "0m\x1b[38;2;137;221;255;49m:\x1b[0m           "
        "            │  ┃\n┃       │ \x1b[38;2;"
        "238;255;255;49m    \x1b[0m\x1b[38;2;187;128;17"
        "9;49mreturn\x1b[0m\x1b[38;2;238;255;255;49m \x1b["
        "0m\x1b[38;2;238;255;255;49mx\x1b[0m\x1b[38;2;238;"
        "255;255;49m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b"
        "[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;238"
        ";255;255;49my\x1b[0m                       "
        "                           │  ┃\n┃     "
        "╰───────────────────────────────────────"
        "────────────────────────────╯  ┃\n┃      "
        "                                        "
        "                                ┃\n┗━━━━━"
        "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\n"
    )
    assert output == expected_output
def test_multiple_files_long_path() -> None:
    """It shortens the title to the filename if the path is long."""
    # A width of 7 is too narrow for "very/long/path", so only the final
    # component should be used as the panel title.
    long_path = pathlib.Path("very", "long", "path")
    title = __main__._create_file_title(long_path, width=7)
    assert title == "path"
def test_file_and_stdin(
    runner: CliRunner,
    write_notebook: Callable[[Union[Dict[str, Any], None]], str],
    notebook_path: Path,
    mock_terminal: Mock,
) -> None:
    """It renders both a file and stdin."""
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n    return x + y",
    }
    code_notebook_path = write_notebook(code_cell)
    stdin = pathlib.Path(code_notebook_path).read_text()
    # "-" as the second FILE argument mixes stdin in with the real file.
    result = runner.invoke(
        __main__.typer_click_object,
        args=["--color-system=truecolor", "--theme=material", code_notebook_path, "-"],
        input=stdin,
    )
    output = result.output
    path_width = 80 - 6
    tempfile_name = (
        os.fsdecode(pathlib.Path(code_notebook_path).name)
        if path_width < len(code_notebook_path)
        else code_notebook_path
    )
    tempfile_name = f"{tempfile_name} "
    # The stdin-sourced notebook gets a literal "<stdin>" panel title.
    expected_output = (
        f"┏━ {tempfile_name:━<{path_width + 1}}━┓"
        "\n┃                                      "
        "                                        "
        "┃\n┃     ╭─────────────────────────────"
        "──────────────────────────────────────╮ "
        " ┃\n┃  \x1b[38;5;247m[2]:\x1b[0m │ \x1b[38;2;187;1"
        "28;179;49mdef\x1b[0m\x1b[38;2;238;255;255;49m "
        "\x1b[0m\x1b[38;2;130;170;255;49mfoo\x1b[0m\x1b[38;2;"
        "137;221;255;49m(\x1b[0m\x1b[38;2;238;255;255;4"
        "9mx\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m\x1b[38;2"
        ";238;255;255;49m \x1b[0m\x1b[38;2;130;170;255;"
        "49mfloat\x1b[0m\x1b[38;2;137;221;255;49m,\x1b[0m\x1b"
        "[38;2;238;255;255;49m \x1b[0m\x1b[38;2;238;255"
        ";255;49my\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m"
        "\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;130;17"
        "0;255;49mfloat\x1b[0m\x1b[38;2;137;221;255;49m"
        ")\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;1"
        "37;221;255;49m-\x1b[0m\x1b[38;2;137;221;255;49"
        "m>\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
        "130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221;2"
        "55;49m:\x1b[0m                             "
        "│  ┃\n┃       │ \x1b[38;2;238;255;255;49m   "
        " \x1b[0m\x1b[38;2;187;128;179;49mreturn\x1b[0m\x1b[3"
        "8;2;238;255;255;49m \x1b[0m\x1b[38;2;238;255;2"
        "55;49mx\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b["
        "38;2;137;221;255;49m+\x1b[0m\x1b[38;2;238;255;"
        "255;49m \x1b[0m\x1b[38;2;238;255;255;49my\x1b[0m "
        "                                        "
        "          │  ┃\n┃     ╰─────────────────"
        "────────────────────────────────────────"
        "──────────╯  ┃\n┃                       "
        "                                        "
        "               ┃\n┗━━━━━━━━━━━━━━━━━━━━━━━"
        "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        "━━━━━━━━━━━━━━━┛\n\n┏━ <stdin> ━━━━━━━━━━━"
        "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        "━━━━━━━━━━━━━━━━━┓\n┃                    "
        "                                        "
        "                  ┃\n┃     ╭───────────"
        "────────────────────────────────────────"
        "────────────────╮  ┃\n┃  \x1b[38;5;247m[2]:\x1b"
        "[0m │ \x1b[38;2;187;128;179;49mdef\x1b[0m\x1b[38;"
        "2;238;255;255;49m \x1b[0m\x1b[38;2;130;170;255"
        ";49mfoo\x1b[0m\x1b[38;2;137;221;255;49m(\x1b[0m\x1b["
        "38;2;238;255;255;49mx\x1b[0m\x1b[38;2;137;221;"
        "255;49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
        "[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;137"
        ";221;255;49m,\x1b[0m\x1b[38;2;238;255;255;49m "
        "\x1b[0m\x1b[38;2;238;255;255;49my\x1b[0m\x1b[38;2;13"
        "7;221;255;49m:\x1b[0m\x1b[38;2;238;255;255;49m"
        " \x1b[0m\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38"
        ";2;137;221;255;49m)\x1b[0m\x1b[38;2;238;255;25"
        "5;49m \x1b[0m\x1b[38;2;137;221;255;49m-\x1b[0m\x1b[3"
        "8;2;137;221;255;49m>\x1b[0m\x1b[38;2;238;255;2"
        "55;49m \x1b[0m\x1b[38;2;130;170;255;49mfloat\x1b["
        "0m\x1b[38;2;137;221;255;49m:\x1b[0m           "
        "            │  ┃\n┃       │ \x1b[38;2;"
        "238;255;255;49m    \x1b[0m\x1b[38;2;187;128;17"
        "9;49mreturn\x1b[0m\x1b[38;2;238;255;255;49m \x1b["
        "0m\x1b[38;2;238;255;255;49mx\x1b[0m\x1b[38;2;238;"
        "255;255;49m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b"
        "[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;238"
        ";255;255;49my\x1b[0m                       "
        "                           │  ┃\n┃     "
        "╰───────────────────────────────────────"
        "────────────────────────────╯  ┃\n┃      "
        "                                        "
        "                                ┃\n┗━━━━━"
        "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\n"
    )
    assert output == expected_output
def test_multiple_files_plain(
    runner: CliRunner,
    write_notebook: Callable[[Union[Dict[str, Any], None]], str],
    mock_terminal: Mock,
) -> None:
    """It does not draw a border around files when in plain mode."""
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n    return x + y",
    }
    code_notebook_path = write_notebook(code_cell)
    result = runner.invoke(
        __main__.typer_click_object,
        args=[
            "--color-system=truecolor",
            "--plain",
            "--theme=material",
            code_notebook_path,
            code_notebook_path,
        ],
    )
    output = result.output
    # In plain mode there is no panel border, so the title may use the
    # full 80-column width.
    path_width = 80
    tempfile_name = (
        os.fsdecode(pathlib.Path(code_notebook_path).name)
        if path_width < len(code_notebook_path)
        else code_notebook_path
    )
    # Render the title through rich itself to match any wrapping/markup.
    file = io.StringIO()
    rich.print(tempfile_name, file=file)
    rendered_tempfile_name = file.getvalue()
    expected_output = (
        f"{rendered_tempfile_name}\n"
        "\x1b[38;2;187;128;179;49mdef\x1b[0m\x1b[38;2;238;"
        "255;255;49m \x1b[0m\x1b[38;2;130;170;255;49mfo"
        "o\x1b[0m\x1b[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;2"
        "38;255;255;49mx\x1b[0m\x1b[38;2;137;221;255;49"
        "m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
        "130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221;2"
        "55;49m,\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b["
        "38;2;238;255;255;49my\x1b[0m\x1b[38;2;137;221;"
        "255;49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
        "[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;137"
        ";221;255;49m)\x1b[0m\x1b[38;2;238;255;255;49m "
        "\x1b[0m\x1b[38;2;137;221;255;49m-\x1b[0m\x1b[38;2;13"
        "7;221;255;49m>\x1b[0m\x1b[38;2;238;255;255;49m"
        " \x1b[0m\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38"
        ";2;137;221;255;49m:\x1b[0m                 "
        "                       \n\x1b[38;2;238;25"
        "5;255;49m    \x1b[0m\x1b[38;2;187;128;179;49mr"
        "eturn\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38"
        ";2;238;255;255;49mx\x1b[0m\x1b[38;2;238;255;25"
        "5;49m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b[0m\x1b[3"
        "8;2;238;255;255;49m \x1b[0m\x1b[38;2;238;255;2"
        "55;49my\x1b[0m                             "
        "                       \n\n"
        f"\n{rendered_tempfile_name}\n\x1b["
        "38;2;187;128;179;49mdef\x1b[0m\x1b[38;2;238;25"
        "5;255;49m \x1b[0m\x1b[38;2;130;170;255;49mfoo\x1b"
        "[0m\x1b[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;238"
        ";255;255;49mx\x1b[0m\x1b[38;2;137;221;255;49m:"
        "\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;13"
        "0;170;255;49mfloat\x1b[0m\x1b[38;2;137;221;255"
        ";49m,\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38"
        ";2;238;255;255;49my\x1b[0m\x1b[38;2;137;221;25"
        "5;49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[3"
        "8;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;137;2"
        "21;255;49m)\x1b[0m\x1b[38;2;238;255;255;49m \x1b["
        "0m\x1b[38;2;137;221;255;49m-\x1b[0m\x1b[38;2;137;"
        "221;255;49m>\x1b[0m\x1b[38;2;238;255;255;49m \x1b"
        "[0m\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2"
        ";137;221;255;49m:\x1b[0m                   "
        "                     \n\x1b[38;2;238;255;"
        "255;49m    \x1b[0m\x1b[38;2;187;128;179;49mret"
        "urn\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2"
        ";238;255;255;49mx\x1b[0m\x1b[38;2;238;255;255;"
        "49m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b[0m\x1b[38;"
        "2;238;255;255;49m \x1b[0m\x1b[38;2;238;255;255"
        ";49my\x1b[0m                               "
        "                     \n\n\n"
    )
    assert output == expected_output
def test_multiple_files_all_fail(
    runner: CliRunner, temp_file: Callable[[Optional[str]], str]
) -> None:
    """It exits with a status code of 2 when fed invalid files."""
    invalid_path = temp_file(None)
    result = runner.invoke(__main__.typer_click_object, [invalid_path, invalid_path])
    # 2 is click's exit code for a usage error.
    assert result.exit_code == 2
def test_multiple_files_all_fail_message(
    runner: CliRunner, temp_file: Callable[[Optional[str]], str]
) -> None:
    """It prints an error message naming every invalid file."""
    invalid_path = temp_file(None)
    result = runner.invoke(__main__.typer_click_object, [invalid_path, invalid_path])
    output = result.output
    expected_output = (
        "Usage: main [OPTIONS] [FILE]...\nTry 'mai"
        "n --help' for help.\n\nError: Invalid valu"
        f"e for 'FILE...': {invalid_path}, {invalid_path}"
        " are not a valid Jupyter Notebook paths.\n"
    )
    assert output == expected_output
def test_multiple_files_some_fail(
    runner: CliRunner,
    write_notebook: Callable[[Union[Dict[str, Any], None]], str],
    notebook_path: Path,
    mock_terminal: Mock,
) -> None:
    """It still renders valid files when some are invalid."""
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n    return x + y",
    }
    code_notebook_path = write_notebook(code_cell)
    # This very test module is a Python file, not a notebook — guaranteed
    # to be rejected by the notebook validator.
    invalid_file_path = os.fsdecode(pathlib.Path(__file__))
    result = runner.invoke(
        __main__.typer_click_object,
        args=[
            "--color-system=truecolor",
            "--theme=material",
            code_notebook_path,
            invalid_file_path,
        ],
    )
    output = result.output
    path_width = 80 - 6
    tempfile_name = (
        os.fsdecode(pathlib.Path(code_notebook_path).name)
        if path_width < len(code_notebook_path)
        else code_notebook_path
    )
    tempfile_name = f"{tempfile_name} "
    invalid_file_name = (
        os.fsdecode(pathlib.Path(invalid_file_path).name)
        if path_width < len(invalid_file_path)
        else invalid_file_path
    )
    # Build the expected error panel with rich itself so box drawing and
    # wrapping match the application's output exactly.
    invalid_file_panel = panel.Panel(
        text.Text(
            f"{os.fsdecode(invalid_file_name)} is not a valid Jupyter Notebook path.",
            style=style.Style(color="color(178)"),
        ),
        box=box.HEAVY,
        title_align="left",
        expand=True,
        padding=(1, 2, 1, 2),
        safe_box=True,
        width=80,
        title=invalid_file_name,
    )
    file = io.StringIO()
    rich.print(invalid_file_panel, file=file)
    expected_output = (
        f"┏━ {tempfile_name:━<{path_width + 1}}━┓"
        "\n┃                                      "
        "                                        "
        "┃\n┃     ╭─────────────────────────────"
        "──────────────────────────────────────╮ "
        " ┃\n┃  \x1b[38;5;247m[2]:\x1b[0m │ \x1b[38;2;187;1"
        "28;179;49mdef\x1b[0m\x1b[38;2;238;255;255;49m "
        "\x1b[0m\x1b[38;2;130;170;255;49mfoo\x1b[0m\x1b[38;2;"
        "137;221;255;49m(\x1b[0m\x1b[38;2;238;255;255;4"
        "9mx\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m\x1b[38;2"
        ";238;255;255;49m \x1b[0m\x1b[38;2;130;170;255;"
        "49mfloat\x1b[0m\x1b[38;2;137;221;255;49m,\x1b[0m\x1b"
        "[38;2;238;255;255;49m \x1b[0m\x1b[38;2;238;255"
        ";255;49my\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m"
        "\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;130;17"
        "0;255;49mfloat\x1b[0m\x1b[38;2;137;221;255;49m"
        ")\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;1"
        "37;221;255;49m-\x1b[0m\x1b[38;2;137;221;255;49"
        "m>\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
        "130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221;2"
        "55;49m:\x1b[0m                             "
        "│  ┃\n┃       │ \x1b[38;2;238;255;255;49m   "
        " \x1b[0m\x1b[38;2;187;128;179;49mreturn\x1b[0m\x1b[3"
        "8;2;238;255;255;49m \x1b[0m\x1b[38;2;238;255;2"
        "55;49mx\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b["
        "38;2;137;221;255;49m+\x1b[0m\x1b[38;2;238;255;"
        "255;49m \x1b[0m\x1b[38;2;238;255;255;49my\x1b[0m "
        "                                        "
        "          │  ┃\n┃     ╰─────────────────"
        "────────────────────────────────────────"
        "──────────╯  ┃\n┃                       "
        "                                        "
        "               ┃\n┗━━━━━━━━━━━━━━━━━━━━━━━"
        "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        f"━━━━━━━━━━━━━━━┛\n\n{file.getvalue()}\n"
    )
    assert output == expected_output
def test_help(runner: CliRunner) -> None:
    """It returns a help message when prompted."""
    result = runner.invoke(__main__.typer_click_object, args=["--help"])
    output = result.output
    # Only the prefix is checked so option additions do not break the test.
    expected_prefix = """\
Usage: main [OPTIONS] [FILE]...
  Render a Jupyter Notebook in the terminal.
Options:
  [FILE]...                       Jupyter notebook file(s) to render on the
                                  terminal. Use a dash ('-') or pipe in data to
                                  the command to read from standard input."""
    assert output.startswith(expected_prefix)
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Does not colorize on Windows terminals."
)
def test_color_help(runner: CliRunner) -> None:
    """It colors the help message when prompted."""
    result = runner.invoke(__main__.typer_click_object, args=["--help"], color=True)
    output = result.output
    # Only the prefix is checked so option additions do not break the test.
    expected_prefix = (
        "\x1b[35mUsage: \x1b[0mmain [OPTIONS] [FILE]..."
        "\n\n  Render a Jupyter Notebook in the ter"
        "minal.\n\n\x1b[35mOptions\x1b[0m:\n  \x1b[36m[FILE]."
        "..\x1b[0m                       Jupyter not"
        "ebook file(s) to render on the\n         "
        "                         terminal. Use a"
        " dash ('-') or pipe in data to\n         "
        "                         the command to "
        "read from standard input."
    )
    assert output.startswith(expected_prefix)
|
paw-lu/nbpreview | src/nbpreview/component/markdown.py | <reponame>paw-lu/nbpreview
"""Override rich's markdown renderer with custom components."""
import dataclasses
import enum
import io
import os
import pathlib
import textwrap
from io import BytesIO
from pathlib import Path
from typing import Iterable, Iterator, Optional, Union
from urllib import parse
import httpx
import PIL
import validators
import yarl
from PIL import Image
from rich import _loop, markdown, measure, rule, segment, style, syntax, text
from rich.console import (
Console,
ConsoleOptions,
JustifyMethod,
RenderableType,
RenderResult,
)
from rich.measure import Measurement
from rich.style import Style
from rich.text import Text
from nbpreview.component.content.output.result import drawing, link, markdown_extensions
from nbpreview.component.content.output.result.drawing import ImageDrawing
from nbpreview.component.content.output.result.markdown_extensions import (
MarkdownExtensionSection,
)
class CustomCodeBlock(markdown.CodeBlock):
    """A custom code block with syntax highlighting."""
    # Neutral style; the Syntax renderable below supplies all styling.
    style_name = "none"
    def __init__(self, lexer_name: str, theme: str) -> None:
        """Constructor."""
        super().__init__(lexer_name=lexer_name, theme=theme)
    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        """Render the custom code block."""
        # Indent the code four spaces so it visually reads as a code block,
        # and strip trailing whitespace from the source.
        code = textwrap.indent(str(self.text).rstrip(), prefix=" " * 4)
        rendered_syntax = syntax.Syntax(
            code, self.lexer_name, theme=self.theme, background_color="default"
        )
        yield rendered_syntax
@enum.unique
class HeadingColorEnum(enum.Enum):
    """The heading color."""
    # Accent-color choices for rendered markdown headings.
    PURPLE = enum.auto()
    TEAL = enum.auto()
class CustomHeading(markdown.Heading):
    """A custom rendered markdown heading."""
    def __init__(self, level: int) -> None:
        """Constructor."""
        # level: markdown heading level (1 for "#", 2 for "##", ...).
        self.level = level
    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        """Render the custom markdown heading."""
        source_text = self.text
        source_text.justify = "left"
        if self.level == 1:
            # Level-1 headings get an inverse banner: white text on a
            # purple background, padded with one space on each side when
            # there is room.
            header_color = (
                "color(57)"
                if console.color_system in ["truecolor", "256"]
                else "color(5)"  # fall back to basic magenta on 8/16-color
            )
            header_style = style.Style(
                color="color(231)", bgcolor=header_color, bold=True
            )
            source_text.stylize(header_style)
            if source_text.cell_len < console.width:
                source_text = text.Text(" ", style=header_style) + source_text
            if source_text.cell_len < console.width:
                source_text = source_text + text.Text(" ", style=header_style)
            yield source_text
        else:
            # Lower levels keep the literal "#" prefix, colored teal.
            header_color = (
                "color(37)"
                if console.color_system in ["truecolor", "256"]
                else "color(6)"  # fall back to basic cyan on 8/16-color
            )
            header_style = style.Style(color=header_color, bold=True)
            source_text.stylize(header_style)
            source_text = (
                text.Text(self.level * "#" + " ", style=header_style) + source_text
            )
            if self.level <= 3:
                # Blank line before prominent headings for breathing room.
                yield text.Text("")
            yield source_text
        if self.level < 3:
            # Underline level-1 and level-2 headings with a dim rule.
            yield rule.Rule(style=style.Style(color=header_color, dim=True, bold=False))
class CustomBlockQuote(markdown.BlockQuote):
    """A custom block quote."""

    # Render quoted text dimmed. The parent class declares style_name as a
    # plain string, hence the assignment-type ignore for a Style instance.
    style_name = style.Style(dim=True)  # type: ignore[assignment]
class CustomHorizontalRule(markdown.HorizontalRule):
    """A customized horizontal rule to divide sections."""

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        """Yield a plain, unstyled rule spanning the console width."""
        unstyled_rule = rule.Rule(style="none")
        yield unstyled_rule
class CustomListItem(markdown.ListItem):
    """A custom list element."""

    def render_bullet(self, console: Console, options: ConsoleOptions) -> RenderResult:
        """Render a markdown bullet."""
        # Reserve three columns for the bullet prefix (" • ").
        render_options = options.update(width=options.max_width - 3)
        lines = console.render_lines(self.elements, render_options, style=self.style)
        bullet_style = console.get_style("none")
        bullet = segment.Segment(" • ", bullet_style)
        padding = segment.Segment(" " * 3, bullet_style)
        new_line = segment.Segment("\n")
        # Only the first rendered line carries the bullet; continuation
        # lines get blank padding so the item's text stays aligned.
        for first, line in _loop.loop_first(lines):
            yield bullet if first else padding
            yield from line
            yield new_line

    def render_number(
        self, console: Console, options: ConsoleOptions, number: int, last_number: int
    ) -> RenderResult:
        """Render a markdown number."""
        # Width of the widest numeral plus ". " so all items line up.
        number_width = len(str(last_number)) + 2
        render_options = options.update(width=options.max_width - number_width)
        lines = console.render_lines(self.elements, render_options, style=self.style)
        number_style = console.get_style("none")
        new_line = segment.Segment("\n")
        padding = segment.Segment(" " * number_width, number_style)
        numeral = segment.Segment(
            f"{number}.".rjust(number_width - 1) + " ", number_style
        )
        # Numeral on the first line, padding on continuations.
        for first, line in _loop.loop_first(lines):
            yield numeral if first else padding
            yield from line
            yield new_line
def _get_url_content(url: str) -> Union[BytesIO, None]:
    """Fetch *url* and return its body wrapped in a BytesIO, or None on failure."""
    try:
        response = httpx.get(url)
    except httpx.RequestError:
        # Network-level failure (DNS, timeout, ...): signal "no content".
        return None
    try:
        return io.BytesIO(response.content)
    except TypeError:
        # Defensive: a non-bytes body cannot be wrapped in a buffer.
        return None
class CustomImageItem(markdown.ImageItem):
    """Renders a placeholder for an image."""

    # Rendering options live at class level because rich instantiates image
    # items internally; MarkdownOverwrite assigns these before parsing.
    nerd_font: bool = False
    unicode: bool = True
    images: bool = True
    image_drawing: ImageDrawing = "block"
    color: bool = True
    characters: Optional[str] = None
    files: bool = True
    hide_hyperlink_hints: bool = False
    negative_space: bool = True
    relative_dir: Path = dataclasses.field(default_factory=pathlib.Path)

    def __init__(self, destination: str, hyperlinks: bool) -> None:
        """Constructor.

        Resolves ``destination`` to a local path or URL, fetches the image
        bytes when possible, and normalizes them for later drawing.
        """
        content: Union[None, Path, BytesIO]
        self.image_data: Union[None, bytes]
        self.destination = destination
        if not validators.url(self.destination):
            # destination comes in a url quoted format, which will turn
            # Windows-like paths into %5c, unquote here to that pathlib
            # understands correctly
            if (
                destination_path := pathlib.Path(parse.unquote(self.destination))
            ).is_absolute():
                self.path = destination_path
            else:
                # Relative paths are resolved against the notebook's dir.
                self.path = self.relative_dir / destination_path
            self.path = self.path.resolve()
            self.destination = os.fsdecode(self.path)
            content = self.path
            self.is_url = False
        else:
            self.is_url = True
            self.path = pathlib.Path(yarl.URL(self.destination).path)
            content = _get_url_content(self.destination)
        self.extension = self.path.suffix.lstrip(".")
        if content is not None and (self.images or (self.is_url and self.files)):
            try:
                with Image.open(content) as image:
                    with io.BytesIO() as output:
                        try:
                            # Re-encode through Pillow so downstream drawing
                            # receives bytes in the declared format.
                            format = Image.EXTENSION[f".{self.extension}"]
                        except KeyError:
                            # Unknown extension: treat as no image data.
                            self.image_data = None
                        else:
                            image.save(output, format=format)
                            self.image_data = output.getvalue()
            except (FileNotFoundError, PIL.UnidentifiedImageError):
                self.image_data = None
        else:
            self.image_data = None
        super().__init__(destination=self.destination, hyperlinks=hyperlinks)

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        """Render the image."""
        # Prefer the alt text; fall back to the destination itself.
        title = self.text.plain or self.destination
        if self.is_url:
            rendered_link = link.Link(
                path=self.destination,
                nerd_font=self.nerd_font,
                unicode=self.unicode,
                subject=title,
                emoji_name="globe_with_meridians",
                nerd_font_icon="爵",
                hyperlinks=self.hyperlinks,
                hide_hyperlink_hints=self.hide_hyperlink_hints,
            )
        else:
            rendered_link = link.Link(
                path=f"file://{self.destination}",
                nerd_font=self.nerd_font,
                unicode=self.unicode,
                subject=title,
                emoji_name="framed_picture",
                # NOTE(review): icon string appears empty — possibly a
                # nerd-font glyph lost in transit; verify against upstream.
                nerd_font_icon="",
                hyperlinks=self.hyperlinks,
                hide_hyperlink_hints=self.hide_hyperlink_hints,
            )
        yield rendered_link
        if self.images:
            # Use the final path component as a fallback caption.
            fallback_title = self.destination.strip("/").rsplit("/", 1)[-1]
            rendered_drawing = drawing.choose_drawing(
                image=self.image_data,
                fallback_text=self.text.plain or fallback_title,
                image_type=f"image/{self.extension}",
                image_drawing=self.image_drawing,
                color=self.color,
                negative_space=self.negative_space,
                characters=self.characters,
            )
            if rendered_drawing is not None:
                yield text.Text("")
                yield rendered_drawing
class MarkdownOverwrite(markdown.Markdown):
    """A custom markdown renderer."""

    def __init__(
        self,
        markup: str,
        code_theme: str = "monokai",
        justify: Optional[JustifyMethod] = None,
        style: Union[str, Style] = "none",
        hyperlinks: bool = True,
        inline_code_lexer: Optional[str] = None,
        inline_code_theme: str = "dark",
        nerd_font: bool = False,
        unicode: bool = True,
        images: bool = True,
        image_drawing: ImageDrawing = "block",
        color: bool = True,
        negative_space: bool = True,
        characters: Optional[str] = None,
        files: bool = True,
        hide_hyperlink_hints: bool = False,
        relative_dir: Optional[Path] = None,
    ) -> None:
        """Constructor.

        Swaps several of rich's default markdown element renderers for the
        customized ones in this module before delegating to the parent.
        """
        relative_dir = relative_dir if relative_dir is not None else pathlib.Path()
        # NOTE(review): ``elements`` appears to be a dict defined on the
        # Markdown class, so these assignments — and the CustomImageItem
        # class attributes below — mutate shared class-level state rather
        # than per-instance state. Confirm this is intentional.
        self.elements["code_block"] = CustomCodeBlock
        self.elements["heading"] = CustomHeading
        self.elements["block_quote"] = CustomBlockQuote
        self.elements["thematic_break"] = CustomHorizontalRule
        self.elements["item"] = CustomListItem
        self.elements["image"] = CustomImageItem
        # Image items are created by rich internally, so their rendering
        # options must be pushed onto the class itself.
        CustomImageItem.nerd_font = nerd_font
        CustomImageItem.images = images
        CustomImageItem.unicode = unicode
        CustomImageItem.image_drawing = image_drawing
        CustomImageItem.color = color
        CustomImageItem.negative_space = negative_space
        CustomImageItem.characters = characters
        CustomImageItem.files = files
        CustomImageItem.hide_hyperlink_hints = hide_hyperlink_hints
        CustomImageItem.relative_dir = relative_dir
        super().__init__(
            markup=markup,
            code_theme=code_theme,
            justify=justify,
            style=style,
            hyperlinks=hyperlinks,
            inline_code_lexer=inline_code_lexer,
            inline_code_theme=inline_code_theme,
        )
@dataclasses.dataclass
class CustomMarkdown:
    """A custom markdown renderer with table support."""

    source: str  # raw markdown text
    theme: str  # code-block syntax theme
    relative_dir: Path  # base dir for resolving relative image paths
    hyperlinks: bool = True
    nerd_font: bool = False
    unicode: bool = True
    images: bool = True
    image_drawing: ImageDrawing = "block"
    color: bool = True
    negative_space: bool = True
    characters: Optional[str] = None
    files: bool = True
    hide_hyperlink_hints: bool = False

    def __post_init__(self) -> None:
        """Constructor.

        Pre-renders the document: table sections are parsed out of the
        source and spliced between normally-rendered markdown runs.
        """
        table_sections = markdown_extensions.parse_markdown_extensions(
            self.source, unicode=self.unicode
        )
        self.renderables = [
            renderable
            for renderable in _splice_tables(
                self.source,
                table_sections=table_sections,
                theme=self.theme,
                hyperlinks=self.hyperlinks,
                nerd_font=self.nerd_font,
                unicode=self.unicode,
                images=self.images,
                image_drawing=self.image_drawing,
                color=self.color,
                negative_space=self.negative_space,
                characters=self.characters,
                files=self.files,
                hide_hyperlink_hints=self.hide_hyperlink_hints,
                relative_dir=self.relative_dir,
            )
        ]

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        """Render the markdown."""
        yield from self.renderables

    def __rich_measure__(
        self, console: Console, options: ConsoleOptions
    ) -> Measurement:
        """Define the dimensions of the rendered markdown."""
        measurement = measure.measure_renderables(
            console=console, options=options, renderables=self.renderables
        )
        return measurement
def _splice_tables(
    markup: str,
    table_sections: Iterable[MarkdownExtensionSection],
    theme: str,
    hyperlinks: bool,
    nerd_font: bool,
    unicode: bool,
    images: bool,
    image_drawing: ImageDrawing,
    color: bool,
    negative_space: bool,
    files: bool,
    hide_hyperlink_hints: bool,
    relative_dir: Path,
    characters: Optional[str] = None,
) -> Iterator[Union[MarkdownOverwrite, RenderableType, Text]]:
    """Mix in tables with traditional markdown parser.

    Yields, in source order, alternating runs of normally-rendered
    markdown and the pre-rendered table sections.
    """
    markup_lines = markup.splitlines()
    last_end_point = 0
    for table_section in table_sections:
        # Everything between the previous table and this one is plain
        # markdown; render it with the overwritten renderer.
        non_table_section = "\n".join(
            markup_lines[last_end_point : table_section.start_line]
        )
        yield MarkdownOverwrite(
            non_table_section,
            code_theme=theme,
            hyperlinks=hyperlinks,
            nerd_font=nerd_font,
            unicode=unicode,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            characters=characters,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
            relative_dir=relative_dir,
        )
        # Surround the table with blank lines so it does not abut prose.
        yield text.Text()
        yield table_section.renderable
        yield text.Text()
        last_end_point = table_section.end_line + 1
    # Trailing markdown after the final table (or the whole document when
    # there are no table sections at all).
    end_section = "\n".join(markup_lines[last_end_point:])
    yield MarkdownOverwrite(
        end_section,
        code_theme=theme,
        hyperlinks=hyperlinks,
        nerd_font=nerd_font,
        unicode=unicode,
        images=images,
        image_drawing=image_drawing,
        color=color,
        negative_space=negative_space,
        characters=characters,
        files=files,
        hide_hyperlink_hints=hide_hyperlink_hints,
        relative_dir=relative_dir,
    )
|
paw-lu/nbpreview | src/nbpreview/component/content/output/result/display_data.py | <filename>src/nbpreview/component/content/output/result/display_data.py
"""Notebook display data and execute result."""
import collections
import dataclasses
import enum
import json
from pathlib import Path
from typing import ClassVar, Dict, Iterator, List, Optional, Union
import html2text
from lxml import html
from lxml.html import HtmlElement
from rich import measure, syntax, text
from rich.console import Console, ConsoleOptions, ConsoleRenderable
from rich.emoji import Emoji
from rich.measure import Measurement
from rich.syntax import Syntax
from rich.table import Table
from rich.text import Text
from nbpreview.component import markdown
from nbpreview.component.content.output.result import drawing, latex, link, table
from nbpreview.component.content.output.result.drawing import Drawing, ImageDrawing
from nbpreview.component.markdown import CustomMarkdown
from nbpreview.data import Data
@dataclasses.dataclass
class DisplayData:
    """A notebook's display data."""

    content: str  # the raw payload pulled from the notebook data bundle
    data_type: ClassVar[str]  # MIME type handled by the subclass

    def __rich__(self) -> Union[ConsoleRenderable, str]:
        """Render the raw content; subclasses override for richer output."""
        return self.content
@dataclasses.dataclass
class PlainDisplay(DisplayData):
    """Notebook plain display data."""

    data_type: ClassVar[str] = "text/plain"

    @classmethod
    def from_data(cls, data: Data) -> "PlainDisplay":
        """Build a plain-text display from the notebook data bundle."""
        return cls(data[cls.data_type])
@dataclasses.dataclass
class HTMLDisplay(DisplayData):
    """Notebook HTML display data."""

    # Rendering options forwarded verbatim to the markdown renderer.
    theme: str
    nerd_font: bool
    unicode: bool
    images: bool
    image_drawing: ImageDrawing
    color: bool
    negative_space: bool
    hyperlinks: bool
    files: bool
    hide_hyperlink_hints: bool
    relative_dir: Path
    characters: Optional[str] = None
    data_type: ClassVar[str] = "text/html"

    @classmethod
    def from_data(
        cls,
        data: Data,
        theme: str,
        nerd_font: bool,
        unicode: bool,
        images: bool,
        image_drawing: ImageDrawing,
        color: bool,
        negative_space: bool,
        hyperlinks: bool,
        files: bool,
        hide_hyperlink_hints: bool,
        relative_dir: Path,
        characters: Optional[str] = None,
    ) -> "HTMLDisplay":
        """Create an HTML display data from notebook data."""
        content = data[cls.data_type]
        return cls(
            content,
            theme=theme,
            nerd_font=nerd_font,
            unicode=unicode,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            hyperlinks=hyperlinks,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
            characters=characters,
            relative_dir=relative_dir,
        )

    def __rich__(self) -> CustomMarkdown:
        """Render the HTML display data.

        The HTML is first converted to markdown text with html2text, then
        rendered with the project's custom markdown renderer.
        """
        converted_markdown = html2text.html2text(self.content)
        rendered_html = markdown.CustomMarkdown(
            converted_markdown,
            theme=self.theme,
            unicode=self.unicode,
            images=self.images,
            image_drawing=self.image_drawing,
            color=self.color,
            negative_space=self.negative_space,
            hyperlinks=self.hyperlinks,
            files=self.files,
            hide_hyperlink_hints=self.hide_hyperlink_hints,
            characters=self.characters,
            relative_dir=self.relative_dir,
        )
        return rendered_html
def _render_table_element(column: HtmlElement, column_width: int) -> List[Text]:
    """Render a DataFrame table element.

    Args:
        column (HtmlElement): The HTML element to render.
        column_width (int): The number of table columns the element spans
            (its ``colspan``).

    Returns:
        List[Text]: The rendered element, preceded by empty placeholder
        cells so that it occupies ``column_width`` columns in total.
    """
    # Bug fix: the passed-in column_width was previously shadowed by
    # re-reading the element's colspan attribute, leaving the parameter
    # dead; honor the caller-supplied width instead. Callers compute the
    # identical value from the same element, so behavior is unchanged.
    header = column.tag == "th"
    column_string = column.text.strip() if column.text is not None else ""
    element_text = table.create_table_element(column_string, header=header)
    table_element = (column_width - 1) * [text.Text("")] + [element_text]
    return table_element
@dataclasses.dataclass
class HTMLDataFrameRender:
    """Rich counterpart of HTML table."""

    unicode: bool  # whether box drawing may use unicode characters

    def __post_init__(self) -> None:
        """Constructor."""
        self.table = table.create_table(unicode=self.unicode)

    def add_headers(self, column_rows: List[HtmlElement]) -> None:
        """Add headers to table.

        Args:
            column_rows: The ``<tr>`` elements of the table's ``<thead>``.
        """
        n_column_rows = len(column_rows)
        for i, column_row in enumerate(column_rows):
            table_row = []
            for column in column_row.xpath("th|td"):
                attributes = column.attrib
                column_width = int(attributes.get("colspan", 1))
                # Declare the rich table's columns from the first header
                # row; a colspan expands into that many columns.
                if i == 0:
                    for _ in range(column_width):
                        self.table.add_column(justify="right")
                table_element = _render_table_element(column, column_width=column_width)
                table_row.extend(table_element)
            # Draw the header separator only under the last header row.
            end_section = i == n_column_rows - 1
            self.table.add_row(*table_row, end_section=end_section)

    def add_data(self, data_rows: List[HtmlElement]) -> None:
        """Add data rows to table.

        Tracks cells with rowspan > 1 so that subsequent rows receive an
        empty placeholder cell at each spanned column position.
        """
        # Maps column index -> remaining number of rows the cell spans.
        previous_row_spans: Dict[int, int] = {}
        for row in data_rows:
            table_row = []
            current_row_spans: Dict[int, int] = collections.defaultdict(int)
            for i, column in enumerate(row.xpath("th|td")):
                attributes = column.attrib
                column_width = int(attributes.get("colspan", 1))
                row_span = int(attributes.get("rowspan", 1))
                table_element = _render_table_element(column, column_width=column_width)
                table_row.extend(table_element)
                if 1 < row_span:
                    current_row_spans[i] += row_span
            # Insert placeholders for cells spanning down from earlier
            # rows and decrement their remaining span counts.
            for column, row_span in previous_row_spans.copy().items():
                table_row.insert(column, text.Text(""))
                remaining_span = row_span - 1
                if 1 < remaining_span:
                    previous_row_spans[column] = remaining_span
                else:
                    previous_row_spans.pop(column, None)
            # Merge spans started on this row into the carry-over map.
            previous_row_spans = {
                column: previous_row_spans.get(column, 0)
                + current_row_spans.get(column, 0)
                for column in previous_row_spans.keys() | current_row_spans.keys()
            }
            self.table.add_row(*table_row)
        if table.is_only_header(self.table):
            # Divide won't show up unless there is content underneath
            self.table.add_row("")

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> Iterator[Table]:
        """Render the DataFrame table."""
        yield self.table

    def __rich_measure__(
        self, console: Console, options: ConsoleOptions
    ) -> Measurement:
        """Define the dimensions of the rendered DataFrame."""
        measurement = measure.Measurement.get(
            console=console, options=options, renderable=self.table
        )
        return measurement
def _render_dataframe(
    dataframe_html: HtmlElement, unicode: bool
) -> HTMLDataFrameRender:
    """Render a DataFrame from its HTML.

    Args:
        dataframe_html (HtmlElement): The DataFrame rendered as HTML.
        unicode (bool): Whether to use unicode characters when rendering
            the table.

    Returns:
        HTMLDataFrameRender: The DataFrame rendered as a Rich Table.
    """
    rendered = HTMLDataFrameRender(unicode=unicode)
    # A missing <thead>/<tbody> simply contributes no rows.
    thead = dataframe_html.find("thead")
    rendered.add_headers([] if thead is None else thead.findall("tr"))
    tbody = dataframe_html.find("tbody")
    rendered.add_data([] if tbody is None else tbody.findall("tr"))
    return rendered
class DataFrameDisplayType(enum.Enum):
    """The type of DataFrame HTML output."""

    # A <table class="dataframe"> as produced by the default HTML repr.
    PLAIN = enum.auto()
    # A <style> block plus a bare <table> (styled DataFrame output).
    STYLED = enum.auto()
@dataclasses.dataclass
class DataFrameDisplay(DisplayData):
    """Notebook DataFrame display data."""

    unicode: bool  # allow unicode box drawing in the rendered table
    styled: bool = False  # True when content is styled-DataFrame HTML
    data_type: ClassVar[str] = "text/html"

    @staticmethod
    def dataframe_display_type(content: str) -> Union[DataFrameDisplayType, None]:
        """Determine the type of DataFrame output.

        Returns PLAIN for a ``<table class="dataframe">``, STYLED for the
        <style> + <table> structure of styled output, and None when the
        HTML is not recognized as a DataFrame at all.
        """
        html_element = html.fromstring(content)
        table_html = html_element.find_class("dataframe")
        if table_html and table_html[0].tag == "table":
            return DataFrameDisplayType.PLAIN
        # Basic check for styled dataframe
        try:
            style_element, *non_style_elements = html_element.head.iterchildren()
            *non_table_elements, table_element = html_element.body.iterchildren()
        except (ValueError, IndexError):
            # Empty head/body: cannot be a styled DataFrame.
            pass
        else:
            if (
                len(non_style_elements) == 0
                and style_element.tag == "style"
                and style_element.attrib == {"type": "text/css"}
                and len(non_table_elements) <= 1
                and table_element.tag == "table"
            ):
                return DataFrameDisplayType.STYLED
        return None

    @classmethod
    def from_data(cls, data: Data, unicode: bool, styled: bool) -> "DataFrameDisplay":
        """Create DataFrame display data from notebook data."""
        content = data[cls.data_type]
        return cls(content, unicode=unicode, styled=styled)

    def __rich__(self) -> HTMLDataFrameRender:
        """Render the DataFrame display data."""
        # Styled output keeps the table as the body's last child; plain
        # output is located by its "dataframe" class.
        if self.styled:
            dataframe_html, *_ = html.fromstring(self.content).xpath("//body/table")
        else:
            dataframe_html, *_ = html.fromstring(self.content).find_class("dataframe")
        rendered_dataframe = _render_dataframe(dataframe_html, unicode=self.unicode)
        return rendered_dataframe
@dataclasses.dataclass
class MarkdownDisplay(DisplayData):
    """Notebook Markdown display data."""

    # Rendering options forwarded verbatim to the markdown renderer.
    theme: str
    nerd_font: bool
    unicode: bool
    images: bool
    image_drawing: ImageDrawing
    color: bool
    negative_space: bool
    hyperlinks: bool
    files: bool
    hide_hyperlink_hints: bool
    relative_dir: Path
    characters: Optional[str] = None
    data_type: ClassVar[str] = "text/markdown"

    @classmethod
    def from_data(
        cls,
        data: Data,
        theme: str,
        nerd_font: bool,
        unicode: bool,
        images: bool,
        image_drawing: ImageDrawing,
        color: bool,
        negative_space: bool,
        hyperlinks: bool,
        files: bool,
        hide_hyperlink_hints: bool,
        relative_dir: Path,
        characters: Optional[str] = None,
    ) -> "MarkdownDisplay":
        """Create Markdown display data from notebook data."""
        content = data[cls.data_type]
        return cls(
            content,
            theme=theme,
            nerd_font=nerd_font,
            unicode=unicode,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            hyperlinks=hyperlinks,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
            characters=characters,
            relative_dir=relative_dir,
        )

    def __rich__(self) -> CustomMarkdown:
        """Render the Markdown display data."""
        rendered_markdown = markdown.CustomMarkdown(
            self.content,
            theme=self.theme,
            unicode=self.unicode,
            images=self.images,
            image_drawing=self.image_drawing,
            color=self.color,
            negative_space=self.negative_space,
            hyperlinks=self.hyperlinks,
            files=self.files,
            hide_hyperlink_hints=self.hide_hyperlink_hints,
            characters=self.characters,
            relative_dir=self.relative_dir,
        )
        return rendered_markdown
@dataclasses.dataclass
class LaTeXDisplay(DisplayData):
    """Notebook LaTeX display data."""

    data_type: ClassVar[str] = "text/latex"

    @classmethod
    def from_data(cls, data: Data) -> "LaTeXDisplay":
        """Build a LaTeX display from the notebook's data bundle."""
        return cls(data[cls.data_type])

    def __rich__(self) -> str:
        """Render the LaTeX content through the latex helper."""
        rendered: str = latex.render_latex(self.content)
        return rendered
@dataclasses.dataclass
class JSONDisplay(DisplayData):
    """Notebook JSON display data."""

    theme: str  # syntax-highlighting theme

    data_type: ClassVar[str] = "application/json"

    @classmethod
    def from_data(cls, data: Data, theme: str) -> "JSONDisplay":
        """Serialize the notebook's JSON payload and wrap it for display."""
        serialized = json.dumps(data[cls.data_type])
        return cls(serialized, theme=theme)

    def __rich__(self) -> Syntax:
        """Syntax-highlight the JSON content on a default background."""
        return syntax.Syntax(
            self.content,
            lexer_name="json",
            theme=self.theme,
            background_color="default",
        )
@dataclasses.dataclass
class PDFDisplay(DisplayData):
    """Notebook PDF display data."""

    nerd_font: bool  # prefer nerd-font glyph icons
    unicode: bool  # allow emoji fallback icons
    data_type: ClassVar[str] = "application/pdf"

    def __rich__(self) -> Union[str, Emoji]:
        """Render the PDF display data.

        PDFs cannot be drawn in the terminal, so a page icon is shown as
        a placeholder instead of the document content.
        """
        # NOTE(review): the first argument appears to be the nerd-font
        # icon and may have lost its glyph in transit — verify upstream.
        rendered_pdf = link.select_icon(
            "",
            emoji_name="page_facing_up",
            nerd_font=self.nerd_font,
            unicode=self.unicode,
        )
        return rendered_pdf

    @classmethod
    def from_data(
        cls,
        data: Data,
        nerd_font: bool,
        unicode: bool,
    ) -> "PDFDisplay":
        """Create PDF display data from notebook data."""
        content = data[cls.data_type]
        return cls(content, nerd_font=nerd_font, unicode=unicode)
def _render_html(
    data: Data,
    theme: str,
    nerd_font: bool,
    unicode: bool,
    images: bool,
    image_drawing: ImageDrawing,
    color: bool,
    negative_space: bool,
    hyperlinks: bool,
    files: bool,
    hide_hyperlink_hints: bool,
    relative_dir: Path,
    characters: Optional[str] = None,
) -> Union[DataFrameDisplay, HTMLDisplay]:
    """Render HTML output.

    DataFrame-shaped HTML gets the dedicated table renderer; any other
    HTML falls back to the generic markdown-based HTML renderer.
    """
    display_data: Union[DataFrameDisplay, HTMLDisplay]
    html_data = data["text/html"]
    dataframe_display_type = DataFrameDisplay.dataframe_display_type(html_data)
    styled = dataframe_display_type == DataFrameDisplayType.STYLED
    if dataframe_display_type is not None:
        display_data = DataFrameDisplay.from_data(data, unicode=unicode, styled=styled)
    else:
        display_data = HTMLDisplay.from_data(
            data,
            theme=theme,
            nerd_font=nerd_font,
            unicode=unicode,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            hyperlinks=hyperlinks,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
            characters=characters,
            relative_dir=relative_dir,
        )
    return display_data
def _choose_basic_renderer(
    data: Data,
    unicode: bool,
    nerd_font: bool,
    theme: str,
    images: bool,
    image_drawing: ImageDrawing,
    color: bool,
    negative_space: bool,
    hyperlinks: bool,
    files: bool,
    hide_hyperlink_hints: bool,
    relative_dir: Path,
    characters: Optional[str] = None,
) -> Union[MarkdownDisplay, LaTeXDisplay, JSONDisplay, PDFDisplay, PlainDisplay, None]:
    """Render straightforward text data.

    Checks the data bundle's MIME types from richest to plainest and
    returns the first matching display, or None when nothing matches.
    """
    if "text/markdown" in data:
        return MarkdownDisplay.from_data(
            data,
            theme=theme,
            nerd_font=nerd_font,
            unicode=unicode,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            hyperlinks=hyperlinks,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
            characters=characters,
            relative_dir=relative_dir,
        )
    if unicode and "text/latex" in data:
        return LaTeXDisplay.from_data(data)
    if "application/json" in data:
        return JSONDisplay.from_data(data, theme=theme)
    if (unicode or nerd_font) and "application/pdf" in data:
        return PDFDisplay.from_data(data, nerd_font=nerd_font, unicode=unicode)
    if "text/plain" in data:
        return PlainDisplay.from_data(data)
    return None
def render_display_data(
    data: Data,
    unicode: bool,
    plain: bool,
    nerd_font: bool,
    theme: str,
    images: bool,
    image_drawing: ImageDrawing,
    color: bool,
    negative_space: bool,
    hyperlinks: bool,
    files: bool,
    hide_hyperlink_hints: bool,
    relative_dir: Path,
    characters: Optional[str] = None,
) -> Union[DisplayData, None, Drawing]:
    """Render the notebook display data.

    Precedence: image data (when enabled), then HTML (unless ``plain``),
    then the basic text renderers; returns None when nothing matches.
    """
    display_data: Union[DisplayData, None, Drawing]
    if images:
        image_types = (
            "image/bmp",
            "image/gif",
            "image/jpeg",
            "image/png",
            "image/svg+xml",
        )
        # First matching image type that actually produces a drawing wins;
        # otherwise fall through to the text-based renderers.
        for image_type in image_types:
            if image_type in data:
                display_data = drawing.render_drawing(
                    data=data,
                    image_drawing=image_drawing,
                    image_type=image_type,
                    color=color,
                    negative_space=negative_space,
                )
                if display_data is not None:
                    return display_data
    if not plain and "text/html" in data:
        display_data = _render_html(
            data,
            unicode=unicode,
            theme=theme,
            nerd_font=nerd_font,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            hyperlinks=hyperlinks,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
            characters=characters,
            relative_dir=relative_dir,
        )
        return display_data
    else:
        display_data = _choose_basic_renderer(
            data,
            unicode=unicode,
            nerd_font=nerd_font,
            theme=theme,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            hyperlinks=hyperlinks,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
            characters=characters,
            relative_dir=relative_dir,
        )
        return display_data
|
timothymayor/Plate-Number-Classification | mymltask1.py | <reponame>timothymayor/Plate-Number-Classification<gh_stars>0
# -*- coding: utf-8 -*-
"""myMLTask1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kq28DL9TVVTefBqzzNk61Qg1iLK_N96W
#Task
1. You will build a basic classifier that detects whether an image is a plate number or not a plate number(Minimum of two machine learning algorithms should be used for this classifier)
2. Write a report of 100-200 words detailing how you achieved this task
"""
# Commented out IPython magic to ensure Python compatibility.
#from google.colab import drive
#drive.mount('/content/drive/')
from google.colab import drive
drive.mount('/gdrive')
# %cd /gdrive
!ls "/gdrive/My Drive/HNGInternship6.0/2. Machine Learning/Plate Numbers Classification/"
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import random
import os, cv2, itertools
import matplotlib.pyplot as plt
# %matplotlib inline
# Dataset locations on the mounted Google Drive.
plate_number_dir = '/gdrive/My Drive/HNGInternship6.0/2. Machine Learning/Plate Numbers Classification/plate_number/'
negative_images_dir = '/gdrive/My Drive/HNGInternship6.0/2. Machine Learning/Plate Numbers Classification/negative_images/'
# Target size every image is resized to before flattening.
ROWS = 64
COLS = 64
CHANNELS = 3
# Full paths of the positive (plate) and negative example images.
plate_numbers_img = [plate_number_dir+i for i in os.listdir(plate_number_dir)]
negative_images_img = [negative_images_dir+i for i in os.listdir(negative_images_dir)]
def read_image(file_path):
    """Load an image from disk and resize it to ROWS x COLS.

    Args:
        file_path: Path of the image file to load.

    Returns:
        The image resized to (ROWS, COLS) with CHANNELS color channels.

    Raises:
        FileNotFoundError: If the file is missing or unreadable as an
            image (cv2.imread returns None instead of raising).
    """
    img = cv2.imread(file_path, cv2.IMREAD_COLOR)
    if img is None:
        # Bug fix: cv2.imread silently returns None on failure, which
        # previously surfaced later as a cryptic cv2.resize error.
        raise FileNotFoundError("Could not read image: {}".format(file_path))
    resized_img = cv2.resize(img, (ROWS, COLS), interpolation=cv2.INTER_AREA)
    return resized_img
def prep_data(images):
    """Flatten images into a feature matrix and derive labels from filenames.

    Args:
        images: List of image file paths.

    Returns:
        (X, y): X has shape (ROWS*COLS*CHANNELS, m) with one flattened
        image per column; y has shape (1, m) with 1 for plate numbers
        and 0 for negative images.
    """
    m = len(images)
    n_x = ROWS*COLS*CHANNELS
    X = np.ndarray((n_x,m), dtype = np.uint8)
    y = np.zeros((1,m))
    print("X.shape is {}".format(X.shape))
    for i,image_file in enumerate(images):
        image = read_image(image_file)
        print(i, 'done')
        # Each image becomes one column of X.
        X[:,i] = np.squeeze(image.reshape((n_x,1)))
        # Labeling heuristic based on the filename.
        # NOTE(review): assumes plate files contain '-' and negatives
        # contain 'glass'; files matching neither keep the default 0.
        if '-' in image_file.lower():
            y[0,i] = 1
        elif 'glass' in image_file.lower():
            y[0,i] = 0
        if i%100 == 0 :
            print("Proceed {} of {}".format(i, m))
    return X,y
# NOTE(review): prep_data returns (X, y); the names below are misleading —
# plate_img is the full feature matrix and negative_img the label vector.
plate_img, negative_img = prep_data(plate_numbers_img + negative_images_img)
# Mapping from numeric label to human-readable class name.
classes = {0: 'Negative_Image',
           1: 'Plate_Number'}
def show_images(X, y, idx):
    """Display the image at row *idx* of X with its class label as title.

    Args:
        X: Array of flattened images, one image per row.
        y: Column of 0/1 labels aligned with the rows of X.
        idx: Row index of the image to display.
    """
    image = X[idx].reshape((ROWS, COLS, CHANNELS))
    plt.figure(figsize=(4, 2))
    # Bug fix: removed a stray trailing comma after imshow() left over
    # from the notebook export; it created a needless 1-tuple statement.
    plt.imshow(image)
    plt.title(classes[y[idx, 0]])
    plt.show()
# Preview the first sample (transpose so samples become rows).
show_images(plate_img.T, negative_img.T, 0)
# --- First model: cross-validated logistic regression ---
from sklearn.linear_model import LogisticRegressionCV
clf = LogisticRegressionCV()
# ravel() flattens the label array into the 1-D shape sklearn expects.
plate_img_lr, neg_img_lr = plate_img.T, negative_img.T.ravel()
clf.fit(plate_img_lr, neg_img_lr)
# NOTE(review): accuracy is measured on the training data itself —
# there is no held-out test split.
print("Model accuracy: {:.2f}%".format(clf.score(plate_img_lr, neg_img_lr)*100))
def show_image_prediction(X, idx, model):
    """Display image *idx* from X titled with the model's predicted class.

    Args:
        X: Array of flattened images, one image per row.
        idx: Row index of the image to display.
        model: Fitted sklearn classifier exposing predict().
    """
    image = X[idx].reshape(1,-1)
    # .item() converts the single-element prediction array to a scalar key.
    image_class = classes[model.predict(image).item()]
    image = image.reshape((ROWS, COLS, CHANNELS))
    plt.figure(figsize = (4,2))
    plt.imshow(image)
    plt.title("Test {} : I think this is {}".format(idx, image_class))
    plt.show()
# Re-point plate_img_lr/neg_img_lr at the transposed arrays for display.
# NOTE(review): neg_img_lr is no longer raveled from here on, so the
# later fit() calls receive a column-vector y — confirm intended.
plate_img_lr, neg_img_lr = plate_img.T, negative_img.T
# Show logistic-regression predictions on five random samples.
for i in np.random.randint(0, len(plate_img_lr), 5) :
    show_image_prediction(plate_img_lr, i, clf)
# --- Second model: k-nearest neighbours ---
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(plate_img_lr, neg_img_lr)
# NOTE(review): accuracy is again measured on the training data itself.
print("Model accuracy: {:.2f}%".format(knn.score(plate_img_lr, neg_img_lr)*100))
plate_img_lr, neg_img_lr = plate_img.T, negative_img.T
for i in np.random.randint(0, len(plate_img_lr), 5) :
    show_image_prediction(plate_img_lr, i, knn)
# --- Third model: radius neighbours ---
from sklearn.neighbors import RadiusNeighborsClassifier
rnc = RadiusNeighborsClassifier()
rnc.fit(plate_img_lr, neg_img_lr)
print("Model accuracy: {:.2f}%".format(rnc.score(plate_img_lr, neg_img_lr)*100))
plate_img_lr, neg_img_lr = plate_img.T, negative_img.T
for i in np.random.randint(0, len(plate_img_lr), 5):
    show_image_prediction(plate_img_lr, i, rnc)
SylvainCorlay/compiler-explorer | etc/scripts/cpyrighter.py | <reponame>SylvainCorlay/compiler-explorer<filename>etc/scripts/cpyrighter.py<gh_stars>0
# -*- coding: utf-8 -*-
from os import listdir, path, chdir, walk
from re import compile, match, sub, error
from datetime import datetime
from itertools import chain
if __name__ == '__main__':
    # Placeholder author name used when a copyright line has no names.
    def_name = '<NAME>'
    # Has to be called from the local folder. This should be improved
    chdir('../../')
    # Groups: ?1 comment slash; 2 copyright mark; ?3 starting year;
    # 4 current year; ?5 comma separator; ?6 names.
    license_re = compile(r'(// )?(Copyright \(c\) )(2012-)?(\d*)(,)?( ?.*)')
    year = datetime.utcnow().year
    non_recursive_searched_paths = ['static/', 'test/', 'test/handlers/', 'test/compilers/']
    recursive_searched_paths = ['lib/']
    ignored_files = set(['static/ansi-to-html.js', 'static/gccdump-view.js', 'static/gccdump-rtl-gimple-mode.js'])
    found_paths = ['./app.js', './LICENSE']
    for root in non_recursive_searched_paths:
        found_paths.extend(path.join(root, file_name) for file_name in listdir(root) if file_name.endswith('.js'))
    for root, _, files in chain.from_iterable(walk(searched_path) for searched_path in recursive_searched_paths):
        found_paths.extend(path.join(root, file_name) for file_name in files if file_name.endswith('.js'))
    # Renamed comprehension variable from `file`, which shadowed a builtin.
    found_paths = [found_path for found_path in found_paths if found_path not in ignored_files]
    change_count = 0
    # Bug fix: the loop variable was named `path`, shadowing the imported
    # os.path module for the remainder of the loop body.
    for found_path in found_paths:
        try:
            file_lines = []
            with open(found_path, 'r') as f:
                for line in f.readlines():
                    res = match(license_re, line)
                    subbed_line = line
                    try:
                        if res:
                            # Rebuild the copyright line with the current
                            # year, keeping any existing names.
                            sub_re = r'{}Copyright (c) 2012-{}, {}'.format(res.group(1) or '', year, res.group(6).strip() if res.group(6) else def_name)
                            subbed_line = sub(license_re, sub_re, line)
                            change_count += 1
                    except error as e:
                        # Parenthesized single-arg prints keep this script
                        # runnable on both Python 2 and Python 3.
                        print('Regex exception "{}" raised in {}\n'.format(e, found_path))
                    finally:
                        file_lines.append(subbed_line)
            with open(found_path, 'w+') as f:
                for line in file_lines:
                    f.write(line)
        except OSError as os_error:
            print('OS error: {}'.format(os_error))
    print('Validated {} files out of {}'.format(change_count, len(found_paths)))
|
mgoddard-pivotal/retail-data-service | scripts/data_migrator.py | <gh_stars>1-10
#!/usr/bin/env python
"""Migrate tweet data from a source persistence service to a destination.

Usage: data_migrator.py source_URL dest_URL
"""
import sys
import json
import requests

# Upper bound on tweets fetched per user in a single migration pass.
MAX_TWEETS_PER_USER = 10000

if len(sys.argv) != 3:
    # Parenthesized single-arg prints keep this script Python 2/3 compatible.
    print("Usage: %s source_URL dest_URL" % sys.argv[0])
    sys.exit(1)

src = sys.argv[1]
dest = sys.argv[2]


# Fetch the list of {"user": maxTweetId}, from which we only need the user portion
def getMaxTweetIds():
    """Return the source service's list of single-key {user: maxTweetId} dicts."""
    rv = []
    r = requests.get(src + "/tweetInfo/maxTweetIds")
    if r.status_code == 200:
        rv = r.json()
    return rv


# Store the JSON via the persistence service
def storeJson(jsonStr):
    """PUT the JSON string to the destination; return True on HTTP 200."""
    headers = {"Content-Type": "application/json"}
    r = requests.put(dest + "/retailEvent/event", headers=headers, data=jsonStr)
    return r.status_code == 200


# Store maxTweetId for screenName
def setMaxTweetId(screenName, maxTweetId):
    """Record maxTweetId for screenName on the destination; raise on failure."""
    headers = {"Content-Type": "application/json"}
    r = requests.put(dest + "/tweetInfo/" + screenName, headers=headers, data=str(maxTweetId))
    if r.status_code != 200:
        raise Exception("Failed to set maxTweetId for user %s" % screenName)


# Start the migration
for resp in getMaxTweetIds():
    # Bug fix: dict.keys() is not subscriptable on Python 3; wrap in list().
    screenName = list(resp.keys())[0]
    maxTweetId = resp[screenName]
    print("Screen name: %s, maxTweetId: %s" % (screenName, str(maxTweetId)))
    # MaxTweetID
    setMaxTweetId(screenName, maxTweetId)
    # RetailEvent
    r = requests.get(src + "/retailEvent/" + screenName + "/" + str(MAX_TWEETS_PER_USER))
    if r.status_code != 200:
        raise Exception("Failed to get tweets for user %s" % screenName)
    tweetList = r.json()
    for tweet in tweetList:
        storeJson(json.dumps(tweet))
|
Rq0/django-report-builder | report_builder/views.py | <reponame>Rq0/django-report-builder
import copy
import json
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import get_user_model
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, get_object_or_404
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView, View
from six import string_types
from .utils import duplicate
from .models import Report
from .mixins import DataExportMixin
User = get_user_model()
class ReportSPAView(TemplateView):
    """Serve the single-page-app shell for the report builder."""
    template_name = "report_builder/spa.html"

    def get_context_data(self, **kwargs):
        ctx = super(ReportSPAView, self).get_context_data(**kwargs)
        # Expose the async-report feature flag to the template.
        async_flag = getattr(settings, 'REPORT_BUILDER_ASYNC_REPORT', False)
        ctx['ASYNC_REPORT'] = async_flag
        return ctx
def fieldset_string_to_field(fieldset_dict, model):
    """Resolve field names in a fieldset dict to model field objects, in place.

    String entries in ``fieldset_dict['fields']`` are replaced with the
    corresponding model field; nested ``(name, options)`` pairs are marked
    ``recursive`` and resolved recursively.  Other entries are left as-is.
    """
    if isinstance(fieldset_dict['fields'], tuple):
        # Tuples are immutable; convert so entries can be replaced in place.
        fieldset_dict['fields'] = list(fieldset_dict['fields'])
    for i, dict_field in enumerate(fieldset_dict['fields']):
        if isinstance(dict_field, string_types):
            # NOTE(review): ``_meta.get_field_by_name`` was removed in
            # Django 1.10 (use ``_meta.get_field``) -- confirm the range of
            # Django versions this app supports.
            fieldset_dict['fields'][i] = model._meta.get_field_by_name(
                dict_field)[0]
        elif isinstance(dict_field, (list, tuple)):
            dict_field[1]['recursive'] = True
            fieldset_string_to_field(dict_field[1], model)
def get_fieldsets(model):
    """Return the model's optional ``report_builder_fieldsets``, resolving
    any string field names into real model fields first."""
    fieldsets = getattr(model, 'report_builder_fieldsets', None)
    if not fieldsets:
        return fieldsets
    for _name, fieldset_dict in model.report_builder_fieldsets:
        fieldset_string_to_field(fieldset_dict, model)
    return fieldsets
class DownloadFileView(DataExportMixin, View):
    """Build a report file and return it, synchronously or via Celery.

    GET with a report pk (and optional file type): when
    ``REPORT_BUILDER_ASYNC_REPORT`` is enabled the work is queued and a JSON
    ``{"task_id": ...}`` payload is returned; otherwise the generated file
    is returned directly as the HTTP response.
    """
    @method_decorator(staff_member_required)
    def dispatch(self, *args, **kwargs):
        # Restrict the endpoint to staff users.
        return super(DownloadFileView, self).dispatch(*args, **kwargs)
    def process_report(self, report_id, user_id,
                       file_type, to_response, queryset=None):
        """Run the report; return the response when ``to_response`` is True,
        otherwise trigger an asynchronous run and return None."""
        report = get_object_or_404(Report, pk=report_id)
        user = User.objects.get(pk=user_id)
        if to_response:
            return report.run_report(file_type, user, queryset)
        else:
            report.run_report(file_type, user, queryset, asynchronous=True)
    def get(self, request, *args, **kwargs):
        report_id = kwargs['pk']
        file_type = kwargs.get('filetype')
        if getattr(settings, 'REPORT_BUILDER_ASYNC_REPORT', False):
            # Deferred import: Celery is only required when async is enabled.
            from .tasks import report_builder_file_async_report_save
            report_task = report_builder_file_async_report_save.delay(
                report_id, request.user.pk, file_type)
            task_id = report_task.task_id
            return HttpResponse(
                json.dumps({'task_id': task_id}),
                content_type="application/json")
        else:
            return self.process_report(
                report_id, request.user.pk, file_type, to_response=True)
@staff_member_required
def ajax_add_star(request, pk):
    """Toggle the requesting user's star on a report.

    Responds with ``True`` when the star was added and ``False`` when it
    was removed.
    """
    report = get_object_or_404(Report, pk=pk)
    already_starred = request.user in report.starred.all()
    if already_starred:
        report.starred.remove(request.user)
    else:
        report.starred.add(request.user)
    return HttpResponse(not already_starred)
@staff_member_required
def create_copy(request, pk):
    """Duplicate a report together with its display and filter fields."""
    original = get_object_or_404(Report, pk=pk)
    new_report = duplicate(original, changes=(
        ('name', '{0} (copy)'.format(original.name)),
        ('user_created', request.user),
        ('user_modified', request.user),
    ))
    # ``duplicate`` does not copy related objects, so clone them by hand:
    # display fields first, then filter fields.
    related_fields = list(original.displayfield_set.all()) + \
        list(original.filterfield_set.all())
    for field in related_fields:
        clone = copy.copy(field)
        clone.pk = None
        clone.report = new_report
        clone.save()
    return redirect(new_report)
class ExportToReport(DownloadFileView, TemplateView):
    """Export objects (by ID and content type) to an existing or new report.

    In effect this runs the report with its display fields, but ignores the
    report's filters and instead filters to the provided IDs.  It can be
    selected as a global admin action.
    """
    template_name = "report_builder/export_to_report.html"
    def get_context_data(self, **kwargs):
        ctx = super(ExportToReport, self).get_context_data(**kwargs)
        ctx['admin_url'] = self.request.GET.get('admin_url', '/')
        ct = ContentType.objects.get_for_id(self.request.GET['ct'])
        ids = self.request.GET['ids'].split(',')
        ctx['ids'] = ",".join(map(str, ids))
        ctx['ct'] = ct.id
        ctx['number_objects'] = len(ids)
        # Only offer reports whose root model matches the exported objects.
        ctx['object_list'] = Report.objects.filter(
            root_model=ct).order_by('-modified')
        ctx['mode'] = ct.model_class()._meta.verbose_name
        return ctx
    def get(self, request, *args, **kwargs):
        if 'download' in request.GET:
            # Run the chosen report against just the selected objects.
            ct = ContentType.objects.get_for_id(request.GET['ct'])
            ids = self.request.GET['ids'].split(',')
            report = get_object_or_404(Report, pk=request.GET['download'])
            queryset = ct.model_class().objects.filter(pk__in=ids)
            return self.process_report(
                report.id, request.user.pk,
                to_response=True,
                queryset=queryset,
                file_type="xlsx",
            )
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
@staff_member_required
def check_status(request, pk, task_id):
    """Report whether the asynchronous report task has finished.

    Returns JSON with the Celery task state, a download link once the task
    has succeeded, and the email-notification feature flag.
    """
    # Deferred import: Celery is only needed when async reports are enabled.
    from celery.result import AsyncResult
    res = AsyncResult(task_id)
    link = ''
    if res.state == 'SUCCESS':
        report = get_object_or_404(Report, pk=pk)
        link = report.report_file.url
    return HttpResponse(
        json.dumps({
            'state': res.state,
            'link': link,
            'email': getattr(
                settings,
                'REPORT_BUILDER_EMAIL_NOTIFICATION',
                False
            )
        }),
        content_type="application/json")
|
vitkl/sandbox | 2019-08-time-series/bart/preprocess.py | <filename>2019-08-time-series/bart/preprocess.py
import argparse
import csv
import datetime
import logging
import multiprocessing
import os
import subprocess
import sys
import urllib
import torch
# Repository root and the local cache directory for downloaded data.
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA = os.path.join(ROOT, "data")
# BART hourly origin-destination ridership dumps, one gzipped csv per year.
# https://www.bart.gov/about/reports/ridership
SOURCE_DIR = "http://64.111.127.166/origin-destination/"
SOURCE_FILES = [
    "date-hour-soo-dest-2011.csv.gz",
    "date-hour-soo-dest-2012.csv.gz",
    "date-hour-soo-dest-2013.csv.gz",
    "date-hour-soo-dest-2014.csv.gz",
    "date-hour-soo-dest-2015.csv.gz",
    "date-hour-soo-dest-2016.csv.gz",
    "date-hour-soo-dest-2017.csv.gz",
    "date-hour-soo-dest-2018.csv.gz",
]
def mkdir_p(path):
    """Create ``path`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the previous check-then-create
    pattern, which could race between the ``exists`` test and ``makedirs``
    when several workers (e.g. the multiprocessing pool below) start at
    once.
    """
    os.makedirs(path, exist_ok=True)
def _load_hourly_od(args_basename):
    """Download, unzip and convert one yearly BART O-D csv to tensors.

    Takes a single ``(args, basename)`` tuple so it can be used with
    ``multiprocessing.Pool.map``.  The converted dataset is cached as a
    ``.pkl`` file in DATA and returned directly when already present.
    """
    args, basename = args_basename
    filename = os.path.join(DATA, basename.replace(".csv.gz", ".pkl"))
    if os.path.exists(filename):
        return torch.load(filename)
    # Download source files.
    mkdir_p(DATA)
    gz_filename = os.path.join(DATA, basename)
    if not os.path.exists(gz_filename):
        url = SOURCE_DIR + basename
        logging.debug("downloading {}".format(url))
        urllib.request.urlretrieve(url, gz_filename)
    csv_filename = gz_filename[:-3]
    assert csv_filename.endswith(".csv")
    if not os.path.exists(csv_filename):
        logging.debug("unzipping {}".format(gz_filename))
        subprocess.check_call(["gunzip", "-k", gz_filename])
    assert os.path.exists(csv_filename)
    # Convert to PyTorch.
    logging.debug("converting {}".format(csv_filename))
    # All timestamps are encoded as whole hours since this epoch.
    start_date = datetime.datetime.strptime("2000-01-01", "%Y-%m-%d")
    stations = {}
    num_rows = sum(1 for _ in open(csv_filename))
    logging.info("Formatting {} rows".format(num_rows))
    rows = torch.empty((num_rows, 4), dtype=torch.long)
    with open(csv_filename) as f:
        for i, (date, hour, origin, destin, trip_count) in enumerate(csv.reader(f)):
            date = datetime.datetime.strptime(date, "%Y-%m-%d")
            date += datetime.timedelta(hours=int(hour))
            rows[i, 0] = int((date - start_date).total_seconds() / 3600)
            # setdefault assigns consecutive integer ids in first-seen order.
            rows[i, 1] = stations.setdefault(origin, len(stations))
            rows[i, 2] = stations.setdefault(destin, len(stations))
            rows[i, 3] = int(trip_count)
            if i % 10000 == 0:
                sys.stderr.write(".")
                sys.stderr.flush()
    # Save data with metadata.
    dataset = {
        "args": args,
        "basename": basename,
        "start_date": start_date,
        "stations": stations,
        "rows": rows,
        "schema": ["time_hours", "origin", "destin", "trip_count"],
    }
    logging.debug("saving {}".format(filename))
    torch.save(dataset, filename)
    return dataset
def load_hourly_od(args=None):
    """Merge all yearly BART counts into one dense (hour, origin, destin)
    tensor, caching the result at DATA/full-counts.pkl.

    Per-year files are converted in parallel; each year's local station ids
    are remapped onto the union of station names across all years.
    """
    filename = os.path.join(DATA, "full-counts.pkl")
    if os.path.exists(filename):
        return torch.load(filename)
    datasets = multiprocessing.Pool().map(_load_hourly_od, [
        (args, basename)
        for basename in SOURCE_FILES
    ])
    stations = sorted(set().union(*(d["stations"].keys() for d in datasets)))
    min_time = min(int(d["rows"][:, 0].min()) for d in datasets)
    max_time = max(int(d["rows"][:, 0].max()) for d in datasets)
    num_rows = max_time - min_time + 1
    # BUG FIX: a trailing comma here previously made start_date a 1-tuple
    # instead of a datetime.
    start_date = datasets[0]["start_date"] + datetime.timedelta(hours=min_time)
    logging.info("Loaded data from {} stations, {} hours"
                 .format(len(stations), num_rows))
    result = torch.zeros(num_rows, len(stations), len(stations))
    for dataset in datasets:
        # Map each year's local station ids to global ids.
        part_stations = sorted(dataset["stations"], key=dataset["stations"].__getitem__)
        part_to_whole = torch.tensor(list(map(stations.index, part_stations)))
        time = dataset["rows"][:, 0] - min_time
        origin = part_to_whole[dataset["rows"][:, 1]]
        destin = part_to_whole[dataset["rows"][:, 2]]
        count = dataset["rows"][:, 3].float()
        result[time, origin, destin] = count
        # Free per-year tensors as soon as they are merged to bound memory.
        dataset.clear()
    logging.info("Loaded {} shaped data of mean {:0.3g}"
                 .format(result.shape, result.mean()))
    dataset = {
        "args": args,
        "stations": stations,
        "start_date": start_date,
        "counts": result,
    }
    torch.save(dataset, filename)
    return dataset
def main(args):
    """Entry point: build (or load from cache) the full preprocessed dataset."""
    load_hourly_od(args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="BART data preprocessor")
    parser.add_argument("-v", "--verbose", action="store_true")
    args = parser.parse_args()
    # %(relativeCreated) prints ms-since-start timestamps for progress logs.
    logging.basicConfig(format='%(relativeCreated) 9d %(message)s',
                        level=logging.DEBUG if args.verbose else logging.INFO)
    main(args)
|
vitkl/sandbox | 2019-02-censored/modeling_censored_time_to_event_data_with_pyro.py | <filename>2019-02-censored/modeling_censored_time_to_event_data_with_pyro.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""Modeling Censored Time to Event Data with Pyro
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1AWwBZ5S1hLEht2oSe1BiyDGHPLtqwt-N
"""
# we install the CPU-only version of torch here, since the download is fast
!pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp36-cp36m-linux_x86_64.whl
!pip install pyro-ppl
import pyro
import torch
import pyro.distributions as dist
import numpy as np
import seaborn as sns
from pyro import infer, optim
from pyro.infer.mcmc import HMC, MCMC
from pyro.infer import EmpiricalMarginal
assert pyro.__version__.startswith('0.3')
"""Let's first generate random samples:"""
# Simulate right-censored time-to-event data: exponential event times whose
# mean depends on covariate x through a softplus link, censored at c.
n = 500
a = 2
b = 4
c = 8
x = dist.Normal(0, 0.34).sample((n,))
link = torch.nn.functional.softplus(torch.tensor(a*x + b))
y = dist.Exponential(rate=1 / link).sample() # note param is rate, not mean
# 1.0 marks samples whose true event time exceeds the censoring point c.
truncation_label = (y > c).float()
y_obs = y.clamp(max=c)
sns.regplot(x.numpy(), y.numpy())
sns.regplot(x.numpy(), y_obs.numpy())
"""# Modeling using HMC
Here we will start with a basic model that uses HMC to conduct inference. Here the model is specified in such a way that each individual sample will be looped through sequentially. As a result this can be very slow. So we just let it run a few iterations for demonstration purpose.
In the next section, we'll introduce a much faster way of specifying the model.
"""
def model(x, y, truncation_label):
    """Censored exponential regression model, per-sample loop version.

    Observed samples contribute an Exponential likelihood; censored samples
    contribute a Bernoulli likelihood on the exceedance probability.
    """
    a_model = pyro.sample("a_model", dist.Normal(0, 10))
    b_model = pyro.sample("b_model", dist.Normal(0, 10))
    link = torch.nn.functional.softplus(a_model * x + b_model)
    for i in range(len(x)):
        y_hidden_dist = dist.Exponential(1 / link[i])
        if truncation_label[i] == 0:
            # Fully observed event time.
            y_real = pyro.sample("obs_{}".format(i),
                                 y_hidden_dist,
                                 obs = y[i])
        else:
            # Censored: we only know the event time exceeds y[i].
            truncation_prob = 1 - y_hidden_dist.cdf(y[i])
            pyro.sample("truncation_label_{}".format(i),
                        dist.Bernoulli(truncation_prob),
                        obs = truncation_label[i])
pyro.clear_param_store()
# HMC over the per-sample-loop model is very slow, so only a handful of
# samples are drawn for demonstration purposes.
hmc_kernel = HMC(model,
                 step_size = 0.1,
                 num_steps = 4)
mcmc_run = MCMC(hmc_kernel,
                num_samples=5,
                warmup_steps=1).run(x, y, truncation_label)
marginal_a = EmpiricalMarginal(mcmc_run,
                               sites="a_model")
posterior_a = [marginal_a.sample() for i in range(50)]
sns.distplot(posterior_a)
"""# Modeling using HMC with Vectorized Data
Here we try to make the estimation faster using the `plate` and `mask` function.
"""
def model(x, y, truncation_label):
    """Vectorized censored exponential regression model.

    Uses ``pyro.plate`` plus ``poutine.mask`` to score observed and
    censored samples in a single pass instead of a Python loop.
    """
    a_model = pyro.sample("a_model", dist.Normal(0, 10))
    b_model = pyro.sample("b_model", dist.Normal(0, 10))
    link = torch.nn.functional.softplus(a_model * x + b_model)
    with pyro.plate("data"):
        y_hidden_dist = dist.Exponential(1 / link)
        with pyro.poutine.mask(mask = (truncation_label == 0)):
            # Observed event times.
            pyro.sample("obs", y_hidden_dist,
                        obs = y)
        with pyro.poutine.mask(mask = (truncation_label == 1)):
            # Censored observations: likelihood of exceeding y.
            truncation_prob = 1 - y_hidden_dist.cdf(y)
            pyro.sample("truncation_label",
                        dist.Bernoulli(truncation_prob),
                        obs = torch.tensor(1.))
pyro.clear_param_store()
# The vectorized model is fast enough for a proper warmup and sample count.
hmc_kernel = HMC(model,
                 step_size = 0.1,
                 num_steps = 4)
mcmc_run = MCMC(hmc_kernel,
                num_samples=500,
                warmup_steps=1000).run(x, y, truncation_label)
marginal_a = EmpiricalMarginal(mcmc_run,
                               sites="a_model")
posterior_a = [marginal_a.sample() for i in range(100)]
sns.distplot(posterior_a)
"""# Modeling with SVI
Here we make inference using Stochastic Variational Inference. However here we have to define a guide function.
"""
from pyro.contrib.autoguide import AutoMultivariateNormal
# Stochastic Variational Inference with an automatic multivariate-normal
# guide over the same vectorized model.
guide = AutoMultivariateNormal(model)
pyro.clear_param_store()
adam_params = {"lr": 0.01, "betas": (0.90, 0.999)}
optimizer = optim.Adam(adam_params)
svi = infer.SVI(model,
                guide,
                optimizer,
                loss=infer.Trace_ELBO())
losses = []
for i in range(5000):
    # Note: training uses the censored observations y_obs, not the true y.
    loss = svi.step(x, y_obs, truncation_label)
    losses.append(loss)
    if i % 1000 == 0:
        print(', '.join(['{} = {}'.format(*kv) for kv in guide.median().items()]))
print('final result:')
for kv in sorted(guide.median().items()):
    print('median {} = {}'.format(*kv))
"""Let's check that the model has converged by plotting losses"""
sns.plt.plot(losses);
"""We can plot approximate posterior distribution using the [guide.quantiles()](http://docs.pyro.ai/en/dev/contrib.autoguide.html#pyro.contrib.autoguide.AutoContinuous.quantiles) function:"""
N = 1000
for name, quantiles in guide.quantiles(torch.arange(0., N) / N).items():
quantiles = np.array(quantiles)
pdf = 1 / (quantiles[1:] - quantiles[:-1]) / N
x = (quantiles[1:] + quantiles[:-1]) / 2
sns.plt.plot(x, pdf, label=name)
sns.plt.legend()
sns.plt.ylabel('density')
|
vitkl/sandbox | 2021-03-softplus_scales/cell2location_model_numpyro.py | import numpy as np
import numpyro as pyro
import numpyro.distributions as dist
import numpyro.optim as optim
import jax.numpy as jnp
from jax import device_put
from jax import random, jit, lax
import jax
from numpyro.infer import SVI, Trace_ELBO
from numpyro.infer.autoguide import AutoNormal
from tqdm.auto import tqdm
from jax import hessian, lax, random, tree_map
from numpyro.infer import init_to_median
from functools import partial
def init_to_mean(site=None):
    """
    Initialize to the prior mean; fallback to median if mean is undefined.
    """
    if site is None:
        # Support use both as ``init_to_mean`` and ``init_to_mean()``.
        return partial(init_to_mean)
    try:
        # Try .mean() method.
        if site['type'] == 'sample' and not site['is_observed'] and not site['fn'].is_discrete:
            value = site["fn"].mean
            # if jnp.isnan(value):
            #     raise ValueError
            if hasattr(site["fn"], "_validate_sample"):
                site["fn"]._validate_sample(value)
            return np.array(value)
    except (NotImplementedError, ValueError):
        # Fall back to a median.
        # This is required for distributions with infinite variance, e.g. Cauchy.
        return init_to_median(site)
    # NOTE(review): observed/discrete/non-sample sites fall through and
    # return None -- confirm that is the intended behaviour for numpyro
    # init_loc_fn callbacks.
class LocationModelLinearDependentWMultiExperimentModel():
    """Numpyro model decomposing spatial transcriptomics counts.

    Spot-level mRNA counts are modelled as a product of reference cell type
    signatures (``cell_state_mat``), per-gene technology scaling ``m_g``,
    per-location cell type abundances ``w_sf`` (regularised via a soft
    grouping of cell types into ``n_groups`` co-located groups), plus
    per-location and per-experiment additive background, with per-gene
    negative-binomial overdispersion.

    BUG FIX: the dict-typed keyword defaults were previously mutated in
    place inside ``__init__`` (extra keys written into them), which leaked
    state across instances and silently modified caller-supplied dicts.
    They are now shallow-copied before any mutation.
    """
    def __init__(
        self,
        n_obs,
        n_vars,
        n_factors,
        n_exper,
        cell_state_mat,
        batch_size=None,
        n_groups: int = 50,
        m_g_gene_level_prior={"mean": 1 / 2, "sd": 1 / 4},
        m_g_gene_level_var_prior={"mean_var_ratio": 1.0},
        cell_number_prior={
            "N_cells_per_location": 8.0,
            "A_factors_per_location": 7.0,
            "Y_groups_per_location": 7.0,
        },
        cell_number_var_prior={
            "N_cells_mean_var_ratio": 1.0,
        },
        alpha_g_phi_hyp_prior={"alpha": 9.0, "beta": 3.0},
        gene_add_alpha_hyp_prior={"alpha": 9.0, "beta": 3.0},
        gene_add_mean_hyp_prior={"alpha": 1.0, "beta": 100.0},
        w_sf_mean_var_ratio=5.0,
    ):
        super().__init__()
        # Copy dict hyperparameters before mutating them so the shared
        # keyword defaults and caller-supplied dicts stay untouched.
        m_g_gene_level_prior = dict(m_g_gene_level_prior)
        cell_number_prior = dict(cell_number_prior)
        self.n_obs = n_obs
        self.n_vars = n_vars
        self.n_factors = n_factors
        self.n_exper = n_exper
        self.batch_size = batch_size
        self.n_groups = n_groups
        # Merge the variance hyperparameters into the main m_g prior dict.
        for k in m_g_gene_level_var_prior.keys():
            m_g_gene_level_prior[k] = m_g_gene_level_var_prior[k]
        self.alpha_g_phi_hyp_prior = alpha_g_phi_hyp_prior
        self.w_sf_mean_var_ratio = w_sf_mean_var_ratio
        self.gene_add_alpha_hyp_prior = gene_add_alpha_hyp_prior
        self.gene_add_mean_hyp_prior = gene_add_mean_hyp_prior
        cell_number_prior["factors_per_groups"] = (
            cell_number_prior["A_factors_per_location"]
            / cell_number_prior["Y_groups_per_location"]
        )
        for k in cell_number_var_prior.keys():
            cell_number_prior[k] = cell_number_var_prior[k]
        self.cell_number_prior = cell_number_prior
        # compute hyperparameters from mean and sd
        # Gamma(shape, rate) from (mean, sd): shape = mean^2/sd^2, rate = mean/sd^2.
        self.m_g_gene_level_prior = m_g_gene_level_prior
        self.m_g_shape = jnp.array(
            (self.m_g_gene_level_prior["mean"] ** 2)
            / (self.m_g_gene_level_prior["sd"] ** 2)
        )
        self.m_g_rate = jnp.array(
            self.m_g_gene_level_prior["mean"]
            / (self.m_g_gene_level_prior["sd"] ** 2)
        )
        self.m_g_mean_var = jnp.array(self.m_g_gene_level_prior["mean_var_ratio"])
        self.eps = jnp.array(1e-8)
        self.cell_state_mat = cell_state_mat
        # Transposed signatures: (n_factors, n_vars).
        self.cell_state = jnp.array(cell_state_mat.T)
        self.N_cells_per_location = jnp.array(self.cell_number_prior["N_cells_per_location"])
        self.factors_per_groups = jnp.array(self.cell_number_prior["factors_per_groups"])
        self.Y_groups_per_location = jnp.array(self.cell_number_prior["Y_groups_per_location"])
        self.N_cells_mean_var_ratio = jnp.array(self.cell_number_prior["N_cells_mean_var_ratio"])
        self.alpha_g_phi_hyp_prior_alpha = jnp.array(self.alpha_g_phi_hyp_prior["alpha"])
        self.alpha_g_phi_hyp_prior_beta = jnp.array(self.alpha_g_phi_hyp_prior["beta"])
        self.gene_add_alpha_hyp_prior_alpha = jnp.array(self.gene_add_alpha_hyp_prior["alpha"])
        self.gene_add_alpha_hyp_prior_beta = jnp.array(self.gene_add_alpha_hyp_prior["beta"])
        self.gene_add_mean_hyp_prior_alpha = jnp.array(self.gene_add_mean_hyp_prior["alpha"])
        self.gene_add_mean_hyp_prior_beta = jnp.array(self.gene_add_mean_hyp_prior["beta"])
        self.w_sf_mean_var_ratio_tensor = jnp.array(self.w_sf_mean_var_ratio)
        self.n_factors_tensor = jnp.array(self.n_factors)
        self.n_groups_tensor = jnp.array(self.n_groups)
        self.ones = jnp.ones((1, 1))
        self.ones_scalar = jnp.ones(1)
        self.ones_1_n_groups = jnp.ones([1, self.n_groups])
    def create_plates(self, x_data, idx, obs2sample):
        """Return the observation plate (optionally subsampled for minibatches)."""
        if self.batch_size is None:
            # to support training on full data
            obs_axis = pyro.plate("obs_axis", self.n_obs, dim=-2)
        else:
            obs_axis = pyro.plate(
                "obs_axis",
                self.n_obs,
                dim=-2,
                subsample_size=self.batch_size,
                subsample=idx,
            )
        return [
            obs_axis
        ]
    def forward(self, x_data, idx, obs2sample):
        """Generative model of ``x_data`` (n_obs x n_vars count matrix).

        ``idx`` selects the minibatch rows; ``obs2sample`` is an
        (n_obs, n_exper) one-hot matrix mapping locations to experiments.
        """
        # obs2sample = batch_index # one_hot(batch_index, self.n_exper)
        (
            obs_axis,
        ) = self.create_plates(x_data, idx, obs2sample)
        # =====================Gene expression level scaling m_g======================= #
        # Explains difference in sensitivity for each gene between single cell and spatial technology
        m_g_alpha_hyp = pyro.sample(
            "m_g_alpha_hyp",
            dist.Gamma(self.m_g_shape * self.m_g_mean_var, self.m_g_mean_var),
        )
        m_g_beta_hyp = pyro.sample(
            "m_g_beta_hyp",
            dist.Gamma(self.m_g_rate * self.m_g_mean_var, self.m_g_mean_var),
        )
        m_g = pyro.sample("m_g", dist.Gamma(m_g_alpha_hyp, m_g_beta_hyp).expand([1, self.n_vars]).to_event(2))
        # =====================Cell abundances w_sf======================= #
        # factorisation prior on w_sf models similarity in locations
        # across cell types f and reflects the absolute scale of w_sf
        with obs_axis:
            n_s_cells_per_location = pyro.sample(
                "n_s_cells_per_location",
                dist.Gamma(
                    self.N_cells_per_location * self.N_cells_mean_var_ratio,
                    self.N_cells_mean_var_ratio,
                )
            )
            y_s_groups_per_location = pyro.sample(
                "y_s_groups_per_location",
                dist.Gamma(self.Y_groups_per_location, self.ones)
            )
        # cell group loadings
        shape = self.ones_1_n_groups * y_s_groups_per_location / self.n_groups_tensor
        rate = self.ones_1_n_groups / (
            n_s_cells_per_location / y_s_groups_per_location
        )
        with obs_axis:
            z_sr_groups_factors = pyro.sample(
                "z_sr_groups_factors", dist.Gamma(shape, rate) # .to_event(1)#.expand([self.n_groups]).to_event(1)
            ) # (n_obs, n_groups)
        k_r_factors_per_groups = pyro.sample(
            "k_r_factors_per_groups",
            dist.Gamma(self.factors_per_groups, self.ones).expand([self.n_groups, 1]).to_event(2)
        ) # (self.n_groups, 1)
        c2f_shape = k_r_factors_per_groups / self.n_factors_tensor
        x_fr_group2fact = pyro.sample(
            "x_fr_group2fact",
            dist.Gamma(c2f_shape, k_r_factors_per_groups).expand([self.n_groups, self.n_factors]).to_event(2)
        ) # (self.n_groups, self.n_factors)
        with obs_axis:
            # Cell type abundances: group loadings times group-to-factor map.
            w_sf_mu = z_sr_groups_factors @ x_fr_group2fact
            w_sf = pyro.sample(
                "w_sf",
                dist.Gamma(
                    w_sf_mu * self.w_sf_mean_var_ratio_tensor,
                    self.w_sf_mean_var_ratio_tensor,
                )
            ) # (self.n_obs, self.n_factors)
        # =====================Location-specific additive component======================= #
        l_s_add_alpha = pyro.sample("l_s_add_alpha", dist.Gamma(self.ones, self.ones))
        l_s_add_beta = pyro.sample("l_s_add_beta", dist.Gamma(self.ones, self.ones))
        with obs_axis:
            l_s_add = pyro.sample(
                "l_s_add", dist.Gamma(l_s_add_alpha, l_s_add_beta)
            ) # (self.n_obs, 1)
        # =====================Gene-specific additive component ======================= #
        # per gene molecule contribution that cannot be explained by
        # cell state signatures (e.g. background, free-floating RNA)
        s_g_gene_add_alpha_hyp = pyro.sample(
            "s_g_gene_add_alpha_hyp",
            dist.Gamma(
                self.gene_add_alpha_hyp_prior_alpha, self.gene_add_alpha_hyp_prior_beta
            )
        )
        s_g_gene_add_mean = pyro.sample(
            "s_g_gene_add_mean",
            dist.Gamma(
                self.gene_add_mean_hyp_prior_alpha,
                self.gene_add_mean_hyp_prior_beta,
            ).expand([self.n_exper, 1]).to_event(2)
        ) # (self.n_exper)
        s_g_gene_add_alpha_e_inv = pyro.sample(
            "s_g_gene_add_alpha_e_inv", dist.Exponential(s_g_gene_add_alpha_hyp).expand([self.n_exper, 1]).to_event(2)
        ) # (self.n_exper)
        s_g_gene_add_alpha_e = self.ones / jnp.power(s_g_gene_add_alpha_e_inv, 2) # (self.n_exper)
        s_g_gene_add = pyro.sample(
            "s_g_gene_add",
            dist.Gamma(
                s_g_gene_add_alpha_e, s_g_gene_add_alpha_e / s_g_gene_add_mean
            ).expand([self.n_exper, self.n_vars]).to_event(2)
        ) # (self.n_exper, n_vars)
        # =====================Gene-specific overdispersion ======================= #
        alpha_g_phi_hyp = pyro.sample(
            "alpha_g_phi_hyp",
            dist.Gamma(
                self.alpha_g_phi_hyp_prior_alpha, self.alpha_g_phi_hyp_prior_beta
            )
        )
        alpha_g_inverse = pyro.sample(
            "alpha_g_inverse", dist.Exponential(alpha_g_phi_hyp).expand([self.n_exper, self.n_vars]).to_event(2)
        ) # (self.n_exper, self.n_vars)
        # =====================Expected expression ======================= #
        # expected expression
        mu = (w_sf @ self.cell_state) * m_g + (obs2sample @ s_g_gene_add) + l_s_add
        theta = obs2sample @ (self.ones / jnp.power(alpha_g_inverse, 2))
        # =====================DATA likelihood ======================= #
        # Likelihood (sampling distribution) of data_target & add overdispersion via NegativeBinomial
        with obs_axis:
            pyro.sample(
                "data_target",
                dist.GammaPoisson(concentration=theta, rate=theta / mu),
                obs=x_data,
            )
        # =====================Compute mRNA count from each factor in locations ======================= #
        mRNA = w_sf * (self.cell_state * m_g).sum(-1)
        pyro.deterministic("u_sf_mRNA_factors", mRNA)
    def compute_expected(self, obs2sample):
        r"""Compute expected expression of each gene in each location. Useful for evaluating how well
        the model learned expression pattern of all genes in the data.

        Sets ``self.mu`` and ``self.alpha`` from posterior sample means in
        ``self.samples`` (must be populated by the training wrapper first).
        """
        self.mu = (
            np.dot(self.samples["post_sample_means"]["w_sf"], self.cell_state_mat.T)
            * self.samples["post_sample_means"]["m_g"]
            + np.dot(obs2sample, self.samples["post_sample_means"]["s_g_gene_add"])
            + self.samples["post_sample_means"]["l_s_add"]
        )
        self.alpha = np.dot(
            obs2sample,
            1
            / (
                self.samples["post_sample_means"]["alpha_g_inverse"]
                * self.samples["post_sample_means"]["alpha_g_inverse"]
            ),
        )
class LocationModelLinearDependentWMultiExperiment():
    """Training wrapper: numpyro SVI (AutoNormal guide) around the model.

    BUG FIX: the ``lax.scan`` training branch previously passed
    ``x_data=self.x_data`` to ``svi.update`` although ``self.x_data`` is
    never assigned, raising ``AttributeError`` whenever
    ``progressbar=False``; it now uses the local, device-placed ``x_data``.
    """
    def __init__(self, device='gpu',
                 init_loc_fn=init_to_mean, init_scale=0.1,
                 **kwargs):
        super().__init__()
        pyro.set_platform(platform=device)
        self.hist = []
        self._model = LocationModelLinearDependentWMultiExperimentModel(**kwargs)
        self._guide = AutoNormal(
            self.model.forward,
            init_loc_fn=init_loc_fn,
            init_scale=init_scale,
            create_plates=self.model.create_plates
        )
    @property
    def model(self):
        return self._model
    @property
    def guide(self):
        return self._guide
    def _train_full_data(self, x_data, obs2sample, n_epochs=20000, lr=0.002,
                         progressbar=True, random_seed=1):
        """Train on the full dataset; loss history ends up in ``self.hist``."""
        idx = np.arange(x_data.shape[0]).astype("int64")
        # move data to default device
        x_data = device_put(jnp.array(x_data))
        extra_data = {'idx': device_put(jnp.array(idx)),
                      'obs2sample': device_put(jnp.array(obs2sample))}
        # initialise SVI inference method
        svi = SVI(self.model.forward, self.guide,
                  # limit the gradient step from becoming too large
                  optim.ClippedAdam(clip_norm=jnp.array(200),
                                    **{'step_size': jnp.array(lr)}),
                  loss=Trace_ELBO())
        init_state = svi.init(random.PRNGKey(random_seed),
                              x_data=x_data, **extra_data)
        self.state = init_state
        if not progressbar:
            # Training in one step: compile the whole loop with lax.scan.
            epochs_iterator = tqdm(range(1))
            for e in epochs_iterator:
                # BUG FIX: use the local x_data (self.x_data was never set).
                state, losses = lax.scan(lambda state_1, i: svi.update(state_1,
                                                                       x_data=x_data, **extra_data),
                                         # TODO for minibatch DataLoader goes here
                                         init_state, jnp.arange(n_epochs))
                epochs_iterator.set_description('ELBO Loss: ' + '{:.4e}'.format(losses[::-1][0]))
            self.state = state
            self.hist = losses
        else:
            # training using for-loop with a jit-compiled single step
            jit_step_update = jit(lambda state_1: svi.update(state_1, x_data=x_data, **extra_data))
            # TODO figure out minibatch static_argnums https://github.com/pyro-ppl/numpyro/issues/869
            ### very slow
            epochs_iterator = tqdm(range(n_epochs))
            for e in epochs_iterator:
                self.state, loss = jit_step_update(self.state)
                self.hist.append(loss)
                epochs_iterator.set_description('ELBO Loss: ' + '{:.4e}'.format(loss))
        self.state_param = svi.get_params(self.state).copy()
import pandas as pd
from scipy.sparse import csr_matrix
def get_cluster_averages(adata_ref, cluster_col):
    """
    Compute per-cluster average expression of each gene.

    :param adata_ref: AnnData object of reference single-cell dataset
    :param cluster_col: Name of adata_ref.obs column containing cluster labels
    :returns: pd.DataFrame of cluster average expression of each gene
        (genes x clusters), indexed by ``adata_ref.raw.var_names``
    """
    if not adata_ref.raw:
        raise ValueError("AnnData object has no raw data")
    if sum(adata_ref.obs.columns == cluster_col) != 1:
        raise ValueError("cluster_col is absent in adata_ref.obs or not unique")
    all_clusters = np.unique(adata_ref.obs[cluster_col])
    # Collect per-cluster means and stack once at the end.  The previous
    # implementation grew the matrix with np.concatenate inside the loop
    # (quadratic copying) and needed a dummy zero row sliced off afterwards.
    cluster_means = []
    for c in all_clusters:
        sparse_subset = csr_matrix(adata_ref.raw.X[np.isin(adata_ref.obs[cluster_col], c), :])
        cluster_means.append(np.asarray(sparse_subset.mean(0)).ravel())
    if cluster_means:
        averages_mat = np.vstack(cluster_means).T
    else:
        # No clusters: genes x 0 matrix, matching the old behaviour.
        averages_mat = np.zeros((adata_ref.raw.X.shape[1], 0))
    averages_df = pd.DataFrame(data=averages_mat,
                               index=adata_ref.raw.var_names,
                               columns=all_clusters)
    return averages_df
from jax import random
from jax.experimental import stax
import jax.numpy as jnp
from jax.random import PRNGKey
import numpyro
Log1p = stax.elementwise(jax.lax.log1p)
def encoder(hidden_dim, z_dim):
    """stax encoder: log1p-transformed input -> (z location, z scale) pair."""
    return stax.serial(
        Log1p,
        stax.Dense(hidden_dim, W_init=stax.randn()), stax.Softplus,
        stax.FanOut(2),
        # Two heads: unconstrained location, positive scale (via Exp).
        stax.parallel(stax.Dense(z_dim, W_init=stax.randn()),
                      stax.serial(stax.Dense(z_dim, W_init=stax.randn()), stax.Exp)),
    )
def decoder(hidden_dim, out_dim):
    """stax decoder: latent z -> per-output probabilities in (0, 1)."""
    return stax.serial(
        stax.Dense(hidden_dim, W_init=stax.randn()), stax.Softplus,
        stax.Dense(out_dim, W_init=stax.randn()), stax.Sigmoid,
    )
def model(batch, hidden_dim=400, z_dim=100):
    """VAE generative model: standard-normal latent -> Bernoulli outputs."""
    # Flatten everything after the batch dimension.
    batch = jnp.reshape(batch, (batch.shape[0], -1))
    batch_dim, out_dim = jnp.shape(batch)
    decode = numpyro.module('decoder', decoder(hidden_dim, out_dim), (batch_dim, z_dim))
    z = numpyro.sample('z', dist.Normal(jnp.zeros((z_dim,)), jnp.ones((z_dim,))))
    img_loc = decode(z)
    return numpyro.sample('obs', dist.Bernoulli(img_loc), obs=batch)
def guide(batch, hidden_dim=400, z_dim=100):
    """VAE guide: amortized Normal posterior over z from the encoder net."""
    batch = jnp.reshape(batch, (batch.shape[0], -1))
    batch_dim, out_dim = jnp.shape(batch)
    encode = numpyro.module('encoder', encoder(hidden_dim, z_dim), (batch_dim, out_dim))
    z_loc, z_std = encode(batch)
    z = numpyro.sample('z', dist.Normal(z_loc, z_std))
    return z
|
vitkl/sandbox | 2019-11-lowrank/experiment.py | <filename>2019-11-lowrank/experiment.py
import argparse
import os
import torch
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoLowRankMultivariateNormal
from pyro.optim import ClippedAdam
class Model:
    """Synthetic linear-Gaussian model with randomly drawn fixed parameters.

    Latent ``z`` (dim,) has a diagonal Normal prior; the observation is
    ``z @ mat`` scored under an independent Normal of dimension ``rank``.
    """
    def __init__(self, dim, rank):
        self.dim = dim
        self.rank = rank
        # Fixed parameters, drawn once at construction.
        self.loc1 = dist.Laplace(0, 1).sample((dim,))
        self.scale1 = dist.Exponential(1).sample((dim,))
        self.loc2 = dist.Laplace(0, 1).sample((rank,))
        self.scale2 = dist.Exponential(1).sample((rank,))
        self.mat = dist.Normal(0, 1).sample((dim, rank))
    def __call__(self):
        z = pyro.sample("z",
                        dist.Normal(self.loc1, self.scale1)
                        .expand([self.dim]).to_event(1))
        pyro.sample("x",
                    dist.Normal(self.loc2, self.scale2)
                    .expand([self.rank]).to_event(1),
                    obs=z @ self.mat)
def train(args):
    """Fit an AutoLowRankMultivariateNormal guide to the synthetic model."""
    # The model's observation rank is twice the guide's rank.
    model = Model(args.dim, 2 * args.rank)
    guide = AutoLowRankMultivariateNormal(model, rank=args.rank, init_scale=0.01)
    optim = ClippedAdam({"lr": args.learning_rate})
    elbo = Trace_ELBO()
    svi = SVI(model, guide, optim, elbo)
    losses = []
    for step in range(args.num_steps):
        # Normalize by dimension so losses are comparable across sizes.
        loss = svi.step() / args.dim
        losses.append(loss)
        if step % 100 == 0:
            print("step {: >4} loss = {:0.8g}".format(step, loss))
if __name__ == '__main__':
    # Command-line entry point for the low-rank guide experiment.
    parser = argparse.ArgumentParser(description="experiment runner")
    parser.add_argument("-d", "--dim", default=100, type=int)
    parser.add_argument("-r", "--rank", default=10, type=int)
    parser.add_argument("-n", "--num-steps", default=1000, type=int)
    parser.add_argument("-lr", "--learning-rate", default=0.01, type=float)
    args = parser.parse_args()
    train(args)
|
vitkl/sandbox | 2019-08-time-series/bart/forecast.py | import logging
import math
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
import torch
import torch.nn as nn
from pyro.infer import SVI, Trace_ELBO, TraceMeanField_ELBO
from pyro.optim import ClippedAdam
from torch.distributions import constraints
import funsor
import funsor.distributions as fdist
import funsor.ops as ops
from funsor.domains import reals
from funsor.interpreter import interpretation
from funsor.montecarlo import monte_carlo
from funsor.pyro.convert import dist_to_funsor, matrix_and_mvn_to_funsor, tensor_to_funsor
from funsor.sum_product import MarkovProduct
from funsor.terms import normalize
def vm(vector, matrix):
    """Batched vector-matrix product: returns ``vector @ matrix``."""
    expanded = vector.unsqueeze(-2)
    return torch.matmul(expanded, matrix).squeeze(-2)
def bounded_exp(x, bound):
    """Smooth positive transform saturating at ``bound``:
    ``sigmoid(x - log(bound)) * bound``."""
    shifted = x - math.log(bound)
    return shifted.sigmoid() * bound
def make_time_features(args, begin_time, end_time):
    """Fourier features of hour-of-week for hours [begin_time, end_time).

    ``args`` is unused; kept for a uniform feature-maker signature.
    Returns a (end_time - begin_time, 168) tensor holding cosine then sine
    terms at harmonics 1..84 of the weekly period.
    """
    hours = torch.arange(begin_time, end_time, dtype=torch.float)
    week_phase = hours / (24 * 7) % 1. * (2 * math.pi)
    harmonics = torch.arange(1., 1. + 24 * 7 / 2)
    angles = week_phase.unsqueeze(-1) * harmonics
    return torch.cat([angles.cos(), angles.sin()], dim=-1)
class Model(nn.Module):
    """
    The main generative model.
    This is used for both training and forecasting.

    A linear-Gaussian HMM emits a ``gate_rate`` vector; observed trip counts
    are zero-inflated Poisson given one (gate, rate) pair per
    origin-destination station pair.
    """
    def __init__(self, args, features, trip_counts):
        super().__init__()
        self.args = args
        self.num_stations = trip_counts.size(-1)
        feature_dim = features.size(-1)
        # One (gate, rate) pair per origin-destination station pair.
        gate_rate_dim = 2 * self.num_stations ** 2
        # Small net mapping time features to (loc, scale) of gate_rate.
        self.nn = nn.Sequential(
            nn.Linear(feature_dim, args.model_nn_dim),
            nn.Sigmoid(),
            nn.Linear(args.model_nn_dim, 2 * gate_rate_dim))
        self.nn[0].bias.data.fill_(0)
        self.nn[2].bias.data.fill_(0)
    def _unpack_gate_rate(self, gate_rate, event_dim):
        """
        Unpack the ``gate_rate`` pair output from the neural net.
        This can be seen as a final layer of the neural net.

        Returns ``(gate, rate)`` with trailing shape ``(time, n, n)``.
        """
        n = self.num_stations
        sample_shape = gate_rate.shape[:-3 - event_dim]
        time_shape = gate_rate.shape[-event_dim:-1]
        if not time_shape:
            time_shape = (1,)
        gate, rate = gate_rate.reshape(sample_shape + time_shape + (2, n, n)).unbind(-3)
        # Keep the zero-inflation probability away from {0, 1} for stability.
        gate = gate.sigmoid().clamp(min=0.01, max=0.99)
        # Positive Poisson rate, saturating at 1e4.
        rate = bounded_exp(rate, bound=1e4)
        return gate, rate
    def _dynamics(self, features):
        """
        Compute dynamics parameters from time features.

        Returns ``(init_dist, trans_matrix, trans_dist, obs_matrix, obs_dist)``
        as consumed by ``dist.GaussianHMM``.
        """
        device = features.device
        state_dim = self.args.state_dim
        gate_rate_dim = 2 * self.num_stations ** 2
        # Initial state distribution with a learnable diagonal scale.
        init_loc = torch.zeros(state_dim, device=device)
        init_scale_tril = pyro.param("init_scale",
                                     torch.full((state_dim,), 10., device=device),
                                     constraint=constraints.positive).diag_embed()
        init_dist = dist.MultivariateNormal(init_loc, scale_tril=init_scale_tril)
        # Transition initialized near identity (slowly mixing dynamics).
        trans_matrix = pyro.param("trans_matrix",
                                  0.99 * torch.eye(state_dim, device=device))
        trans_loc = torch.zeros(state_dim, device=device)
        trans_scale_tril = pyro.param("trans_scale",
                                      0.1 * torch.ones(state_dim, device=device),
                                      constraint=constraints.positive).diag_embed()
        trans_dist = dist.MultivariateNormal(trans_loc, scale_tril=trans_scale_tril)
        # Observation matrix, renormalized to unit rows each call.
        obs_matrix = pyro.param("obs_matrix", torch.randn(state_dim, gate_rate_dim, device=device))
        obs_matrix.data /= obs_matrix.data.norm(dim=-1, keepdim=True)
        # Time-dependent observation noise predicted by the neural net.
        loc_scale = self.nn(features)
        loc, scale = loc_scale.reshape(loc_scale.shape[:-1] + (2, gate_rate_dim)).unbind(-2)
        scale = bounded_exp(scale, bound=10.)
        obs_dist = dist.Normal(loc, scale).to_event(1)
        return init_dist, trans_matrix, trans_dist, obs_matrix, obs_dist
    def forward(self, features, trip_counts):
        """Dispatch to the funsor, mean-field, or plain Pyro implementation
        depending on ``args``."""
        pyro.module("model", self)
        if self.args.funsor:
            return self._forward_funsor(features, trip_counts)
        elif self.args.mean_field:
            return self._forward_pyro_mean_field(features, trip_counts)
        else:
            return self._forward_pyro(features, trip_counts)
    def _forward_pyro(self, features, trip_counts):
        """Condition on observed counts via a GaussianHMM, then optionally
        forecast the remaining ``total_hours - observed_hours`` hours."""
        total_hours = len(features)
        observed_hours, num_origins, num_destins = trip_counts.shape
        assert observed_hours <= total_hours
        assert num_origins == self.num_stations
        assert num_destins == self.num_stations
        time_plate = pyro.plate("time", observed_hours, dim=-3)
        origins_plate = pyro.plate("origins", num_origins, dim=-2)
        destins_plate = pyro.plate("destins", num_destins, dim=-1)
        # The first half of the model performs exact inference over
        # the observed portion of the time series.
        hmm = dist.GaussianHMM(*self._dynamics(features[:observed_hours]))
        gate_rate = pyro.sample("gate_rate", hmm)
        gate, rate = self._unpack_gate_rate(gate_rate, event_dim=2)
        with time_plate, origins_plate, destins_plate:
            pyro.sample("trip_count", dist.ZeroInflatedPoisson(gate, rate),
                        obs=trip_counts)
        # The second half of the model forecasts forward.
        if total_hours > observed_hours:
            state_dist = hmm.filter(gate_rate)
            return self._forward_pyro_forecast(
                features, trip_counts, origins_plate, destins_plate,
                state_dist=state_dist)
    def _forward_pyro_forecast(self, features, trip_counts, origins_plate, destins_plate,
                               state=None, state_dist=None):
        """
        Sequentially sample future latent states and trip counts.

        Exactly one of ``state`` (last latent state) or ``state_dist``
        (filtered distribution over the last state) should be provided; after
        the first step the state distribution is propagated via the learned
        transition. Returns a list of forecast count tensors, one per hour.
        """
        total_hours = len(features)
        observed_hours, num_origins, num_destins = trip_counts.shape
        forecast = []
        forecast_hours = total_hours - observed_hours
        if forecast_hours > 0:
            _, trans_matrix, trans_dist, obs_matrix, obs_dist = \
                self._dynamics(features[observed_hours:])
        for t in range(forecast_hours):
            if state is not None:
                # Propagate the previous sampled state through the dynamics.
                loc = vm(state, trans_matrix) + trans_dist.loc
                scale_tril = trans_dist.scale_tril
                state_dist = dist.MultivariateNormal(loc, scale_tril=scale_tril)
            state = pyro.sample("state_{}".format(t), state_dist)
            loc = vm(state, obs_matrix) + obs_dist.base_dist.loc[..., t, :]
            scale = obs_dist.base_dist.scale[..., t, :]
            gate_rate = pyro.sample("gate_rate_{}".format(t),
                                    dist.Normal(loc, scale).to_event(1))
            gate, rate = self._unpack_gate_rate(gate_rate, event_dim=1)
            with origins_plate, destins_plate:
                forecast.append(pyro.sample("trip_count_{}".format(t),
                                            dist.ZeroInflatedPoisson(gate, rate)))
        return forecast
    def _forward_funsor(self, features, trip_counts):
        """Build the model as funsor log-density factors; returns a
        ``(prior, likelihood)`` pair over the free variable ``gate_rate_t``."""
        total_hours = len(features)
        observed_hours, num_origins, num_destins = trip_counts.shape
        assert observed_hours == total_hours
        assert num_origins == self.num_stations
        assert num_destins == self.num_stations
        n = self.num_stations
        gate_rate = funsor.Variable("gate_rate_t", reals(observed_hours, 2 * n * n))["time"]
        @funsor.torch.function(reals(2 * n * n), (reals(n, n, 2), reals(n, n)))
        def unpack_gate_rate(gate_rate):
            # Funsor-wrapped analogue of Model._unpack_gate_rate; the gate is
            # expanded to a 2-way categorical over (not gated, gated).
            batch_shape = gate_rate.shape[:-1]
            gate, rate = gate_rate.reshape(batch_shape + (2, n, n)).unbind(-3)
            gate = gate.sigmoid().clamp(min=0.01, max=0.99)
            rate = bounded_exp(rate, bound=1e4)
            gate = torch.stack((1 - gate, gate), dim=-1)
            return gate, rate
        # Create a Gaussian latent dynamical system.
        init_dist, trans_matrix, trans_dist, obs_matrix, obs_dist = \
            self._dynamics(features[:observed_hours])
        init = dist_to_funsor(init_dist)(value="state")
        trans = matrix_and_mvn_to_funsor(trans_matrix, trans_dist,
                                         ("time",), "state", "state(time=1)")
        obs = matrix_and_mvn_to_funsor(obs_matrix, obs_dist,
                                       ("time",), "state(time=1)", "gate_rate")
        # Compute dynamic prior over gate_rate.
        prior = trans + obs(gate_rate=gate_rate)
        # Eliminate the time dimension via parallel-scan Markov product.
        prior = MarkovProduct(ops.logaddexp, ops.add,
                              prior, "time", {"state": "state(time=1)"})
        prior += init
        prior = prior.reduce(ops.logaddexp, {"state", "state(time=1)"})
        # Compute zero-inflated Poisson likelihood.
        gate, rate = unpack_gate_rate(gate_rate)
        likelihood = fdist.Categorical(gate["origin", "destin"], value="gated")
        trip_counts = tensor_to_funsor(trip_counts, ("time", "origin", "destin"))
        # Mixture over the latent "gated" indicator: Poisson vs point mass at 0.
        likelihood += funsor.Stack("gated", (
            fdist.Poisson(rate["origin", "destin"], value=trip_counts),
            fdist.Delta(0, value=trip_counts)))
        likelihood = likelihood.reduce(ops.logaddexp, "gated")
        likelihood = likelihood.reduce(ops.add, {"time", "origin", "destin"})
        assert set(prior.inputs) == {"gate_rate_t"}, prior.inputs
        assert set(likelihood.inputs) == {"gate_rate_t"}, likelihood.inputs
        return prior, likelihood
    def _forward_pyro_mean_field(self, features, trip_counts):
        """Mean-field variant: sample all latents from the guide up front,
        then score them with explicit CRF factors instead of a GaussianHMM."""
        total_hours = len(features)
        observed_hours, num_origins, num_destins = trip_counts.shape
        assert observed_hours <= total_hours
        assert num_origins == self.num_stations
        assert num_destins == self.num_stations
        time_plate = pyro.plate("time", observed_hours, dim=-3)
        origins_plate = pyro.plate("origins", num_origins, dim=-2)
        destins_plate = pyro.plate("destins", num_destins, dim=-1)
        init_dist, trans_matrix, trans_dist, obs_matrix, obs_dist = \
            self._dynamics(features[:observed_hours])
        # This is a parallelizable crf representation of the HMM.
        # We first pull random variables from the guide, masking all factors.
        with poutine.mask(mask=False):
            shape = (1 + observed_hours, self.args.state_dim)  # includes init
            state = pyro.sample("state",
                                dist.Normal(0, 1).expand(shape).to_event(2))
            shape = (observed_hours, 2 * num_origins * num_destins)
            gate_rate = pyro.sample("gate_rate",
                                    dist.Normal(0, 1).expand(shape).to_event(2))
        # We then declare CRF factors.
        pyro.sample("init", init_dist, obs=state[0])
        pyro.sample("trans", trans_dist.expand((observed_hours,)).to_event(1),
                    obs=state[..., 1:, :] - state[..., :-1, :] @ trans_matrix)
        pyro.sample("obs", obs_dist.expand((observed_hours,)).to_event(1),
                    obs=gate_rate - state[..., 1:, :] @ obs_matrix)
        gate, rate = self._unpack_gate_rate(gate_rate, event_dim=2)
        with time_plate, origins_plate, destins_plate:
            pyro.sample("trip_count", dist.ZeroInflatedPoisson(gate, rate),
                        obs=trip_counts)
        # The second half of the model forecasts forward.
        if total_hours > observed_hours:
            return self._forward_pyro_forecast(
                features, trip_counts, origins_plate, destins_plate,
                state=state[..., -1, :])
class Guide(nn.Module):
    """
    The guide, aka encoder part of a variational autoencoder.
    This operates independently over time.
    """
    def __init__(self, args, features, trip_counts):
        super().__init__()
        self.args = args
        feature_dim = features.size(-1)
        num_stations = trip_counts.size(-1)
        # Gate and rate are each sampled from diagonal normal distributions
        # whose parameters are estimated from the current counts using
        # a diagonal + low-rank approximation.
        self.diag_part = nn.Parameter(torch.zeros(2 * 2, 1))
        self.lowrank = nn.Sequential(
            nn.Linear(feature_dim + num_stations ** 2, args.guide_rank),
            nn.Sigmoid(),
            nn.Linear(args.guide_rank, 2 * 2 * num_stations ** 2))
        self.lowrank[0].bias.data.fill_(0)
        self.lowrank[2].bias.data.fill_(0)
        # Latent state is sampled from a diagonal distribution whose parameters
        # are estimated via a diagonal + pooled approximation.
        if args.mean_field:
            self.mf_layer_0 = nn.Linear(2 + feature_dim + num_stations ** 2, args.guide_rank)
            self.mf_layer_1 = nn.Linear(args.guide_rank, 2 * args.state_dim)
            self.mf_highpass = nn.Parameter(torch.full((2 * args.state_dim,), 0.5))
            self.mf_lowpass = nn.Parameter(torch.full((2 * args.state_dim,), 0.5))
            self.mf_layer_0.bias.data.fill_(0)
            self.mf_layer_1.bias.data.fill_(0)
    def forward(self, features, trip_counts):
        """
        Sample ``gate_rate`` (and ``state`` in mean-field mode) from an
        amortized diagonal-normal posterior; in funsor mode, return the
        posterior over ``gate_rate_t`` as a funsor instead of sampling.
        """
        pyro.module("guide", self)
        assert features.dim() == 2
        assert trip_counts.dim() == 3
        observed_hours = len(trip_counts)
        log_counts = trip_counts.reshape(observed_hours, -1).log1p()
        # Diagonal term (elementwise on log counts) plus low-rank correction.
        loc_scale = ((self.diag_part * log_counts.unsqueeze(-2)).reshape(observed_hours, -1) +
                     self.lowrank(torch.cat([features[:observed_hours], log_counts], dim=-1)))
        loc, scale = loc_scale.reshape(observed_hours, 2, -1).unbind(1)
        scale = bounded_exp(scale, bound=10.)
        if self.args.funsor:
            diag_normal = dist.Normal(loc, scale).to_event(2)
            return dist_to_funsor(diag_normal)(value="gate_rate_t")
        pyro.sample("gate_rate", dist.Normal(loc, scale).to_event(2))
        if self.args.mean_field:
            # Extra per-hour features: distance from either end of the window.
            time = torch.arange(observed_hours, dtype=features.dtype, device=features.device)
            temp = torch.cat([
                time.unsqueeze(-1),
                (observed_hours - 1 - time).unsqueeze(-1),
                features[:observed_hours],
                log_counts,
            ], dim=-1)
            temp = self.mf_layer_0(temp).sigmoid()
            temp = self.mf_layer_1(temp).sigmoid()
            # Mix per-hour ("highpass") and time-averaged ("lowpass") signals.
            temp = (self.mf_highpass * temp +
                    self.mf_lowpass * temp.mean(0, keepdim=True))
            temp = torch.cat([temp[:1], temp], dim=0)  # copy initial state.
            loc = temp[:, :self.args.state_dim]
            scale = bounded_exp(temp[:, self.args.state_dim:], bound=10.)
            pyro.sample("state", dist.Normal(loc, scale).to_event(2))
class Funsor_ELBO:
    """
    ELBO loss computed symbolically with funsor: either an analytic KL term
    plus a Monte Carlo likelihood term, or a fully Monte Carlo estimate.
    Expects ``model`` to return ``(prior, likelihood)`` funsors and ``guide``
    to return a posterior funsor, all over the variable ``gate_rate_t``.
    """
    def __init__(self, args):
        self.args = args
    def __call__(self, model, guide, features, trip_counts):
        """Return the negative ELBO as a plain ``torch.Tensor`` scalar."""
        q = guide(features, trip_counts)
        if self.args.debug:
            print(f"q = {q.quote()}")
        with interpretation(normalize):
            p_prior, p_likelihood = model(features, trip_counts)
        if self.args.debug:
            print(f"p_prior = {p_prior.quote()}")
            print(f"p_likelihood = {p_likelihood.quote()}")
        if self.args.analytic_kl:
            # We can compute the KL part exactly.
            exact_part = funsor.Integrate(q, p_prior - q, frozenset(["gate_rate_t"]))
            # But we need to Monte Carlo approximate to compute likelihood.
            with interpretation(monte_carlo):
                approx_part = funsor.Integrate(q, p_likelihood, frozenset(["gate_rate_t"]))
            elbo = exact_part + approx_part
        else:
            with interpretation(normalize):
                p = p_prior + p_likelihood
                pq = p - q
            # Monte Carlo approximate everything.
            with interpretation(monte_carlo):
                elbo = funsor.Integrate(q, pq, frozenset(["gate_rate_t"]))
        loss = -elbo
        # The loss must be fully reduced (no free variables) before extraction.
        assert not loss.inputs, loss.inputs
        assert isinstance(loss, funsor.Tensor), loss.pretty()
        return loss.data
def train(args, dataset):
    """
    Train a model and guide to fit a dataset.

    Runs SVI over random contiguous time windows, periodically checkpointing
    the Pyro param store, loss history, and a pickled :class:`Forecaster`.
    Returns the final Forecaster (or ``None`` if ``args.num_steps == 0``).
    """
    counts = dataset["counts"]
    num_stations = len(dataset["stations"])
    train_size = args.truncate if args.truncate else len(counts)
    logging.info("Training on {} stations over {}/{} hours, {} batches/epoch"
                 .format(num_stations, train_size, len(counts),
                         int(math.ceil(train_size / args.batch_size))))
    # Features = weekly Fourier features + per-station open/closed indicators.
    time_features = make_time_features(args, 0, len(counts))
    control_features = (counts.max(1)[0] + counts.max(2)[0]).clamp(max=1)
    logging.debug("On average {:0.1f}/{} stations are open at any one time"
                  .format(control_features.sum(-1).mean(), num_stations))
    features = torch.cat([time_features, control_features], -1)
    feature_dim = features.size(-1)
    logging.debug("feature_dim = {}".format(feature_dim))
    # Save metadata up front so even aborted runs leave a record.
    metadata = {"args": args, "losses": [], "control": control_features}
    torch.save(metadata, args.training_filename)
    if args.device.startswith("cuda"):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    def optim_config(module_name, param_name):
        # Per-parameter optimizer config consumed by pyro.optim.ClippedAdam.
        config = {
            "lr": args.learning_rate,
            "betas": (0.8, 0.99),
            "lrd": 0.1 ** (1 / args.num_steps),  # decay lr by 10x over the run
        }
        if param_name == "init_scale":
            config["lr"] *= 0.1  # init_dist sees much less data per minibatch
        return config
    training_counts = counts[:args.truncate] if args.truncate else counts
    data_size = len(training_counts)
    model = Model(args, features, training_counts).to(device=args.device)
    guide = Guide(args, features, training_counts).to(device=args.device)
    optimizer = ClippedAdam(optim_config)
    if args.funsor:
        elbo = Funsor_ELBO(args)
    elif args.analytic_kl:
        elbo = TraceMeanField_ELBO()
    else:
        elbo = Trace_ELBO()
    svi = SVI(model, guide, optimizer, elbo)
    losses = []
    forecaster = None
    for step in range(args.num_steps):
        # Sample a random contiguous window of at most batch_size hours.
        begin_time = torch.randint(max(1, data_size - args.batch_size), ()).item()
        end_time = min(data_size, begin_time + args.batch_size)
        feature_batch = features[begin_time: end_time].to(device=args.device)
        counts_batch = counts[begin_time: end_time].to(device=args.device)
        loss = svi.step(feature_batch, counts_batch) / counts_batch.numel()
        assert math.isfinite(loss), loss
        losses.append(loss)
        logging.debug("step {} loss = {:0.4g}".format(step, loss))
        if step % 20 == 0:
            # Save state every few steps.
            pyro.get_param_store().save(args.param_store_filename)
            metadata = {"args": args, "losses": losses, "control": control_features}
            torch.save(metadata, args.training_filename)
            forecaster = Forecaster(args, dataset, features, model, guide)
            torch.save(forecaster, args.forecaster_filename)
            # NOTE(review): Logger(None) builds a throwaway logger just to query
            # the effective level; logging.getLogger() would be conventional.
            if logging.Logger(None).isEnabledFor(logging.DEBUG):
                init_scale = pyro.param("init_scale").data
                trans_scale = pyro.param("trans_scale").data
                trans_matrix = pyro.param("trans_matrix").data
                # NOTE(review): Tensor.eig() is deprecated in newer torch
                # (torch.linalg.eig) — confirm against the pinned version.
                eigs = trans_matrix.eig()[0].norm(dim=-1).sort(descending=True).values
                logging.debug("guide.diag_part = {}".format(guide.diag_part.data.squeeze()))
                logging.debug("init scale min/mean/max: {:0.3g} {:0.3g} {:0.3g}"
                              .format(init_scale.min(), init_scale.mean(), init_scale.max()))
                logging.debug("trans scale min/mean/max: {:0.3g} {:0.3g} {:0.3g}"
                              .format(trans_scale.min(), trans_scale.mean(), trans_scale.max()))
                logging.debug("trans mat eig:\n{}".format(eigs))
    return forecaster
class Forecaster:
    """
    A single object containing all information needed to forecast.
    This can be pickled and unpickled for later use.
    """
    def __init__(self, args, dataset, features, model, guide):
        assert len(features) == len(dataset["counts"])
        self.args = args
        self.dataset = dataset
        self.counts = dataset["counts"]
        self.features = features
        self.model = model
        self.guide = guide
    @torch.no_grad()
    def __call__(self, window_begin, window_end, forecast_hours, num_samples=None):
        """
        Given data in ``[window_begin, window_end)``, generate one or multiple
        samples predictions in ``[window_end, window_end + forecast_hours)``.

        Returns a tensor of forecast counts on CPU; with ``num_samples`` the
        leading dimension indexes samples.
        """
        logging.debug(f"forecasting [{window_begin}, {window_end}] -> {forecast_hours}")
        # NOTE(review): mutates the shared args object to force the plain-Pyro
        # code path in model and guide.
        self.args.funsor = False  # sets behavior of model and guide
        assert 0 <= window_begin < window_end < window_end + forecast_hours <= len(self.counts)
        features = self.features[window_begin: window_end + forecast_hours] \
                       .to(device=self.args.device)
        counts = self.counts[window_begin: window_end].to(device=self.args.device)
        # To draw multiple samples efficiently, we parallelize using pyro.plate.
        model = self.model
        guide = self.guide
        if num_samples is not None:
            vectorize = pyro.plate("samples", num_samples, dim=-4)
            model = vectorize(model)
            guide = vectorize(guide)
        # Replay the model against latents sampled from the guide.
        with poutine.trace() as tr:
            guide(features, counts)
        with poutine.replay(trace=tr.trace):
            forecast = model(features, counts)
        assert len(forecast) == forecast_hours
        return torch.cat(forecast, dim=-3).cpu()
    @torch.no_grad()
    def log_prob(self, window_begin, window_end, truth):
        """
        Estimate ``log p(truth | observed window)`` as the difference of two
        ELBO estimates (window+truth minus window alone).
        """
        forecast_hours = len(truth)
        self.args.funsor = False  # sets behavior of model and guide
        assert 0 <= window_begin < window_end < window_end + forecast_hours <= len(self.counts)
        features = self.features[window_begin: window_end + forecast_hours] \
                       .to(device=self.args.device)
        x = self.counts[window_begin: window_end].to(device=self.args.device)
        y = truth.to(device=self.args.device)
        xy = torch.cat([x, y], dim=0)
        loss = TraceMeanField_ELBO().loss
        logp_x = -loss(self.model, self.guide, features[:len(x)], x)
        logp_xy = -loss(self.model, self.guide, features[:len(xy)], xy)
        return logp_xy - logp_x
|
vitkl/sandbox | 2019-08-time-series/bart/main.py | import argparse
import logging
import pyro
import torch
from forecast import train
from preprocess import load_hourly_od
def main(args):
    """
    Train a forecaster on the BART dataset, then generate a held-out forecast
    with its per-element log probability and save both to disk.
    """
    # NOTE(review): lexicographic string compare — would misbehave for
    # versions like "0.10.0"; packaging.version would be robust.
    assert pyro.__version__ >= "0.4.1"
    pyro.enable_validation(__debug__)
    pyro.set_rng_seed(args.seed)
    dataset = load_hourly_od(args)
    if args.tiny:
        # Shrink to the first `tiny` stations for quick smoke tests.
        dataset["stations"] = dataset["stations"][:args.tiny]
        dataset["counts"] = dataset["counts"][:, :args.tiny, :args.tiny]
    forecaster = train(args, dataset)
    if forecaster is None:
        return
    # Forecast the hours immediately following the training window.
    window_begin = max(0, args.truncate - args.batch_size)
    window_end = args.truncate
    truth = dataset['counts'][window_end: window_end + args.forecast_hours]
    forecast = forecaster(window_begin, window_end, args.forecast_hours,
                          num_samples=args.num_samples)
    assert forecast.shape == (args.num_samples,) + truth.shape
    log_prob = forecaster.log_prob(window_begin, window_end, truth) / truth.numel()
    torch.save({
        'forecast': forecast,
        'truth': truth,
        'log_prob': log_prob,
    }, args.forecast_filename)
if __name__ == "__main__":
    # Command-line entry point: parse flags, pick a device, configure logging,
    # and optionally post-mortem into pdb on failure.
    parser = argparse.ArgumentParser(description="BART origin-destination forecast")
    # Output artifact paths.
    parser.add_argument("--param-store-filename", default="pyro_param_store.pkl")
    parser.add_argument("--forecaster-filename", default="forecaster.pkl")
    parser.add_argument("--forecast-filename", default="forecast.pkl")
    parser.add_argument("--training-filename", default="training.pkl")
    # Dataset truncation for quick experiments.
    parser.add_argument("--truncate", default=0, type=int,
                        help="optionally truncate to a subset of hours")
    parser.add_argument("--tiny", default=0, type=int,
                        help="optionally truncate to a subset of stations")
    # Model / guide architecture sizes.
    parser.add_argument("--state-dim", default=8, type=int,
                        help="size of HMM state space in model")
    parser.add_argument("--model-nn-dim", default=64, type=int,
                        help="size of hidden layer in model net")
    parser.add_argument("--guide-rank", default=8, type=int,
                        help="size of hidden layer in guide net")
    # Inference-backend selection.
    parser.add_argument("--funsor", action="store_true")
    parser.add_argument("--analytic-kl", action="store_true")
    parser.add_argument("--mean-field", action="store_true")
    # Optimization and forecasting hyperparameters.
    parser.add_argument("-n", "--num-steps", default=1001, type=int)
    parser.add_argument("-b", "--batch-size", default=24 * 7 * 2, type=int)
    parser.add_argument("-lr", "--learning-rate", default=0.05, type=float)
    parser.add_argument("--seed", default=123456789, type=int)
    parser.add_argument("--forecast-hours", default=24, type=int)
    parser.add_argument("--num-samples", default=10, type=int)
    parser.add_argument("--device", default="")
    parser.add_argument("--cuda", dest="device", action="store_const", const="cuda")
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--pdb", action="store_true")
    parser.add_argument("--no-pdb", dest="pdb", action="store_false")
    args = parser.parse_args()
    if not args.device:
        args.device = "cuda" if torch.cuda.is_available() else "cpu"
    logging.basicConfig(format='%(process) 5d %(relativeCreated) 9d %(message)s',
                        level=logging.DEBUG if args.verbose else logging.INFO)
    if args.pdb:
        try:
            main(args)
        except (Exception, KeyboardInterrupt) as e:
            # Drop into a post-mortem debugger at the failure site.
            print(e)
            import pdb
            pdb.post_mortem(e.__traceback__)
    else:
        main(args)
|
vitkl/sandbox | 2021-03-softplus_scales/cell2location_model.py | <reponame>vitkl/sandbox
import numpy as np
import pyro
import pyro.distributions as dist
import pyro.optim as optim
import torch
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoNormal, init_to_mean
from pyro.nn import PyroModule
from tqdm.auto import tqdm
from scvi import _CONSTANTS
# from scvi.train import PyroTrainingPlan, Trainer
from scvi.distributions._negative_binomial import _convert_mean_disp_to_counts_logits
from scvi.module.base import PyroBaseModuleClass
import pandas as pd
from scipy.sparse import csr_matrix
# from scvi.nn import one_hot
class LocationModelLinearDependentWMultiExperimentModel(PyroModule):
    """
    Cell2location-style spatial mapping model: decomposes spatial
    transcriptomic counts into contributions of reference cell-type
    signatures (``cell_state_mat``) via per-location cell abundances
    ``w_sf``, with gene-level scaling, per-experiment additive background,
    and a negative-binomial likelihood.

    Fix vs. original: the prior dicts passed as (mutable) default arguments
    were mutated in place by ``__init__``, leaking state across
    instantiations; they are now copied before modification.
    """
    def __init__(
        self,
        n_obs,
        n_vars,
        n_factors,
        n_exper,
        cell_state_mat,
        batch_size=None,
        n_groups: int = 50,
        m_g_gene_level_prior={"mean": 1 / 2, "sd": 1 / 4},
        m_g_gene_level_var_prior={"mean_var_ratio": 1.0},
        cell_number_prior={
            "N_cells_per_location": 8.0,
            "A_factors_per_location": 7.0,
            "Y_groups_per_location": 7.0,
        },
        cell_number_var_prior={
            "N_cells_mean_var_ratio": 1.0,
        },
        alpha_g_phi_hyp_prior={"alpha": 9.0, "beta": 3.0},
        gene_add_alpha_hyp_prior={"alpha": 9.0, "beta": 3.0},
        gene_add_mean_hyp_prior={"alpha": 1.0, "beta": 100.0},
        w_sf_mean_var_ratio=5.0,
    ):
        super().__init__()
        # Copy before mutating: the loops below write into these dicts, and
        # default arguments are shared across all instances.
        m_g_gene_level_prior = dict(m_g_gene_level_prior)
        cell_number_prior = dict(cell_number_prior)
        self.n_obs = n_obs
        self.n_vars = n_vars
        self.n_factors = n_factors
        self.n_exper = n_exper
        self.batch_size = batch_size
        self.n_groups = n_groups
        # Merge variance hyperparameters into the gene-level prior.
        for k in m_g_gene_level_var_prior.keys():
            m_g_gene_level_prior[k] = m_g_gene_level_var_prior[k]
        self.alpha_g_phi_hyp_prior = alpha_g_phi_hyp_prior
        self.w_sf_mean_var_ratio = w_sf_mean_var_ratio
        self.gene_add_alpha_hyp_prior = gene_add_alpha_hyp_prior
        self.gene_add_mean_hyp_prior = gene_add_mean_hyp_prior
        cell_number_prior["factors_per_groups"] = (
            cell_number_prior["A_factors_per_location"]
            / cell_number_prior["Y_groups_per_location"]
        )
        for k in cell_number_var_prior.keys():
            cell_number_prior[k] = cell_number_var_prior[k]
        self.cell_number_prior = cell_number_prior
        # compute hyperparameters from mean and sd (Gamma parameterization:
        # shape = mean^2 / sd^2, rate = mean / sd^2)
        self.m_g_gene_level_prior = m_g_gene_level_prior
        self.register_buffer(
            "m_g_shape",
            torch.tensor(
                (self.m_g_gene_level_prior["mean"] ** 2)
                / (self.m_g_gene_level_prior["sd"] ** 2)
            ),
        )
        self.register_buffer(
            "m_g_rate",
            torch.tensor(
                self.m_g_gene_level_prior["mean"]
                / (self.m_g_gene_level_prior["sd"] ** 2)
            ),
        )
        self.register_buffer(
            "m_g_mean_var", torch.tensor(self.m_g_gene_level_prior["mean_var_ratio"])
        )
        self.register_buffer("eps", torch.tensor(1e-8))
        # Reference signatures: buffer holds (n_factors, n_vars).
        self.cell_state_mat = cell_state_mat
        self.register_buffer("cell_state", torch.tensor(cell_state_mat.T))
        self.register_buffer(
            "N_cells_per_location",
            torch.tensor(self.cell_number_prior["N_cells_per_location"]),
        )
        self.register_buffer(
            "factors_per_groups",
            torch.tensor(self.cell_number_prior["factors_per_groups"]),
        )
        self.register_buffer(
            "Y_groups_per_location",
            torch.tensor(self.cell_number_prior["Y_groups_per_location"]),
        )
        self.register_buffer(
            "N_cells_mean_var_ratio",
            torch.tensor(self.cell_number_prior["N_cells_mean_var_ratio"]),
        )
        self.register_buffer(
            "alpha_g_phi_hyp_prior_alpha",
            torch.tensor(self.alpha_g_phi_hyp_prior["alpha"]),
        )
        self.register_buffer(
            "alpha_g_phi_hyp_prior_beta",
            torch.tensor(self.alpha_g_phi_hyp_prior["beta"]),
        )
        self.register_buffer(
            "gene_add_alpha_hyp_prior_alpha",
            torch.tensor(self.gene_add_alpha_hyp_prior["alpha"]),
        )
        self.register_buffer(
            "gene_add_alpha_hyp_prior_beta",
            torch.tensor(self.gene_add_alpha_hyp_prior["beta"]),
        )
        self.register_buffer(
            "gene_add_mean_hyp_prior_alpha",
            torch.tensor(self.gene_add_mean_hyp_prior["alpha"]),
        )
        self.register_buffer(
            "gene_add_mean_hyp_prior_beta",
            torch.tensor(self.gene_add_mean_hyp_prior["beta"]),
        )
        self.register_buffer(
            "w_sf_mean_var_ratio_tensor", torch.tensor(self.w_sf_mean_var_ratio)
        )
        self.register_buffer("n_factors_tensor", torch.tensor(self.n_factors))
        self.register_buffer("n_groups_tensor", torch.tensor(self.n_groups))
        self.register_buffer("ones", torch.ones((1, 1)))
        self.register_buffer("ones_1_n_groups", torch.ones((1, self.n_groups)))
    @staticmethod
    def _get_fn_args_from_batch(tensor_dict):
        """Extract ``(x_data, indices, batch_index)`` from an scvi minibatch
        dict, matching the signature of :meth:`forward`."""
        x_data = tensor_dict[_CONSTANTS.X_KEY]
        ind_x = tensor_dict["ind_x"].long().squeeze()
        batch_index = tensor_dict[_CONSTANTS.BATCH_KEY]
        return (x_data, ind_x, batch_index), {}
    def create_plates(self, x_data, idx, batch_index):
        """Return the observation plate (full-data or minibatch-subsampled)."""
        if self.batch_size is None:
            # to support training on full data
            obs_axis = pyro.plate("obs_axis", self.n_obs, dim=-2)
        else:
            obs_axis = pyro.plate(
                "obs_axis",
                self.n_obs,
                dim=-2,
                subsample_size=self.batch_size,
                subsample=idx,
            )
        return [obs_axis]
    def forward(self, x_data, idx, obs2sample):
        """
        Define the generative density of spatial counts ``x_data``.

        ``obs2sample`` is used as an (obs, experiment) indicator matrix for
        per-experiment additive and overdispersion terms (see the
        commented-out ``one_hot`` call — it is presumably already one-hot;
        TODO confirm against the data loader).
        """
        # obs2sample = batch_index # one_hot(batch_index, self.n_exper)
        (obs_axis,) = self.create_plates(x_data, idx, obs2sample)
        # =====================Gene expression level scaling m_g======================= #
        # Explains difference in sensitivity for each gene between single cell and spatial technology
        m_g_alpha_hyp = pyro.sample(
            "m_g_alpha_hyp",
            dist.Gamma(self.m_g_shape * self.m_g_mean_var, self.m_g_mean_var),
        )
        m_g_beta_hyp = pyro.sample(
            "m_g_beta_hyp",
            dist.Gamma(self.m_g_rate * self.m_g_mean_var, self.m_g_mean_var),
        )
        m_g = pyro.sample(
            "m_g",
            dist.Gamma(m_g_alpha_hyp, m_g_beta_hyp)
            .expand([1, self.n_vars])
            .to_event(2),
        )
        # =====================Cell abundances w_sf======================= #
        # factorisation prior on w_sf models similarity in locations
        # across cell types f and reflects the absolute scale of w_sf
        with obs_axis:
            n_s_cells_per_location = pyro.sample(
                "n_s_cells_per_location",
                dist.Gamma(
                    self.N_cells_per_location * self.N_cells_mean_var_ratio,
                    self.N_cells_mean_var_ratio,
                ),
            )
            y_s_groups_per_location = pyro.sample(
                "y_s_groups_per_location",
                dist.Gamma(self.Y_groups_per_location, self.ones),
            )
        # cell group loadings
        shape = self.ones_1_n_groups * y_s_groups_per_location / self.n_groups_tensor
        rate = self.ones_1_n_groups / (n_s_cells_per_location / y_s_groups_per_location)
        with obs_axis:
            z_sr_groups_factors = pyro.sample(
                "z_sr_groups_factors",
                dist.Gamma(
                    shape, rate
                ),  # .to_event(1)#.expand([self.n_groups]).to_event(1)
            )  # (n_obs, n_groups)
        k_r_factors_per_groups = pyro.sample(
            "k_r_factors_per_groups",
            dist.Gamma(self.factors_per_groups, self.ones)
            .expand([self.n_groups, 1])
            .to_event(2),
        )  # (self.n_groups, 1)
        c2f_shape = k_r_factors_per_groups / self.n_factors_tensor
        x_fr_group2fact = pyro.sample(
            "x_fr_group2fact",
            dist.Gamma(c2f_shape, k_r_factors_per_groups)
            .expand([self.n_groups, self.n_factors])
            .to_event(2),
        )  # (self.n_groups, self.n_factors)
        with obs_axis:
            # Low-rank factorisation: locations x groups @ groups x factors.
            w_sf_mu = z_sr_groups_factors @ x_fr_group2fact
            w_sf = pyro.sample(
                "w_sf",
                dist.Gamma(
                    w_sf_mu * self.w_sf_mean_var_ratio_tensor,
                    self.w_sf_mean_var_ratio_tensor,
                ),
            )  # (self.n_obs, self.n_factors)
        # =====================Location-specific additive component======================= #
        l_s_add_alpha = pyro.sample("l_s_add_alpha", dist.Gamma(self.ones, self.ones))
        l_s_add_beta = pyro.sample("l_s_add_beta", dist.Gamma(self.ones, self.ones))
        with obs_axis:
            l_s_add = pyro.sample(
                "l_s_add", dist.Gamma(l_s_add_alpha, l_s_add_beta)
            )  # (self.n_obs, 1)
        # =====================Gene-specific additive component ======================= #
        # per gene molecule contribution that cannot be explained by
        # cell state signatures (e.g. background, free-floating RNA)
        s_g_gene_add_alpha_hyp = pyro.sample(
            "s_g_gene_add_alpha_hyp",
            dist.Gamma(
                self.gene_add_alpha_hyp_prior_alpha, self.gene_add_alpha_hyp_prior_beta
            ),
        )
        s_g_gene_add_mean = pyro.sample(
            "s_g_gene_add_mean",
            dist.Gamma(
                self.gene_add_mean_hyp_prior_alpha,
                self.gene_add_mean_hyp_prior_beta,
            )
            .expand([self.n_exper, 1])
            .to_event(2),
        )  # (self.n_exper)
        s_g_gene_add_alpha_e_inv = pyro.sample(
            "s_g_gene_add_alpha_e_inv",
            dist.Exponential(s_g_gene_add_alpha_hyp)
            .expand([self.n_exper, 1])
            .to_event(2),
        )  # (self.n_exper)
        s_g_gene_add_alpha_e = self.ones / s_g_gene_add_alpha_e_inv.pow(2)
        s_g_gene_add = pyro.sample(
            "s_g_gene_add",
            dist.Gamma(s_g_gene_add_alpha_e, s_g_gene_add_alpha_e / s_g_gene_add_mean)
            .expand([self.n_exper, self.n_vars])
            .to_event(2),
        )  # (self.n_exper, n_vars)
        # =====================Gene-specific overdispersion ======================= #
        alpha_g_phi_hyp = pyro.sample(
            "alpha_g_phi_hyp",
            dist.Gamma(
                self.alpha_g_phi_hyp_prior_alpha, self.alpha_g_phi_hyp_prior_beta
            ),
        )
        alpha_g_inverse = pyro.sample(
            "alpha_g_inverse",
            dist.Exponential(alpha_g_phi_hyp)
            .expand([self.n_exper, self.n_vars])
            .to_event(2),
        )  # (self.n_exper, self.n_vars)
        # =====================Expected expression ======================= #
        # expected expression
        mu = (w_sf @ self.cell_state) * m_g + (obs2sample @ s_g_gene_add) + l_s_add
        theta = obs2sample @ (self.ones / alpha_g_inverse.pow(2))
        # convert mean and overdispersion to total count and logits
        total_count, logits = _convert_mean_disp_to_counts_logits(
            mu, theta, eps=self.eps
        )
        # =====================DATA likelihood ======================= #
        # Likelihood (sampling distribution) of data_target & add overdispersion via NegativeBinomial
        with obs_axis:
            pyro.sample(
                "data_target",
                dist.NegativeBinomial(total_count=total_count, logits=logits),
                obs=x_data,
            )
        # =====================Compute mRNA count from each factor in locations ======================= #
        mRNA = w_sf * (self.cell_state * m_g).sum(-1)
        pyro.deterministic("u_sf_mRNA_factors", mRNA)
    def compute_expected(self, obs2sample):
        r"""Compute expected expression of each gene in each location. Useful for evaluating how well
        the model learned expression pattern of all genes in the data.

        Requires ``self.samples["post_sample_means"]`` to be populated;
        stores results in ``self.mu`` and ``self.alpha``.
        """
        self.mu = (
            np.dot(self.samples["post_sample_means"]["w_sf"], self.cell_state_mat.T)
            * self.samples["post_sample_means"]["m_g"]
            + np.dot(obs2sample, self.samples["post_sample_means"]["s_g_gene_add"])
            + self.samples["post_sample_means"]["l_s_add"]
        )
        self.alpha = np.dot(
            obs2sample,
            1
            / (
                self.samples["post_sample_means"]["alpha_g_inverse"]
                * self.samples["post_sample_means"]["alpha_g_inverse"]
            ),
        )
class LocationModelLinearDependentWMultiExperiment(torch.nn.Module):
    """
    Convenience wrapper pairing the generative model with an ``AutoNormal``
    guide and a simple full-data SVI training loop.
    """
    def __init__(self, **kwargs):
        super().__init__()
        # Per-step loss history, filled by _train_full_data.
        self.hist = []
        self._model = LocationModelLinearDependentWMultiExperimentModel(**kwargs)
        # Guide shares the model's plates so subsampling stays consistent.
        self._guide = AutoNormal(
            self.model,
            init_loc_fn=init_to_mean,
            create_plates=self.model.create_plates,
        )
    @property
    def model(self):
        """The underlying PyroModule generative model."""
        return self._model
    @property
    def guide(self):
        """The AutoNormal variational guide."""
        return self._guide
    def _train_full_data(self, x_data, obs2sample, n_epochs=20000, lr=0.002):
        """
        Run SVI on the full dataset.

        NOTE(review): hard-codes the CUDA device and clears the global Pyro
        param store — confirm both are intended before reuse.
        """
        idx = np.arange(x_data.shape[0]).astype("int64")
        device = torch.device("cuda")
        idx = torch.tensor(idx).to(device)
        x_data = torch.tensor(x_data).to(device)
        obs2sample = torch.tensor(obs2sample).to(device)
        self.to(device)
        pyro.clear_param_store()
        # Warm up the guide (creates its parameters) before building SVI.
        self.guide(x_data, idx, obs2sample)
        svi = SVI(
            self.model,
            self.guide,
            optim.ClippedAdam({"lr": lr, "clip_norm": 200}),
            loss=Trace_ELBO(),
        )
        iter_iterator = tqdm(range(n_epochs))
        hist = []
        for it in iter_iterator:
            loss = svi.step(x_data, idx, obs2sample)
            iter_iterator.set_description(
                "Epoch " + "{:d}".format(it) + ", -ELBO: " + "{:.4e}".format(loss)
            )
            hist.append(loss)
            if it % 500 == 0:
                # Periodically release cached GPU memory.
                torch.cuda.empty_cache()
        self.hist = hist
def get_cluster_averages(adata_ref, cluster_col):
    """
    Compute per-cluster average expression from ``adata_ref.raw``.

    :param adata_ref: AnnData object of reference single-cell dataset
    :param cluster_col: Name of adata_ref.obs column containing cluster labels
    :returns: pd.DataFrame of shape (n_genes, n_clusters) with the mean
        expression of each gene per cluster
    :raises ValueError: if ``adata_ref.raw`` is missing, or ``cluster_col``
        is absent from (or duplicated in) ``adata_ref.obs``
    """
    if not adata_ref.raw:
        raise ValueError("AnnData object has no raw data")
    if sum(adata_ref.obs.columns == cluster_col) != 1:
        raise ValueError("cluster_col is absent in adata_ref.obs or not unique")
    all_clusters = np.unique(adata_ref.obs[cluster_col])
    # Accumulate one (1, n_genes) mean row per cluster, then stack — avoids
    # the original placeholder-zeros-row + slice-off idiom.
    cluster_means = []
    for c in all_clusters:
        sparse_subset = csr_matrix(
            adata_ref.raw.X[np.isin(adata_ref.obs[cluster_col], c), :]
        )
        cluster_means.append(np.asarray(sparse_subset.mean(0)))
    averages_mat = np.concatenate(cluster_means, axis=0).T  # (n_genes, n_clusters)
    averages_df = pd.DataFrame(
        data=averages_mat, index=adata_ref.raw.var_names, columns=all_clusters
    )
    return averages_df
|
vitkl/sandbox | 2019-08-time-series/bart/evaluate.py | import argparse
import logging
import multiprocessing
import os
import subprocess
import torch
from pyro.ops.stats import crps_empirical
from preprocess import load_hourly_od
def config_to_basename(config):
    """Turn a sequence of CLI flags into a dot-joined, filesystem-safe name."""
    parts = []
    for arg in config:
        parts.append(arg.lstrip('-').replace('-', '_'))
    return '.'.join(parts)
def make_splits(args, dataset):
    """
    Make train-test splits in time.

    Returns split points (hour indices) spaced one week apart, keeping at
    least six years of history before each split and ``args.forecast_hours``
    of future data after it.
    """
    total_hours = len(dataset['counts'])
    if args.truncate:
        total_hours = min(total_hours, args.truncate)
    # Dataset starts on a Saturday; splits land early Sunday morning,
    # when ridership is at its weekly minimum.
    assert dataset['start_date'][0].strftime('%A') == 'Saturday'
    split_hour_of_week = 29
    # We train HMM on at least six years of historical data.
    min_hours = 6 * 365 * 24
    stride = 24 * 7
    splits = []
    split = min_hours + split_hour_of_week
    while split < total_hours - args.forecast_hours:
        splits.append(split)
        split += stride
    logging.info('Created {} test/train splits'.format(len(splits)))
    assert splits, 'truncated too short'
    return splits
def forecast_one(args, config):
    """Run one forecasting job as a subprocess and load its saved result.

    Results are cached on disk under ``args.results``; the subprocess is only
    launched when ``--force`` was given or no forecast pickle exists yet for
    this config.

    :param config: tuple of extra CLI flags identifying the model variant
    :returns: object loaded from the forecast pickle, mapped to ``args.device``
    """
    basename = config_to_basename(config + ("forecast",))
    forecast_path = f'{args.results}/{basename}.pkl'
    if args.force or not os.path.exists(forecast_path):
        # Propagate this interpreter's -O optimisation state to the child.
        command = ['python'] if __debug__ else ['python', '-O']
        command.append('main.py')
        command.append('--pdb' if args.pdb else '--no-pdb')
        if args.verbose:
            command.append('--verbose')
        command.append(f'--num-steps={args.num_steps}')
        command.append(f'--forecast-hours={args.forecast_hours}')
        command.append(f'--num-samples={args.num_samples}')
        # Discard model artifacts; only the forecast pickle is kept.
        command.append('--param-store-filename=/dev/null')
        command.append('--forecaster-filename=/dev/null')
        command.append('--training-filename=/dev/null')
        command.extend(config)
        command.append(f'--forecast-filename={forecast_path}')
        # Log the command both as one shell-pasteable line and, at debug
        # level, as a readable multi-line form.
        logging.info('# {}'.format(' '.join(command)))
        logging.debug(' \\\n '.join(command))
        subprocess.check_call(command)
    return torch.load(forecast_path, map_location=args.device)
def eval_one(args, result):
    """Score a single forecast against its ground truth.

    Returns a dict with MAE (point-estimate error), CRPS (uncertainty
    quality) and the ELBO carried over from the forecasting run.
    """
    logging.debug('evaluating')
    pred, truth = result['forecast'], result['truth']
    num_hours, _, num_stations = truth.shape
    assert pred.shape == (args.num_samples, num_hours, num_stations, num_stations)
    # Point estimate: mean absolute error of the per-element sample median.
    mae = float((pred.median(dim=0).values - truth).abs().mean())
    # Uncertainty: negative Continuous Ranked Probability Score (empirical).
    crps = float(crps_empirical(pred, truth).mean())
    metrics = {'MAE': mae, 'CRPS': crps, 'ELBO': result['log_prob']}
    logging.info(metrics)
    return metrics
def process_task(task):
    """Worker entry point: forecast and evaluate one (config, truncate) job.

    Takes a single tuple argument so it can be dispatched via ``Pool.map``.
    :returns: (config, truncate, metrics dict)
    """
    args, config, truncate = task
    # Re-configure logging: spawned/forkserver workers do not inherit the
    # parent process's logging configuration.
    logging.basicConfig(format='%(process) 5d %(relativeCreated) 9d %(message)s',
                        level=logging.DEBUG if args.verbose else logging.INFO)
    forecast = forecast_one(args, config + ('--truncate={}'.format(truncate),))
    metrics = eval_one(args, forecast)
    # Drop the (potentially large) forecast tensors before returning.
    del forecast
    if args.device.startswith('cuda'):
        torch.cuda.empty_cache()
    return config, truncate, metrics
def main(args):
    """Evaluate every model config on every time split and save the metrics."""
    dataset = load_hourly_od()
    if not os.path.exists(args.results):
        os.mkdir(args.results)
    # Model variants to compare; commented-out entries are disabled experiments.
    configs = [
        (),
        ('--mean-field',),
        # ('--funsor',),
        # ('--funsor', '--analytic-kl'),
    ]
    splits = make_splits(args, dataset)
    results = {}
    # Fan the (config, split) grid out over worker processes when requested.
    map_ = map if args.parallel == 1 else multiprocessing.Pool(args.parallel).map
    results = list(map_(process_task, [
        (args, config, truncate)
        for config in configs
        for truncate in splits
    ]))
    # Group by config and by truncate.
    metrics = {}
    for config, truncate, metric in results:
        metrics.setdefault(config, {}).setdefault(truncate, metric)
    results = {'args': args, 'metrics': metrics}
    eval_filename = os.path.abspath(f'{args.results}/eval.pkl')
    logging.info(f'Saving results to {eval_filename}')
    torch.save(results, eval_filename)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="BART forecasting evaluation")
    # -f re-runs forecasts even when a cached pickle already exists.
    parser.add_argument("-f", "--force", action="store_true")
    parser.add_argument("--results", default="results")
    # 0 means "use the full dataset"; otherwise cap the number of hours.
    parser.add_argument("--truncate", default=0, type=int)
    parser.add_argument("-n", "--num-steps", default=1001, type=int)
    parser.add_argument("--forecast-hours", default=24 * 7, type=int)
    parser.add_argument("--num-samples", default=99, type=int)
    parser.add_argument("--device", default="")
    parser.add_argument("--cuda", dest="device", action="store_const", const="cuda")
    parser.add_argument("-p", "--parallel", default=1, type=int)
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument("--pdb", action="store_true")
    parser.add_argument("--no-pdb", dest="pdb", action="store_false")
    args = parser.parse_args()
    # Default to GPU when available and no explicit device was given.
    if not args.device:
        args.device = "cuda" if torch.cuda.is_available() else "cpu"
    # CUDA contexts do not survive fork(); use forkserver for GPU workers.
    if args.parallel > 1 and args.device.startswith("cuda"):
        multiprocessing.set_start_method('forkserver')
    logging.basicConfig(format='%(process) 5d %(relativeCreated) 9d %(message)s',
                        level=logging.DEBUG if args.verbose else logging.INFO)
    main(args)
|
vitkl/sandbox | 2019-08-time-series/bart/part_iii_preprocess.py | import csv
import datetime
import os
import torch
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA = os.path.join(ROOT, "data")
# Downloaded from https://www.bart.gov/about/reports/ridership
SOURCE_FILE = "bart-SFIA-EMBR-2011.csv"
DESTIN_FILE = "bart-SFIA-EMBR-2011.pkl"
if __name__ == "__main__":
    # Load csv file of (date, hour, origin, destination, trip_count) rows.
    start_date = datetime.datetime.strptime("2011-01-01", "%Y-%m-%d")
    dates = []  # hour offsets from start_date
    counts = []  # trip counts aligned with `dates`
    with open(os.path.join(DATA, SOURCE_FILE)) as f:
        for date, hour, origin, destin, trip_count in csv.reader(f):
            date = datetime.datetime.strptime(date, "%Y-%m-%d")
            date += datetime.timedelta(hours=int(hour))
            # Store each observation as whole hours since start_date.
            dates.append(int((date - start_date).total_seconds() / 3600))
            counts.append(int(trip_count))
    print("Loaded {} dense counts".format(len(dates)))
    # Sanity check: the data should begin within the first day.
    assert 0 <= min(dates) < 24
    # Convert to a dense PyTorch series: missing hours stay zero,
    # observed hours receive their trip count.
    result = torch.zeros(1 + max(dates))
    for date, count in zip(dates, counts):
        result[date] = count
    print("Saving {} dense counts".format(len(result)))
    torch.save(result, os.path.join(DATA, DESTIN_FILE))
|
samar-khanna/cs224w-project | online_main.py | <filename>online_main.py<gh_stars>1-10
import os
import torch
import pickle
import argparse
import torch.optim as optim
from gnn_stack import GNNStack
from link_predictor import LinkPredictor
from torch_geometric.data import DataLoader
from ogb.linkproppred import PygLinkPropPredDataset
from train import train
from online_train import online_train
from online_eval import online_eval
from utils import print_and_log
def passed_arguments():
    """Build and parse the command-line arguments for online-graph training."""
    arg_parser = argparse.ArgumentParser(description="Script to train online graph setting")
    arg_parser.add_argument('--data_path', type=str,
                            default='./dataset/online_init:1000-online_nodes:10-seed:0.pkl',
                            help='Path to data .pkl file')
    arg_parser.add_argument('--exp_dir', type=str, default=None,
                            help="Path to exp dir for model checkpoints and experiment logs")
    arg_parser.add_argument('--init_epochs', type=int, default=100,
                            help="Number of epochs for initial subgraph training")
    arg_parser.add_argument('--online_steps', type=int, default=10,
                            help="Number of gradient steps for online learning.")
    arg_parser.add_argument('--init_lr', type=float, default=1e-2,
                            help="Learning rate for initial graph pre-training")
    arg_parser.add_argument('--online_lr', type=float, default=1e-2,
                            help="Learning rate for online node learning")
    arg_parser.add_argument('--node_dim', type=int, default=256,
                            help='Embedding dimension for nodes')
    arg_parser.add_argument('--init_batch_size', type=int, default=1024 * 64,
                            help='Number of links per batch used in initial pre-training')
    arg_parser.add_argument('--online_batch_size', type=int, default=32,
                            help='Number of links per batch used for online learning')
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = passed_arguments()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Fixed model hyper-parameters.
    hidden_dim = 32
    dropout = 0.5
    num_layers = 4
    optim_wd = 0

    init_train_epochs = args.init_epochs
    num_online_steps = args.online_steps
    init_lr = args.init_lr
    online_lr = args.online_lr
    node_emb_dim = args.node_dim
    init_batch_size = args.init_batch_size
    online_batch_size = args.online_batch_size
    path_to_dataset = args.data_path
    exp_dir = args.exp_dir

    # Get dataset
    with open(path_to_dataset, 'rb') as f:
        dataset = pickle.load(f)
    init_nodes = dataset['init_nodes'].to(device)
    init_edge_index = dataset['init_edge_index'].to(device)
    init_pos_train = init_edge_index[:, ::2].to(device)  # Relying on interleaved order
    online_node_edge_index = dataset['online']

    # Configure experiment saving directories
    # (renamed from `dir`, which shadowed the builtin).
    if exp_dir is None:
        exp_dir = "./experiments"
    exp_name = f"online.init_nodes:{len(init_nodes)}.num_online:{len(online_node_edge_index)}" \
               f".{path_to_dataset.split('-')[2]}" \
               f".epochs:{init_train_epochs}.online_steps:{num_online_steps}" \
               f".layers:{num_layers}.hidden_dim:{hidden_dim}.node_dim:{node_emb_dim}" \
               f".init_lr:{init_lr}.online_lr:{online_lr}.optim_wd:{optim_wd}" \
               f".init_batch_size:{init_batch_size}.online_batch_size:{online_batch_size}"
    exp_dir = os.path.join(exp_dir, exp_name)
    model_dir = os.path.join(exp_dir, 'checkpoints')
    logs_dir = os.path.join(exp_dir, 'logs')
    os.makedirs(exp_dir, exist_ok=True)
    os.makedirs(model_dir, exist_ok=True)
    os.makedirs(logs_dir, exist_ok=True)
    logfile_path = os.path.join(logs_dir, 'log.txt')
    resfile_val_path = os.path.join(logs_dir, 'res_val.pkl')
    resfile_test_path = os.path.join(logs_dir, 'res_test.pkl')
    # Line-buffered so progress survives a crash mid-run.
    logfile = open(logfile_path, "a" if os.path.isfile(logfile_path) else "w", buffering=1)

    # Create embedding, model, and optimizer.
    # NOTE(review): online node ids appear to be global indices, so
    # len(init_nodes) + max(id) + 1 over-allocates rows; harmless but worth
    # confirming against preprocess.py.
    emb = torch.nn.Embedding(len(init_nodes) + max(online_node_edge_index) + 1, node_emb_dim).to(device)
    model = GNNStack(node_emb_dim, hidden_dim, hidden_dim, num_layers, dropout, emb=True).to(device)
    link_predictor = LinkPredictor(hidden_dim, hidden_dim, 1, num_layers + 1, dropout).to(device)
    optimizer = optim.Adam(
        list(model.parameters()) + list(link_predictor.parameters()) + list(emb.parameters()),
        lr=init_lr, weight_decay=optim_wd
    )

    # Train on initial subgraph
    for e in range(init_train_epochs):
        loss = train(model, link_predictor, emb.weight[:len(init_nodes)], init_edge_index, init_pos_train.T,
                     init_batch_size, optimizer)
        print_and_log(logfile, f"Epoch {e + 1}/{init_train_epochs}: Loss = {round(loss, 5)}")
        if (e + 1) % 20 == 0:
            torch.save(model.state_dict(), os.path.join(model_dir, f"init_train:{e}.pt"))

    # New optimizer for online learning (don't update GraphSAGE)
    optimizer = optim.Adam(
        list(link_predictor.parameters()) + list(emb.parameters()),
        lr=online_lr, weight_decay=optim_wd
    )

    curr_nodes = init_nodes
    curr_edge_index = init_edge_index  # (2, E)
    val_preds, test_preds = {}, {}
    for n_id, node_split in online_node_edge_index.items():
        train_msg, train_sup, train_neg, valid, valid_neg, test, test_neg = \
            node_split['train_msg'], node_split['train_sup'], node_split['train_neg'], \
            node_split['valid'], node_split['valid_neg'], node_split['test'], node_split['test_neg']
        train_msg = train_msg.to(device)
        train_sup = train_sup.to(device)
        train_neg = train_neg.to(device)
        valid = valid.to(device)
        valid_neg = valid_neg.to(device)
        test = test.to(device)
        test_neg = test_neg.to(device)

        # Add message edges to edge index
        curr_edge_index = torch.cat((curr_edge_index, train_msg.T), dim=1)  # (2, E+Tr_msg)
        # Add new node to list of curr_nodes
        curr_nodes = torch.cat((curr_nodes, torch.as_tensor([n_id], device=device)))

        # Warm start embedding for new node with the mean of existing embeddings
        with torch.no_grad():
            emb.weight[n_id] = emb.weight[curr_nodes].mean(dim=0)

        # Nodes are ordered sequentially (online node ids start at len(init_nodes))
        for t in range(num_online_steps):
            loss = online_train(model, link_predictor, emb.weight[:n_id + 1],
                                curr_edge_index, train_sup, train_neg, online_batch_size, optimizer, device)
            print_and_log(logfile, f"Step {t + 1}/{num_online_steps}: loss = {round(loss, 5)}")

        torch.save(model.state_dict(), os.path.join(model_dir, f"online_id:{n_id}_model.pt"))
        torch.save(emb.state_dict(), os.path.join(model_dir, f"online_id:{n_id}_emb.pt"))
        torch.save(link_predictor.state_dict(), os.path.join(model_dir, f"online_id:{n_id}_lp.pt"))

        val_tp, val_tn, val_fp, val_fn, preds = online_eval(model, link_predictor, emb.weight[:n_id + 1],
                                                            curr_edge_index, valid, valid_neg, online_batch_size)
        val_preds[n_id] = preds
        # BUG FIX: evaluate the held-out *test* positives here (was `valid`,
        # which re-scored the validation edges against the test negatives).
        test_tp, test_tn, test_fp, test_fn, preds = online_eval(model, link_predictor, emb.weight[:n_id + 1],
                                                                curr_edge_index, test, test_neg, online_batch_size)
        test_preds[n_id] = preds

        print_and_log(logfile, f"For node {n_id}")
        print_and_log(logfile, f"VAL accuracy: {(val_tp + val_tn) / (val_tp + val_tn + val_fp + val_fn)}")
        print_and_log(logfile, f"VAL tp: {val_tp}, fn: {val_fn}, tn: {val_tn}, fp: {val_fp}")
        print_and_log(logfile, f"TEST accuracy: {(test_tp + test_tn) / (test_tp + test_tn + test_fp + test_fn)}")
        print_and_log(logfile, f"TEST tp: {test_tp}, fn: {test_fn}, tn: {test_tn}, fp: {test_fp}")

    with open(resfile_val_path, 'wb') as f:
        pickle.dump(val_preds, f)
    with open(resfile_test_path, 'wb') as f:
        pickle.dump(test_preds, f)
    logfile.close()
|
samar-khanna/cs224w-project | gists/online_eval.py | <reponame>samar-khanna/cs224w-project
def online_eval(model, link_predictor, emb, edge_index, pos_edges, neg_edges, batch_size):
    """
    Evaluates the model on positive and negative edges for link prediction.

    Counts, over batched predictions at threshold 0.5:
      tp -- positive edges scored as positive
      fn -- positive edges scored as negative
      fp -- negative edges scored as positive
      tn -- negative edges scored as negative
    :return: (tp, tn, fp, fn) as floats
    """
    model.eval()
    link_predictor.eval()
    tp = tn = fp = fn = 0.0
    for batch in DataLoader(range(pos_edges.shape[0]), batch_size, shuffle=False, drop_last=False):
        updated_emb = model(emb, edge_index)  # (N, d)
        src, dst = pos_edges[batch].T  # each (B,)
        scores = link_predictor(updated_emb[src], updated_emb[dst]).squeeze()  # (B, )
        tp += (scores >= 0.5).sum().item()
        fn += (scores < 0.5).sum().item()
    for batch in DataLoader(range(neg_edges.shape[0]), batch_size, shuffle=False, drop_last=False):
        updated_emb = model(emb, edge_index)  # (N, d)
        src, dst = neg_edges[batch].T  # each (B,)
        scores = link_predictor(updated_emb[src], updated_emb[dst]).squeeze()  # (B, )
        fp += (scores >= 0.5).sum().item()
        tn += (scores < 0.5).sum().item()
    return tp, tn, fp, fn
|
samar-khanna/cs224w-project | utils.py | import os
def print_and_log(log_file, message):
    """Echo *message* to stdout and append a newline-terminated copy to *log_file*."""
    print(message)
    log_file.write(message)
    log_file.write('\n')
def log(log_file, message):
    """Append *message* (newline-terminated) to *log_file* without echoing to stdout."""
    log_file.write(message + '\n')
samar-khanna/cs224w-project | gists/graph_sage.py | <filename>gists/graph_sage.py
class GraphSage(MessagePassing):
    """GraphSAGE convolution layer with a mean aggregator.

    Computes out_v = lin_l(x_v) + lin_r(mean_{u in N(v)} x_u), optionally
    L2-normalised per node.
    """

    def __init__(self, in_channels, out_channels, normalize=True,
                 bias=False, **kwargs):
        super(GraphSage, self).__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.normalize = normalize
        # Separate linear maps for the node's own features (lin_l) and the
        # aggregated neighbourhood message (lin_r).
        self.lin_l = torch.nn.Linear(in_channels, out_channels, bias=bias)
        self.lin_r = torch.nn.Linear(in_channels, out_channels, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialise both linear layers."""
        self.lin_l.reset_parameters()
        self.lin_r.reset_parameters()

    def forward(self, x, edge_index, size=None):
        # x is shape (N, in_c).
        # propagate() invokes message() then aggregate() over edge_index.
        neighbor_out = self.propagate(edge_index, x=(x, x), size=size)
        out = self.lin_l(x) + self.lin_r(neighbor_out)
        if self.normalize:
            # NOTE(review): normalize uses the default dim (dim=1), i.e.
            # per-node L2 norm for a (N, out_c) tensor -- confirm intended.
            out = torch.nn.functional.normalize(out, p=2)
        return out

    def message(self, x_j):
        # x_j has shape (E, d): identity message (raw neighbour features).
        out = x_j
        return out

    def aggregate(self, inputs, index, dim_size=None):
        # Mean-pool messages per destination node along the node dimension.
        node_dim = self.node_dim
        out = torch_scatter.scatter(inputs, index, dim=node_dim, reduce='mean')
        return out
|
samar-khanna/cs224w-project | gists/evaluate.py | <reponame>samar-khanna/cs224w-project
def test(model, predictor, emb, edge_index, split_edge, batch_size, evaluator):
    """
    Evaluates model on positive and negative test edges.

    1. Computes the updated node embeddings given the edge index (i.e. the message passing edges)
    2. Computes predictions on the positive and negative edges
    3. Calculates hits @ k given predictions using the ogb evaluator
    """
    model.eval()
    predictor.eval()

    node_emb = model(emb, edge_index)

    def score_edges(edge_tensor):
        # Score an (e, 2) edge tensor in minibatches; returns a flat CPU tensor.
        scores = []
        for perm in DataLoader(range(edge_tensor.size(0)), batch_size):
            pair = edge_tensor[perm].t()
            scores.append(predictor(node_emb[pair[0]], node_emb[pair[1]]).squeeze().cpu())
        return torch.cat(scores, dim=0)

    pos_test_pred = score_edges(split_edge['test']['edge'].to(emb.device))
    neg_test_pred = score_edges(split_edge['test']['edge_neg'].to(emb.device))

    results = {}
    for K in [20, 50, 100]:
        evaluator.K = K  # using the Evaluator function in the ogb.linkproppred package
        results[f'Hits@{K}'] = evaluator.eval({
            'y_pred_pos': pos_test_pred,
            'y_pred_neg': neg_test_pred,
        })[f'hits@{K}']
    return results
samar-khanna/cs224w-project | gists/online_train.py | <reponame>samar-khanna/cs224w-project
def online_train(model, link_predictor, emb, edge_index, pos_train_edge, neg_train_edges,
                 batch_size, optimizer, device):
    """
    Runs training for a single online node given its edges to the existing subgraph.

    1. Updates node embeddings given the message edges for the online node and the existing subgraph edges
    2. Computes predictions on the positive supervision edges for the online node
    3. Computes predictions on sampled negative supervision edges for the online node
    4. Computes the loss on the positive and negative edges and updates parameters
    :return: average loss over all batches
    """
    model.train()
    link_predictor.train()

    train_losses = []
    for edge_id in DataLoader(range(pos_train_edge.shape[0]), batch_size, shuffle=True):
        optimizer.zero_grad()

        node_emb = model(emb, edge_index)  # (N, d)

        pos_edge = pos_train_edge[edge_id].T  # (2, B)
        pos_pred = link_predictor(node_emb[pos_edge[0]], node_emb[pos_edge[1]])  # (B, )

        neg_idx = np.random.choice(len(neg_train_edges), edge_id.shape[0], replace=False)
        # BUG FIX: neg_train_edges[...] is (B, 2); transpose to (2, B) so row 0
        # / row 1 index source / destination nodes (mirrors pos_edge above).
        # Previously neg_edge[0] selected the first *edge pair*, not sources.
        neg_edge = neg_train_edges[torch.from_numpy(neg_idx).to(device)].T  # (2, B)
        neg_pred = link_predictor(node_emb[neg_edge[0]], node_emb[neg_edge[1]])  # (B, )

        # Negative log-likelihood over positives and negatives (eps for stability).
        loss = -torch.log(pos_pred + 1e-15).mean() - torch.log(1 - neg_pred + 1e-15).mean()
        loss.backward()
        optimizer.step()

        train_losses.append(loss.item())
    return sum(train_losses) / len(train_losses)
|
samar-khanna/cs224w-project | evaluate.py | import torch
from torch_geometric.data import DataLoader
def test(model, predictor, emb, edge_index, split_edge, batch_size, evaluator):
    """
    Evaluates graph model on validation and test edges
    :param model: Torch Graph model used for updating node embeddings based on message passing
    :param predictor: Torch model used for predicting whether edge exists or not
    :param emb: (N, d) Initial node embeddings for all N nodes in graph
    :param edge_index: (2, E) Edge index for all edges in the graph
    :param split_edge: Dictionary of (e, 2) edges for val pos/neg and test pos/neg edges
    :param batch_size: Number of positive (and negative) supervision edges to sample per batch
    :param evaluator: OGB evaluator to calculate hits @ k metric
    :return: hits @ k results, as {f'Hits@{K}': (valid_hits, test_hits)}
    """
    model.eval()
    predictor.eval()

    node_emb = model(emb, edge_index)

    def _batched_preds(edges):
        # Score an (e, 2) edge tensor in minibatches; returns a flat (e,) CPU
        # tensor. Replaces four copy-pasted prediction loops.
        preds = []
        for perm in DataLoader(range(edges.size(0)), batch_size):
            edge = edges[perm].t()
            preds.append(predictor(node_emb[edge[0]], node_emb[edge[1]]).squeeze().cpu())
        return torch.cat(preds, dim=0)

    pos_valid_pred = _batched_preds(split_edge['valid']['edge'].to(emb.device))
    neg_valid_pred = _batched_preds(split_edge['valid']['edge_neg'].to(emb.device))
    pos_test_pred = _batched_preds(split_edge['test']['edge'].to(emb.device))
    neg_test_pred = _batched_preds(split_edge['test']['edge_neg'].to(emb.device))

    results = {}
    for K in [20, 50, 100]:
        evaluator.K = K
        valid_hits = evaluator.eval({
            'y_pred_pos': pos_valid_pred,
            'y_pred_neg': neg_valid_pred,
        })[f'hits@{K}']
        test_hits = evaluator.eval({
            'y_pred_pos': pos_test_pred,
            'y_pred_neg': neg_test_pred,
        })[f'hits@{K}']
        results[f'Hits@{K}'] = (valid_hits, test_hits)
    return results
|
samar-khanna/cs224w-project | train.py | <gh_stars>1-10
import torch
import numpy as np
import copy
from tqdm import trange
from torch_geometric.data import DataLoader
from torch_geometric.utils import negative_sampling
def train(model, link_predictor, emb, edge_index, pos_train_edge, batch_size, optimizer):
    """
    Runs offline training for model, link_predictor and node embeddings given the message
    edges and supervision edges.
    :param model: Torch Graph model used for updating node embeddings based on message passing
    :param link_predictor: Torch model used for predicting whether edge exists or not
    :param emb: (N, d) Initial node embeddings for all N nodes in graph
    :param edge_index: (2, E) Edge index for all edges in the graph
    :param pos_train_edge: (PE, 2) Positive edges used for training supervision loss
    :param batch_size: Number of positive (and negative) supervision edges to sample per batch
    :param optimizer: Torch Optimizer to update model parameters
    :return: Average supervision loss over all positive (and correspondingly sampled negative) edges
    """
    model.train()
    link_predictor.train()

    train_losses = []
    for edge_id in DataLoader(range(pos_train_edge.shape[0]), batch_size, shuffle=True):
        optimizer.zero_grad()

        # Run message passing on the initial node embeddings to get updated embeddings
        node_emb = model(emb, edge_index)  # (N, d)

        # Predict the class probabilities on the batch of positive edges using link_predictor
        pos_edge = pos_train_edge[edge_id].T  # (2, B)
        pos_pred = link_predictor(node_emb[pos_edge[0]], node_emb[pos_edge[1]])  # (B, )

        # Sample negative edges (same as number of positive edges) and predict class probabilities.
        # negative_sampling returns an edge *index*, i.e. shape (2, Ne) -- not (Ne, 2).
        neg_edge = negative_sampling(edge_index, num_nodes=emb.shape[0],
                                     num_neg_samples=edge_id.shape[0], method='dense')  # (2, Ne)
        neg_pred = link_predictor(node_emb[neg_edge[0]], node_emb[neg_edge[1]])  # (Ne,)

        # Compute the corresponding negative log likelihood loss on the positive and negative edges
        loss = -torch.log(pos_pred + 1e-15).mean() - torch.log(1 - neg_pred + 1e-15).mean()
        loss.backward()
        optimizer.step()

        train_losses.append(loss.item())
    return sum(train_losses) / len(train_losses)
samar-khanna/cs224w-project | gists/train.py | <filename>gists/train.py
def train(model, link_predictor, emb, edge_index, pos_train_edge, batch_size, optimizer):
    """
    Runs offline training for model, link_predictor and node embeddings given the message
    edges and supervision edges.
    1. Updates node embeddings given the edge index (i.e. the message passing edges)
    2. Computes predictions on the positive supervision edges
    3. Computes predictions on the negative supervision edges (which are sampled)
    4. Computes the loss on the positive and negative edges and updates parameters
    :return: average supervision loss over all batches
    """
    model.train()
    link_predictor.train()

    train_losses = []
    for edge_id in DataLoader(range(pos_train_edge.shape[0]), batch_size, shuffle=True):
        optimizer.zero_grad()

        node_emb = model(emb, edge_index)  # (N, d)

        pos_edge = pos_train_edge[edge_id].T  # (2, B)
        pos_pred = link_predictor(node_emb[pos_edge[0]], node_emb[pos_edge[1]])  # (B, )

        # negative_sampling returns an edge *index*, i.e. shape (2, Ne) -- not (Ne, 2).
        neg_edge = negative_sampling(edge_index, num_nodes=emb.shape[0],
                                     num_neg_samples=edge_id.shape[0], method='dense')  # (2, Ne)
        neg_pred = link_predictor(node_emb[neg_edge[0]], node_emb[neg_edge[1]])  # (Ne,)

        # Negative log-likelihood over positives and negatives (eps for stability).
        loss = -torch.log(pos_pred + 1e-15).mean() - torch.log(1 - neg_pred + 1e-15).mean()
        loss.backward()
        optimizer.step()

        train_losses.append(loss.item())
    return sum(train_losses) / len(train_losses)
samar-khanna/cs224w-project | preprocess.py | import os
import torch
import pickle
import argparse
import numpy as np
import torch_geometric as pyg
from ogb.linkproppred import PygLinkPropPredDataset
class NoEdgeException(Exception):
    """Raised when an online node ends up with an empty train/val/test edge split."""
def create_online_edge_index(n_id, full_edge_index, curr_edge_index, curr_nodes, rng,
                             train_msg=0.4, train_sup=0.4, val_pct=0.1):
    """
    Creates the train/val/test positive and negative edge index for online node id n_id,
    given the split ratios for training message/supervision, and val and test edges
    :param n_id: Node index for the online node being considered
    :param full_edge_index: (2, E) tensor of ALL edges in the full graph
    :param curr_edge_index: (2, Ec) tensor of edges in the current subgraph
    :param curr_nodes: (N,) tensor of node indices in the current subgraph
    :param rng: numpy random number generator
    :param train_msg: Percentage of n_id's edges that will be used for train message passing
    :param train_sup: Percentage of n_id's edges that will be used for train loss supervision
    :param val_pct: Percentage of n_id's edges that will be used for validation metrics
    :raises NoEdgeException: if any resulting split is empty
    :return:
        (2, E_new) tensor of edges in the subgraph updated with node n_id's message edges
        (N+1,) tensor of nodes in the subgraph updated with node n_id
        dict(key, (E, 2) Tensor) of train msg/sup/neg edges, val pos/neg edges, test pos/neg edges
    """
    curr_edges = curr_edge_index.T  # (CE, 2)
    edges = full_edge_index.T  # (E, 2)

    # First search for all edges containing node id
    # since undirected, both (i,j) and (j, i) should be in edges
    all_node_edges = edges[edges[:, 0] == n_id]  # (D_all, 2)
    # Then, only keep edges from node_id to nodes in current graph
    node_edges = torch.isin(all_node_edges[:, 1], curr_nodes)  # (D_all,)
    node_edges = all_node_edges[node_edges]  # (D, 2)
    # Shuffle once so the contiguous slices below form random splits.
    node_edges = node_edges[rng.permutation(node_edges.shape[0])]  # (D, 2)
    D = node_edges.shape[0]

    # Create negative edges (avoid positive val and test edges, is this ok?)
    # NOTE(review): torch.isin(n, node_edges) tests membership against the whole
    # (D, 2) tensor, so column 0 (always n_id) also rules out a self-loop; this
    # loop is O(N*D) -- confirm acceptable for large subgraphs.
    neg_edges = []
    for n in curr_nodes:
        if not torch.isin(n, node_edges):
            neg_edges.append((n_id, n))
    neg_edges = torch.as_tensor(neg_edges, dtype=torch.long)  # (Ne, 2)
    neg_edges = neg_edges[rng.permutation(neg_edges.shape[0])]  # (Ne, 2)

    # Then, split node edges into train/val/test (test takes the remainder)
    train_msg_range = (0, int(train_msg * D))
    train_sup_range = (train_msg_range[1], train_msg_range[1] + int(train_sup * D))
    val_range = (train_sup_range[1], train_sup_range[1] + int(val_pct * D))
    test_range = (val_range[1], D)
    split = {
        'train_msg': node_edges[train_msg_range[0]:train_msg_range[1]],  # (TrMsg, 2)
        'train_sup': node_edges[train_sup_range[0]:train_sup_range[1]],  # (TrSup, 2)
        'valid': node_edges[val_range[0]:val_range[1]],  # (Val, 2)
        'test': node_edges[test_range[0]:test_range[1]]  # (Test, 2)
    }

    # Keep same number of neg edges for val/test as pos edges, give remaining to train
    val_neg_range = (0, split['valid'].shape[0])
    test_neg_range = (split['valid'].shape[0], split['valid'].shape[0] + split['test'].shape[0])
    train_neg_range = (split['valid'].shape[0] + split['test'].shape[0], neg_edges.shape[0])
    split['valid_neg'] = neg_edges[val_neg_range[0]:val_neg_range[1]]  # (Val, 2)
    split['test_neg'] = neg_edges[test_neg_range[0]:test_neg_range[1]]  # (Test, 2)
    split['train_neg'] = neg_edges[train_neg_range[0]:train_neg_range[1]]  # (TrNeg, 2)

    # Msg edges need both (i,j) and (j,i): duplicate each edge, then flip every
    # even-indexed row so the two directions end up interleaved.
    split['train_msg'] = split['train_msg'].repeat_interleave(2, dim=0)
    split['train_msg'][::2] = split['train_msg'][::2].flip(1)

    for k, edges in split.items():
        if len(edges) == 0:
            raise NoEdgeException(f"Warning: node {n_id} has no {k} edges")

    return torch.cat((curr_edges, split['train_msg']), dim=0).T, \
           torch.cat((curr_nodes, torch.as_tensor([n_id]))), \
           split
def preprocess(outfile, init_cluster_size=1000, num_online=None, seed=0,
               split_train_msg=0.4, split_train_sp=0.4, split_val=0.1):
    """
    Creates a dataset with an initial subgraph and a dictionary of online nodes
    :param outfile: path to .pkl file where dataset will be saved
    :param init_cluster_size: Number of nodes in initial subgraph
    :param num_online: Number of nodes considered in online setting
    :param seed: Numpy random number generator seed
    :param split_train_msg: Percentage of each online node's edges used for train message passing
    :param split_train_sp: Percentage of each online node's edges used for train loss supervision
    :param split_val: Percentage of each online node's edges used for validation metrics
    :return:
    """
    rng = np.random.default_rng(seed)

    dataset = PygLinkPropPredDataset(name="ogbl-ddi", root='./dataset/')
    split_edge = dataset.get_edge_split()
    graph = dataset[0]
    edge_index = graph.edge_index.T  # (TrE, 2)

    # All train edges are in edge_index. None of val or test edges are in edge_index.
    # Expand val/test edges to both directions (i,j)/(j,i), interleaved.
    val_edges = split_edge['valid']['edge'].repeat_interleave(2, dim=0)
    val_edges[::2] = val_edges[::2].flip(1)
    test_edges = split_edge['test']['edge'].repeat_interleave(2, dim=0)
    test_edges[::2] = test_edges[::2].flip(1)
    full_index = torch.cat((edge_index, val_edges, test_edges), dim=0)  # (E, 2)

    nodes = np.arange(graph.num_nodes)
    node_map = np.arange(len(nodes))
    rng.shuffle(node_map)
    # old_from_new[new_i] = old index of the node relabelled as new_i.
    # BUG FIX: the inverse mapping (new_from_old[old_i] = new index of old
    # node old_i) is argsort(node_map), NOT node_map[node_map] -- the latter
    # composes the permutation with itself and does not invert it.
    new_from_old = torch.from_numpy(np.argsort(node_map))
    old_from_new = torch.from_numpy(node_map)

    # Map edges to new ordering of nodes (where new node i = argsort(node_map)[i])
    full_index = new_from_old[full_index].T  # (2, E)

    # Initial node induced subgraph of all (keeps the interleaved structure)
    init_nodes = torch.arange(init_cluster_size)
    init_edge_index, _ = pyg.utils.subgraph(init_nodes, full_index)  # (2, InitEdges)

    num_online = num_online if num_online is not None else len(nodes) - init_cluster_size
    online_nodes = torch.arange(init_cluster_size, init_cluster_size + num_online)

    # For online nodes, find edges that connect node to current subgraph.
    # Add the online node's training message edges to the current subgraph to update the curr edge_index
    # Add the node's training, val, test edges to the online node dictionary
    curr_nodes = init_nodes
    curr_edge_index = init_edge_index
    online_node_edge_index = {}
    for n in online_nodes.numpy():
        try:
            curr_edge_index, curr_nodes, node_split = \
                create_online_edge_index(n, full_index, curr_edge_index, curr_nodes, rng, split_train_msg,
                                         split_train_sp, split_val)
        except NoEdgeException as e:
            # Skip nodes whose edge splits would be empty.
            print(str(e))
            continue
        online_node_edge_index[n] = node_split

    # Save the graph info
    dataset = {
        "init_nodes": init_nodes,
        "init_edge_index": init_edge_index,
        "online": online_node_edge_index,
        "full_edge_index": full_index,
    }
    with open(outfile, 'wb') as f:
        pickle.dump(dataset, f)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create online graph dataset")
    parser.add_argument("--file_name", type=str, default=None,
                        help="Path to outfile containing .pkl dataset")
    parser.add_argument("--init_size", type=int, default=1000,
                        help="Number of nodes in initial graph")
    parser.add_argument("--num_online", type=int, default=10,
                        help="Number of online nodes.")
    parser.add_argument("--seed", type=int, default=0,
                        help="Np random seed")
    parser.add_argument("--split_train_msg", type=float, default=0.4,
                        help="Fraction of edges as train message")
    parser.add_argument("--split_train_sp", type=float, default=0.4,
                        help="Fraction of edges as train supervison")
    parser.add_argument("--split_val", type=float, default=0.1,
                        help="Fraction of edges as val")
    args = parser.parse_args()

    file_name = args.file_name
    if file_name is None:
        # Derive a descriptive default filename from the split configuration;
        # the leftover fraction becomes the test split.
        split_test = round(1.0 - args.split_train_msg - args.split_train_sp - args.split_val, 2)
        file_name = f"online_init:{args.init_size}" \
                    f"-online_nodes:{args.num_online}" \
                    f"-split:{args.split_train_msg}_{args.split_train_sp}_{args.split_val}_{split_test}" \
                    f"-seed:{args.seed}.pkl"
    # Output always lands inside the local ./dataset directory.
    file_name = os.path.join('dataset', file_name)
    preprocess(file_name, args.init_size, args.num_online, args.seed,
               args.split_train_msg, args.split_train_sp, args.split_val)
samar-khanna/cs224w-project | pyggif.py | from genericpath import exists
import pickle
import torch
print(torch.__version__)
import torch_geometric
print(torch_geometric.__version__)
import networkx as nx
import matplotlib.pyplot as plt
from pylab import show
import collections
import imageio
import os
def tuplify(tensor):
    """Return a length-2 tensor's entries as a plain ``(int, int)`` tuple."""
    first, second = tensor[0], tensor[1]
    return (int(first.item()), int(second.item()))
def listify(tensor):
    """Convert an (N, 2) tensor of edges into a list of ``(int, int)`` tuples.

    Relies on the module-level ``tuplify`` helper for each row. Rewritten as
    a list comprehension instead of a manual append loop.
    """
    return [tuplify(row) for row in tensor]
def visualize(base_graph_edges, base_extra_nodes, new_nodes, new_given_edges,pred_new_node_edges, correct_new_edge, figsize,gif_name,framerate=24):
    """Render an animated GIF of online link-prediction results.

    Frame 0 shows the base graph; each later frame adds one online node and
    colour-codes its edges: blue = given (message/supervision) edges,
    green = correctly predicted, red = incorrectly predicted,
    yellow = missed ground-truth edges, purple/black = edges already
    learnt in earlier frames.

    :param base_graph_edges: list of (u, v) tuples for the initial graph.
    :param base_extra_nodes: isolated nodes drawn alongside the base edges.
    :param new_nodes: online node ids; one frame is rendered per node.
    :param new_given_edges: per new node, edges given to the model.
    :param pred_new_node_edges: per new node, edges the model predicted.
    :param correct_new_edge: per new node, ground-truth (val/test) edges.
    :param figsize: matplotlib figure size tuple.
    :param gif_name: output path for the GIF.
    :param framerate: number of duplicated GIF frames per rendered image.
    """
    # Lay out the union of ALL edges once so node positions stay stable
    # across every frame of the animation.
    G_skel = nx.Graph()
    G_skel.add_edges_from(base_graph_edges)
    for edge_list in pred_new_node_edges:
        G_skel.add_edges_from(edge_list)
    for edge_list in correct_new_edge:
        G_skel.add_edges_from(edge_list)
    for edge_list in new_given_edges:
        G_skel.add_edges_from(edge_list)
    pos = nx.spring_layout(G_skel)
    # Compute a shared bounding box (with 0.5 margin) so all frames use the
    # same axis limits.
    min_x_pos = 10000
    max_x_pos = -10000
    min_y_pos = 10000
    max_y_pos = -10000
    for k,v in pos.items():
        min_x_pos = min(min_x_pos,v[0])
        min_y_pos = min(min_y_pos,v[1])
        max_x_pos = max(max_x_pos,v[0])
        max_y_pos = max(max_y_pos,v[1])
    print(min_x_pos,max_x_pos,min_y_pos,max_y_pos)
    min_x_pos -=0.5
    min_y_pos -=0.5
    max_x_pos +=0.5
    max_y_pos +=0.5
    filenames = []
    # BASE GRAPH
    G_old = nx.Graph()
    G_old.add_edges_from(base_graph_edges)
    G_old.add_nodes_from(base_extra_nodes)
    plt.figure(figsize=figsize)
    plt.xlim(min_x_pos,max_x_pos)
    plt.ylim(min_y_pos,max_y_pos)
    pos_old = {i:pos[i] for i in G_old.nodes()}
    node_labels_old = {i:i for i in G_old.nodes()}
    node_color_old = ['b' for node in G_old.nodes()]
    nx.draw(G_old, pos=pos_old,node_color=node_color_old, labels=node_labels_old, font_color='white')
    # Frame 0 is always the base graph.
    filename = f'{0}.png'
    filenames.append(filename)
    plt.savefig(filename)
    G_pred = nx.Graph()
    G_pred.clear()
    G_pred.add_edges_from(base_graph_edges)
    G_pred.add_nodes_from(base_extra_nodes)
    edge_color = {}
    edge_weight = {}
    # for edge in G_pred.edges():
    #     edge_color[edge] = 'black'
    pos_new = {i:pos[i] for i in G_pred.nodes()}
    node_labels = {i:i for i in G_pred.nodes()}
    # Iterating over new nodes
    edges_learnt = []
    for index, new_node in enumerate(new_nodes):
        G_pred.add_edges_from(pred_new_node_edges[index])
        G_pred.add_edges_from(correct_new_edge[index])
        G_pred.add_edges_from(new_given_edges[index])
        pos_new[new_node] = pos[new_node]
        node_labels[new_node] = new_node
        # Recolour every edge for this frame. Edge tuples are compared in
        # reversed order (rev_edge) because the stored edge lists hold
        # (new_node, neighbour) while networkx may iterate either direction.
        for edge in G_pred.edges():
            rev_edge = edge[::-1]
            if edge[1] != new_node:
                # Edge does not end at the current online node: either part
                # of the base graph / previously learnt (black) or carried
                # over from an earlier frame (purple), else hidden (white).
                if edge in base_graph_edges or rev_edge in edges_learnt:
                    edge_color[edge] = 'black'
                    edge_weight[edge] = 1
                elif rev_edge in pred_new_node_edges[index] and rev_edge in correct_new_edge[index]:
                    edge_color[edge] = 'purple'
                    edge_weight[edge] = 2
                    edges_learnt.append(rev_edge)
                else:
                    edge_color[edge] = 'white'
                    edge_weight[edge] = 1
                    # print('whiten',edge)
            if rev_edge in new_given_edges[index]:
                edge_color[edge] = 'blue'
                edge_weight[edge] = 2
                edges_learnt.append(rev_edge)
                # print("given ",edge)
            elif rev_edge in correct_new_edge[index] and rev_edge in pred_new_node_edges[index] :
                edge_color[edge] = 'green'
                edge_weight[edge] = 5
                edges_learnt.append(rev_edge)
                print(edge, "is correct")
            elif rev_edge in pred_new_node_edges[index]:
                edge_color[edge] = 'red'
                edge_weight[edge] = 5
                print(edge, "is wrong")
            elif rev_edge in correct_new_edge[index]:
                edge_weight[edge] = 3
                edge_color[edge] = 'yellow'
                # print(edge, "was missed")
        # print('edges learnt so far')
        # print(edges_learnt)
        # print('-------')
        node_color = ['r' if node==new_node else 'b' for node in G_pred.nodes()]
        edges = G_pred.edges()
        ec = [edge_color[edge] for edge in edges]
        ew = [edge_weight[edge] for edge in edges]
        plt.figure(figsize=figsize)
        plt.xlim(min_x_pos,max_x_pos)
        plt.ylim(min_y_pos,max_y_pos)
        nx.draw(G_pred, pos=pos_new, labels=node_labels, edge_color=ec, width=ew, node_color=node_color, font_color='white')
        filename = f'{index+1}.png'
        filenames.append(filename)
        plt.savefig(filename)
    # Assemble the GIF: each PNG is repeated `framerate` times to control
    # how long it stays on screen, then the temporary PNGs are deleted.
    with imageio.get_writer(gif_name, mode='I') as writer:
        for filename in filenames:
            for _ in range(framerate):
                image = imageio.imread(filename)
                writer.append_data(image)
    for filename in set(filenames):
        os.remove(filename)
if __name__ == "__main__":
    # ! python preprocess.py --init_size 75 --num_online 10
    # ! python online_main.py --data_path /content/cs224w-project/dataset/online_init:75-online_nodes:10-split:0.4_0.4_0.1_0.1-seed:0.pkl
    # Load validation-time prediction results for a trained online model.
    res_val_path = './experiments/online.init_nodes:75.num_online:7.online_nodes:10.epochs:100.online_steps:10.layers:4.hidden_dim:32.node_dim:256.init_lr:0.01.online_lr:0.01.optim_wd:0.init_batch_size:65536.online_batch_size:32/logs/res_val.pkl'
    res_val = pickle.load(open(res_val_path, 'rb'))
    with open('./dataset/online_init:75-online_nodes:10-split:0.4_0.4_0.1_0.1-seed:0.pkl', 'rb') as f:
        dataset = pickle.load(f)
    online_node_edge_index = dataset['online']
    # Visualize only the first few online nodes to keep the GIF readable.
    new_nodes = list(res_val.keys())[:3]
    print(f"New Nodes: {new_nodes}")
    new_given_edges = []
    correct_new_edge = []
    all_online_edges = torch.Tensor()
    for n_id in new_nodes:
        # [1::2] keeps every other message edge -- presumably one direction
        # of bidirectional pairs; confirm against the preprocessing script.
        t_msg = online_node_edge_index[n_id]['train_msg'][1::2]
        t_sup = online_node_edge_index[n_id]['train_sup']
        new_given_edges.append(listify(torch.cat([t_msg,t_sup],dim=0)))
        valid = online_node_edge_index[n_id]['valid']
        test = online_node_edge_index[n_id]['test']
        correct_new_edge.append(listify(torch.cat([valid,test],dim=0)))
        all_online_edges = torch.cat([all_online_edges,t_msg,t_sup,valid,test],dim=0)
    print("num all_onl_edges = ",all_online_edges.shape)
    pred_new_node_edges = []
    for n_id in new_nodes:
        pred_new_node_edges.append(listify(torch.cat([res_val[n_id]['corr_pred'].view(-1,2),res_val[n_id]['inc_pred'].view(-1,2)],dim=0)))
    print("num_pred_e=",len(pred_new_node_edges))
    for p in pred_new_node_edges:
        all_online_edges = torch.cat([all_online_edges, torch.Tensor(p)],dim=0)
    all_online_edges = all_online_edges.type(dtype=torch.int64)
    print("num all_onl_edges = ",all_online_edges.shape)
    # Restrict the drawing to nodes that actually participate in online edges.
    useful_nodes = torch.unique(all_online_edges[:,1])
    print("num useful nodes = ",useful_nodes.shape)
    useful_edges = []
    for e in dataset['init_edge_index'].T:
        if e[0].item() in useful_nodes and e[1].item() in useful_nodes:
            useful_edges.append(tuplify(e))
    nodes_made_by_ue = []
    for e in useful_edges:
        e0 = int(e[0])
        e1 = int(e[1])
        if e0 not in nodes_made_by_ue:
            nodes_made_by_ue.append(e0)
        if e1 not in nodes_made_by_ue:
            nodes_made_by_ue.append(e1)
    print("num nodes in useful edge=",len(nodes_made_by_ue))
    # Nodes touched by online edges but absent from the filtered base edges;
    # drawn as isolated nodes of the base graph.
    extra_nodes = []
    for n in useful_nodes:
        if n not in nodes_made_by_ue:
            extra_nodes.append(n.item())
    print("extra nodes: ",extra_nodes)
    base_graph_edges = useful_edges
    base_extra_nodes = extra_nodes
    figsize = (30,30)
    fig_dir = './figs'
    # Fix: os.makedir / exists_ok do not exist in the os module -- the
    # correct API is os.makedirs(..., exist_ok=True), which also creates
    # missing parent directories.
    os.makedirs(fig_dir, exist_ok=True)
    gif_name = os.path.join(fig_dir,'online_link_pred_75.gif')
visualize(base_graph_edges, base_extra_nodes, new_nodes, new_given_edges,pred_new_node_edges, correct_new_edge, figsize,gif_name,framerate=12) |
samar-khanna/cs224w-project | log_parser.py | <filename>log_parser.py
import os
import numpy as np
import argparse
if __name__ == "__main__":
    # Tabulate confusion-matrix totals out of a training log file.
    parser = argparse.ArgumentParser(description="Script to tabulate results")
    parser.add_argument('--path', default=None, help="location of log.txt")
    args = parser.parse_args()
    # Running totals accumulated over every matching log line.
    val_tp=0.0
    val_fn=0.0
    val_tn=0.0
    val_fp=0.0
    test_tp=0.0
    test_fn=0.0
    test_tn=0.0
    test_fp=0.0
    with open(args.path,"r") as f:
        lines = f.readlines()
        for line in lines:
            # Each "VAL t..."/"TEST t..." line is split on ':'; the [1:]
            # slice drops one leading character (apparently a space after
            # each colon in the logged format -- note the print statements
            # below emit no such space; confirm against the log writer).
            if line[:5] =="VAL t":
                s = line.split(':')
                val_tp += float(s[1].split(',')[0][1:])
                val_fn += float(s[2].split(',')[0][1:])
                val_tn += float(s[3].split(',')[0][1:])
                val_fp += float(s[4].split(',')[0][1:])
            elif line[:6] =="TEST t":
                s = line.split(':')
                test_tp += float(s[1].split(',')[0][1:])
                test_fn += float(s[2].split(',')[0][1:])
                test_tn += float(s[3].split(',')[0][1:])
                test_fp += float(s[4].split(',')[0][1:])
    print(f"VAL tp:{val_tp}, fn:{val_fn}, tn:{val_tn}, fp:{val_fp}")
print(f"TEST tp:{test_tp}, fn:{test_fn}, tn:{test_tn}, fp:{test_fp}") |
samar-khanna/cs224w-project | gists/link_predictor.py | <reponame>samar-khanna/cs224w-project<filename>gists/link_predictor.py
class LinkPredictor(nn.Module):
    """MLP head that scores a pair of node embeddings as an edge probability.

    The two embeddings are combined with an elementwise product before the
    MLP, so the score is symmetric in (x_i, x_j).
    """
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout):
        super(LinkPredictor, self).__init__()
        # num_layers linear layers total:
        # in -> hidden, (num_layers - 2) x hidden -> hidden, hidden -> out
        self.lins = nn.ModuleList()
        self.lins.append(nn.Linear(in_channels, hidden_channels))
        for _ in range(num_layers - 2):
            self.lins.append(nn.Linear(hidden_channels, hidden_channels))
        self.lins.append(nn.Linear(hidden_channels, out_channels))
        self.dropout = dropout
    def reset_parameters(self):
        """Reinitialize every linear layer."""
        for lin in self.lins:
            lin.reset_parameters()
    def forward(self, x_i, x_j):
        """Return sigmoid edge-probabilities for embedding pairs (x_i, x_j)."""
        x = x_i * x_j
        # ReLU + dropout between all layers except the last.
        for lin in self.lins[:-1]:
            x = lin(x)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lins[-1](x)
        return torch.sigmoid(x)
|
samar-khanna/cs224w-project | online_eval.py | <filename>online_eval.py
import torch
from torch_geometric.data import DataLoader
def online_eval(model, link_predictor, emb, edge_index, pos_edges, neg_edges, batch_size):
    """
    Evaluates model on positive and negative edges for prediction
    :param model: Torch Graph model used for updating node embeddings based on message passing
    :param link_predictor: Torch model used for predicting whether edge exists or not
    :param emb: (N+1, d) Initial node embeddings for all N nodes in subgraph, along with new online node
    :param edge_index: (2, E) Edge index for edges in subgraph, along with message edges for online node
    :param pos_edges: (PE, 2) Positive edges from online node to subgraph (previously unseen)
    :param neg_edges: (PE, 2) Negative edges from online node to subgraph (previously unseen)
    :param batch_size: Number of positive (and negative) supervision edges to sample per batch
    :return: true positives, true negatives, false positives, false negatives, and
        dict(true positive edges, false positive edges, false negative edges)
    """
    model.eval()
    link_predictor.eval()
    tp = 0.
    tn = 0.
    fp = 0.
    fn = 0.
    # Edge lists collected (on CPU) for later inspection/visualisation.
    tp_pred = torch.empty(0, dtype=pos_edges.dtype)
    fp_pred = torch.empty(0, dtype=pos_edges.dtype)
    fn_pred = torch.empty(0, dtype=pos_edges.dtype)
    for edge_id in DataLoader(range(pos_edges.shape[0]), batch_size, shuffle=False, drop_last=False):
        # NOTE(review): node_emb does not change across batches in eval
        # mode; this call could be hoisted out of both loops.
        node_emb = model(emb, edge_index)  # (N, d)
        pos_edge = pos_edges[edge_id].T  # (2, B)
        # An edge counts as predicted-present at probability >= 0.5.
        pos_pred = link_predictor(node_emb[pos_edge[0]], node_emb[pos_edge[1]]).squeeze()  # (B, )
        tp += (pos_pred >= 0.5).sum().item()
        fn += (pos_pred < 0.5).sum().item()
        tp_pred = torch.cat((tp_pred, pos_edge.T[pos_pred >= 0.5].cpu()), dim=0)
        fn_pred = torch.cat((fn_pred, pos_edge.T[pos_pred < 0.5].cpu()), dim=0)
    for edge_id in DataLoader(range(neg_edges.shape[0]), batch_size, shuffle=False, drop_last=False):
        node_emb = model(emb, edge_index)  # (N, d)
        neg_edge = neg_edges[edge_id].T  # (2, B)
        neg_pred = link_predictor(node_emb[neg_edge[0]], node_emb[neg_edge[1]]).squeeze()  # (B, )
        fp += (neg_pred >= 0.5).sum().item()
        tn += (neg_pred < 0.5).sum().item()
        # Don't care about tn coz those are too many
        fp_pred = torch.cat((fp_pred, neg_edge.T[neg_pred >= 0.5].cpu()), dim=0)
    preds = {'tp_pred': tp_pred, 'fp_pred': fp_pred, 'fn_pred': fn_pred}
    return tp, tn, fp, fn, preds
|
samar-khanna/cs224w-project | online_train.py | <gh_stars>1-10
import torch
import numpy as np
from torch_geometric.data import DataLoader
def online_train(model, link_predictor, emb, edge_index, pos_train_edge, neg_train_edges,
                 batch_size, optimizer, device):
    """
    Runs training for a single online node given its edges to the existing subgraph
    :param model: Torch Graph model used for updating node embeddings based on message passing
    :param link_predictor: Torch model used for predicting whether edge exists or not
    :param emb: (N+1, d) Initial node embeddings for all N nodes in subgraph, along with new online node
    :param edge_index: (2, E) Edge index for edges in subgraph, along with message edges for online node
    :param pos_train_edge: (PE, 2) Positive edges from online node to subgraph, for supervision loss
    :param neg_train_edges: (NE, 2) All training negative edges from online node to subgraph.
        (Equal number of negative edges will be sampled as the number of positive edges for batch)
    :param batch_size: Number of positive (and negative) supervision edges to sample per batch
    :param optimizer: Torch Optimizer to update model parameters
    :param device: PyTorch device
    :return: Average supervision loss over all positive (and correspondingly sampled negative) edges
    """
    model.train()
    link_predictor.train()

    train_losses = []
    for edge_id in DataLoader(range(pos_train_edge.shape[0]), batch_size, shuffle=True):
        optimizer.zero_grad()

        # Run message passing on the inital node embeddings to get updated embeddings
        node_emb = model(emb, edge_index)  # (N, d)

        # Predict the class probabilities on the batch of positive edges using link_predictor
        pos_edge = pos_train_edge[edge_id].T  # (2, B)
        pos_pred = link_predictor(node_emb[pos_edge[0]], node_emb[pos_edge[1]])  # (B, )

        # Here we are given negative edges, so sample same number as pos edges and predict probabilities
        neg_idx = np.random.choice(len(neg_train_edges), edge_id.shape[0], replace=False)
        # Bug fix: transpose to (2, B) so row 0 holds source nodes and row 1
        # holds target nodes, mirroring the positive-edge handling above.
        # Previously the un-transposed (B, 2) tensor was indexed with [0]/[1],
        # which selected the endpoints of the first two *edges* instead of the
        # source/target columns.
        neg_edge = neg_train_edges[torch.from_numpy(neg_idx).to(device)].T  # (2, B)
        neg_pred = link_predictor(node_emb[neg_edge[0]], node_emb[neg_edge[1]])  # (B,)

        # Compute the corresponding negative log likelihood loss on the positive and negative edges
        loss = -torch.log(pos_pred + 1e-15).mean() - torch.log(1 - neg_pred + 1e-15).mean()
        loss.backward()
        optimizer.step()
        train_losses.append(loss.item())
    return sum(train_losses) / len(train_losses)
|
samar-khanna/cs224w-project | main.py | <gh_stars>1-10
import os
import argparse
import torch
from ogb.linkproppred import PygLinkPropPredDataset, Evaluator
from torch.optim import optimizer
import torch.optim as optim
from torch_geometric.data import DataLoader
from gnn_stack import GNNStack
from train import train
from link_predictor import LinkPredictor
from evaluate import test
from utils import print_and_log
def main():
    """Train offline link prediction on ogbl-ddi; checkpoint and evaluate every 10 epochs."""
    parser = argparse.ArgumentParser(description="Script to train link prediction in offline graph setting")
    parser.add_argument('--epochs', type=int, default=300,
                        help="Number of epochs for training")
    parser.add_argument('--lr', type=float, default=3e-3,
                        help="Learning rate training")
    parser.add_argument('--node_dim', type=int, default=256,
                        help='Embedding dimension for nodes')
    parser.add_argument('--dropout', type=float, default=0.3)
    parser.add_argument('--batch_size', type=int, default=64 * 1024)
    parser.add_argument('--num_layers', type=int, default=2)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--exp_dir', type=str, default=None,
                        help="Path to exp dir for model checkpoints and experiment logs")
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    optim_wd = 0
    epochs = args.epochs
    hidden_dim = args.hidden_channels
    dropout = args.dropout
    num_layers = args.num_layers
    lr = args.lr
    node_emb_dim = args.node_dim
    batch_size = args.batch_size
    exp_dir = args.exp_dir
    if exp_dir is None:
        # Default experiment dir name encodes the hyperparameters of the run.
        exp_dir = "./experiments"
        dir = f"offline.epochs:{epochs}.lr{lr}.layers:{num_layers}" \
              f".hidden_dim:{hidden_dim}.node_dim:{node_emb_dim}.init_batch_size:{batch_size}"
        exp_dir = os.path.join(exp_dir, dir)
    model_dir = os.path.join(exp_dir, 'checkpoints')
    logs_dir = os.path.join(exp_dir, 'logs')
    os.makedirs(exp_dir, exist_ok=True)
    os.makedirs(model_dir, exist_ok=True)
    os.makedirs(logs_dir, exist_ok=True)
    logfile_path = os.path.join(logs_dir, 'log.txt')
    # Line-buffered append so progress is visible while training runs.
    logfile = open(logfile_path, "a" if os.path.isfile(logfile_path) else "w", buffering=1)
    # Download and process data at './dataset/ogbl-ddi/'
    dataset = PygLinkPropPredDataset(name="ogbl-ddi", root='./dataset/')
    split_edge = dataset.get_edge_split()
    pos_train_edge = split_edge['train']['edge'].to(device)
    graph = dataset[0]
    edge_index = graph.edge_index.to(device)
    evaluator = Evaluator(name='ogbl-ddi')
    # Create embedding, model, and optimizer
    emb = torch.nn.Embedding(graph.num_nodes, node_emb_dim).to(device)
    model = GNNStack(node_emb_dim, hidden_dim, hidden_dim, num_layers, dropout, emb=True).to(device)
    link_predictor = LinkPredictor(hidden_dim, hidden_dim, 1, num_layers + 1, dropout).to(device)
    # All three parameter groups (GNN, predictor, node embeddings) are trained jointly.
    optimizer = optim.Adam(
        list(model.parameters()) + list(link_predictor.parameters()) + list(emb.parameters()),
        lr=lr, weight_decay=optim_wd
    )
    for e in range(epochs):
        loss = train(model, link_predictor, emb.weight, edge_index, pos_train_edge, batch_size, optimizer)
        print_and_log(logfile, f"Epoch {e + 1}: loss: {round(loss, 5)}")
        # Checkpoint and evaluate every 10 epochs.
        if (e + 1) % 10 == 0:
            torch.save(model.state_dict(), os.path.join(model_dir, f"model_{e + 1}.pt"))
            torch.save(emb.state_dict(), os.path.join(model_dir, f"emb_{e + 1}.pt"))
            torch.save(link_predictor.state_dict(), os.path.join(model_dir, f"link_pred_{e + 1}.pt"))
            result = test(model, link_predictor, emb.weight, edge_index, split_edge, batch_size, evaluator)
            print_and_log(logfile, f"{result}")
    logfile.close()
if __name__ == "__main__":
    # Script entry point.
    main()
|
samar-khanna/cs224w-project | graph_sage.py | <reponame>samar-khanna/cs224w-project
import torch
import torch.nn as nn
import torch_scatter
from torch_geometric.nn.conv import MessagePassing
class GraphSage(MessagePassing):
    """GraphSAGE convolution layer with mean aggregation.

    Computes out = lin_l(x) + lin_r(mean_{j in N(i)} x_j), optionally
    followed by L2 normalization of each output row.
    """
    def __init__(self, in_channels, out_channels, normalize=True,
                 bias=False, **kwargs):
        super(GraphSage, self).__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.normalize = normalize
        # self.lin_l is the linear transformation that you apply to embedding
        # for central node.
        # self.lin_r is the linear transformation that you apply to aggregated
        # message from neighbors.
        self.lin_l = torch.nn.Linear(in_channels, out_channels, bias=bias)
        self.lin_r = torch.nn.Linear(in_channels, out_channels, bias=bias)
        self.reset_parameters()
    def reset_parameters(self):
        """Reinitialize both linear transforms."""
        self.lin_l.reset_parameters()
        self.lin_r.reset_parameters()
    def forward(self, x, edge_index, size=None):
        """Run one round of message passing; x has shape (N, in_channels)."""
        # 1. Call the propagate function to conduct the message passing.
        #    1.1 See the description of propagate above or the following link for more information:
        #        https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html
        #    1.2 We will only use the representation for neighbor nodes (x_j), so by default
        #        we pass the same representation for central and neighbor nodes as x=(x, x).
        # 2. Update our node embedding with skip connection from the previous layer.
        # 3. If normalize is set, do L-2 normalization (defined in
        #    torch.nn.functional)
        # x is shape (N, in_c)
        neighbor_out = self.propagate(edge_index, x=(x, x), size=size)
        out = self.lin_l(x) + self.lin_r(neighbor_out)
        if self.normalize:
            out = torch.nn.functional.normalize(out, p=2)
        ############################################################################
        return out
    def message(self, x_j):
        """Identity message: each neighbour contributes its own embedding."""
        # x_j has shape (E, d)
        out = x_j
        return out
    def aggregate(self, inputs, index, dim_size=None):
        """Mean-aggregate messages per destination node via torch_scatter."""
        # The axis along which to index number of nodes.
        node_dim = self.node_dim
        # https://pytorch-scatter.readthedocs.io/en/latest/functions/scatter.html#torch_scatter.scatter
        out = torch_scatter.scatter(inputs, index, dim=node_dim, reduce='mean')
        return out
|
samar-khanna/cs224w-project | gnn_stack.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric as pyg
class GNNStack(torch.nn.Module):
    """A stack of ``num_layers`` SAGEConv layers followed by an MLP head.

    With ``emb=True``, ``forward`` returns raw embeddings; otherwise it
    returns log-softmax class scores suitable for ``loss``.
    """
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, dropout, emb=False):
        super(GNNStack, self).__init__()
        assert (num_layers >= 1), 'Number of layers is not >=1'
        conv_model = pyg.nn.SAGEConv
        # First conv maps input_dim -> hidden_dim; the rest are hidden -> hidden.
        layer_in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)
        self.convs = nn.ModuleList(conv_model(d, hidden_dim) for d in layer_in_dims)
        self.dropout = dropout
        self.num_layers = num_layers
        self.emb = emb
        # post-message-passing projection head
        self.post_mp = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim), nn.Dropout(self.dropout),
            nn.Linear(hidden_dim, output_dim))
    def forward(self, x, edge_index):
        """Apply every conv (ReLU + dropout after each), then the MLP head."""
        for conv in self.convs:
            x = F.dropout(F.relu(conv(x, edge_index)), p=self.dropout, training=self.training)
        x = self.post_mp(x)
        return x if self.emb else F.log_softmax(x, dim=1)
    def loss(self, pred, label):
        """Negative log-likelihood loss over log-softmax predictions."""
        return F.nll_loss(pred, label)
|
dainis-boumber/SiameseULMFiT | text_util.py | <filename>text_util.py
re1 = re.compile(r' +')


def fixup_string(x):
    """Normalize scraped text.

    Undoes common HTML-entity fragments and escape artifacts (in a fixed
    order), unescapes remaining HTML entities, and collapses runs of spaces
    into a single space.
    """
    # Order matters: e.g. '\\n' must be handled before the bare '\\' rule.
    replacements = (
        ('#39;', "'"), ('amp;', '&'), ('#146;', "'"),
        ('nbsp;', ' '), ('#36;', '$'), ('\\n', "\n"), ('quot;', "'"),
        ('<br />', "\n"), ('\\"', '"'), ('<unk>', 'u_n'), (' @.@ ', '.'),
        (' @-@ ', '-'), ('\\', ' \\ '),
    )
    for old, new in replacements:
        x = x.replace(old, new)
    return re1.sub(' ', html.unescape(x))
def get_texts(df):
    """Clean and tokenize the text column (column 0) of *df*.

    Each document is prefixed with the 'x_bos ' beginning-of-stream marker,
    cleaned with `fixup_string`, then tokenized with the (external)
    Tokenizer across CPU cores.
    """
    # Prefix the BOS marker first, then clean. The previous code called a
    # nonexistent `fixup` helper (NameError at runtime) and immediately
    # overwrote the cleaned result with the raw column; both defects are
    # fixed here.
    texts = 'x_bos ' + df[0].astype(str)
    texts = list(texts.apply(fixup_string).values)
    tok = Tokenizer().proc_all_mp(partition_by_cores(texts))
return tok |
PumpMagic/aces-rank-extractor | main.py | <reponame>PumpMagic/aces-rank-extractor
from PIL import Image, ImageDraw
import pytesseract
import subprocess
import os
import itertools
from collections import Counter
import time
TEST_VIDEO_PATH = "/Users/owner/repos/aces-scraper/resources/acesranks-01.mp4"
IDENTIFIER_STRIP_X = 37
IDENTIFIER_STRIP_Y_START = 97
IDENTIFIER_STRIP_Y_END = 655
def get_row_type(identifier_strip_rgb_triplet):
    """Classify a leaderboard row from one pixel of its identifier strip.

    Returns one of "heading", "personal", "dark", or "light" based on the
    pixel's red channel and overall brightness (R + G + B).
    """
    r, g, b = identifier_strip_rgb_triplet
    brightness = r + g + b
    if brightness < 40:
        return "heading"
    if r > 140:
        return "personal"
    return "dark" if brightness < 140 else "light"
class LeaderboardRowBounds:
    """Vertical extent of one classified leaderboard row within a frame."""
    def __init__(self, row_type, y_start, y_end):
        # One of the strings returned by get_row_type()
        self.row_type = row_type
        # Top pixel row of this leaderboard row
        self.y_start = y_start
        # Bottom pixel row (see extract_row_classes_and_bounds for the
        # exact inclusive/exclusive semantics)
        self.y_end = y_end
def extract_row_classes_and_bounds(rgb_image, identifier_strip_x, identifier_strip_y_start, identifier_strip_y_end):
    """
    Scan one pixel column (the "identifier strip") and group consecutive
    pixels with the same classified row type into row bounds.

    NOTE(review): the end bound is inconsistent -- a row closed by a type
    transition is appended with row_end_y + 1, while the final row uses
    row_end_y as-is; and if the very last pixel starts a new type, that
    one-pixel row is never appended. Confirm whether this is intended.

    :param rgb_image: PIL image already converted to RGB.
    :param identifier_strip_x: x coordinate of the strip to sample.
    :param identifier_strip_y_start: first y (inclusive) to sample.
    :param identifier_strip_y_end: last y (inclusive) to sample.
    :return: [ classified row bounds ] ; [ LeaderboardRowBounds ]
    """
    # { y: (r, g, b) }; { int: (int, int, int) }
    ys_and_rgbs = { strip_y: rgb_image.getpixel((identifier_strip_x, strip_y))
                    for strip_y in range(identifier_strip_y_start, identifier_strip_y_end+1) }
    # { y: row type } ; { int: str }
    ys_and_row_types = { y: get_row_type(rgb)
                         for y, rgb in ys_and_rgbs.items() }
    ranking_rows = list() # [ ClassifiedRowBounds ]
    # Walk the strip top-to-bottom, run-length encoding the row types.
    sorted_y_values_and_row_types = sorted(ys_and_row_types.items(), key=lambda tup: tup[0])
    max_y = sorted_y_values_and_row_types[-1][0]
    traversing_row_type = sorted_y_values_and_row_types[0][1]
    row_start_y = sorted_y_values_and_row_types[0][0]
    row_end_y = row_start_y
    for y, row_type in sorted_y_values_and_row_types:
        if row_type == traversing_row_type:
            if y < max_y:
                # continuing a row
                row_end_y = y
            else:
                # this is the very last item we're traversing.
                # make sure we append whatever last row we've been working on!
                ranking_rows.append(LeaderboardRowBounds(traversing_row_type, row_start_y, row_end_y))
        else:
            # we're transitioning from one row type to another.
            # first, append the row that we just finished
            ranking_rows.append(LeaderboardRowBounds(traversing_row_type, row_start_y, row_end_y + 1))
            # then, initialize the bounds of the new row
            row_start_y = y
            row_end_y = y
            traversing_row_type = row_type
    return ranking_rows
def identify_row_bounds(image):
    """
    Identify the bounds of all rows in a leaderboard snapshot.

    :param image: PIL image of one leaderboard frame.
    :return: list of LeaderboardRowBounds, one per detected row.
    """
    # Get the RGB values of each pixel in the identifier strip
    rgb_image = image.convert('RGB')
    classified_row_bounds_list = extract_row_classes_and_bounds(rgb_image,
                                                                IDENTIFIER_STRIP_X,
                                                                IDENTIFIER_STRIP_Y_START,
                                                                IDENTIFIER_STRIP_Y_END)
    # for classified_row_bounds in classified_row_bounds_list:
    #     print("{} row: pixels [{}, {}]".format(classified_row_bounds.row_type,
    #                                            classified_row_bounds.y_start,
    #                                            classified_row_bounds.y_end))
    return classified_row_bounds_list
def dump_marked_image(source_image, row_bounds_list, output_path):
    """Save a copy of *source_image* with a coloured rectangle around each
    classified row (colour keyed by row type).

    :param source_image: PIL image of the full frame (left untouched).
    :param row_bounds_list: list of LeaderboardRowBounds to outline.
    :param output_path: where to write the marked-up image.
    """
    ROW_TYPES_TO_COLORS = {
        "heading": (255, 255, 0),
        "light": (102, 255, 51),
        "dark": (255, 0, 255),
        "personal": (0, 255, 255)
    }
    # Initialize an output image
    image_with_bounding_boxes = source_image.copy()
    # Mark it up
    # TODO: Magic numbers
    for row_bounds in row_bounds_list:
        draw = ImageDraw.Draw(image_with_bounding_boxes)
        draw.rectangle(((31, row_bounds.y_start + 1), (1219, row_bounds.y_end - 1)),
                       outline=ROW_TYPES_TO_COLORS[row_bounds.row_type])
    image_with_bounding_boxes.save(output_path)
class LeaderboardRow():
    """One parsed leaderboard row. Fields are raw OCR strings; any field may
    be None except ranking (see ocr_row, which returns None without it)."""
    def __init__(self, ranking, character, country, nickname, points, wins_losses, win_percent, rating):
        self.ranking = ranking
        # character and country are not extracted yet -- ocr_row currently
        # passes None for both (see its TODO about an image classifier).
        self.character = character
        self.country = country
        self.nickname = nickname
        self.points = points
        self.wins_losses = wins_losses
        self.win_percent = win_percent
        self.rating = rating
def filter_extractable_bounds(raw_bounds_list):
    """
    Keep only row bounds tall enough (>= 40 px) for reliable OCR extraction.

    :param raw_bounds_list: iterable of objects with y_start/y_end attributes.
    :return: filtered list, original order preserved.
    """
    MIN_ROW_HEIGHT = 40
    keep = []
    for bounds in raw_bounds_list:
        if bounds.y_end - bounds.y_start >= MIN_ROW_HEIGHT:
            keep.append(bounds)
    return keep
# TODO: Maybe another intermediate format that keys by ranking. Would allow consensus algorithm that could work with
# imperfect rows, e.g. if one row has ranking 4 and nickname X and points unknown and another row has ranking 4 and
# nickname unknown and points Y, could combine into ranking 4 and nickname X and points Y
def ocr_row(image, row_bounds):
    """
    OCR a single leaderboard row into a LeaderboardRow of raw strings.

    :param image: PIL image of the full frame.
    :param row_bounds: LeaderboardRowBounds delimiting the row vertically.
    :return: None, or LeaderboardRow with all fields Noneable except for ranking.
    """
    # Crop each cell out. Column boundaries (in pixels) within a row:
    RANKING_START_X = IDENTIFIER_STRIP_X
    RANKING_END_X = 161
    NICKNAME_START_X = 288
    NICKNAME_END_X = 548
    POINTS_START_X = 554
    POINTS_END_X = 732
    WINS_LOSSES_START_X = 738
    WINS_LOSSES_END_X = 934
    WIN_PERCENT_START_X = 935
    WIN_PERCENT_END_X = 1020
    RATING_START_X = 1024
    RATING_END_X = 1218
    # convert the image to black and white before passing to tesseract - seems to help OCR
    image_bw = image.convert("L")
    # (Removed: leftover debug dump of the grayscale frame to /tmp on every
    # call -- it wrote a file per row and served no runtime purpose.)
    ranking_image = image_bw.crop((RANKING_START_X, row_bounds.y_start, RANKING_END_X, row_bounds.y_end))
    nickname_image = image_bw.crop((NICKNAME_START_X, row_bounds.y_start, NICKNAME_END_X, row_bounds.y_end))
    points_image = image_bw.crop((POINTS_START_X, row_bounds.y_start, POINTS_END_X, row_bounds.y_end))
    wins_losses_image = image_bw.crop((WINS_LOSSES_START_X, row_bounds.y_start, WINS_LOSSES_END_X, row_bounds.y_end))
    win_percent_image = image_bw.crop((WIN_PERCENT_START_X, row_bounds.y_start, WIN_PERCENT_END_X, row_bounds.y_end))
    rating_image = image_bw.crop((RATING_START_X, row_bounds.y_start, RATING_END_X, row_bounds.y_end))
    # PSM 7 means "treat the image as a single text line"
    raw_ranking = pytesseract.image_to_string(ranking_image,
                                              config="--psm 7 -c tessedit_char_whitelist=0123456789",
                                              lang="eng")
    # God help us with the nickname. Is there any pattern to the language / character set?
    raw_nickname = pytesseract.image_to_string(nickname_image,
                                               config="--psm 7",
                                               lang="eng")
    raw_points = pytesseract.image_to_string(points_image,
                                             config="--psm 7 -c tessedit_char_whitelist=0123456789,",
                                             lang="eng")
    raw_wins_losses = pytesseract.image_to_string(wins_losses_image,
                                                  config="--psm 7 -c tessedit_char_whitelist=0123456789-",
                                                  lang="eng")
    raw_win_percent = pytesseract.image_to_string(win_percent_image,
                                                  config="--psm 7 -c tessedit_char_whitelist=0123456789%",
                                                  lang="eng")
    raw_rating = pytesseract.image_to_string(rating_image,
                                             config="--psm 7 -c tessedit_char_whitelist=0123456789",
                                             lang="eng")
    # TODO: Identify country and nickname using a custom image classifier
    # Do basic text cleanup and throw away clearly wrong data
    ranking = raw_ranking.strip()
    if len(ranking) < 1:
        ranking = None
        # No key! Useless row, at least for now.
        return None
    nickname = raw_nickname
    # Points with a leading/trailing comma are OCR noise; discard.
    points = raw_points.strip()
    if points.startswith(",") or points.endswith(","):
        points = None
    # TODO: Do we want to separate wins from losses here? Or later?
    # Doing it here would be easier
    wins_losses = raw_wins_losses.strip()
    if wins_losses.startswith("-") or wins_losses.endswith("-"):
        wins_losses = None
    # A valid win percentage ends with exactly one '%'.
    win_percent = raw_win_percent.strip()
    if not win_percent.endswith("%") or win_percent.count("%") != 1:
        win_percent = None
    rating = raw_rating.strip()
    # character and country are not extracted yet (see TODO above).
    raw_row = LeaderboardRow(ranking, None, None, nickname, points, wins_losses, win_percent, rating)
    return raw_row
def extract_leaderboard_rows_from_image(image):
    """OCR every extractable leaderboard row found in a single frame."""
    extractable = filter_extractable_bounds(identify_row_bounds(image))
    return [ocr_row(image, bounds) for bounds in extractable]
def reach_leaderboard_consensus(all_incomplete_leaderboard_rows):
    """Combine per-frame OCR rows with the same ranking into consensus rows.

    NOTE(review): this function is unfinished -- it computes a consensus
    nickname per ranking but never assembles or returns LeaderboardRow
    results (it implicitly returns None, which is what the caller gets).
    """
    # Group rows by ranking
    # TODO: Need to convert ranking strs to ints for this to be useful?
    sorted_incomplete_leaderboard_rows = sorted(all_incomplete_leaderboard_rows, key=lambda r: r.ranking)
    incomplete_row_groups = itertools.groupby(sorted_incomplete_leaderboard_rows, key=lambda r: r.ranking)
    for ranking, incomplete_row_group_iterable in incomplete_row_groups:
        incomplete_row_group = list(incomplete_row_group_iterable)
        # Collect every non-None observation of each column across frames.
        nicknames = [ r.nickname for r in incomplete_row_group if r.nickname is not None ]
        points = [ r.points for r in incomplete_row_group if r.points is not None ]
        wins_losses = [ r.wins_losses for r in incomplete_row_group if r.wins_losses is not None ]
        win_percents = [ r.win_percent for r in incomplete_row_group if r.win_percent is not None ]
        ratings = [ r.rating for r in incomplete_row_group if r.rating is not None ]
        if len(nicknames) < 1 or len(points) < 1 or len(wins_losses) < 1 or len(win_percents) < 1 or len(ratings) < 1:
            # Not one instance of a row contained a certain column
            print("Unable to extract something for ranking {}".format(ranking))
            # TODO: Throw an error?
            continue
        # Majority vote: the most frequently observed nickname wins.
        nickname_of_choice = max(Counter(nicknames).items(), key=lambda tup: tup[1])[0]
    print("seeya")
def extract_all_incomplete_leaderboard_rows_from_frames(video_frame_images):
    """OCR every frame and pool the (possibly incomplete) rows from all frames.

    NOTE(review): the unconditional `break` below stops after the FIRST
    frame, so only one frame is ever processed despite the progress message
    -- presumably a development shortcut; confirm before removing.
    """
    num_frames_extracted_from = 0
    all_incomplete_rows_from_all_frames = list()
    for video_frame_image in video_frame_images:
        # TODO: This is really slow. Consider parallelizing it. Would be really easy with subprocess module.
        all_incomplete_rows_from_all_frames.extend(extract_leaderboard_rows_from_image(video_frame_image))
        num_frames_extracted_from += 1
        if num_frames_extracted_from % 5 == 0:
            print("Extracted from {} frames...".format(num_frames_extracted_from))
        break
    # TODO: Implement a consensus algorithm that uses the incomplete rows to make
    # Just group the incomplete rows by rank, then take the most common of each present (non-None) value
    return all_incomplete_rows_from_all_frames
def extract_frames(video_path, temp_dir):
    """
    Extract all frames from a given video into individual image files, writing them to a temporary directory.
    :param video_path: Location of video in filesystem.
    :param temp_dir: Directory under which a per-video output folder is created.
    :return: List of paths to extracted frames.
    """
    # TODO: Consider using an ffmpeg wrapper. Might be cleaner, and would make dep on ffmpeg more explicit.
    ffmpeg_output_dir = os.path.join(temp_dir, os.path.basename(video_path))
    ffmpeg_output_pattern = os.path.join(ffmpeg_output_dir, "out%06d.png")
    os.makedirs(ffmpeg_output_dir, exist_ok=True)
    # Run ffmpeg
    # subprocess.check_call(["ffmpeg", "-i", video_path, ffmpeg_output_path])
    # TODO: -ss here is a hack to ignore non-leaderboard frames
    subprocess.check_call(["ffmpeg", "-i", video_path, "-ss", "00:00:09", ffmpeg_output_pattern])
    # TODO: This could be problematic if some files existed in the output directory already; it would return those
    # files as well. Consider adding a random string to the output path.
    filenames = os.listdir(ffmpeg_output_dir)
    frame_paths = [ os.path.join(ffmpeg_output_dir, filename) for filename in filenames ]
    return frame_paths
def extract_leaderboard_data_from_video(video_path):
    """End-to-end pipeline: video file -> consensus leaderboard rows."""
    frame_paths = extract_frames(video_path, "/tmp/frames")
    # Load every extracted frame eagerly. An iterator would save memory,
    # but input videos are only about thirty seconds long, so this is fine.
    frames = [Image.open(frame_path) for frame_path in frame_paths]
    # TODO: Filter out non-leaderboard frames, or make sure that the video does not have any
    incomplete_rows = extract_all_incomplete_leaderboard_rows_from_frames(frames)
    return reach_leaderboard_consensus(incomplete_rows)
if __name__ == '__main__':
    # Run the full extraction pipeline against the bundled test video.
    data = extract_leaderboard_data_from_video(TEST_VIDEO_PATH)
print("bye") |
dethnass/MechanicalSoup | setup.py | <reponame>dethnass/MechanicalSoup
import re
import sys
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup # Always prefer setuptools over distutils
def requirements_from_file(filename):
    """Parse a pip requirements file into a list of requirement strings.

    Blank lines and pip option lines (those starting with '--') are skipped.
    Bug fix: the file handle is now closed deterministically via a context
    manager instead of being leaked by the open() call inside the
    comprehension.
    """
    with open(filename, 'r') as f:
        return [line.strip() for line in f
                if line.strip() and not line.strip().startswith('--')]
def read(fname, URL, URLImage):
    """Read *fname* (relative to this file) and absolutize its reST links.

    `<target>`__ links become absolute links into the GitHub repository at
    *URL*, and image paths starting with '/' are prefixed with *URLImage*.
    Bug fix: the file handle is closed via a context manager, and the file is
    read as UTF-8 text directly instead of relying on the old Python-2
    ``decode()`` fallback (a no-op on Python 3 strings).
    """
    with open(path.join(path.dirname(__file__), fname),
              'r', encoding='utf-8') as f:
        readme = f.read()
    # turn relative links into absolute ones
    readme = re.sub(r'`<([^>]*)>`__',
                    r'`\1 <' + URL + r"/blob/master/\1>`__",
                    readme)
    readme = re.sub(r"\.\. image:: /", ".. image:: " + URLImage + "/", readme)
    return readme
# Resolve paths relative to this setup.py so the build works from any cwd.
here = path.abspath(path.dirname(__file__))

# Load version/metadata without importing the package (which would require
# its dependencies at build time); __version__.py populates `about`.
about = {}
# `open` here is codecs.open (imported at the top of the file), so the third
# positional argument 'utf-8' is the encoding.
with open(path.join(here, 'mechanicalsoup', '__version__.py'),
          'r', 'utf-8') as f:
    exec(f.read(), about)

# Don't install pytest-runner on every setup.py run, just for tests.
# See https://pypi.org/project/pytest-runner/#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []

setup(
    name=about['__title__'],
    # useful: python setup.py sdist bdist_wheel upload
    version=about['__version__'],
    description=about['__description__'],
    # `read` rewrites the README's relative links to absolute GitHub URLs
    # so they render correctly on PyPI.
    long_description=read('README.rst', about['__github_url__'], about[
        '__github_assets_absoluteURL__']),
    url=about['__url__'],
    license=about['__license__'],
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    packages=['mechanicalsoup'],
    # List run-time dependencies here. These will be installed by pip
    # when your project is installed. For an analysis of
    # "install_requires" vs pip's requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=requirements_from_file('requirements.txt'),
    setup_requires=pytest_runner,
    tests_require=requirements_from_file('tests/requirements.txt'),
)
|
dethnass/MechanicalSoup | examples/example.py | """Example app to login to GitHub using the StatefulBrowser class.
NOTE: This example will not work if the user has 2FA enabled."""
from __future__ import print_function
import argparse
import mechanicalsoup
from getpass import getpass
parser = argparse.ArgumentParser(description="Login to GitHub.")
parser.add_argument("username")
args = parser.parse_args()
args.password = getpass("Please enter your GitHub password: ")
browser = mechanicalsoup.StatefulBrowser(
soup_config={'features': 'lxml'},
raise_on_404=True,
user_agent='MyBot/0.1: mysite.example.com/bot_info',
)
# Uncomment for a more verbose output:
# browser.set_verbose(2)
browser.open("https://github.com")
browser.follow_link("login")
browser.select_form('#login form')
browser["login"] = args.username
browser["password"] = <PASSWORD>
resp = browser.submit_selected()
# Uncomment to launch a web browser on the current page:
# browser.launch_browser()
# verify we are now logged in
page = browser.page
messages = page.find("div", class_="flash-messages")
if messages:
print(messages.text)
assert page.select(".logout-form")
print(page.title.text)
# verify we remain logged in (thanks to cookies) as we browse the rest of
# the site
page3 = browser.open("https://github.com/MechanicalSoup/MechanicalSoup")
assert page3.soup.select(".logout-form")
|
dethnass/MechanicalSoup | mechanicalsoup/__init__.py | from .utils import LinkNotFoundError
from .browser import Browser
from .form import Form, InvalidFormMethod
from .stateful_browser import StatefulBrowser
from .__version__ import __version__
__all__ = ['StatefulBrowser', 'LinkNotFoundError', 'Browser', 'Form',
'InvalidFormMethod', '__version__']
|
pranavgupta1234/pielogstash | logstashpy/formatters/base_formatter.py | <filename>logstashpy/formatters/base_formatter.py<gh_stars>0
import traceback
import logging
import socket
import sys
from datetime import datetime
import json
def get_extra_fields(record):
    """Return the user-supplied attributes of *record* as a JSON-safe dict.

    Standard LogRecord attributes are excluded; values of non-trivial types
    are replaced by their repr() so the result stays serializable.
    """
    # Attributes belonging to the standard LogRecord contract; see
    # http://docs.python.org/library/logging.html#logrecord-attributes
    skip_list = (
        'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
        'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
        'msecs', 'message', 'msg', 'name', 'pathname', 'process',
        'processName', 'relativeCreated', 'thread', 'threadName', 'extra',
        'stacklevel', 'password', 'stack_info')
    easy_types = (str, bool, dict, float, int, list, type(None))
    return {
        attr: (value if isinstance(value, easy_types) else repr(value))
        for attr, value in record.__dict__.items()
        if attr not in skip_list
    }
def get_debug_information(record):
    """Return stack-trace fields for a record logged with stack_info=True.

    Bug fix: ``record.stack_info`` is already a formatted string (or None);
    the previous code passed it to ``traceback.format_stack``, which expects
    a frame object and raised at runtime.
    """
    stack_debug_info = {
        'stack_info': record.stack_info,
    }
    return stack_debug_info
def get_process_info(record):
    """Return process/thread identity fields from a log record.

    Bug fix: 'process' previously duplicated ``record.processName``; it now
    carries the numeric PID from ``record.process``, as the key implies.
    """
    process_thread_fields = {
        'thread': record.thread,
        'threadName': record.threadName,
        'process': record.process,
        'processName': record.processName,
    }
    return process_thread_fields
def format_source(message_type, host, path):
    """Build a source URI of the form ``<type>://<host>/<path>``."""
    return "{}://{}/{}".format(message_type, host, path)
def format_timestamp(time):
    """Render a POSIX timestamp as ISO-8601 UTC with millisecond precision.

    Uses a timezone-aware UTC datetime: ``datetime.utcfromtimestamp`` is
    deprecated since Python 3.12, and the aware form produces identical
    wall-clock fields.
    """
    from datetime import timezone  # local import; module header only pulls in datetime
    tstamp = datetime.fromtimestamp(time, tz=timezone.utc)
    return tstamp.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (tstamp.microsecond / 1000) + "Z"
def format_exception(exc_info):
    """Render an ``exc_info`` triple as a traceback string ('' when absent)."""
    if not exc_info:
        return ''
    return ''.join(traceback.format_exception(*exc_info))
class LogstashFormatterBase(logging.Formatter):
    """Shared state (message type, tags, host name) for logstash formatters."""

    def __init__(self, message_type='Logstash', tags=None, fqdn=False):
        super().__init__()
        self.message_type = message_type
        self.tags = [] if tags is None else tags
        # fqdn=True reports the fully qualified domain name instead of the
        # short host name.
        self.host = socket.getfqdn() if fqdn else socket.gethostname()
class DefaultLogstashFormatter(logging.Formatter):
    """Formatter that serializes the whole LogRecord ``__dict__`` to JSON.

    NOTE(review): ``__init__`` duplicates LogstashFormatterBase verbatim;
    consider inheriting from it instead of logging.Formatter.
    """

    def __init__(self, message_type='Logstash', tags=None, fqdn=False):
        super().__init__()
        self.message_type = message_type
        self.tags = tags if tags is not None else []
        if fqdn:
            self.host = socket.getfqdn()
        else:
            self.host = socket.gethostname()

    def format(self, record: logging.LogRecord) -> str:
        """Format *record* and return its attribute dict serialized as JSON."""
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        # (dropped the old dead `s = self.formatMessage(record)` assignment;
        # formatMessage does not mutate the record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.stack_info:
            record.stack_info = self.formatStack(record.stack_info)
        # update record with extra and process/thread fields
        record.__dict__.update(get_extra_fields(record))
        record.__dict__.update(get_process_info(record))
        # Bug fix: record.__dict__ can contain values json cannot encode
        # (e.g. the exc_info tuple when an exception is logged, or objects in
        # `args`); fall back to repr() instead of raising TypeError.
        return json.dumps(dict(record.__dict__), default=repr)
pranavgupta1234/pielogstash | tests/test_serialization.py | <reponame>pranavgupta1234/pielogstash<filename>tests/test_serialization.py
import unittest
import os, sys
sys.path.insert(0, os.path.join(os.getcwd(), "../logstashpy/"))
from logstashpy.serialization import (json_serializer,
msgpack_serializer,
append_newline_byte,
append_newline
)
import json
class SerializationTests(unittest.TestCase):
    """Unit tests for the helper functions in logstashpy.serialization."""

    def test_append_newline(self):
        payload = "somedata"
        self.assertEqual(payload + "\n", append_newline(payload))

    def test_append_newline_bytes(self):
        payload = b'somedata'
        self.assertEqual(b'somedata\n', append_newline_byte(payload))

    def test_json_serialization(self):
        sample_json = {
            'a': 5,
            'b': 6
        }
        encoded = json.dumps(sample_json)
        expected = encoded.encode('utf-8') + b'\n'
        self.assertEqual(json_serializer(encoded), expected)
|
pranavgupta1234/pielogstash | logstashpy/handlers/udp_handler.py | <gh_stars>0
from logging.handlers import DatagramHandler
from logstashpy.handlers.tcp_handler import TCPLogstashHandler
class UDPLogstashHandler(TCPLogstashHandler, DatagramHandler):
    # Reuses TCPLogstashHandler's formatting/serialization while the later
    # DatagramHandler base supplies the UDP transport through the MRO.
    # NOTE(review): TCPLogstashHandler's makeSocket override (which can wrap
    # the socket in TLS) still precedes DatagramHandler in the MRO -- confirm
    # UDP sends are not accidentally wrapped when ssl=True.
    pass
|
pranavgupta1234/pielogstash | logstashpy/formatters/old_formatters.py | from logstashpy.formatters.base_formatter import LogstashFormatterBase
from logstashpy.formatters.base_formatter import (get_extra_fields,
get_process_info,
get_debug_information)
from logstashpy.formatters.base_formatter import (format_source,
format_timestamp,
format_exception)
import json
class LogstashFormatterVersion0(LogstashFormatterBase):
    """Formatter for the legacy (version 0) logstash event schema."""

    # schema version marker
    version = 0

    def format(self, record):
        """Serialize *record* as a version-0 logstash JSON event."""
        # Assemble the nested @fields payload first: base fields, then
        # user extras, process/thread info, and (optionally) debug info.
        fields = {
            'levelname': record.levelname,
            'logger': record.name,
        }
        fields.update(get_extra_fields(record))
        fields.update(get_process_info(record))
        if record.exc_info:
            fields.update(get_debug_information(record))
        # Outer event keys in the same order as the original schema.
        payload = {
            '@timestamp': format_timestamp(record.created),
            '@message': record.getMessage(),
            '@source': format_source(self.message_type, self.host,
                                     record.pathname),
            '@source_host': self.host,
            '@source_path': record.pathname,
            '@tags': self.tags,
            '@type': self.message_type,
            '@fields': fields,
        }
        return json.dumps(payload)
class LogstashFormatterVersion1(LogstashFormatterBase):
    """Formatter for the version-1 logstash event schema (flat fields)."""

    def format(self, record):
        """Serialize *record* as a version-1 logstash JSON event.

        Bug fix: the version-1 schema keeps extra fields at the top level of
        the event; the previous code called ``message['@fields'].update(...)``
        on a key this schema never defines, raising KeyError on every call.
        """
        # Create message dict
        message = {
            '@timestamp': format_timestamp(record.created),
            '@version': '1',
            'message': record.getMessage(),
            'host': self.host,
            'path': record.pathname,
            'tags': self.tags,
            'type': self.message_type,
            # Extra Fields
            'level': record.levelname,
            'logger_name': record.name,
        }
        # Add extra fields at the top level (the version-1 schema is flat)
        message.update(get_extra_fields(record))
        # Add process, thread info
        message.update(get_process_info(record))
        # If exception, add debug info
        if record.exc_info:
            message.update(get_debug_information(record))
        return json.dumps(message)
pranavgupta1234/pielogstash | examples/tcp_example.py | <reponame>pranavgupta1234/pielogstash
import sys
# Make the in-repo logstashpy package importable when running the example
# from its own directory.
sys.path.insert(0,'../..')
import logging
from logstashpy.handlers.tcp_handler import TCPLogstashHandler
# Local Logstash server
host = 'localhost'
# Wire the TCP handler into a module-level logger at INFO level.
test_logger = logging.getLogger(__name__)
test_logger.setLevel(logging.INFO)
handler = TCPLogstashHandler(host, 5959)
test_logger.addHandler(handler)
# Emit one message per level to exercise the handler.
test_logger.error('test logstashpy error message.')
test_logger.info('test logstashpy info message.')
test_logger.warning('test logstashpy warning message.')
# add extra field to logstashpy message
extra = {
    'test_string': 'cool!',
    'test_boolean': True,
    'test_dict': {'a': 1, 'b': 'c'},
    'test_float': 1.23,
    'test_integer': 123,
    'test_list': [1, 2, '3'],
}
test_logger.info('test extra fields', extra=extra)
pranavgupta1234/pielogstash | logstashpy/formatters/ecs_formatter.py | from ecs_logging import StdlibFormatter, StructlogFormatter
from ecs_logging._utils import normalize_dict
'''
To potentially override the behaviour of ECS formatter
These ECS Formatters do not return string for logrecord
but a dict instead
'''
class ECSStdlibFormatter(StdlibFormatter):
    """StdlibFormatter variant that returns the ECS dict, not a string."""

    def format(self, record):
        # Skip the parent's serialization step and hand back the ECS-shaped
        # dict directly.
        return self.format_to_ecs(record)
class ECSStructlogFormatter(StructlogFormatter):
    """StructlogFormatter variant whose processor output is an ECS dict."""

    def __call__(self, _, name, event_dict):
        # Normalize nested keys, record the log level, then shape into ECS.
        normalized = normalize_dict(event_dict)
        normalized.setdefault("log", {}).setdefault("level", name)
        return self.format_to_ecs(normalized)
pranavgupta1234/pielogstash | logstashpy/handlers/tcp_handler.py | <filename>logstashpy/handlers/tcp_handler.py
from logging import LogRecord, Formatter
from logging.handlers import SocketHandler
from logstashpy.serialization import Serializer
from logstashpy.formatters.base_formatter import DefaultLogstashFormatter
from logstashpy.formatters.ecs_formatter import ECSStdlibFormatter
from ecs_logging import StdlibFormatter
import logging
import ssl
logger = logging.getLogger(__name__)
class TCPLogstashHandler(SocketHandler):
    '''
    Logging handler that ships formatted records to a Logstash TCP input,
    optionally wrapped in SSL/TLS.

    :param host: Logstash server host name or address.
    :param port: Logstash TCP input port (default is 5959).
    :param serializer: wire-format name understood by Serializer.serialize
        (default is 'pickle').
    :param message_type: event type label (default is 'logstashpy').
    :param tags: list of tags for a logger (default is None).
    :param fqdn: indicates whether to report the fully qualified domain name
        instead of the short host name (default is False).
    :param ssl: Should SSL be enabled for the connection? Default is True.
    :param ssl_verify: Should the server's SSL certificate be verified?
    :param keyfile: The path to client side SSL key file (default is None).
    :param certfile: The path to client side SSL certificate file (default is None).
    :param ca_certs: The path to the file containing recognised CA certificates. System wide CA certs are used if omitted.
    '''
    def __init__(self, host, port=5959, serializer='pickle', message_type='logstashpy', tags=None,
                 fqdn=False, ssl = True, ssl_verify=False, keyfile=None, certfile=None, ca_certs=None):
        super(TCPLogstashHandler, self).__init__(host, port)
        self._host = host
        self._port = port
        self._data_serialization = serializer
        self._message_type = message_type
        self.tags = tags
        self._fqdn = fqdn
        self._ssl = ssl
        # NOTE(review): the formatter is hard-wired to ecs_logging's
        # StdlibFormatter; message_type/tags/fqdn are stored but never passed
        # to it -- confirm whether they are still meant to take effect.
        self.formatter = StdlibFormatter()
        self._ssl_verify = ssl_verify
        self._keyfile = keyfile
        self._certfile = certfile
        self._ca_certs = ca_certs

    def makePickle(self, record: LogRecord) -> bytes:
        # Despite the inherited name, this does not pickle: it formats the
        # record and hands the string to the configured serializer.
        return Serializer.serialize(self.formatter.format(record), self._data_serialization)

    def makeSocket(self, timeout=1):
        # Obtain the plain TCP socket from SocketHandler, then wrap it in
        # TLS when SSL is enabled.
        s = super(TCPLogstashHandler, self).makeSocket()
        if not self._ssl:
            return s
        context = ssl.create_default_context(cafile=self._ca_certs)
        context.verify_mode = ssl.CERT_REQUIRED
        if not self._ssl_verify:
            if self._ca_certs:
                # A CA bundle was supplied but verification was not
                # requested: accept a certificate if presented, without
                # requiring one.
                context.verify_mode = ssl.CERT_OPTIONAL
            else:
                # check_hostname must be disabled before downgrading to
                # CERT_NONE, otherwise ssl raises ValueError.
                context.check_hostname = False
                context.verify_mode = ssl.CERT_NONE
        # Client side certificate auth.
        if self._certfile and self._keyfile:
            context.load_cert_chain(self._certfile, keyfile=self._keyfile)
        return context.wrap_socket(s, server_hostname=self._host)
|
pranavgupta1234/pielogstash | setup.py | <filename>setup.py
import setuptools
# The long description shown on PyPI is the project README, verbatim.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name='logstashpy',
    version='0.0.1',
    # NOTE(review): author/author_email appear to be anonymization
    # placeholders from the data dump -- restore before publishing.
    author="<NAME>",
    author_email="<EMAIL>",
    description="python logging handlers to send data to Logstash server with SSL/TLS support",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/pranavgupta1234/logstashpy",
    license="Apache Software License",
    packages=["logstashpy"],
    classifiers=[
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ],
)
pranavgupta1234/pielogstash | logstashpy/serialization.py | <reponame>pranavgupta1234/pielogstash
import logging
import msgpack
import json
# Wire formats accepted by get_serializer(). Note that 'pickle' is a
# historical name: it actually maps to JSON serialization below.
supported_serializers = [
    'pickle',
    'msgpack'
]
def append_newline(data):
    """Return *data* (a str) with a trailing newline appended."""
    return "".join((data, "\n"))
def append_newline_byte(data):
    """Return *data* (bytes) with a trailing newline byte appended."""
    return b"".join((data, b"\n"))
def get_serializer(format):
    """Return the serializer callable registered for *format*.

    :param format: one of 'pickle' (historical name -- actually produces
        JSON bytes) or 'msgpack'.
    :raises ValueError: for any unsupported format name.
    """
    if format == 'pickle':
        return json_serializer
    elif format == 'msgpack':
        return msgpack_serializer
    else:
        # Improved error: name the offending value in a readable message
        # instead of raising ValueError(format) with no context.
        raise ValueError("Unsupported serialization format: {!r}".format(format))
'''
Received formatted string from formatter.format
Appending newline is important for TCP input plugin of Logstash
'''
def json_serializer(data):
    """Encode *data* (an already-JSON-formatted str) to UTF-8 bytes with a
    trailing newline.

    The newline terminator is required by Logstash's TCP input plugin.
    """
    return (data + "\n").encode('utf-8')
def msgpack_serializer(data):
    """Decode the JSON string *data* and re-encode the payload as msgpack."""
    return msgpack.packb(json.loads(data))
class Serializer():
    """Facade that dispatches to the serializer registered for a format."""

    @classmethod
    def serialize(cls, data, format='pickle'):
        # Look up the callable for the requested wire format and apply it.
        return get_serializer(format)(data)
|
PanyiDong/AutoML | tests/test_imputer/test_imputer.py | <reponame>PanyiDong/AutoML<filename>tests/test_imputer/test_imputer.py
"""
File: test_imputer.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_imputer/test_imputer.py
File Created: Saturday, 9th April 2022 10:13:00 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 16th April 2022 11:46:30 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
# from My_AutoML._imputation import imputers
# data_X = pd.DataFrame(
# {
# "col_1": [np.nan, 8, 6, 7, 9, 9, 8, 8, 7, 5],
# "col_2": [9, 7, 2, 1, 6, 8, 8, 9, 3, 6],
# }
# )
# class TestImputer(unittest.TestCase):
# def test_Imputer(self):
# self.method_dict = imputers
# self.method_names = list(self.method_dict.keys())
# self.method_objects = list(self.method_dict.values())
# for method_name, method_object in zip(self.method_names, self.method_objects):
# if method_name == "KNNImputer":
# mol = method_object(n_neighbors=1)
# else:
# mol = method_object()
# # mol.fill(data_X)
# mol._fitted = True
# # check whether the method is fitted
# self.assertEqual(
# mol._fitted,
# True,
# "The method {} is not correctly fitted.".format(method_name),
# )
# print(
# "The method {} is correctly fitted.".format(method_name),
# )
def test_imputer():
    # Smoke-test every registered imputer on a real dataset: each must mark
    # itself fitted and leave no missing values behind.
    from My_AutoML._imputation import imputers
    from My_AutoML._utils import formatting

    for method_name, method_object in zip(imputers.keys(), imputers.values()):
        imputer = method_object()
        # NOTE(review): the dataset is only (re)loaded for non-KNN methods.
        # If "KNNImputer" happens to iterate first, `data` is unbound here;
        # if it iterates later, it receives the *already imputed* frame from
        # the previous iteration. Presumably intentional to keep KNN fast on
        # pre-encoded data -- confirm.
        if method_name != "KNNImputer":
            data = pd.read_csv("Appendix/healthcare-dataset-stroke-data.csv")
            encoder = formatting()
            encoder.fit(data)

        data = imputer.fill(data)

        assert (
            imputer._fitted == True
        ), "The method {} is not correctly fitted.".format(method_name)
        assert (
            data.isnull().any().any() == False
        ), "The imputation method {} fail to impute all missings.".format(
            method_name
        )
def test_DummyImputer():
    """DummyImputer must fit and fill every missing cell using the response."""
    from My_AutoML._imputation import DummyImputer

    data = pd.DataFrame(
        np.random.randint(0, 50, size=(100, 5)),
        columns=["col_1", "col_2", "col_3", "col_4", "col_5"],
    )
    # knock out roughly 10% of col_3
    for row_idx in data.index:
        if np.random.rand() < 0.1:
            data.loc[row_idx, "col_3"] = np.nan
    data["col_6"] = np.random.randint(0, 10, size=100)

    # last column is the response, the rest are features
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    imputer = DummyImputer(method="median")
    filled_data = imputer.fill(X, y)

    assert imputer._fitted == True, "The method DummyImputer is not correctly fitted."
    assert (
        filled_data.isnull().any().any() == False
    ), "The imputation method DummyImputer fail to impute all missings."
def test_kNNImputer():
    """KNNImputer must fit and fill every missing cell."""
    from My_AutoML._imputation import KNNImputer

    data = pd.DataFrame(
        np.random.randint(0, 50, size=(100, 5)),
        columns=["col_1", "col_2", "col_3", "col_4", "col_5"],
    )
    # knock out roughly 10% of col_3
    for row_idx in data.index:
        if np.random.rand() < 0.1:
            data.loc[row_idx, "col_3"] = np.nan

    imputer = KNNImputer(n_neighbors=3, method="median")
    filled_data = imputer.fill(data)

    assert imputer._fitted == True, "The method KNNImputer is not correctly fitted."
    assert (
        filled_data.isnull().any().any() == False
    ), "The imputation method KNNImputer fail to impute all missings."
# def test_imputer_AAI_kNN():
# from My_AutoML._imputation._clustering import AAI_kNN
# # test AAI_kNN
# # generate missing data
# data = pd.DataFrame(
# np.random.randint(0, 100, size=(1000, 5)),
# columns=["col_" + str(i) for i in range(1, 6)],
# )
# for _column in data.columns:
# for _index in data.index:
# if np.random.rand() < 0.1:
# data.loc[_index, _column] = np.nan
# imputer = AAI_kNN(similarity="PCC")
# fill_data = imputer.fill(data)
# assert imputer._fitted == True, "The method {} is not correctly fitted.".format(
# "AAI_kNN"
# )
# assert (
# fill_data.isnull().any().any() == False
# ), "The imputation method {} fail to impute all missings.".format("AAI_kNN")
# imputer = AAI_kNN(similarity="COS")
# fill_data = imputer.fill(data)
# assert imputer._fitted == True, "The method {} is not correctly fitted.".format(
# "AAI_kNN"
# )
# assert (
# fill_data.isnull().any().any() == False
# ), "The imputation method {} fail to impute all missings.".format("AAI_kNN")
# def test_imputer_CMI():
# from My_AutoML._imputation._clustering import CMI
# # test CMI
# # generate missing data
# data = pd.DataFrame(
# np.random.randint(0, 100, size=(1000, 5)),
# columns=["col_" + str(i) for i in range(1, 6)],
# )
# for _column in data.columns:
# for _index in data.index:
# if np.random.rand() < 0.1:
# data.loc[_index, _column] = np.nan
# imputer = CMI()
# fill_data = imputer.fill(data)
# assert imputer._fitted == True, "The method {} is not correctly fitted.".format(
# "CMI"
# )
# assert (
# fill_data.isnull().any().any() == False
# ), "The imputation method {} fail to impute all missings.".format("CMI")
# def test_imputer_k_Prototype_NN():
# from My_AutoML._imputation._clustering import k_Prototype_NN
# # test k_Prototype_NN
# # generate missing data
# data = pd.DataFrame(
# np.random.randint(0, 100, size=(1000, 5)),
# columns=["col_" + str(i) for i in range(1, 6)],
# )
# for _column in data.columns:
# for _index in data.index:
# if np.random.rand() < 0.1:
# data.loc[_index, _column] = np.nan
# imputer = k_Prototype_NN()
# fill_data = imputer.fill(data)
# assert imputer._fitted == True, "The method {} is not correctly fitted.".format(
# "k_Prototype_NN"
# )
# assert (
# fill_data.isnull().any().any() == False
# ), "The imputation method {} fail to impute all missings.".format("k_Prototype_NN")
|
PanyiDong/AutoML | My_AutoML/_utils/_optimize.py | <reponame>PanyiDong/AutoML<filename>My_AutoML/_utils/_optimize.py
"""
File: _optimize.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_utils/_optimize.py
File Created: Friday, 8th April 2022 11:55:13 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 10:33:14 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import warnings
from inspect import isclass
import copy
import time
from collections import defaultdict, deque
import numpy as np
import pandas as pd
from ray import tune
from ray.tune import Stopper
import importlib
from typing import Callable
# from wrapt_timeout_decorator import *
from My_AutoML._utils._base import (
has_method,
)
# create hyperparameter space using ray.tune.choice
# the pipeline of AutoClassifier is [encoder, imputer, scaling, balancing, feature_selection, model]
# only chosen ones will be added to hyperparameter space
def _hyperparameter_space_for(
    method_names, hyperparameter_list, key_prefix, component_label, check_label
):
    """Build the tune.choice space for one pipeline component.

    For every selected method name, find its hyperparameter dict in the
    default space (identified by the key containing *key_prefix*, e.g.
    "encoder_0"), deep-copy it so the shared defaults are never mutated, and
    wrap the method name in ``tune.choice`` so ray.tune samples it.

    :param method_names: iterable of selected method names for the component.
    :param hyperparameter_list: list of default hyperparameter dicts.
    :param key_prefix: substring identifying the method-name key in each dict.
    :param component_label: label used in the "No ... hyperparameters" error.
    :param check_label: label used in the "Please check your ..." error.
    :raises ValueError: when no hyperparameters match any selected method.
    :return: a ``tune.choice`` over the matching hyperparameter dicts.
    """
    selected = []
    for method in [*method_names]:
        for item in hyperparameter_list:
            # locate the key that names the method (e.g. "encoder_0")
            method_key = None
            for key in item.keys():
                if key_prefix in key:
                    method_key = key
                    break
            # Robustness fix: skip malformed entries with no matching key
            # (previously this could reuse a stale key from a prior item).
            if method_key is None:
                continue
            if item[method_key] == method:
                entry = copy.deepcopy(item)
                entry[method_key] = tune.choice([entry[method_key]])
                selected.append(entry)
                break
    if len(selected) == 0:
        raise ValueError(
            "No {} hyperparameters are found. Please check your {}.".format(
                component_label, check_label
            )
        )
    return tune.choice(selected)


# create hyperparameter space using ray.tune.choice; only chosen methods are
# added to the space. The six pipeline components all follow the same
# match-and-wrap procedure, factored into _hyperparameter_space_for above.
def _get_hyperparameter_space(
    X,
    encoders_hyperparameters,
    encoder,
    imputers_hyperparameters,
    imputer,
    balancings_hyperparameters,
    balancing,
    scalings_hyperparameters,
    scaling,
    feature_selection_hyperparameters,
    feature_selection,
    models_hyperparameters,
    models,
    task_mode,
):
    """Assemble the full ray.tune pipeline search space.

    The pipeline is [encoder, imputer, balancing, scaling, feature_selection,
    model]; one method/hyperparameter setting is sampled per component.

    :param X: the training DataFrame (used only to detect missing values).
    :param task_mode: "classification" or "regression"-style suffix for the
        task_type entry -- exact values defined by the caller.
    :return: dict mapping component names to tune.choice spaces.
    """
    # encoding space
    _encoding_hyperparameter = _hyperparameter_space_for(
        encoder, encoders_hyperparameters, "encoder_", "encoding", "encoders"
    )

    # imputation space: if the data has no missing values, imputation is a
    # fixed no-op instead of a searchable component
    if not X.isnull().values.any():
        _imputer_hyperparameter = tune.choice(
            [{"imputer_0": tune.choice(["no_processing"])}]
        )
    else:
        _imputer_hyperparameter = _hyperparameter_space_for(
            imputer, imputers_hyperparameters, "imputer_", "imputation", "imputers"
        )

    # balancing space
    _balancing_hyperparameter = _hyperparameter_space_for(
        balancing, balancings_hyperparameters, "balancing_", "balancing", "balancings"
    )

    # scaling space
    _scaling_hyperparameter = _hyperparameter_space_for(
        scaling, scalings_hyperparameters, "scaling_", "scaling", "scalings"
    )

    # feature selection space
    _feature_selection_hyperparameter = _hyperparameter_space_for(
        feature_selection,
        feature_selection_hyperparameters,
        "feature_selection_",
        "feature selection",
        "feature selections",
    )

    # model selection and hyperparameter optimization space
    _model_hyperparameter = _hyperparameter_space_for(
        models, models_hyperparameters, "model_", "model", "models"
    )

    # the pipeline search space
    # select one of the method/hyperparameter setting from each part
    return {
        "task_type": "tabular_" + task_mode,
        "encoder": _encoding_hyperparameter,
        "imputer": _imputer_hyperparameter,
        "balancing": _balancing_hyperparameter,
        "scaling": _scaling_hyperparameter,
        "feature_selection": _feature_selection_hyperparameter,
        "model": _model_hyperparameter,
    }
# get the hyperparameter optimization algorithm based on string input
def get_algo(search_algo):
    """Map a search-algorithm name to the corresponding ray.tune searcher class.

    Parameters
    ----------
    search_algo : str or Callable
        Name of a supported searcher ("RandomSearch", "GridSearch",
        "AxSearch", "BlendSearch", "CFO", "HEBO", "HyperOpt", "Nevergrad",
        "Repeater", "ConcurrencyLimiter"), or a custom callable search
        algorithm, which is returned unchanged.

    Returns
    -------
    The (uninstantiated) searcher class, or ``search_algo`` itself when a
    callable is passed.

    Raises
    ------
    ImportError
        If the optional dependency backing the requested searcher is missing.
    TypeError
        If ``search_algo`` is neither a recognized name nor a callable.
    """
    # NOTE: BayesOptSearch, BOHB, DragonflySearch, Optuna, SigOpt,
    # Scikit-Optimize and ZOOpt are intentionally not wired up yet (e.g. the
    # default hyperparameter space cannot be easily converted for Optuna).
    if search_algo == "RandomSearch" or search_algo == "GridSearch":
        # Random Search and Grid Search share the same generator
        from ray.tune.suggest.basic_variant import BasicVariantGenerator

        algo = BasicVariantGenerator
    elif search_algo == "AxSearch":
        # check whether Ax and sqlalchemy are installed
        Ax_spec = importlib.util.find_spec("ax")
        sqlalchemy_spec = importlib.util.find_spec("sqlalchemy")
        if Ax_spec is None or sqlalchemy_spec is None:
            raise ImportError(
                "Ax or sqlalchemy not installed. Please install these packages to use AxSearch. \
                Command to install: pip install ax-platform sqlalchemy"
            )
        # Ax Search
        from ray.tune.suggest.ax import AxSearch

        algo = AxSearch
    elif search_algo == "BlendSearch":
        # check whether flaml is installed
        flaml_spec = importlib.util.find_spec("flaml")
        if flaml_spec is None:
            raise ImportError(
                "flaml not installed. Please install it first to use BlendSearch. \
                Command to install: pip install 'flaml[blendsearch]'"
            )
        # Blend Search
        from ray.tune.suggest.flaml import BlendSearch

        algo = BlendSearch
    elif search_algo == "CFO":
        # check whether flaml is installed
        flaml_spec = importlib.util.find_spec("flaml")
        if flaml_spec is None:
            # fixed: message previously referred to BlendSearch (copy-paste)
            raise ImportError(
                "flaml not installed. Please install it first to use CFO. \
                Command to install: pip install 'flaml[blendsearch]'"
            )
        # Cost-Frugal hyperparameter Optimization (CFO)
        from ray.tune.suggest.flaml import CFO

        algo = CFO
    elif search_algo == "HEBO":
        # check whether HEBO is installed
        HEBO_spec = importlib.util.find_spec("HEBO")
        if HEBO_spec is None:
            raise ImportError(
                "HEBO not installed. Please install it first to use HEBO. \
                Command to install: pip install 'HEBO>=0.2.0'"
            )
        # Heteroscedastic Evolutionary Bayesian Optimization/HEBO
        from ray.tune.suggest.hebo import HEBOSearch

        algo = HEBOSearch
    elif search_algo == "HyperOpt":
        # check whether hyperopt is installed
        hyperopt_spec = importlib.util.find_spec("hyperopt")
        if hyperopt_spec is None:
            raise ImportError(
                "hyperopt not installed. Please install it first to use HyperOpt. \
                Command to install: pip install -U hyperopt"
            )
        # HyperOpt Search
        from ray.tune.suggest.hyperopt import HyperOptSearch

        algo = HyperOptSearch
    elif search_algo == "Nevergrad":
        # check whether nevergrad is installed
        nevergrad_spec = importlib.util.find_spec("nevergrad")
        if nevergrad_spec is None:
            raise ImportError(
                "nevergrad not installed. Please install it first to use Nevergrad. \
                Command to install: pip install nevergrad"
            )
        # Nevergrad Search
        from ray.tune.suggest.nevergrad import NevergradSearch

        algo = NevergradSearch
    elif search_algo == "Repeater":
        # Repeated Evaluations
        from ray.tune.suggest import Repeater

        algo = Repeater
    elif search_algo == "ConcurrencyLimiter":
        # ConcurrencyLimiter
        from ray.tune.suggest import ConcurrencyLimiter

        algo = ConcurrencyLimiter
    else:
        # if none above, assume is a callable custom algorithm
        if isinstance(search_algo, Callable):
            algo = search_algo
        # if not callable, raise error
        else:
            raise TypeError(
                "Algorithm {} is not supported. Please use one of the supported algorithms.".format(
                    search_algo
                )
            )

    return algo
# get search scheduler based on string input
def get_scheduler(search_scheduler):
    """Map a trial-scheduler name to the corresponding ray.tune scheduler class.

    Parameters
    ----------
    search_scheduler : str or Callable
        One of "FIFOScheduler", "ASHAScheduler", "HyperBandScheduler",
        "MedianStoppingRule", "PopulationBasedTraining",
        "PopulationBasedTrainingReplay", "PB2", "HyperBandForBOHB",
        or a custom callable scheduler, which is returned unchanged.

    Returns
    -------
    The (uninstantiated) scheduler class, or ``search_scheduler`` itself when
    a callable is passed.

    Raises
    ------
    ImportError
        If PB2 is requested but GPy is not installed.
    TypeError
        If ``search_scheduler`` is neither a recognized name nor a callable.
    """
    if search_scheduler == "FIFOScheduler":
        from ray.tune.schedulers import FIFOScheduler

        scheduler = FIFOScheduler
    elif search_scheduler == "ASHAScheduler":
        from ray.tune.schedulers import ASHAScheduler

        scheduler = ASHAScheduler
    elif search_scheduler == "HyperBandScheduler":
        from ray.tune.schedulers import HyperBandScheduler

        scheduler = HyperBandScheduler
    elif search_scheduler == "MedianStoppingRule":
        from ray.tune.schedulers import MedianStoppingRule

        scheduler = MedianStoppingRule
    elif search_scheduler == "PopulationBasedTraining":
        from ray.tune.schedulers import PopulationBasedTraining

        scheduler = PopulationBasedTraining
    elif search_scheduler == "PopulationBasedTrainingReplay":
        from ray.tune.schedulers import PopulationBasedTrainingReplay

        scheduler = PopulationBasedTrainingReplay
    elif search_scheduler == "PB2":
        # check whether GPy is installed (PB2 depends on it)
        Gpy_spec = importlib.util.find_spec("GPy")
        if Gpy_spec is None:
            # fixed: message previously named a non-existent "GPy2" package
            raise ImportError(
                "GPy not installed. Please install it first to use PB2. \
                Command to install: pip install GPy"
            )
        from ray.tune.schedulers.pb2 import PB2

        scheduler = PB2
    elif search_scheduler == "HyperBandForBOHB":
        from ray.tune.schedulers import HyperBandForBOHB

        scheduler = HyperBandForBOHB
    else:
        # if callable, use it as scheduler
        if isinstance(search_scheduler, Callable):
            scheduler = search_scheduler
        else:
            raise TypeError(
                "Scheduler {} is not supported. Please use one of the supported schedulers.".format(
                    search_scheduler
                )
            )

    return scheduler
# get progress reporter based on string input
def get_progress_reporter(
    progress_reporter,
    max_evals,
    max_error,
):
    """Build a ray.tune progress reporter and register the metric columns
    used for visualization.

    Parameters
    ----------
    progress_reporter : str or reporter object
        "CLIReporter", "JupyterNotebookReporter", or an already-constructed
        reporter exposing ``add_metric_column``.
    max_evals : int
        Maximum number of progress rows displayed.
    max_error : int
        Maximum number of error rows displayed.

    Returns
    -------
    The reporter with "fitted_model", "training_status" and "loss" metric
    columns registered.

    Raises
    ------
    TypeError
        If an unrecognized reporter name is given.
    """
    if progress_reporter == "CLIReporter":
        from ray.tune.progress_reporter import CLIReporter

        progress_reporter = CLIReporter(
            parameter_columns=["task_type"],
            max_progress_rows=max_evals,
            max_error_rows=max_error,
            sort_by_metric=True,
        )
    elif progress_reporter == "JupyterNotebookReporter":
        from ray.tune.progress_reporter import JupyterNotebookReporter

        progress_reporter = JupyterNotebookReporter(
            overwrite=True,
            parameter_columns=["task_type"],
            max_progress_rows=max_evals,
            max_error_rows=max_error,
            sort_by_metric=True,
        )
    elif isinstance(progress_reporter, str):
        # fixed: an unknown name previously fell through and failed later
        # with an opaque AttributeError on str.add_metric_column
        raise TypeError(
            "Progress reporter {} is not supported. Please use 'CLIReporter' or 'JupyterNotebookReporter'.".format(
                progress_reporter
            )
        )

    # add metrics for visualization (also applied to custom reporter objects)
    progress_reporter.add_metric_column("fitted_model")
    progress_reporter.add_metric_column("training_status")
    progress_reporter.add_metric_column("loss")

    return progress_reporter
def get_logger(logger):
    """Build the list of ray.tune logger callbacks from a list of names.

    Parameters
    ----------
    logger : list of str or None
        Supported names: "Logger", "TBX", "JSON", "CSV", "MLflow", "Wandb".
        ``None`` defaults to the basic ``LoggerCallback``.

    Returns
    -------
    list of instantiated logger callbacks.

    Raises
    ------
    TypeError
        If ``logger`` is neither a list nor None.
    ValueError
        If the list contains an unrecognized logger name.
    ImportError
        If the optional dependency backing a logger is missing.
    """
    if not isinstance(logger, list) and logger is not None:
        raise TypeError("Expect a list of string or None, get {}.".format(logger))

    loggers = []

    if logger is None:
        from ray.tune.logger import LoggerCallback

        return [LoggerCallback()]

    for log in logger:
        if "Logger" == log:
            from ray.tune.logger import LoggerCallback

            loggers.append(LoggerCallback())
        elif "TBX" == log:
            # check whether TensorBoardX is installed
            TensorBoardX_spec = importlib.util.find_spec("tensorboardX")
            if TensorBoardX_spec is None:
                raise ImportError(
                    "TensorBoardX not installed. Please install it first to use TensorBoardX. \
                Command to install: pip install tensorboardX"
                )
            from ray.tune.logger import TBXLoggerCallback

            loggers.append(TBXLoggerCallback())
        elif "JSON" == log:
            from ray.tune.logger import JsonLoggerCallback

            loggers.append(JsonLoggerCallback())
        elif "CSV" == log:
            from ray.tune.logger import CSVLoggerCallback

            loggers.append(CSVLoggerCallback())
        elif "MLflow" == log:
            # check whether mlflow is installed
            mlflow_spec = importlib.util.find_spec("mlflow")
            if mlflow_spec is None:
                raise ImportError(
                    "mlflow not installed. Please install it first to use mlflow. \
                Command to install: pip install mlflow"
                )
            from ray.tune.integration.mlflow import MLflowLoggerCallback

            loggers.append(MLflowLoggerCallback())
        elif "Wandb" == log:
            # check whether wandb is installed
            wandb_spec = importlib.util.find_spec("wandb")
            if wandb_spec is None:
                raise ImportError(
                    "wandb not installed. Please install it first to use wandb. \
                Command to install: pip install wandb"
                )
            from ray.tune.integration.wandb import WandbLoggerCallback

            loggers.append(WandbLoggerCallback())
        else:
            # fixed: unknown names were previously silently ignored
            raise ValueError(
                "Logger {} is not supported. Please use one of the supported loggers.".format(
                    log
                )
            )

    return loggers
class TimePlateauStopper(Stopper):
    """
    Combination of TimeoutStopper and TrialPlateauStopper

    ``stop_all`` ends the whole experiment once ``timeout`` seconds have
    elapsed since construction; ``__call__`` stops an individual trial once
    its recent ``metric`` results have plateaued (low standard deviation),
    optionally gated on the metric having crossed ``metric_threshold``.

    Parameters
    ----------
    timeout: experiment-wide time budget in seconds, default = 360
    metric: name of the result entry to monitor, default = "loss"
    std: plateau threshold on the stdev of recent results, default = 0.01
    num_results: window size of recent results kept per trial, default = 4
    grace_period: minimum number of results before a trial may be stopped,
        default = 4
    metric_threshold: optional value the metric must cross before a trial
        may be stopped, default = None
    mode: "min" or "max", direction of improvement used with
        metric_threshold, default = "min"
    """

    def __init__(
        self,
        timeout=360,
        metric="loss",
        std=0.01,
        num_results=4,
        grace_period=4,
        metric_threshold=None,
        mode="min",
    ):
        self._start = time.time()  # wall-clock start, consulted by stop_all
        self._deadline = timeout
        self._metric = metric
        self._mode = mode
        self._std = std
        self._num_results = num_results
        self._grace_period = grace_period
        self._metric_threshold = metric_threshold

        # per-trial iteration counters and rolling windows of the last
        # ``num_results`` metric values (deque drops the oldest automatically)
        self._iter = defaultdict(lambda: 0)
        self._trial_results = defaultdict(lambda: deque(maxlen=self._num_results))

    def __call__(self, trial_id, result):
        # Return True to stop this trial, False to keep it running.
        metric_result = result.get(self._metric)  # get metric from result
        self._trial_results[trial_id].append(metric_result)
        self._iter[trial_id] += 1  # record trial results and iteration

        # If still in grace period, do not stop yet
        if self._iter[trial_id] < self._grace_period:
            return False

        # If not enough results yet, do not stop yet
        if len(self._trial_results[trial_id]) < self._num_results:
            return False

        # if threshold specified, use threshold to stop
        # If metric threshold value not reached, do not stop yet
        if self._metric_threshold is not None:
            if self._mode == "min" and metric_result > self._metric_threshold:
                return False
            elif self._mode == "max" and metric_result < self._metric_threshold:
                return False

        # if threshold not specified, use std to stop
        # Calculate stdev of last `num_results` results
        try:
            current_std = np.std(self._trial_results[trial_id])
        except Exception:
            # non-numeric results (e.g. None) make np.std raise; treat as
            # "not plateaued" so the trial keeps running
            current_std = float("inf")

        # If stdev is lower than threshold, stop early.
        return current_std < self._std

    def stop_all(self):
        # Stop the entire experiment once the time budget is exhausted.
        return time.time() - self._start > self._deadline
# get estimator based on string or class
def get_estimator(estimator_str):
    """Map an estimator name (or an estimator class) to an estimator instance.

    Parameters
    ----------
    estimator_str : str or class
        One of "Lasso", "Ridge", "ExtraTreeRegressor",
        "RandomForestRegressor", "LogisticRegression",
        "ExtraTreeClassifier", "RandomForestClassifier", or a class
        implementing ``fit`` and ``predict``.

    Returns
    -------
    A new instance of the requested estimator.

    Raises
    ------
    ValueError
        If a class lacking fit/predict methods is passed.
    AttributeError
        If ``estimator_str`` is neither a recognized name nor a class.
    """
    if estimator_str == "Lasso":
        from sklearn.linear_model import Lasso

        return Lasso()
    elif estimator_str == "Ridge":
        from sklearn.linear_model import Ridge

        return Ridge()
    elif estimator_str == "ExtraTreeRegressor":
        from sklearn.tree import ExtraTreeRegressor

        return ExtraTreeRegressor()
    elif estimator_str == "RandomForestRegressor":
        from sklearn.ensemble import RandomForestRegressor

        return RandomForestRegressor()
    elif estimator_str == "LogisticRegression":
        from sklearn.linear_model import LogisticRegression

        return LogisticRegression()
    elif estimator_str == "ExtraTreeClassifier":
        from sklearn.tree import ExtraTreeClassifier

        return ExtraTreeClassifier()
    elif estimator_str == "RandomForestClassifier":
        from sklearn.ensemble import RandomForestClassifier

        return RandomForestClassifier()
    elif isclass(estimator_str):
        # fixed: was ``isclass(type(estimator_str))``, which is True for
        # every object and made the fallback error branch below unreachable
        # make sure the class has fit/predict methods
        if not has_method(estimator_str, "fit") or not has_method(
            estimator_str, "predict"
        ):
            raise ValueError("Estimator must have fit/predict methods!")
        return estimator_str()
    else:
        raise AttributeError("Unrecognized estimator!")
# get metrics based on string or class
# if not in min mode, call negative of the metric
def get_metrics(metric_str):
    """Resolve a metric identifier to a scoring callable.

    Accepts either one of the supported metric names or an already-callable
    metric, which is returned unchanged. Metrics are used in "min" mode, so
    for maximization scores a ``neg_*`` variant exists; requesting the raw
    score emits a warning pointing at the negated variant.

    Raises
    ------
    ValueError
        If ``metric_str`` is neither a supported name nor callable.
    """
    # a callable metric is passed through untouched
    if isinstance(metric_str, Callable):
        return metric_str

    if metric_str == "neg_accuracy":
        from My_AutoML._utils._stat import neg_accuracy

        return neg_accuracy
    if metric_str == "accuracy":
        from sklearn.metrics import accuracy_score

        warnings.warn(
            "accuracy_score is not for min mode, please use neg_accuracy instead."
        )
        return accuracy_score
    if metric_str == "neg_precision":
        from My_AutoML._utils._stat import neg_precision

        return neg_precision
    if metric_str == "precision":
        from sklearn.metrics import precision_score

        warnings.warn(
            "precision_score is not for min mode, please use neg_precision instead."
        )
        return precision_score
    if metric_str == "neg_auc":
        from My_AutoML._utils._stat import neg_auc

        return neg_auc
    if metric_str == "auc":
        from sklearn.metrics import roc_auc_score

        warnings.warn("roc_auc_score is not for min mode, please use neg_auc instead.")
        return roc_auc_score
    if metric_str == "neg_hinge":
        from My_AutoML._utils._stat import neg_hinge

        return neg_hinge
    if metric_str == "hinge":
        from sklearn.metrics import hinge_loss

        warnings.warn("hinge_loss is not for min mode, please use neg_hinge instead.")
        return hinge_loss
    if metric_str == "neg_f1":
        from My_AutoML._utils._stat import neg_f1

        return neg_f1
    if metric_str == "f1":
        from sklearn.metrics import f1_score

        warnings.warn("f1_score is not for min mode, please use neg_f1 instead.")
        return f1_score
    if metric_str == "MSE":
        from sklearn.metrics import mean_squared_error

        return mean_squared_error
    if metric_str == "MAE":
        from sklearn.metrics import mean_absolute_error

        return mean_absolute_error
    if metric_str == "MSLE":
        from sklearn.metrics import mean_squared_log_error

        return mean_squared_log_error
    if metric_str == "neg_R2":
        from My_AutoML._utils._stat import neg_R2

        return neg_R2
    if metric_str == "R2":
        from sklearn.metrics import r2_score

        warnings.warn("r2_score is not for min mode, please use neg_R2 instead.")
        return r2_score
    if metric_str == "MAX":
        from sklearn.metrics import max_error

        return max_error

    raise ValueError("Unrecognized criteria!")
|
PanyiDong/AutoML | My_AutoML/_imputation/_base.py | <gh_stars>1-10
"""
File: _base.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_imputation/_base.py
File Created: Tuesday, 5th April 2022 11:49:51 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 16th April 2022 8:33:31 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from My_AutoML._utils import nan_cov
class SimpleImputer:
    """
    Simple Imputer to fill nan values

    Parameters
    ----------
    method: the method to fill nan values, default = 'mean'
        supported methods ['mean', 'zero', 'median', 'most frequent', constant]
        'mean' : fill columns with nan values using mean of non nan values
        'zero': fill columns with nan values using 0
        'median': fill columns with nan values using median of non nan values
        'most frequent': fill columns with nan values using most frequent of non nan values
        constant: fill columns with nan values using predefined values
    """

    # strategies that derive the fill value from the column itself; any other
    # ``method`` value is used verbatim as a constant fill value
    _STRATEGIES = {
        "mean": lambda col: np.nanmean(col),
        "zero": lambda col: 0,
        "median": lambda col: np.nanmedian(col),
        "most frequent": lambda col: col.value_counts().index[0],
    }

    def __init__(self, method="mean"):
        self.method = method
        self._fitted = False  # whether the imputer has been fitted

    def fill(self, X):
        """Return a deep copy of ``X`` with nan entries in every column filled."""
        filled = X.copy(deep=True)
        if filled.isnull().values.any():
            for column in X.columns:
                if X[column].isnull().values.any():
                    filled[column] = self._fill(filled[column])
        self._fitted = True
        return filled

    def _fill(self, X):
        # Fill a single column according to the configured method.
        strategy = self._STRATEGIES.get(self.method)
        fill_value = strategy(X) if strategy is not None else self.method
        return X.fillna(fill_value)
class DummyImputer:
    """
    Create dummy variable for nan values and fill the original feature with 0

    The idea is that there are possibilities that the nan values are
    critically related to response; create a dummy variable to identify the
    relationship.

    Parameters
    ----------
    force: whether to force dummy coding for nan values, default = False
        if force == True, all nan values will create dummy variables,
        otherwise, only nan values that create impact on response (above
        ``threshold``) will create dummy variables

    threshold: threshold whether to create dummy variable, default = 0.1
        if the relative difference between the mean response of nan rows and
        of non-nan rows exceeds the threshold, a dummy column is created

    method: the method to fill nan values for columns not reaching threshold, default = 'mean'
        supported methods ['mean', 'zero', 'median', 'most frequent', constant]
        'mean' : fill columns with nan values using mean of non nan values
        'zero': fill columns with nan values using 0
        'median': fill columns with nan values using median of non nan values
        'most frequent': fill columns with nan values using most frequent of non nan values
        constant: fill columns with nan values using predefined values
    """

    def __init__(self, force=False, threshold=0.1, method="zero"):
        self.force = force
        self.threshold = threshold
        self.method = method

        self._fitted = False  # whether the imputer has been fitted

    def fill(self, X, y):
        """Return a deep copy of ``X`` with nan values dummy-coded or filled,
        using response ``y`` to decide which columns get a dummy."""
        _X = X.copy(deep=True)
        if _X.isnull().values.any():
            _X = self._fill(_X, y)
        self._fitted = True
        return _X

    def _fill(self, X, y):
        features = list(X.columns)

        for _column in features:
            if X[_column].isnull().values.any():
                if self.force:
                    # fixed: the documented ``force`` flag was previously
                    # ignored; when forced, always dummy-code (and skip the
                    # mean comparison, which could divide by zero)
                    _make_dummy = True
                else:
                    _mean_nan = y[X[_column].isnull()].mean()
                    _mean_non_nan = y[~X[_column].isnull()].mean()
                    _make_dummy = (
                        abs(_mean_nan / _mean_non_nan - 1) >= self.threshold
                    )

                if _make_dummy:
                    # dummy column marks originally-missing rows; the feature
                    # itself is zero-filled
                    X[_column + "_nan"] = X[_column].isnull().astype(int)
                    X[_column] = X[_column].fillna(0)
                else:
                    if self.method == "mean":
                        X[_column] = X[_column].fillna(np.nanmean(X[_column]))
                    elif self.method == "zero":
                        X[_column] = X[_column].fillna(0)
                    elif self.method == "median":
                        X[_column] = X[_column].fillna(np.nanmedian(X[_column]))
                    elif self.method == "most frequent":
                        X[_column] = X[_column].fillna(
                            X[_column].value_counts().index[0]
                        )
                    else:
                        # any other value is used as a constant fill
                        X[_column] = X[_column].fillna(self.method)

        return X
class JointImputer:
    """
    Impute the missing values assume a joint distribution, default as multivariate Gaussian distribution

    Each row with missing entries is filled with a draw from the conditional
    distribution of its missing coordinates given its observed coordinates,
    with mean/covariance estimated nan-aware from the full data.

    Parameters
    ----------
    kernel: assumed joint distribution, default = "normal"
        NOTE(review): only the normal kernel is implemented; ``kernel`` is
        stored but never consulted below — confirm intended use.
    """

    def __init__(self, kernel="normal"):
        self.kernel = kernel

        self._fitted = False  # whether the imputer has been fitted

    def fill(self, X):
        # Return a deep copy of X with missing rows imputed; the input is
        # left untouched.
        _X = X.copy(deep=True)
        if _X.isnull().values.any():
            _X = self._fill(_X)
        self._fitted = True
        return _X

    def _fill(self, X):
        # Impute row by row; each row containing at least one nan is
        # replaced by its conditionally-sampled version.
        rows = list(X.index)
        for _row in rows:
            if X.loc[_row, :].isnull().values.any():
                X.loc[_row, :] = self._fill_row(_row, X)

        return X

    def _fill_row(self, row_index, X):
        """
        for x = (x_{mis}, x_{obs})^{T} with \mu = (\mu_{mis}, \mu_{obs}).T and \Sigma = ((Sigma_{mis, mis},
        Sigma_{mis, obs}), (Sigma_{obs, Sigma}, Sigma_{obs, obs})),
        Conditional distribution x_{mis}|x_{obs} = a is N(\bar(\mu), \bar(\Sigma))
        where \bar(\mu) = \mu_{mis} + \Sigma_{mis, obs}\Sigma_{obs, obs}^{-1}(a - \mu_{obs})
        and \bar(\Sigma) = \Sigma_{mis, mis} - \Sigma_{mis, obs}\Sigma_{obs, obs}^{-1}\Sigma_{obs, mis}

        in coding, 1 = mis, 2 = obs for simpilicity
        """
        # positional indices of missing / observed columns in this row
        _mis_column = np.argwhere(X.loc[row_index, :].isnull().values).T[0]
        _obs_column = [i for i in range(len(list(X.columns)))]
        for item in _mis_column:
            _obs_column.remove(item)

        # nan-aware column means, shaped as column vectors
        _mu_1 = np.nanmean(X.iloc[:, _mis_column], axis=0).T.reshape(
            len(_mis_column), 1
        )
        _mu_2 = np.nanmean(X.iloc[:, _obs_column], axis=0).T.reshape(
            len(_obs_column), 1
        )

        # covariance blocks (nan-aware, via project helper nan_cov)
        _sigma_11 = nan_cov(X.iloc[:, _mis_column].values)
        _sigma_22 = nan_cov(X.iloc[:, _obs_column].values)
        _sigma_12 = nan_cov(
            X.iloc[:, _mis_column].values, y=X.iloc[:, _obs_column].values
        )
        _sigma_21 = nan_cov(
            X.iloc[:, _obs_column].values, y=X.iloc[:, _mis_column].values
        )

        # observed values of this row, as a column vector
        _a = X.loc[row_index, ~X.loc[row_index, :].isnull()].values.T.reshape(
            len(_obs_column), 1
        )

        # conditional mean and covariance per the formulas in the docstring
        _mu = _mu_1 + _sigma_12 @ np.linalg.inv(_sigma_22) @ (_a - _mu_2)
        # multivariate_normal only accept 1 dimension mean
        # NOTE(review): _mu[0] keeps only the first row of the conditional
        # mean — looks suspect when a row has more than one missing value;
        # confirm against the intended math.
        _mu = _mu[0]
        _sigma = _sigma_11 - _sigma_12 @ np.linalg.inv(_sigma_22) @ _sigma_21

        # draw the missing entries from the conditional normal
        X.loc[row_index, X.loc[row_index, :].isnull()] = np.random.multivariate_normal(
            mean=_mu, cov=_sigma, size=(X.loc[row_index, :].isnull().values.sum(), 1)
        )

        return X.loc[row_index, :]
|
PanyiDong/AutoML | My_AutoML/_imputation/_multiple.py | <reponame>PanyiDong/AutoML
"""
File: _multiple.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_imputation/_multiple.py
File Created: Tuesday, 5th April 2022 11:50:03 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 16th April 2022 8:42:22 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
import warnings
from My_AutoML._utils import random_index, random_list
from ._base import SimpleImputer
class ExpectationMaximization:
    """
    Use Expectation Maximization (EM) to impute missing data[1]

    [1] Impyute.imputation.cs.em

    Parameters
    ----------
    iterations: maximum number of iterations for single imputation, default = 50

    threshold: threshold to early stop iterations, default = 0.01
        only early stop when iterations < self.iterations and change in the imputation < self.threshold

    seed: random seed, default = 1
    """

    def __init__(self, iterations=50, threshold=0.01, seed=1):
        self.iterations = iterations
        self.threshold = threshold
        self.seed = seed

        self._fitted = False  # whether the imputer has been fitted

    def fill(self, X):
        """Return a deep copy of ``X`` with nan entries imputed via EM sampling."""
        # coerce hyperparameters in case they were supplied as strings
        self.iterations = int(self.iterations)
        self.threshold = float(self.threshold)

        # fixed: removed unused local ``n = _X.shape[0]``
        _X = X.copy(deep=True)
        if _X.isnull().values.any():
            _X = self._fill(_X)
        self._fitted = True
        return _X

    def _fill(self, X):
        features = list(X.columns)
        np.random.seed(self.seed)  # deterministic draws for a fixed seed

        _missing_feature = []  # features contains missing values
        _missing_vector = []  # vector with missing values, to mark the missing index

        # create _missing_table with _missing_feature
        # missing index will be 1, existed index will be 0
        for _column in features:
            if X[_column].isnull().values.any():
                _missing_feature.append(_column)
                _missing_vector.append(
                    X[_column].loc[X[_column].isnull()].index.astype(int)
                )

        # NOTE(review): the transpose assumes every missing column has the
        # same number of missing entries — TODO confirm for ragged inputs
        _missing_vector = np.array(_missing_vector).T
        self._missing_table = pd.DataFrame(_missing_vector, columns=_missing_feature)

        for _column in list(self._missing_table.columns):
            for _index in self._missing_table[_column]:
                X.loc[_index, _column] = self._EM_iter(X, _index, _column)

        return X

    def _EM_iter(self, X, index, column):
        """Repeatedly draw from N(mean, std) of the column until the draw
        stabilizes (relative change below ``threshold``) or the iteration
        budget is exhausted; return the final draw."""
        _mark = 1  # previous draw, seeds the relative-change computation
        for _ in range(self.iterations):
            _mu = np.nanmean(X.loc[:, column])
            _std = np.nanstd(X.loc[:, column])
            _tmp = np.random.normal(loc=_mu, scale=_std)
            _delta = np.abs(_tmp - _mark) / _mark  # relative change vs previous draw
            # NOTE(review): ``self.iterations > 10`` compares the configured
            # maximum (constant per instance), not the current iteration
            # count; the docstring suggests the current count was intended —
            # confirm before changing behavior
            if _delta < self.threshold and self.iterations > 10:
                return _tmp
            X.loc[index, column] = _tmp
            _mark = _tmp
        return _tmp
class KNNImputer:
    """
    Use KNN to impute the missing values, further update: use cross validation to select best k [1]

    [1] <NAME>. and <NAME>., 2012. MissForest—non-parametric missing value imputation
    for mixed-type data. Bioinformatics, 28(1), pp.112-118.

    Parameters
    ----------
    n_neighbors: list of k, default = None
        default will set to 1:10

    method: method to initially impute missing values, default = "mean"

    fold: cross validation number of folds, default = 10

    uni_class: unique class to be considered as categorical columns, default = 31

    seed: random seed, default = 1
    """

    def __init__(
        self,
        n_neighbors=None,
        method="mean",
        fold=10,
        uni_class=31,
        seed=1,
    ):
        self.n_neighbors = n_neighbors
        self.method = method
        self.fold = fold
        self.uni_class = uni_class
        self.seed = seed

        self._fitted = False  # whether the imputer has been fitted

    def fill(self, X):
        """Return a deep copy of ``X`` with nan values imputed by KNN regression.

        Raises
        ------
        ValueError
            If any column looks categorical (few unique values); KNN
            regression cannot impute those.
        """
        features = list(X.columns)
        for _column in features:
            # columns with few unique values are treated as categorical
            if len(X[_column].unique()) <= min(0.1 * len(X), self.uni_class):
                raise ValueError("KNN Imputation not supported for categorical data!")

        _X = X.copy(deep=True)
        if _X.isnull().values.any():
            _X = self._fill(_X)
        else:
            warnings.warn("No nan values found, no change.")
        self._fitted = True
        return _X

    def _fill(self, X):
        features = list(X.columns)

        self._missing_feature = []  # features contains missing values
        self._missing_vector = (
            []
        )  # vector with missing values, to mark the missing index

        # create _missing_table with _missing_feature
        # missing index will be 1, existed index will be 0
        for _column in features:
            if X[_column].isnull().values.any():
                self._missing_feature.append(_column)
                self._missing_vector.append(
                    X[_column].loc[X[_column].isnull()].index.astype(int)
                )

        # NOTE(review): the transpose assumes every missing column has the
        # same number of missing entries — TODO confirm for ragged inputs
        self._missing_vector = np.array(self._missing_vector).T
        self._missing_table = pd.DataFrame(
            self._missing_vector, columns=self._missing_feature
        )

        X = SimpleImputer(method=self.method).fill(
            X
        )  # initial filling for missing values
        random_features = random_list(
            self._missing_feature, self.seed
        )  # the order to regress on missing features

        # if assigned n_neighbors, use it, otherwise select k by k-fold CV
        if self.n_neighbors is None:
            fold_size = int(len(X.index) / self.fold)
            _err = []
            for i in range(self.fold):
                # fixed: fold slice was [i*size : size], which is empty for
                # every fold after the first
                _test = X.iloc[i * fold_size : (i + 1) * fold_size, :]
                # fixed: previously mutated X in place via inplace drop,
                # shrinking the data on every fold
                _train = X.drop(labels=_test.index, axis=0)
                _err.append(self._cross_validation_knn(_train, _test, random_features))
            _err = np.mean(np.array(_err), axis=0)  # mean CV error per candidate k
            # fixed: argmin() returns a scalar index; previously indexed with [0]
            self.optimial_k = int(np.array(_err).argmin()) + 1  # optimal k
            X = self._knn_impute(X, random_features, self.optimial_k)
        else:
            X = self._knn_impute(X, random_features, self.n_neighbors)

        return X

    def _cross_validation_knn(
        self, _train, _test, random_features
    ):  # cross validation to return error
        """Return the total squared prediction error on ``_test`` for each
        candidate k."""
        from sklearn.neighbors import KNeighborsRegressor

        if self.n_neighbors is None:  # fixed: was ``== None``
            n_neighbors = [i + 1 for i in range(10)]
        else:
            n_neighbors = (
                self.n_neighbors
                if isinstance(self.n_neighbors, list)
                else [self.n_neighbors]
            )

        _test_mark = _test.copy(deep=True)
        _err = []
        for _k in n_neighbors:
            _test = _test_mark.copy(deep=True)
            for _feature in random_features:
                _subfeatures = list(_train.columns)
                _subfeatures.remove(_feature)

                fit_model = KNeighborsRegressor(n_neighbors=_k)
                fit_model.fit(_train.loc[:, _subfeatures], _train.loc[:, _feature])
                _test.loc[:, _feature] = fit_model.predict(_test.loc[:, _subfeatures])
            # fixed: reduce to a scalar per k (sum over rows and features);
            # previously a per-column Series was appended
            _err.append(((_test - _test_mark) ** 2).sum().sum())

        return _err

    def _knn_impute(self, X, random_features, k):
        from sklearn.neighbors import KNeighborsRegressor

        features = list(X.columns)
        for _column in random_features:
            _subfeature = features.copy()
            _subfeature.remove(_column)
            # restore the originally-missing entries to nan so they are
            # re-predicted from the other (filled) columns
            X.loc[self._missing_table[_column], _column] = np.nan
            fit_model = KNeighborsRegressor(n_neighbors=k)
            fit_model.fit(
                X.loc[~X[_column].isnull(), _subfeature],
                X.loc[~X[_column].isnull(), _column],
            )
            X.loc[X[_column].isnull(), _column] = fit_model.predict(
                X.loc[X[_column].isnull(), _subfeature]
            )
        return X
class MissForestImputer:
    """
    Run Random Forest to impute the missing values [1]

    [1] <NAME>. and <NAME>., 2012. MissForest—non-parametric missing
    value imputation for mixed-type data. Bioinformatics, 28(1), pp.112-118.

    Parameters
    ----------
    threshold: threshold to terminate iterations, default = 0
        At default, if difference between iterations increases, the iteration stops

    method: initial imputation method for missing values, default = 'mean'

    uni_class: column with unique classes less than uni_class will be considered as categorical, default = 31
    """

    def __init__(self, threshold=0, method="mean", uni_class=31):
        self.threshold = threshold
        self.method = method
        self.uni_class = uni_class

        self._fitted = False  # whether the imputer has been fitted

    def _RFImputer(self, X):
        """Iteratively re-impute the missing entries with random-forest
        regressors until the change between iterations stops decreasing."""
        from sklearn.ensemble import RandomForestRegressor

        _delta = []  # change between successive imputations (termination criterion)

        while True:
            for _column in list(self._missing_table.columns):
                X_old = X.copy(deep=True)
                _subfeature = list(X_old.columns)
                _subfeature.remove(str(_column))
                _missing_index = self._missing_table[_column].tolist()
                RegModel = RandomForestRegressor()
                # train on rows where _column is observed, predict missing rows
                RegModel.fit(
                    X.loc[~X.index.astype(int).isin(_missing_index), _subfeature],
                    X.loc[~X.index.astype(int).isin(_missing_index), _column],
                )
                _tmp_column = RegModel.predict(
                    X.loc[X.index.astype(int).isin(_missing_index), _subfeature]
                )
                X.loc[X.index.astype(int).isin(_missing_index), _column] = _tmp_column
                _delta.append(self._delta_cal(X, X_old))
                # stop as soon as the change starts increasing again
                if len(_delta) >= 2 and _delta[-1] > _delta[-2]:
                    break
            if len(_delta) >= 2 and _delta[-1] > _delta[-2]:
                break

        return X

    # calculate the difference between data newly imputed and before imputation
    def _delta_cal(self, X_new, X_old):
        """Normalized change between consecutive imputations: a normalized
        squared difference for numerical columns plus the fraction of changed
        entries for categorical columns."""
        if (X_new.shape[0] != X_old.shape[0]) or (X_new.shape[1] != X_old.shape[1]):
            raise ValueError("New and old data must have same size, get different!")

        _numerical_features = []
        _categorical_features = []
        for _column in list(self._missing_table.columns):
            # columns with few unique values are treated as categorical
            if len(X_old[_column].unique()) <= self.uni_class:
                _categorical_features.append(_column)
            else:
                _numerical_features.append(_column)

        _N_nume = 0
        _N_deno = 0
        _F_nume = 0
        _F_deno = 0

        if len(_numerical_features) > 0:
            for _column in _numerical_features:
                _N_nume += ((X_new[_column] - X_old[_column]) ** 2).sum()
                _N_deno += (X_new[_column] ** 2).sum()

        if len(_categorical_features) > 0:
            for _column in _categorical_features:
                _F_nume += (X_new[_column] != X_old[_column]).astype(int).sum()
                _F_deno += len(self._missing_table[_column])

        if len(_numerical_features) > 0 and len(_categorical_features) > 0:
            return _N_nume / _N_deno + _F_nume / _F_deno
        elif len(_numerical_features) > 0:
            return _N_nume / _N_deno
        elif len(_categorical_features) > 0:
            return _F_nume / _F_deno

    def fill(self, X):
        """Return a deep copy of ``X`` with nan values imputed by MissForest."""
        _X = X.copy(deep=True)
        if _X.isnull().values.any():
            _X = self._fill(_X)
        else:
            warnings.warn("No nan values found, no change.")
        self._fitted = True
        return _X

    def _fill(self, X):
        features = list(X.columns)

        for _column in features:
            if (X[_column].dtype == object) or (str(X[_column].dtype) == "category"):
                # fixed: error message previously referred to MICE
                raise ValueError(
                    "MissForest can only handle numerical filling, run encoding first!"
                )

        _missing_feature = []  # features contains missing values
        _missing_vector = []  # vector with missing values, to mark the missing index
        _missing_count = []  # counts for missing values

        # create _missing_table with _missing_feature
        # missing index will be 1, existed index will be 0
        for _column in features:
            if X[_column].isnull().values.any():
                _missing_feature.append(_column)
                _missing_vector.append(X.loc[X[_column].isnull()].index.astype(int))
                _missing_count.append(X[_column].isnull().astype(int).sum())

        # reorder the missing features by missing counts increasing
        _order = np.array(_missing_count).argsort().tolist()
        _missing_count = np.array(_missing_count)[_order].tolist()
        _missing_feature = np.array(_missing_feature)[_order].tolist()
        # NOTE(review): the transpose assumes every missing column has the
        # same number of missing entries — TODO confirm for ragged inputs
        _missing_vector = np.array(_missing_vector)[_order].T.tolist()

        self._missing_table = pd.DataFrame(_missing_vector, columns=_missing_feature)

        X = SimpleImputer(method=self.method).fill(
            X
        )  # initial filling for missing values
        X = self._RFImputer(X)

        return X
class MICE:
    """
    Multiple Imputation by chained equations (MICE)

    using single imputation to initialize the imputation step, and iteratively build regression/
    classification model to impute features with missing values [1]

    [1] <NAME>., <NAME>., <NAME>. and <NAME>., 2011. Multiple imputation by
    chained equations: what is it and how does it work?. International journal of methods in
    psychiatric research, 20(1), pp.40-49.

    Parameters
    ----------
    cycle: how many runs of regression/imputation to build the complete data, default = 10

    method: the method to initially fill nan values, default = 'mean'
    supported methods ['mean', 'zero', 'median', 'most frequent', constant]
    'mean' : fill columns with nan values using mean of non nan values
    'zero': fill columns with nan values using 0
    'median': fill columns with nan values using median of non nan values
    'most frequent': fill columns with nan values using most frequent of non nan values
    constant: fill columns with nan values using predefined values

    seed: random seed, default = 1
    every random draw from the minority class will increase the random seed by 1
    """

    def __init__(self, cycle=10, method="mean", seed=1):
        self.method = method
        self.cycle = cycle
        self.seed = seed

        self._fitted = False  # whether the imputer has been fitted

    def fill(self, X):
        """Return a copy of X with missing values imputed by chained equations.

        Works on a deep copy, so the caller's dataframe is untouched. If X has
        no missing values, a warning is issued and the copy is returned as-is.
        """
        self.cycle = int(self.cycle)

        _X = X.copy(deep=True)

        if _X.isnull().values.any():
            _X = self._fill(_X)
        else:
            warnings.warn("No nan values found, no change.")

        self._fitted = True

        return _X

    def _fill(self, X):
        """Record missing positions, do an initial simple fill, then run the cycles."""
        features = list(X.columns)

        # chained regression requires purely numerical columns
        for _column in features:
            if (X[_column].dtype == object) or (str(X[_column].dtype) == "category"):
                raise ValueError(
                    "MICE can only handle numerical filling, run encoding first!"
                )

        self._missing_feature = []  # features that contain missing values
        self._missing_vector = []  # per-feature row indexes of the missing entries
        for _column in features:
            if X[_column].isnull().values.any():
                self._missing_feature.append(_column)
                self._missing_vector.append(
                    X.loc[X[_column].isnull()].index.astype(int)
                )

        # NOTE(review): the transpose assumes every missing feature has the
        # same number of missing rows; ragged columns would break here
        self._missing_vector = np.array(self._missing_vector).T
        self._missing_table = pd.DataFrame(
            self._missing_vector, columns=self._missing_feature
        )

        X = SimpleImputer(method=self.method).fill(
            X
        )  # initial filling for missing values

        # randomized order in which missing features are re-imputed each cycle
        random_features = random_list(self._missing_feature, self.seed)

        for _ in range(self.cycle):
            X = self._cycle_impute(X, random_features)

        return X

    def _cycle_impute(self, X, random_features):
        """One imputation cycle: re-predict each missing feature from all the others."""
        from sklearn.linear_model import LinearRegression, LogisticRegression, LassoCV

        features = list(X.columns)

        for _column in random_features:
            # BUGFIX: build the predictor list without mutating ``features``.
            # The previous ``_subfeature = features; _subfeature.remove(_column)``
            # aliased the list, so every processed column was permanently
            # dropped from the predictors of the columns that followed it
            # (and shrank the ``len(features) <= 15`` model-selection check).
            _subfeature = [_f for _f in features if _f != _column]

            # re-blank the originally-missing cells so they are re-predicted
            _missing_index = self._missing_table[_column].tolist()
            X.loc[X.index.astype(int).isin(_missing_index), _column] = np.nan

            # binary target -> logistic; small feature set -> OLS; else LassoCV
            if len(X[_column].unique()) == 2:
                fit_model = LogisticRegression()
            elif len(features) <= 15:
                fit_model = LinearRegression()
            else:
                fit_model = LassoCV()
            fit_model.fit(
                X.loc[~X[_column].isnull(), _subfeature],
                X.loc[~X[_column].isnull(), _column],
            )
            X.loc[X[_column].isnull(), _column] = fit_model.predict(
                X.loc[X[_column].isnull(), _subfeature]
            )

        return X
|
PanyiDong/AutoML | My_AutoML/_model/_lightgbm.py | <reponame>PanyiDong/AutoML
"""
File: _lightgbm.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_model/_lightgbm.py
File Created: Friday, 15th April 2022 12:19:01 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 7:15:24 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pandas as pd
from lightgbm import LGBMClassifier, LGBMRegressor
from My_AutoML._constant import (
LIGHTGBM_BINARY_CLASSIFICATION,
LIGHTGBM_MULTICLASS_CLASSIFICATION,
LIGHTGBM_REGRESSION,
LIGHTGBM_BOOSTING,
LIGHTGBM_TREE_LEARNER,
)
#####################################################################################################################
# LightGBM support
# https://lightgbm.readthedocs.io/en/latest/Parameters-Tuning.html
class LightGBM_Base:
    """
    LightGBM Classification/Regression Wrapper

    Parameters
    ----------
    task_type: task type, one of classification or regression, default = "classification"

    objective: objective function (metric/loss) to evaluate model, default = "regression"

    boosting: boosting method, default = "gbdt"
    support ["gbdt", "rf", "dart", "goss"]

    n_estimators: number of estimators to build, default = 100

    max_depth: max depth of trees built, default = -1
    -1 stands for no limitation

    num_leaves: number of leaf nodes to build, default = 31

    min_data_in_leaf: minimum number of data to generate a leaf node, default = 20

    learning_rate: learning rate of the building process, default = 0.1

    tree_learner: tree learner algorithm, default = "serial"
    support ["serial", "feature", "data", "voting"]

    num_iterations: number of iterations for the algorithm, default = 100

    seed: random seed, default = 1
    """

    def __init__(
        self,
        task_type="classification",
        objective="regression",
        boosting="gbdt",
        n_estimators=100,
        max_depth=-1,
        num_leaves=31,
        min_data_in_leaf=20,
        learning_rate=0.1,
        tree_learner="serial",
        num_iterations=100,
        seed=1,
    ):
        self.task_type = task_type
        self.objective = objective
        self.boosting = boosting
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.num_leaves = num_leaves
        self.min_data_in_leaf = min_data_in_leaf
        self.learning_rate = learning_rate
        self.tree_learner = tree_learner
        self.num_iterations = num_iterations
        self.seed = seed

        self._fitted = False

    def fit(self, X, y):
        """Validate hyperparameters, build the LightGBM model and fit it.

        Raises AttributeError when objective/boosting/tree_learner are not in
        the supported sets for the (refined) task type.

        NOTE: refines ``self.task_type`` in place
        ("classification" -> "binary"/"multiclass" based on y).
        """
        # get binary classification and multiclass classification
        if self.task_type == "classification":
            if len(pd.unique(y)) == 2:
                self.task_type = "binary"
            else:
                self.task_type = "multiclass"

        # check categorical hyperparameters in range
        # objective
        if (
            self.task_type == "binary"
            and self.objective not in LIGHTGBM_BINARY_CLASSIFICATION
        ):
            raise AttributeError(
                "For {} tasks, only accept objects: {}, get {}.".format(
                    self.task_type,
                    ", ".join(LIGHTGBM_BINARY_CLASSIFICATION),
                    self.objective,
                )
            )
        elif (
            self.task_type == "multiclass"
            and self.objective not in LIGHTGBM_MULTICLASS_CLASSIFICATION
        ):
            raise AttributeError(
                "For {} tasks, only accept objects: {}, get {}.".format(
                    self.task_type,
                    ", ".join(LIGHTGBM_MULTICLASS_CLASSIFICATION),
                    self.objective,
                )
            )
        elif (
            self.task_type == "regression" and self.objective not in LIGHTGBM_REGRESSION
        ):
            raise AttributeError(
                "For {} tasks, only accept objects: {}, get {}.".format(
                    self.task_type, ", ".join(LIGHTGBM_REGRESSION), self.objective
                )
            )
        # boosting
        if self.boosting not in LIGHTGBM_BOOSTING:
            raise AttributeError(
                "Expect one of {} boosting method, get {}.".format(
                    ", ".join(LIGHTGBM_BOOSTING), self.boosting
                )
            )
        # tree learner
        if self.tree_learner not in LIGHTGBM_TREE_LEARNER:
            raise AttributeError(
                "Expect one of {} tree learner, get {}.".format(
                    ", ".join(LIGHTGBM_TREE_LEARNER), self.tree_learner
                )
            )

        # model
        # BUGFIX: ``seed`` was documented and stored but never forwarded to
        # the model; pass it as random_state so fits are reproducible.
        if self.task_type in ["binary", "multiclass"]:
            self.model = LGBMClassifier(
                objective=self.objective,
                boosting_type=self.boosting,
                n_estimators=self.n_estimators,
                max_depth=self.max_depth,
                num_leaves=self.num_leaves,
                min_data_in_leaf=self.min_data_in_leaf,
                learning_rate=self.learning_rate,
                tree_learner=self.tree_learner,
                num_iterations=self.num_iterations,
                random_state=self.seed,
            )
        elif self.task_type == "regression":
            self.model = LGBMRegressor(
                objective=self.objective,
                boosting_type=self.boosting,
                n_estimators=self.n_estimators,
                max_depth=self.max_depth,
                num_leaves=self.num_leaves,
                min_data_in_leaf=self.min_data_in_leaf,
                learning_rate=self.learning_rate,
                tree_learner=self.tree_learner,
                num_iterations=self.num_iterations,
                random_state=self.seed,
            )

        self.model.fit(X, y)
        self._fitted = True

        return self

    def predict(self, X):
        """Predict targets/labels for X using the fitted model."""
        return self.model.predict(X)

    def predict_proba(self, X):
        """Predict class probabilities for X (classification only)."""
        return self.model.predict_proba(X)
class LightGBM_Classifier(LightGBM_Base):
    """
    LightGBM Classification Wrapper

    Parameters
    ----------
    objective: objective function (metric/loss) to evaluate model, default = "multiclass"

    boosting: boosting method, default = "gbdt"
    support ["gbdt", "rf", "dart", "goss"]

    n_estimators: number of estimators to build, default = 100

    max_depth: max depth of trees built, default = -1
    -1 stands for no limitation

    num_leaves: number of leaf nodes to build, default = 31

    min_data_in_leaf: minimum number of data to generate a leaf node, default = 20

    learning_rate: learning rate of the building process, default = 0.1

    tree_learner: tree learner algorithm, default = "serial"
    support ["serial", "feature", "data", "voting"]

    num_iterations: number of iterations for the algorithm, default = 100

    seed: random seed, default = 1
    """

    def __init__(
        self,
        objective="multiclass",
        boosting="gbdt",
        n_estimators=100,
        max_depth=-1,
        num_leaves=31,
        min_data_in_leaf=20,
        learning_rate=0.1,
        tree_learner="serial",
        num_iterations=100,
        seed=1,
    ):
        # Delegate all attribute storage to LightGBM_Base; the previous
        # duplicate self.* assignments before the super() call were redundant
        # (the base immediately re-assigned every one of them).
        super().__init__(
            task_type="classification",
            objective=objective,
            boosting=boosting,
            n_estimators=n_estimators,
            max_depth=max_depth,
            num_leaves=num_leaves,
            min_data_in_leaf=min_data_in_leaf,
            learning_rate=learning_rate,
            tree_learner=tree_learner,
            num_iterations=num_iterations,
            seed=seed,
        )

    def fit(self, X, y):
        """Fit the underlying LGBMClassifier; returns self."""
        super().fit(X, y)

        self._fitted = True

        return self

    def predict(self, X):
        """Predict class labels for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Predict class probabilities for X."""
        return super().predict_proba(X)
class LightGBM_Regressor(LightGBM_Base):
    """
    LightGBM Regression Wrapper

    Parameters
    ----------
    objective: objective function (metric/loss) to evaluate model, default = "regression"

    boosting: boosting method, default = "gbdt"
    support ["gbdt", "rf", "dart", "goss"]

    n_estimators: number of estimators to build, default = 100

    max_depth: max depth of trees built, default = -1
    -1 stands for no limitation

    num_leaves: number of leaf nodes to build, default = 31

    min_data_in_leaf: minimum number of data to generate a leaf node, default = 20

    learning_rate: learning rate of the building process, default = 0.1

    tree_learner: tree learner algorithm, default = "serial"
    support ["serial", "feature", "data", "voting"]

    num_iterations: number of iterations for the algorithm, default = 100

    seed: random seed, default = 1
    """

    def __init__(
        self,
        objective="regression",
        boosting="gbdt",
        n_estimators=100,
        max_depth=-1,
        num_leaves=31,
        min_data_in_leaf=20,
        learning_rate=0.1,
        tree_learner="serial",
        num_iterations=100,
        seed=1,
    ):
        # Delegate all attribute storage to LightGBM_Base; the previous
        # duplicate self.* assignments before the super() call were redundant
        # (the base immediately re-assigned every one of them).
        super().__init__(
            task_type="regression",
            objective=objective,
            boosting=boosting,
            n_estimators=n_estimators,
            max_depth=max_depth,
            num_leaves=num_leaves,
            min_data_in_leaf=min_data_in_leaf,
            learning_rate=learning_rate,
            tree_learner=tree_learner,
            num_iterations=num_iterations,
            seed=seed,
        )

    def fit(self, X, y):
        """Fit the underlying LGBMRegressor; returns self."""
        super().fit(X, y)

        self._fitted = True

        return self

    def predict(self, X):
        """Predict regression targets for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Not supported: regression has no class probabilities."""
        raise NotImplementedError("predict_proba is not implemented for regression.")
|
PanyiDong/AutoML | My_AutoML/_model/_RNN.py | """
File: _RNN.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_model/_RNN.py
File Created: Tuesday, 5th April 2022 11:46:25 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 7:20:10 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Type
import warnings
import numpy as np
import pandas as pd
from My_AutoML._utils._data import assign_classes
from My_AutoML._utils._tensor import repackage_hidden
# check if pytorch exists
# if exists, import pytorch
import importlib
pytorch_spec = importlib.util.find_spec("torch")
if pytorch_spec is not None:
import torch
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset, DataLoader
####################################################################################################
# RNN models
# RNN/LSTM/GRU models supported
class RNN_Net(nn.Module):
    """
    Network structure for the recurrent models (RNN / LSTM / GRU).

    Parameters
    ----------
    input_size: number of input features per time step

    hidden_size: width of the recurrent hidden state

    output_size: output size of the model; number of classes for
    classification tasks, 1 for regression tasks

    n_layers: number of stacked recurrent layers, default = 1

    RNN_unit: RNN unit type, default = "RNN"
    support type ["RNN", "LSTM", "GRU"]

    activation: activation applied after the linear head, default = "Sigmoid"
    support type ["ReLU", "Tanh", "Sigmoid"]

    dropout: dropout rate applied to the head output, default = 0.2

    use_softmax: whether to append a LogSoftmax layer, default = False

    device: device used when allocating the initial hidden states
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        output_size,
        n_layers=1,
        RNN_unit="RNN",
        activation="Sigmoid",
        dropout=0.2,
        use_softmax=False,
        device=torch.device("cuda"),
    ):
        super().__init__()

        # device is only consulted by init_hidden; .to() moves the weights
        self.device = device

        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.RNN_unit = RNN_unit

        # dispatch tables replace the original if/elif chains
        recurrent_units = {"RNN": nn.RNN, "LSTM": nn.LSTM, "GRU": nn.GRU}
        if RNN_unit not in recurrent_units:
            raise TypeError("Not recognizing RNN unit type!")
        self.rnn = recurrent_units[RNN_unit](
            input_size, hidden_size, n_layers, batch_first=True
        )

        # linear head mapping hidden states to output scores
        self.hidden2tag = nn.Linear(hidden_size, output_size)

        activations = {"ReLU": nn.ReLU, "Tanh": nn.Tanh, "Sigmoid": nn.Sigmoid}
        if activation not in activations:
            raise TypeError("Not recognizing activation function!")
        self.activation = activations[activation]()

        self.dropout = nn.Dropout(p=dropout)  # dropout layer

        self.use_softmax = use_softmax
        if self.use_softmax:
            self.softmax_layer = nn.LogSoftmax(dim=1)  # softmax layer

    def forward(self, input, hidden):
        """Run one forward pass; returns (scores, new hidden state)."""
        # LSTM carries a (hidden, cell) pair; RNN/GRU carry a single tensor
        if self.RNN_unit == "LSTM":
            rnn_out, (rnn_hidden, rnn_cell) = self.rnn(input, hidden)
        else:  # "RNN" / "GRU" (constructor rejects anything else)
            rnn_out, rnn_hidden = self.rnn(input, hidden)

        scores = self.dropout(self.activation(self.hidden2tag(rnn_out)))
        if self.use_softmax:
            # full sequence output is kept; callers slice the last step
            scores = self.softmax_layer(scores)

        if self.RNN_unit == "LSTM":
            return scores, (rnn_hidden, rnn_cell)
        return scores, rnn_hidden

    def init_hidden(self, batch_size):
        """Zero-initialized hidden state for a batch, on self.device."""
        shape = (self.n_layers, batch_size, self.hidden_size)
        h0 = torch.zeros(shape).to(self.device)
        if self.RNN_unit == "LSTM":
            # LSTM additionally needs a zeroed cell state
            return (h0, torch.zeros(shape).to(self.device))
        return h0
class RNN_Base:
    """
    Recurrent Neural Network (RNN) training/evaluation wrapper.

    Parameters
    ----------
    input_size: input size, default = 1

    hidden_size: dimension of the hidden layer, default = 256

    output_size: output size, default = 1
    number of classes for classification, 1 for regression

    n_layers: number of hidden layers, default = 1

    RNN_unit: RNN unit type, default = "RNN"
    support type ["RNN", "LSTM", "GRU"]

    activation: activation function, default = "Sigmoid"
    support type ["ReLU", "Tanh", "Sigmoid"]

    dropout: dropout rate in fully-connected layers, default = 0.2

    learning_rate: learning rate for the optimizer, default = None
    (None selects a per-optimizer default: 0.001 for Adam, 0.1 for SGD)

    optimizer: optimizer for training, default = "Adam"
    support type ["Adam", "SGD"]

    criteria: loss function, default = "CrossEntropy"
    support type ["CrossEntropy", "MSE", "MAE", "NegativeLogLikelihood"]

    batch_size: batch size for training, default = 32

    num_epochs: number of epochs for training, default = 20

    use_softmax: whether the network ends with LogSoftmax, default = False

    is_cuda: whether to use GPU for training, default = True

    seed: random seed, default = 1
    """

    def __init__(
        self,
        input_size=1,
        hidden_size=256,
        output_size=1,
        n_layers=1,
        RNN_unit="RNN",
        activation="Sigmoid",
        dropout=0.2,
        learning_rate=None,
        optimizer="Adam",
        criteria="CrossEntropy",
        batch_size=32,
        num_epochs=20,
        use_softmax=False,
        is_cuda=True,
        seed=1,
    ):
        # model parameters
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.RNN_unit = RNN_unit
        self.activation = activation
        self.dropout = dropout
        self.use_softmax = use_softmax

        # training parameters
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.criteria = criteria
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.is_cuda = is_cuda
        self.seed = seed

    def fit(self, X, y):
        """Build an RNN_Net and train it on tensors X (batch, seq, input) and y.

        Returns self. Raises ValueError for an unsupported RNN unit, optimizer
        or criteria.
        """
        # set seed
        torch.manual_seed(self.seed)

        # use cuda if detect GPU and is_cuda is True
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() and self.is_cuda else "cpu"
        )
        # if try cuda and no cuda available, raise warning
        if self.is_cuda and str(self.device) == "cpu":
            warnings.warn("No GPU detected, use CPU for training.")

        # make sure RNN unit is supported
        if self.RNN_unit not in ["RNN", "LSTM", "GRU"]:
            raise ValueError("RNN unit must be RNN, LSTM or GRU!")

        # load model
        self.model = RNN_Net(
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            output_size=self.output_size,
            n_layers=self.n_layers,
            RNN_unit=self.RNN_unit,
            activation=self.activation,
            dropout=self.dropout,
            use_softmax=self.use_softmax,
            device=self.device,
        ).to(self.device)

        # specify optimizer
        if self.optimizer == "Adam":
            lr = 0.001 if self.learning_rate is None else self.learning_rate
            optimizer = optim.Adam(self.model.parameters(), lr=lr)
        elif self.optimizer == "SGD":
            lr = 0.1 if self.learning_rate is None else self.learning_rate
            optimizer = optim.SGD(self.model.parameters(), lr=lr)
        else:
            # BUGFIX: an unknown optimizer previously fell through both
            # branches and crashed later with UnboundLocalError; raise a
            # clear error instead (mirrors the criteria check below)
            raise ValueError("Not recognized optimizer: {}.".format(self.optimizer))

        # specify loss function
        if self.criteria == "CrossEntropy":
            criteria = nn.CrossEntropyLoss()
        elif self.criteria == "MSE":
            criteria = nn.MSELoss()
        elif self.criteria == "MAE":
            criteria = nn.L1Loss()
        elif self.criteria == "NegativeLogLikelihood":
            criteria = nn.NLLLoss()
        else:
            raise ValueError("Not recognized criteria: {}.".format(self.criteria))

        # load data to DataLoader
        # drop_last keeps every batch at exactly batch_size, matching the
        # hidden state allocated below
        train_tensor = TensorDataset(X, y)
        train_loader = DataLoader(
            train_tensor, batch_size=self.batch_size, shuffle=True, drop_last=True
        )

        # training process
        for _ in range(self.num_epochs):
            # initialize hidden state for each epoch
            h = self.model.init_hidden(self.batch_size)

            for batch_idx, (data, target) in enumerate(train_loader):
                # put data, target to device
                data = data.to(self.device)
                target = target.to(self.device)

                # detach hidden state from the previous batch's graph
                h = repackage_hidden(h)

                self.model.zero_grad()
                output, h = self.model(data, h)  # forward step
                # only use last output for classification (at last time T)
                loss = criteria(output[:, -1, :], target)  # calculate loss
                loss.backward()  # backpropagation
                optimizer.step()  # update parameters

        return self

    def predict(self, X):
        """Run inference on X (2-d array/DataFrame) and return a numpy array
        of the last-time-step outputs, shape (n_samples, output_size)."""
        # convert to tensor
        X = torch.as_tensor(
            X.values if isinstance(X, pd.DataFrame) else X, dtype=torch.float
        )
        X.unsqueeze_(-1)  # expand to 3d tensor

        # load data to DataLoader (single full-size batch)
        test_tensor = TensorDataset(X)
        test_loader = DataLoader(test_tensor, batch_size=len(test_tensor))

        # initialize hidden state
        h = self.model.init_hidden(len(test_tensor))

        # predict
        for batch_idx, [data] in enumerate(test_loader):
            with torch.no_grad():
                results, h = self.model(data.to(self.device), h)

        return results[:, -1, :].cpu().numpy()  # return prediction to cpu
class RNN_Classifier(RNN_Base):
    """
    RNN model for classification tasks.

    Parameters
    ----------
    input_size: input size, default = 1

    hidden_size: dimension of the hidden layer, default = 256

    output_size: output size, default = 1
    will be assigned to number of classes at fit time

    n_layers: number of hidden layers, default = 1

    RNN_unit: RNN unit type, default = "RNN"
    support type ["RNN", "LSTM", "GRU"]

    activation: activation function, default = "Sigmoid"
    support type ["ReLU", "Tanh", "Sigmoid"]

    dropout: dropout rate in fully-connected layers, default = 0.2

    learning_rate: learning rate for the optimizer, default = None

    optimizer: optimizer for training, default = "Adam"
    support type ["Adam", "SGD"]

    criteria: loss function, default = "CrossEntropy"
    support type ["CrossEntropy", "NegativeLogLikelihood"]

    batch_size: batch size for training, default = 32

    num_epochs: number of epochs for training, default = 20

    is_cuda: whether to use GPU for training, default = True

    seed: random seed, default = 1
    """

    def __init__(
        self,
        input_size=1,
        hidden_size=256,
        output_size=1,
        n_layers=1,
        RNN_unit="RNN",
        activation="Sigmoid",
        dropout=0.2,
        learning_rate=None,
        optimizer="Adam",
        criteria="CrossEntropy",
        batch_size=32,
        num_epochs=20,
        is_cuda=True,
        seed=1,
    ):
        # network architecture settings
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.RNN_unit = RNN_unit
        self.activation = activation
        self.dropout = dropout

        # optimization settings
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.criteria = criteria
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.is_cuda = is_cuda
        self.seed = seed

    def fit(self, X, y):
        """Infer the class count, coerce inputs to tensors and train."""
        # output width equals the number of distinct labels
        self.output_size = len(pd.unique(y))

        # coerce features to a float tensor of shape (batch, seq, 1)
        if not isinstance(X, torch.Tensor):
            values = X.values if isinstance(X, pd.DataFrame) else X
            X = torch.as_tensor(values, dtype=torch.float)
            X.unsqueeze_(-1)
        # coerce labels to a long tensor
        if not isinstance(y, torch.Tensor):
            labels = y.values if isinstance(y, pd.DataFrame) else y
            y = torch.as_tensor(labels, dtype=torch.long)

        # (re)initialize the base trainer now that output_size is known;
        # classification uses a LogSoftmax head
        super().__init__(
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            output_size=self.output_size,
            n_layers=self.n_layers,
            RNN_unit=self.RNN_unit,
            activation=self.activation,
            dropout=self.dropout,
            learning_rate=self.learning_rate,
            optimizer=self.optimizer,
            criteria=self.criteria,
            batch_size=self.batch_size,
            num_epochs=self.num_epochs,
            use_softmax=True,
            is_cuda=self.is_cuda,
            seed=self.seed,
        )
        return super().fit(X, y)

    def predict(self, X):
        """Predict class labels (argmax over the class outputs)."""
        return assign_classes(super().predict(X))

    def predict_proba(self, X):
        """Return the full per-class output rather than the argmax."""
        return super().predict(X)
class RNN_Regressor(RNN_Base):
    """
    RNN model for regression tasks.

    Parameters
    ----------
    input_size: input size, default = 1

    hidden_size: dimension of the hidden layer, default = 256

    output_size: output size, default = 1
    will be assigned to 1 at fit time

    n_layers: number of hidden layers, default = 1

    RNN_unit: RNN unit type, default = "RNN"
    support type ["RNN", "LSTM", "GRU"]

    activation: activation function, default = "Sigmoid"
    support type ["ReLU", "Tanh", "Sigmoid"]

    dropout: dropout rate in fully-connected layers, default = 0.2

    learning_rate: learning rate for the optimizer, default = None

    optimizer: optimizer for training, default = "Adam"
    support type ["Adam", "SGD"]

    criteria: loss function, default = "MSE"
    support type ["MSE", "MAE"]

    batch_size: batch size for training, default = 32

    num_epochs: number of epochs for training, default = 20

    is_cuda: whether to use GPU for training, default = True

    seed: random seed, default = 1
    """

    def __init__(
        self,
        input_size=1,
        hidden_size=256,
        output_size=1,
        n_layers=1,
        RNN_unit="RNN",
        activation="Sigmoid",
        dropout=0.2,
        learning_rate=None,
        optimizer="Adam",
        criteria="MSE",
        batch_size=32,
        num_epochs=20,
        is_cuda=True,
        seed=1,
    ):
        # network architecture settings
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.RNN_unit = RNN_unit
        self.activation = activation
        self.dropout = dropout

        # optimization settings
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.criteria = criteria
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.is_cuda = is_cuda
        self.seed = seed

    def fit(self, X, y):
        """Coerce inputs to tensors and train a single-output network."""
        # regression always predicts a single value per sample
        self.output_size = 1

        # coerce features to a float tensor of shape (batch, seq, 1)
        if not isinstance(X, torch.Tensor):
            values = X.values if isinstance(X, pd.DataFrame) else X
            X = torch.as_tensor(values, dtype=torch.float)
            X.unsqueeze_(-1)
        # coerce targets to a float tensor
        if not isinstance(y, torch.Tensor):
            targets = y.values if isinstance(y, pd.DataFrame) else y
            y = torch.as_tensor(targets, dtype=torch.float)

        # (re)initialize the base trainer; regression uses no softmax head
        super().__init__(
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            output_size=self.output_size,
            n_layers=self.n_layers,
            RNN_unit=self.RNN_unit,
            activation=self.activation,
            dropout=self.dropout,
            learning_rate=self.learning_rate,
            optimizer=self.optimizer,
            criteria=self.criteria,
            batch_size=self.batch_size,
            num_epochs=self.num_epochs,
            use_softmax=False,
            is_cuda=self.is_cuda,
            seed=self.seed,
        )
        return super().fit(X, y)

    def predict(self, X):
        """Predict regression targets for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Not supported: regression has no class probabilities."""
        raise NotImplementedError("predict_proba is not implemented for regression.")
|
PanyiDong/AutoML | My_AutoML/_utils/__init__.py | """
File: __init__.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_utils/__init__.py
File Created: Wednesday, 6th April 2022 12:00:12 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 16th April 2022 7:42:02 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from ._base import (
random_guess,
random_index,
random_list,
is_date,
feature_rounding,
minloc,
maxloc,
True_index,
type_of_task,
Timer,
)
from ._data import (
train_test_split,
as_dataframe,
formatting,
unify_nan,
remove_index_columns,
get_missing_matrix,
)
from ._file import save_model
from ._stat import (
nan_cov,
class_means,
empirical_covariance,
class_cov,
Pearson_Corr,
MI,
t_score,
ANOVA,
)
# from ._preprocessing import (
# text_preprocessing_torchtext,
# text_preprocessing_transformers,
# )
|
PanyiDong/AutoML | My_AutoML/_hyperparameters/_hyperopt/_regressor_hyperparameter.py | """
File: _regressor_hyperparameter.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/_hyperopt/_regressor_hyperparameter.py
File Created: Tuesday, 5th April 2022 11:06:33 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:23:10 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from hyperopt import hp
from hyperopt.pyll import scope
# regressor hyperparameters
# Hyperparameter search space for regression models (hyperopt format).
# Each dict pairs a "model" name with hyperopt stochastic expressions for its
# tunable parameters. Every hp.* label must be globally unique within the
# search space; two labels below were corrected to match the parameter they
# describe (see NOTE comments).
regressor_hyperparameter = [
    # extract from autosklearn
    {
        "model": "AdaboostRegressor",
        "n_estimators": scope.int(
            hp.quniform("AdaboostRegressor_n_estimators", 50, 500, 1)
        ),
        "learning_rate": hp.loguniform(
            "AdaboostRegressor_learning_rate", np.log(0.01), np.log(2)
        ),
        # NOTE: label fixed from "AdaboostRegressor_algorithm" — this entry
        # tunes the `loss` parameter, not `algorithm`.
        "loss": hp.choice(
            "AdaboostRegressor_loss", ["linear", "square", "exponential"]
        ),
        # for base_estimator of Decision Tree
        "max_depth": scope.int(hp.quniform("AdaboostRegressor_max_depth", 1, 10, 1)),
    },
    {
        "model": "ARDRegression",
        "n_iter": hp.choice("ARDRegression_n_iter", [300]),
        "tol": hp.loguniform("ARDRegression_tol", np.log(1e-5), np.log(1e-1)),
        "alpha_1": hp.loguniform("ARDRegression_alpha_1", np.log(1e-10), np.log(1e-3)),
        "alpha_2": hp.loguniform("ARDRegression_alpha_2", np.log(1e-10), np.log(1e-3)),
        "lambda_1": hp.loguniform(
            "ARDRegression_lambda_1", np.log(1e-10), np.log(1e-3)
        ),
        "lambda_2": hp.loguniform(
            "ARDRegression_lambda_2", np.log(1e-10), np.log(1e-3)
        ),
        "threshold_lambda": hp.loguniform(
            "ARDRegression_threshold_lambda", np.log(1e3), np.log(1e5)
        ),
        "fit_intercept": hp.choice("ARDRegression_fit_intercept", [True]),
    },
    {
        "model": "DecisionTree",
        "criterion": hp.choice(
            "DecisionTree_criterion", ["mse", "friedman_mse", "mae"]
        ),
        "max_features": hp.choice("DecisionTree_max_features", [1.0]),
        "max_depth_factor": hp.uniform("DecisionTree_max_depth_factor", 0.0, 2.0),
        "min_samples_split": scope.int(
            hp.quniform("DecisionTree_min_samples_split", 2, 20, 1)
        ),
        "min_samples_leaf": scope.int(
            hp.quniform("DecisionTree_min_samples_leaf", 1, 20, 1)
        ),
        "min_weight_fraction_leaf": hp.choice(
            "DecisionTree_min_weight_fraction_leaf", [0.0]
        ),
        "max_leaf_nodes": hp.choice("DecisionTree_max_leaf_nodes", [None]),
        "min_impurity_decrease": hp.choice("DecisionTree_min_impurity_decrease", [0.0]),
    },
    {
        "model": "ExtraTreesRegressor",
        "criterion": hp.choice(
            "ExtraTreesRegressor_criterion", ["mse", "friedman_mse", "mae"]
        ),
        "min_samples_leaf": scope.int(
            hp.quniform("ExtraTreesRegressor_min_samples_leaf", 1, 20, 1)
        ),
        "min_samples_split": scope.int(
            hp.quniform("ExtraTreesRegressor_min_samples_split", 2, 20, 1)
        ),
        "max_features": hp.uniform("ExtraTreesRegressor_max_features", 0.1, 1.0),
        "bootstrap": hp.choice("ExtraTreesRegressor_bootstrap", [True, False]),
        "max_leaf_nodes": hp.choice("ExtraTreesRegressor_max_leaf_nodes", [None]),
        "max_depth": hp.choice("ExtraTreesRegressor_max_depth", [None]),
        "min_weight_fraction_leaf": hp.choice(
            "ExtraTreesRegressor_min_weight_fraction_leaf", [0.0]
        ),
        "min_impurity_decrease": hp.choice(
            "ExtraTreesRegressor_min_impurity_decrease", [0.0]
        ),
    },
    {
        "model": "GaussianProcess",
        "alpha": hp.loguniform("GaussianProcess_alpha", np.log(1e-14), np.log(1)),
        "thetaL": hp.loguniform("GaussianProcess_thetaL", np.log(1e-10), np.log(1e-3)),
        "thetaU": hp.loguniform("GaussianProcess_thetaU", np.log(1), np.log(1e5)),
    },
    {
        "model": "GradientBoosting",
        # n_iter_no_change only selected for early_stop in ['valid', 'train']
        # validation_fraction only selected for early_stop = 'valid'
        "loss": hp.choice("GradientBoosting_loss", ["least_squares"]),
        "learning_rate": hp.loguniform(
            "GradientBoosting_learning_rate", np.log(0.01), np.log(1)
        ),
        "min_samples_leaf": scope.int(
            hp.loguniform("GradientBoosting_min_samples_leaf", np.log(1), np.log(200))
        ),
        "max_depth": hp.choice("GradientBoosting_max_depth", [None]),
        "max_leaf_nodes": scope.int(
            hp.loguniform("GradientBoosting_max_leaf_nodes", np.log(3), np.log(2047))
        ),
        "max_bins": hp.choice("GradientBoosting_max_bins", [255]),
        "l2_regularization": hp.loguniform(
            "GradientBoosting_l2_regularization", np.log(1e-10), np.log(1)
        ),
        "early_stop": hp.choice(
            "GradientBoosting_early_stop", ["off", "train", "valid"]
        ),
        "tol": hp.choice("GradientBoosting_tol", [1e-7]),
        "scoring": hp.choice("GradientBoosting_scoring", ["loss"]),
        "n_iter_no_change": scope.int(
            hp.quniform("GradientBoosting_n_iter_no_change", 1, 20, 1)
        ),
        "validation_fraction": hp.uniform(
            "GradientBoosting_validation_fraction", 0.01, 0.4
        ),
    },
    {
        "model": "KNearestNeighborsRegressor",
        "n_neighbors": scope.int(
            hp.quniform("KNearestNeighborsRegressor_n_neighbors", 1, 100, 1)
        ),
        "weights": hp.choice(
            "KNearestNeighborsRegressor_weights", ["uniform", "distance"]
        ),
        "p": hp.choice("KNearestNeighborsRegressor_p", [1, 2]),
    },
    {
        "model": "LibLinear_SVR",
        # forbid loss = 'epsilon_insensitive' and dual = False
        # NOTE: label fixed from "LibLinear_SVR_tol" — this entry tunes
        # `epsilon`, and the old label nearly collided with
        # "LibLinear_SVR__tol" used for the actual `tol` below.
        "epsilon": hp.loguniform("LibLinear_SVR_epsilon", np.log(0.001), np.log(1)),
        "loss": hp.choice(
            "LibLinear_SVR__loss",
            ["squared_epsilon_insensitive"],
        ),
        "dual": hp.choice("LibLinear_SVR__dual", [False]),
        "tol": hp.loguniform("LibLinear_SVR__tol", np.log(1e-5), np.log(1e-1)),
        "C": hp.loguniform("LibLinear_SVR__C", np.log(0.03125), np.log(32768)),
        "fit_intercept": hp.choice("LibLinear_SVR__fit_intercept", [True]),
        "intercept_scaling": hp.choice("LibLinear_SVR__intercept_scaling", [1]),
    },
    {
        "model": "LibSVM_SVR",
        # degree only selected for kernel in ['poly', 'rbf', 'sigmoid']
        # gamma only selected for kernel in ['poly', 'rbf']
        # coef0 only selected for kernel in ['poly', 'sigmoid']
        "kernel": hp.choice("LibSVM_SVR_kernel", ["linear", "poly", "rbf", "sigmoid"]),
        "C": hp.loguniform("LibSVM_SVR__C", np.log(0.03125), np.log(32768)),
        "epsilon": hp.uniform("LibSVM_SVR_epsilon", 1e-5, 1),
        "tol": hp.loguniform("LibSVM_SVR__tol", np.log(1e-5), np.log(1e-1)),
        "shrinking": hp.choice("LibSVM_SVR_shrinking", [True, False]),
        "degree": scope.int(hp.quniform("LibSVM_SVR_degree", 2, 5, 1)),
        "gamma": hp.loguniform("LibSVM_SVR_gamma", np.log(3.0517578125e-5), np.log(8)),
        "coef0": hp.uniform("LibSVM_SVR_coef0", -1, 1),
        "max_iter": hp.choice("LibSVM_SVR_max_iter", [-1]),
    },
    {
        "model": "MLPRegressor",
        # validation_fraction only selected for early_stopping = 'valid'
        "hidden_layer_depth": scope.int(
            hp.quniform("MLPRegressor_hidden_layer_depth", 1, 3, 1)
        ),
        "num_nodes_per_layer": scope.int(
            hp.loguniform("MLPRegressor_num_nodes_per_layer", np.log(16), np.log(264))
        ),
        "activation": hp.choice("MLPRegressor_activation", ["tanh", "relu"]),
        "alpha": hp.loguniform("MLPRegressor_alpha", np.log(1e-7), np.log(1e-1)),
        "learning_rate_init": hp.loguniform(
            "MLPRegressor_learning_rate_init", np.log(1e-4), np.log(0.5)
        ),
        "early_stopping": hp.choice("MLPRegressor_early_stopping", ["valid", "train"]),
        "solver": hp.choice("MLPRegressor_solver", ["adam"]),
        "batch_size": hp.choice("MLPRegressor_batch_size", ["auto"]),
        "n_iter_no_change": hp.choice("MLPRegressor_n_iter_no_change", [32]),
        "tol": hp.choice("MLPRegressor_tol", [1e-4]),
        "shuffle": hp.choice("MLPRegressor_shuffle", [True]),
        "beta_1": hp.choice("MLPRegressor_beta_1", [0.9]),
        "beta_2": hp.choice("MLPRegressor_beta_2", [0.999]),
        "epsilon": hp.choice("MLPRegressor_epsilon", [1e-8]),
        "validation_fraction": hp.choice("MLPRegressor_validation_fraction", [0.1]),
    },
    {
        "model": "RandomForest",
        "criterion": hp.choice(
            "RandomForest_criterion", ["mse", "friedman_mse", "mae"]
        ),
        "max_features": hp.uniform("RandomForest_max_features", 0.1, 1.0),
        "max_depth": hp.choice("RandomForest_max_depth", [None]),
        "min_samples_split": scope.int(
            hp.quniform("RandomForest_min_samples_split", 2, 20, 1)
        ),
        "min_samples_leaf": scope.int(
            hp.quniform("RandomForest_min_samples_leaf", 1, 20, 1)
        ),
        "min_weight_fraction_leaf": hp.choice(
            "RandomForest_min_weight_fraction_leaf", [0.0]
        ),
        "bootstrap": hp.choice("RandomForest_bootstrap", [True, False]),
        "max_leaf_nodes": hp.choice("RandomForest_max_leaf_nodes", [None]),
        "min_impurity_decrease": hp.choice("RandomForest_min_impurity_decrease", [0.0]),
    },
    {
        "model": "SGD",
        # l1_ratio only selected for penalty = 'elasticnet'
        # epsilon only selected for loss in ['huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']
        # eta0 only selected for learning_rate in ['constant', 'invscaling']
        # power_t only selected for learning_rate = 'invscaling'
        "loss": hp.choice(
            "SGD_loss",
            [
                "squared_loss",
                "huber",
                "epsilon_insensitive",
                "squared_epsilon_insensitive",
            ],
        ),
        "penalty": hp.choice("SGD_penalty", ["l1", "l2", "elasticnet"]),
        "alpha": hp.loguniform("SGD_alpha", np.log(1e-7), np.log(1e-1)),
        "fit_intercept": hp.choice("SGD_fit_intercept", [True]),
        "tol": hp.loguniform("SGD_tol", np.log(1e-5), np.log(1e-1)),
        "learning_rate": hp.choice(
            "SGD_learning_rate", ["constant", "optimal", "invscaling"]
        ),
        "l1_ratio": hp.loguniform("SGD_l1_ratio", np.log(1e-9), np.log(1.0)),
        "epsilon": hp.loguniform("SGD_epsilon", np.log(1e-5), np.log(1e-1)),
        "power_t": hp.uniform("SGD_power_t", 1e-5, 1),
        "average": hp.choice("SGD_average", [True, False]),
    },
    # self-defined models
    {
        "model": "MLP_Regressor",
        "hidden_layer": scope.int(hp.quniform("MLP_Regressor_hidden_layer", 1, 5, 1)),
        "hidden_size": scope.int(hp.quniform("MLP_Regressor_hidden_size", 1, 20, 1)),
        "activation": hp.choice(
            "MLP_Regressor_activation", ["ReLU", "Tanh", "Sigmoid"]
        ),
        "learning_rate": hp.uniform("MLP_Regressor_learning_rate", 1e-5, 1),
        "optimizer": hp.choice("MLP_Regressor_optimizer", ["Adam", "SGD"]),
        "criteria": hp.choice("MLP_Regressor_criteria", ["MSE", "MAE"]),
        "batch_size": hp.choice("MLP_Regressor_batch_size", [16, 32, 64]),
        "num_epochs": scope.int(hp.quniform("MLP_Regressor_num_epochs", 5, 50, 1)),
    },
]
|
PanyiDong/AutoML | My_AutoML/_hyperparameters/_ray/_balancing_hyperparameter.py | <gh_stars>1-10
"""
File: _balancing_hyperparameter.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/_ray/_balancing_hyperparameter.py
File Created: Wednesday, 6th April 2022 10:06:01 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:23:34 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from ray import tune
# balancing
# if the imbalance threshold small, TomekLink will take too long
# Search space for class-balancing methods (ray.tune format).
# Each dict names one balancing method plus its tunable parameters.
# NOTE(review): the keys are numbered ("balancing_1" ... "balancing_11"),
# presumably so each method entry stays a distinct key in the combined
# tune search space — confirm against the search-space builder.
balancing_hyperparameter = [
    # no-op option: leave class distribution untouched
    {"balancing_1": "no_processing"},
    {
        "balancing_2": "SimpleRandomOverSampling",
        # imbalance_threshold in [0.8, 1): ratio above which resampling triggers
        "SimpleRandomOverSampling_imbalance_threshold": tune.uniform(0.8, 1),
    },
    {
        "balancing_3": "SimpleRandomUnderSampling",
        "SimpleRandomUnderSampling_imbalance_threshold": tune.uniform(0.8, 1),
    },
    {
        "balancing_4": "TomekLink",
        # threshold kept high: small thresholds make TomekLink very slow (see note above)
        "TomekLink_imbalance_threshold": tune.uniform(0.8, 1),
    },
    {
        "balancing_5": "EditedNearestNeighbor",
        "EditedNearestNeighbor_imbalance_threshold": tune.uniform(0.8, 1),
        # k: neighborhood size for the ENN edit rule
        "EditedNearestNeighbor_k": tune.qrandint(1, 7, 1),
    },
    {
        "balancing_6": "CondensedNearestNeighbor",
        "CondensedNearestNeighbor_imbalance_threshold": tune.uniform(0.8, 1),
    },
    {
        "balancing_7": "OneSidedSelection",
        "OneSidedSelection_imbalance_threshold": tune.uniform(0.8, 1),
    },
    {
        "balancing_8": "CNN_TomekLink",
        "CNN_TomekLink_imbalance_threshold": tune.uniform(0.8, 1),
    },
    {
        "balancing_9": "Smote",
        "Smote_imbalance_threshold": tune.uniform(0.8, 1),
        # k: number of nearest neighbors used to synthesize SMOTE samples
        "Smote_k": tune.qrandint(1, 10, 1),
    },
    {
        "balancing_10": "Smote_TomekLink",
        "Smote_TomekLink_imbalance_threshold": tune.uniform(0.8, 1),
        "Smote_TomekLink_k": tune.qrandint(1, 10, 1),
    },
    {
        "balancing_11": "Smote_ENN",
        "Smote_ENN_imbalance_threshold": tune.uniform(0.8, 1),
        "Smote_ENN_k": tune.qrandint(1, 10, 1),
    },
]
|
PanyiDong/AutoML | My_AutoML/_feature_selection/__init__.py | """
File: __init__.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_feature_selection/__init__.py
File Created: Tuesday, 5th April 2022 11:32:54 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 29th April 2022 10:37:52 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from ._base import PCA_FeatureSelection, RBFSampler
# from ._imported import (
# Densifier,
# ExtraTreesPreprocessorClassification,
# ExtraTreesPreprocessorRegression,
# FastICA,
# FeatureAgglomeration,
# KernelPCA,
# RandomKitchenSinks,
# LibLinear_Preprocessor,
# Nystroem,
# PCA,
# PolynomialFeatures,
# RandomTreesEmbedding,
# SelectPercentileClassification,
# SelectPercentileRegression,
# SelectClassificationRates,
# SelectRegressionRates,
# TruncatedSVD,
# )
from ._advance import (
FeatureFilter,
ASFFS,
GeneticAlgorithm,
# ExhaustiveFS,
SFS,
mRMR,
CBFS,
)
from My_AutoML._base import no_processing
# Registry mapping feature-selection method names to their implementing
# classes. Always-available (package-local) methods are registered first;
# sklearn/autosklearn-backed preprocessors are added below depending on
# which backend is installed.
feature_selections = {
    "no_processing": no_processing,
    # "LDASelection": LDASelection,
    # "PCA_FeatureSelection": PCA_FeatureSelection,
    "RBFSampler": RBFSampler,
    "FeatureFilter": FeatureFilter,
    "ASFFS": ASFFS,
    "GeneticAlgorithm": GeneticAlgorithm,
    # "ExhaustiveFS": ExhaustiveFS, # exhaustive search is not practical, takes too long
    "SFS": SFS,
    "mRMR": mRMR,
    "CBFS": CBFS,
}

# importlib.util must be imported explicitly; `import importlib` alone does
# not guarantee the `util` submodule is loaded.
import importlib.util

# check if autosklearn is installed, if not, use sklearn replacement
autosklearn_spec = importlib.util.find_spec("autosklearn")
sklearn_spec = importlib.util.find_spec("sklearn")

# Both backends expose the same nine preprocessor names, so the import source
# is chosen here and the registration below is shared (previously duplicated
# verbatim in each branch).
if autosklearn_spec is not None:
    from ._autosklearn import (
        extra_trees_preproc_for_classification,
        extra_trees_preproc_for_regression,
        liblinear_svc_preprocessor,
        polynomial,
        select_percentile_classification,
        select_percentile_regression,
        select_rates_classification,
        select_rates_regression,
        truncatedSVD,
    )
# elif sklearn not installed, raise error
elif sklearn_spec is None:
    raise ImportError(
        "None of autosklearn or sklearn is installed. Please install at least one of them to use feature selection."
    )
else:
    from ._sklearn import (
        extra_trees_preproc_for_classification,
        extra_trees_preproc_for_regression,
        liblinear_svc_preprocessor,
        polynomial,
        select_percentile_classification,
        select_percentile_regression,
        select_rates_classification,
        select_rates_regression,
        truncatedSVD,
    )

# register the backend-provided preprocessors (identical names either way)
feature_selections.update(
    {
        "extra_trees_preproc_for_classification": extra_trees_preproc_for_classification,
        "extra_trees_preproc_for_regression": extra_trees_preproc_for_regression,
        "liblinear_svc_preprocessor": liblinear_svc_preprocessor,
        "polynomial": polynomial,
        "select_percentile_classification": select_percentile_classification,
        "select_percentile_regression": select_percentile_regression,
        "select_rates_classification": select_rates_classification,
        "select_rates_regression": select_rates_regression,
        "truncatedSVD": truncatedSVD,
    }
)
|
PanyiDong/AutoML | My_AutoML/_utils/_base.py | <gh_stars>1-10
"""
File: _base.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_utils/_base.py
File Created: Wednesday, 6th April 2022 12:01:20 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Thursday, 28th April 2022 7:08:05 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
import numpy as np
import pandas as pd
from dateutil.parser import parse
# set response to [0, 1] class, random guess at 0.5
def random_guess(number, seed=1):
    """Map a probability-like value in [0, 1] to a {0, 1} class label.

    Values above 0.5 map to 1, below 0.5 to 0; exactly 0.5 is resolved by a
    random coin flip (seeded for reproducibility).

    Parameters
    ----------
    number : float
        Score to threshold at 0.5.
    seed : int or None, default 1
        If not None, seeds numpy's RNG so the 0.5 tie-break is deterministic.

    Returns
    -------
    int
        0 or 1.
    """
    # identity comparison with None, not `!= None` (fixed idiom)
    if seed is not None:
        np.random.seed(seed)
    if number > 0.5:
        return 1
    elif number < 0.5:
        return 0
    else:
        # exact tie: random guess between the two classes
        return np.random.randint(0, 2)
# Return random index of a list (unique values only)
# from total draw n, default total = n
def random_index(n, total=None, seed=1):
    """Draw ``n`` distinct indices from ``range(total)`` in random order.

    Parameters
    ----------
    n : int
        Number of indices to draw.
    total : int or None, default None
        Size of the index pool; defaults to ``n`` (a full permutation).
    seed : int or None, default 1
        If not None, seeds numpy's RNG for reproducibility.

    Returns
    -------
    list of int
        ``n`` unique indices, each in ``[0, total)``.
    """
    if seed is not None:
        np.random.seed(seed)
    total = n if total is None else total

    pool = list(range(total))
    chosen = []
    while len(chosen) < n:
        # pick a position in the remaining pool, then remove it so every
        # drawn index is unique
        pick = np.random.randint(0, high=len(pool), size=1)[0]
        chosen.append(pool.pop(pick))
    return chosen
# Return randomly shuffle of a list (unique values only)
def random_list(vlist, seed=1):
    """Return the elements of ``vlist`` in a random (seeded) order.

    Parameters
    ----------
    vlist : list
        Values to shuffle. The input list is NOT modified (the original
        implementation emptied the caller's list via ``pop`` — fixed).
    seed : int or None, default 1
        If not None, seeds numpy's RNG for reproducibility.

    Returns
    -------
    list
        A new list containing every element of ``vlist`` exactly once.
    """
    # identity comparison with None, not `!= None` (fixed idiom)
    if seed is not None:
        np.random.seed(seed)

    # work on a copy so the caller's list is left intact
    remaining = list(vlist)
    output = []
    while remaining:
        index = np.random.randint(0, high=len(remaining), size=1)[0]
        output.append(remaining.pop(index))
    return output
# check if values in the dataframe is time string
# rule = 'any' will consider the column as date type as long as one value is date type,
# rule = 'all' will consider the column as date type only when all values are date type.
def is_date(df, rule="any"):
def _is_date(string, fuzzy=False):
try:
parse(string, fuzzy=fuzzy)
return True
except ValueError:
return False
_check = []
for item in df.values:
_check.append(_is_date(str(item[0])))
if rule == "any":
return any(_check)
elif rule == "all":
return all(_check)
# Round data for categorical features (in case after preprocessing/modification, the data changed)
def feature_rounding(X, uni_class=20):
    """Round low-cardinality columns back to whole numbers.

    After preprocessing, categorical-looking columns can pick up fractional
    values; any column with at most ``uni_class`` unique non-NaN values is
    rounded to the nearest integer.

    Parameters
    ----------
    X : pandas.DataFrame
        Input data; not modified (a deep copy is returned).
    uni_class : int, default 20
        Maximum number of unique values for a column to be treated as
        categorical and rounded.

    Returns
    -------
    pandas.DataFrame
        Copy of ``X`` with qualifying columns rounded.
    """
    rounded = X.copy(deep=True)
    for col in rounded.columns:
        levels = rounded[col].dropna().unique()
        if len(levels) <= uni_class:
            rounded[col] = np.round(rounded[col])
    return rounded
# Return location of minimum values
def minloc(vlist):
    """Return the index of the first minimum value in ``vlist``.

    Parameters
    ----------
    vlist : sequence
        Non-empty sequence of comparable values.

    Returns
    -------
    int
        Index of the first occurrence of the smallest value.

    Raises
    ------
    ValueError
        If ``vlist`` is empty.
    """
    if len(vlist) == 0:
        raise ValueError("Invalid List!")
    # min over indices keyed by value returns the FIRST minimum, matching
    # the original strict `<` comparison loop
    return min(range(len(vlist)), key=vlist.__getitem__)
# Return location of maximum values
def maxloc(vlist):
    """Return the index of the first maximum value in ``vlist``.

    Parameters
    ----------
    vlist : sequence
        Non-empty sequence of comparable values.

    Returns
    -------
    int
        Index of the first occurrence of the largest value.

    Raises
    ------
    ValueError
        If ``vlist`` is empty.
    """
    if len(vlist) == 0:
        raise ValueError("Invalid List!")
    # max over indices keyed by value returns the FIRST maximum, matching
    # the original strict `>` comparison loop
    return max(range(len(vlist)), key=vlist.__getitem__)
# return the index of Boolean list or {0, 1} list
# default 1 consider as True
def True_index(X, _true=(True, 1)):
    """Return the indices of entries that count as True.

    Parameters
    ----------
    X : sequence
        Boolean or {0, 1} values.
    _true : sequence, default (True, 1)
        Values treated as True. (Default changed from a mutable list to a
        tuple — behaviorally identical, avoids the shared-default pitfall.)

    Returns
    -------
    list of int
        Positions whose value is in ``_true``.
    """
    return [index for index, value in enumerate(X) if value in _true]
# determine the task types
def type_of_task(y):
    """Classify the ML task type implied by a target vector.

    Parameters
    ----------
    y : numpy.ndarray or pandas.DataFrame
        Target values.

    Returns
    -------
    str
        "continuous" or "integer" for regression-style targets,
        "multiclass" or "binary" for classification-style targets.
    """
    values = y.values if isinstance(y, pd.DataFrame) else y

    # floats with a fractional part -> regression
    if values.dtype.kind == "f" and np.any(values != values.astype(int)):
        return "continuous"
    # many distinct integers (>= half the sample count) -> regression
    if values.dtype.kind in ("i", "u") and len(np.unique(values)) >= 0.5 * len(values):
        return "integer"
    # more than two classes, or multi-column targets -> multiclass
    if (len(np.unique(values)) > 2) or (values.ndim >= 2 and len(values[0]) > 1):
        return "multiclass"
    # exactly two classes -> binary classification
    return "binary"
# define a Timer to record efficiency
# enable multiple running times for comparison
class Timer:
    """Accumulating stopwatch for comparing run times across repetitions.

    Each ``start``/``stop`` pair appends one elapsed interval (seconds) to
    ``self.times``; summary methods aggregate the recorded intervals.
    """

    def __init__(self):
        # recorded intervals in seconds; timing begins immediately
        self.times = []
        self.start()

    def start(self):
        """Mark the beginning of a new interval."""
        self.tik = time.time()

    def stop(self):
        """Record the interval since the last start() and return it."""
        elapsed = time.time() - self.tik
        self.times.append(elapsed)
        return elapsed

    def avg(self):
        """Mean of all recorded intervals."""
        return sum(self.times) / len(self.times)

    def sum(self):
        """Total of all recorded intervals."""
        return sum(self.times)

    def cumsum(self):
        """Running cumulative totals of the recorded intervals, as a list."""
        return np.array(self.times).cumsum().tolist()
# determine whether using a python terminal environment or
# a jupyter notebook environment
def type_of_script():
    """Detect whether code runs in Jupyter, IPython terminal, or plain Python.

    Returns
    -------
    str
        "jupyter" for a notebook kernel (zmq shell), "ipython" for an
        IPython terminal, "terminal" otherwise. (The original fell through
        with an implicit None for unrecognized shells and used a bare
        ``except:`` — both fixed.)
    """
    try:
        # get_ipython is injected into builtins only inside IPython/Jupyter;
        # in a plain interpreter this raises NameError
        ipy_str = str(type(get_ipython()))
    except NameError:
        return "terminal"
    if "zmqshell" in ipy_str:
        return "jupyter"
    if "terminal" in ipy_str:
        return "ipython"
    # unrecognized IPython-like shell: treat as a plain terminal
    return "terminal"
# determine whether a method exists in a class
def has_method(obj, name):
    """Return True if ``obj`` exposes a callable attribute named ``name``."""
    attr = getattr(obj, name, None)
    return callable(attr)
# check if is None
def is_none(item, pat=(None, "None", "none", "NONE")):
    """Return True if ``item`` is one of the recognized 'none' sentinels.

    Parameters
    ----------
    item : object
        Value to test.
    pat : sequence, default (None, "None", "none", "NONE")
        Sentinel values treated as none. (Default changed from a mutable
        list to a tuple — behaviorally identical, avoids the shared-default
        pitfall.)

    Returns
    -------
    bool
    """
    # direct membership result instead of the original if/else on the
    # same expression
    return item in pat
|
PanyiDong/AutoML | My_AutoML/_hpo/_legacy.py | <gh_stars>1-10
"""
File: _legacy.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hpo/_legacy.py
File Created: Thursday, 7th April 2022 4:00:35 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:22:03 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import ast
import shutil
import warnings
import numpy as np
import pandas as pd
import scipy
import scipy.stats
import mlflow
from hyperopt import (
fmin,
hp,
rand,
tpe,
atpe,
Trials,
SparkTrials,
space_eval,
STATUS_OK,
pyll,
)
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from My_AutoML._encoding._legacy import encoders
from My_AutoML._imputation._legacy import imputers
from My_AutoML._balancing._legacy import balancings
from My_AutoML._scaling._legacy import scalings
from My_AutoML._feature_selection._legacy import feature_selections
from My_AutoML._model._legacy import (
classifiers,
regressors,
)
from My_AutoML._hyperparameters._hyperopt import (
encoder_hyperparameter,
imputer_hyperparameter,
scaling_hyperparameter,
balancing_hyperparameter,
feature_selection_hyperparameter,
classifier_hyperparameter,
regressor_hyperparameter,
)
from My_AutoML._base import no_processing
from My_AutoML._utils._base import type_of_task
from My_AutoML._utils._file import save_model
# filter certain warnings
warnings.filterwarnings("ignore", message="The dataset is balanced, no change.")
warnings.filterwarnings("ignore", message="Variables are collinear")
warnings.filterwarnings("ignore", category=UserWarning)
"""
Classifiers/Hyperparameters from autosklearn:
1. AdaBoost: n_estimators, learning_rate, algorithm, max_depth
2. Bernoulli naive Bayes: alpha, fit_prior
3. Decision Tree: criterion, max_features, max_depth_factor, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf, max_leaf_nodes, min_impurity_decrease
4. Extra Trees: criterion, min_samples_leaf, min_samples_split, max_features,
bootstrap, max_leaf_nodes, max_depth, min_weight_fraction_leaf, min_impurity_decrease
5. Gaussian naive Bayes
6. Gradient boosting: loss, learning_rate, min_samples_leaf, max_depth,
max_leaf_nodes, max_bins, l2_regularization, early_stop, tol, scoring
7. KNN: n_neighbors, weights, p
8. LDA: shrinkage, tol
9. Linear SVC (LibLinear): penalty, loss, dual, tol, C, multi_class,
fit_intercept, intercept_scaling
10. kernel SVC (LibSVM): C, kernel, gamma, shrinking, tol, max_iter
11. MLP (Multilayer Perceptron): hidden_layer_depth, num_nodes_per_layer, activation, alpha,
learning_rate_init, early_stopping, solver, batch_size, n_iter_no_change, tol,
shuffle, beta_1, beta_2, epsilon
12. Multinomial naive Bayes: alpha, fit_prior
13. Passive aggressive: C, fit_intercept, tol, loss, average
14. QDA: reg_param
15. Random forest: criterion, max_features, max_depth, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf, bootstrap, max_leaf_nodes
16. SGD (Stochastic Gradient Descent): loss, penalty, alpha, fit_intercept, tol,
learning_rate
"""
# Auto binary classifier
class AutoTabularClassifier:
"""
Perform model selection and hyperparameter optimization for classification tasks
using sklearn models, predefine hyperparameters
Parameters
----------
timeout: Total time limit for the job in seconds, default = 360
max_evals: Maximum number of function evaluations allowed, default = 32
temp_directory: folder path to store temporary model, default = 'tmp'
delete_temp_after_terminate: whether to delete temporary information, default = False
save: whether to save model after training, default = True
model_name: saved model name, default = 'model'
ignore_warning: whether to ignore warning, default = True
encoder: Encoders selected for the job, default = 'auto'
support ('DataEncoding')
'auto' will select all default encoders, or use a list to select
imputer: Imputers selected for the job, default = 'auto'
support ('SimpleImputer', 'JointImputer', 'ExpectationMaximization', 'KNNImputer',
'MissForestImputer', 'MICE', 'GAIN')
'auto' will select all default imputers, or use a list to select
balancing: Balancings selected for the job, default = 'auto'
support ('no_processing', 'SimpleRandomOverSampling', 'SimpleRandomUnderSampling',
'TomekLink', 'EditedNearestNeighbor', 'CondensedNearestNeighbor', 'OneSidedSelection',
'CNN_TomekLink', 'Smote', 'Smote_TomekLink', 'Smote_ENN')
'auto' will select all default balancings, or use a list to select
scaling: Scalings selected for the job, default = 'auto'
support ('no_processing', 'MinMaxScale', 'Standardize', 'Normalize', 'RobustScale',
'PowerTransformer', 'QuantileTransformer', 'Winsorization')
'auto' will select all default scalings, or use a list to select
feature_selection: Feature selections selected for the job, default = 'auto'
support ('no_processing', 'LDASelection', 'PCA_FeatureSelection', 'RBFSampler',
'FeatureFilter', 'ASFFS', 'GeneticAlgorithm', 'extra_trees_preproc_for_classification',
'fast_ica', 'feature_agglomeration', 'kernel_pca', 'kitchen_sinks',
'liblinear_svc_preprocessor', 'nystroem_sampler', 'pca', 'polynomial',
'random_trees_embedding', 'select_percentile_classification','select_rates_classification',
'truncatedSVD')
'auto' will select all default feature selections, or use a list to select
models: Models selected for the job, default = 'auto'
support ('AdaboostClassifier', 'BernoulliNB', 'DecisionTree', 'ExtraTreesClassifier',
'GaussianNB', 'GradientBoostingClassifier', 'KNearestNeighborsClassifier',
'LDA', 'LibLinear_SVC', 'LibSVM_SVC', 'MLPClassifier', 'MultinomialNB',
'PassiveAggressive', 'QDA', 'RandomForest', 'SGD')
'auto' will select all default models, or use a list to select
validation: Whether to use train_test_split to test performance on test set, default = True
valid_size: Test percentage used to evaluate the performance, default = 0.15
only effective when validation = True
objective: Objective function to test performance, default = 'accuracy'
support ("accuracy", "precision", "auc", "hinge", "f1")
method: Model selection/hyperparameter optimization methods, default = 'Bayesian'
algo: Search algorithm, default = 'tpe'
support (rand, tpe, atpe)
spark_trials: Whether to use SparkTrials, default = False
progressbar: Whether to show progress bar, default = False
seed: Random seed, default = 1
"""
    def __init__(
        self,
        timeout=360,
        max_evals=64,
        temp_directory="tmp",
        delete_temp_after_terminate=False,
        save=True,
        model_name="model",
        ignore_warning=True,
        encoder="auto",
        imputer="auto",
        balancing="auto",
        scaling="auto",
        feature_selection="auto",
        models="auto",
        validation=True,
        valid_size=0.15,
        objective="accuracy",
        method="Bayesian",
        algo="tpe",
        spark_trials=False,
        progressbar=True,
        seed=1,
    ):
        """Store the search configuration; see the class docstring for the
        meaning of each parameter. No validation or work happens here —
        presumably the pipeline/search is built later (e.g. at fit time);
        confirm against the rest of the class.
        """
        # time/evaluation budget for the hyperparameter search
        self.timeout = timeout
        self.max_evals = max_evals
        # where intermediate models are written, and whether to clean up
        self.temp_directory = temp_directory
        self.delete_temp_after_terminate = delete_temp_after_terminate
        # persistence of the final model
        self.save = save
        self.model_name = model_name
        self.ignore_warning = ignore_warning
        # pipeline-component selections ('auto' or explicit lists)
        self.encoder = encoder
        self.imputer = imputer
        self.balancing = balancing
        self.scaling = scaling
        self.feature_selection = feature_selection
        self.models = models
        # evaluation settings: holdout split and objective metric
        self.validation = validation
        self.valid_size = valid_size
        self.objective = objective
        # hyperopt search configuration
        self.method = method
        self.algo = algo
        self.spark_trials = spark_trials
        self.progressbar = progressbar
        self.seed = seed

        self._iter = 0  # record iteration number
# create hyperparameter space using Hyperopt.hp.choice
# the pipeline of AutoClassifier is [encoder, imputer, scaling, balancing, feature_selection, model]
# only chosen ones will be added to hyperparameter space
def _get_hyperparameter_space(
self,
X,
encoders_hyperparameters,
encoder,
imputers_hyperparameters,
imputer,
balancings_hyperparameters,
balancing,
scalings_hyperparameters,
scaling,
feature_selection_hyperparameters,
feature_selection,
models_hyperparameters,
models,
):
# encoding space
_encoding_hyperparameter = []
for _encoder in [*encoder]:
for (
item
) in encoders_hyperparameters: # search the encoders' hyperparameters
if item["encoder"] == _encoder:
_encoding_hyperparameter.append(item)
_encoding_hyperparameter = hp.choice(
"classification_encoders", _encoding_hyperparameter
)
# imputation space
_imputer_hyperparameter = []
if not X.isnull().values.any(): # if no missing, no need for imputation
_imputer_hyperparameter = hp.choice(
"classification_imputers", [{"imputer": "no_processing"}]
)
else:
for _imputer in [*imputer]:
for (
item
) in imputers_hyperparameters: # search the imputer' hyperparameters
if item["imputer"] == _imputer:
_imputer_hyperparameter.append(item)
_imputer_hyperparameter = hp.choice(
"classification_imputers", _imputer_hyperparameter
)
# balancing space
_balancing_hyperparameter = []
for _balancing in [*balancing]:
for (
item
) in balancings_hyperparameters: # search the balancings' hyperparameters
if item["balancing"] == _balancing:
_balancing_hyperparameter.append(item)
_balancing_hyperparameter = hp.choice(
"classification_balancing", _balancing_hyperparameter
)
# scaling space
_scaling_hyperparameter = []
for _scaling in [*scaling]:
for (
item
) in scalings_hyperparameters: # search the scalings' hyperparameters
if item["scaling"] == _scaling:
_scaling_hyperparameter.append(item)
_scaling_hyperparameter = hp.choice(
"classification_scaling", _scaling_hyperparameter
)
# feature selection space
_feature_selection_hyperparameter = []
for _feature_selection in [*feature_selection]:
for (
item
) in (
feature_selection_hyperparameters
): # search the feature selections' hyperparameters
if item["feature_selection"] == _feature_selection:
_feature_selection_hyperparameter.append(item)
_feature_selection_hyperparameter = hp.choice(
"classification_feature_selection", _feature_selection_hyperparameter
)
# model selection and hyperparameter optimization space
_model_hyperparameter = []
for _model in [*models]:
# checked before at models that all models are in default space
for item in models_hyperparameters: # search the models' hyperparameters
if item["model"] == _model:
_model_hyperparameter.append(item)
_model_hyperparameter = hp.choice(
"classification_models", _model_hyperparameter
)
# the pipeline search space
return pyll.as_apply(
{
"encoder": _encoding_hyperparameter,
"imputer": _imputer_hyperparameter,
"balancing": _balancing_hyperparameter,
"scaling": _scaling_hyperparameter,
"feature_selection": _feature_selection_hyperparameter,
"classification": _model_hyperparameter,
}
)
# initialize and get hyperparameter search space
def get_hyperparameter_space(self, X, y):
# initialize default search options
# use copy to allows multiple manipulation
# all encoders available
self._all_encoders = encoders.copy()
# all hyperparameters for encoders
self._all_encoders_hyperparameters = encoder_hyperparameter.copy()
# all imputers available
self._all_imputers = imputers.copy()
# all hyperparemeters for imputers
self._all_imputers_hyperparameters = imputer_hyperparameter.copy()
# all scalings available
self._all_scalings = scalings.copy()
# all balancings available
self._all_balancings = balancings.copy()
# all hyperparameters for balancing methods
self._all_balancings_hyperparameters = balancing_hyperparameter.copy()
# all hyperparameters for scalings
self._all_scalings_hyperparameters = scaling_hyperparameter.copy()
# all feature selections available
self._all_feature_selection = feature_selections.copy()
# special treatment, remove some feature selection for regression
del self._all_feature_selection["extra_trees_preproc_for_regression"]
del self._all_feature_selection["select_percentile_regression"]
del self._all_feature_selection["select_rates_regression"]
if X.shape[0] * X.shape[1] > 10000:
del self._all_feature_selection["liblinear_svc_preprocessor"]
# all hyperparameters for feature selections
self._all_feature_selection_hyperparameters = (
feature_selection_hyperparameter.copy()
)
# all classification models available
self._all_models = classifiers.copy()
# special treatment, remove SVM methods when observations are large
# SVM suffers from the complexity o(n_samples^2 * n_features),
# which is time-consuming for large datasets
if X.shape[0] * X.shape[1] > 10000:
del self._all_models["LibLinear_SVC"]
del self._all_models["LibSVM_SVC"]
# all hyperparameters for the classification models
self._all_models_hyperparameters = classifier_hyperparameter.copy()
self.hyperparameter_space = None
# Encoding
# convert string types to numerical type
# get encoder space
if self.encoder == "auto":
encoder = self._all_encoders.copy()
else:
encoder = {} # if specified, check if encoders in default encoders
for _encoder in self.encoder:
if _encoder not in [*self._all_encoders]:
raise ValueError(
"Only supported encoders are {}, get {}.".format(
[*self._all_encoders], _encoder
)
)
encoder[_encoder] = self._all_encoders[_encoder]
# Imputer
# fill missing values
# get imputer space
if self.imputer == "auto":
if not X.isnull().values.any(): # if no missing values
imputer = {"no_processing": no_processing}
self._all_imputers = imputer # limit default imputer space
else:
imputer = self._all_imputers.copy()
else:
if not X.isnull().values.any(): # if no missing values
imputer = {"no_processing": no_processing}
self._all_imputers = imputer
else:
imputer = {} # if specified, check if imputers in default imputers
for _imputer in self.imputer:
if _imputer not in [*self._all_imputers]:
raise ValueError(
"Only supported imputers are {}, get {}.".format(
[*self._all_imputers], _imputer
)
)
imputer[_imputer] = self._all_imputers[_imputer]
# Balancing
# deal with imbalanced dataset, using over-/under-sampling methods
# get balancing space
if self.balancing == "auto":
balancing = self._all_balancings.copy()
else:
balancing = {} # if specified, check if balancings in default balancings
for _balancing in self.balancing:
if _balancing not in [*self._all_balancings]:
raise ValueError(
"Only supported balancings are {}, get {}.".format(
[*self._all_balancings], _balancing
)
)
balancing[_balancing] = self._all_balancings[_balancing]
# Scaling
# get scaling space
if self.scaling == "auto":
scaling = self._all_scalings.copy()
else:
scaling = {} # if specified, check if scalings in default scalings
for _scaling in self.scaling:
if _scaling not in [*self._all_scalings]:
raise ValueError(
"Only supported scalings are {}, get {}.".format(
[*self._all_scalings], _scaling
)
)
scaling[_scaling] = self._all_scalings[_scaling]
# Feature selection
# Remove redundant features, reduce dimensionality
# get feature selection space
if self.feature_selection == "auto":
feature_selection = self._all_feature_selection.copy()
else:
feature_selection = (
{}
) # if specified, check if balancings in default balancings
for _feature_selection in self.feature_selection:
if _feature_selection not in [*self._all_feature_selection]:
raise ValueError(
"Only supported feature selections are {}, get {}.".format(
[*self._all_feature_selection], _feature_selection
)
)
feature_selection[_feature_selection] = self._all_feature_selection[
_feature_selection
]
# Model selection/Hyperparameter optimization
# using Bayesian Optimization
# model space, only select chosen models to space
if self.models == "auto": # if auto, model pool will be all default models
models = self._all_models.copy()
else:
models = {} # if specified, check if models in default models
for _model in self.models:
if _model not in [*self._all_models]:
raise ValueError(
"Only supported models are {}, get {}.".format(
[*self._all_models], _model
)
)
models[_model] = self._all_models[_model]
# initialize the hyperparameter space
_all_encoders_hyperparameters = self._all_encoders_hyperparameters.copy()
_all_imputers_hyperparameters = self._all_imputers_hyperparameters.copy()
_all_balancings_hyperparameters = self._all_balancings_hyperparameters.copy()
_all_scalings_hyperparameters = self._all_scalings_hyperparameters.copy()
_all_feature_selection_hyperparameters = (
self._all_feature_selection_hyperparameters.copy()
)
_all_models_hyperparameters = self._all_models_hyperparameters.copy()
# generate the hyperparameter space
if self.hyperparameter_space is None:
self.hyperparameter_space = self._get_hyperparameter_space(
X,
_all_encoders_hyperparameters,
encoder,
_all_imputers_hyperparameters,
imputer,
_all_balancings_hyperparameters,
balancing,
_all_scalings_hyperparameters,
scaling,
_all_feature_selection_hyperparameters,
feature_selection,
_all_models_hyperparameters,
models,
) # _X to choose whether include imputer
# others are the combinations of default hyperparameter space & methods selected
return encoder, imputer, balancing, scaling, feature_selection, models
# select optimal settings and fit on optimal hyperparameters
def _fit_optimal(self, best_results, _X, _y):
# mapping the optimal model and hyperparameters selected
# fit the optimal setting
optimal_point = space_eval(self.hyperparameter_space, best_results)
# optimal encoder
self.optimal_encoder_hyperparameters = optimal_point["encoder"]
self.optimal_encoder = self.optimal_encoder_hyperparameters["encoder"]
del self.optimal_encoder_hyperparameters["encoder"]
# optimal imputer
self.optimal_imputer_hyperparameters = optimal_point["imputer"]
self.optimal_imputer = self.optimal_imputer_hyperparameters["imputer"]
del self.optimal_imputer_hyperparameters["imputer"]
# optimal balancing
self.optimal_balancing_hyperparameters = optimal_point["balancing"]
self.optimal_balancing = self.optimal_balancing_hyperparameters["balancing"]
del self.optimal_balancing_hyperparameters["balancing"]
# optimal scaling
self.optimal_scaling_hyperparameters = optimal_point["scaling"]
self.optimal_scaling = self.optimal_scaling_hyperparameters["scaling"]
del self.optimal_scaling_hyperparameters["scaling"]
# optimal feature selection
self.optimal_feature_selection_hyperparameters = optimal_point[
"feature_selection"
]
self.optimal_feature_selection = self.optimal_feature_selection_hyperparameters[
"feature_selection"
]
del self.optimal_feature_selection_hyperparameters["feature_selection"]
# optimal classifier
self.optimal_classifier_hyperparameters = optimal_point[
"classification"
] # optimal model selected
self.optimal_classifier = self.optimal_classifier_hyperparameters[
"model"
] # optimal hyperparameter settings selected
del self.optimal_classifier_hyperparameters["model"]
# record optimal settings
with open(self.temp_directory + "/optimal_setting.txt", "w") as f:
f.write("Optimal encoding method is: {}\n".format(self.optimal_encoder))
f.write("Optimal encoding hyperparameters:")
print(self.optimal_encoder_hyperparameters, file=f, end="\n\n")
f.write("Optimal imputation method is: {}\n".format(self.optimal_imputer))
f.write("Optimal imputation hyperparameters:")
print(self.optimal_imputer_hyperparameters, file=f, end="\n\n")
f.write("Optimal balancing method is: {}\n".format(self.optimal_balancing))
f.write("Optimal balancing hyperparamters:")
print(self.optimal_balancing_hyperparameters, file=f, end="\n\n")
f.write("Optimal scaling method is: {}\n".format(self.optimal_scaling))
f.write("Optimal scaling hyperparameters:")
print(self.optimal_scaling_hyperparameters, file=f, end="\n\n")
f.write(
"Optimal feature selection method is: {}\n".format(
self.optimal_feature_selection
)
)
f.write("Optimal feature selection hyperparameters:")
print(self.optimal_feature_selection_hyperparameters, file=f, end="\n\n")
f.write(
"Optimal classification model is: {}\n".format(self.optimal_classifier)
)
f.write("Optimal classification hyperparameters:")
print(self.optimal_classifier_hyperparameters, file=f, end="\n\n")
# encoding
self._fit_encoder = self._all_encoders[self.optimal_encoder](
**self.optimal_encoder_hyperparameters
)
_X = self._fit_encoder.fit(_X)
# imputer
self._fit_imputer = self._all_imputers[self.optimal_imputer](
**self.optimal_imputer_hyperparameters
)
_X = self._fit_imputer.fill(_X)
# balancing
self._fit_balancing = self._all_balancings[self.optimal_balancing](
**self.optimal_balancing_hyperparameters
)
_X, _y = self._fit_balancing.fit_transform(_X, _y)
# make sure the classes are integers (belongs to certain classes)
_y = _y.astype(int)
_y = _y.astype(int)
# scaling
self._fit_scaling = self._all_scalings[self.optimal_scaling](
**self.optimal_scaling_hyperparameters
)
self._fit_scaling.fit(_X, _y)
_X = self._fit_scaling.transform(_X)
# feature selection
self._fit_feature_selection = self._all_feature_selection[
self.optimal_feature_selection
](**self.optimal_feature_selection_hyperparameters)
self._fit_feature_selection.fit(_X, _y)
_X = self._fit_feature_selection.transform(_X)
# classification
self._fit_classifier = self._all_models[self.optimal_classifier](
**self.optimal_classifier_hyperparameters
)
self._fit_classifier.fit(_X, _y.values.ravel())
# save the model
if self.save:
save_model(
self.optimal_encoder,
self.optimal_encoder_hyperparameters,
self.optimal_imputer,
self.optimal_imputer_hyperparameters,
self.optimal_balancing,
self.optimal_balancing_hyperparameters,
self.optimal_scaling,
self.optimal_scaling_hyperparameters,
self.optimal_feature_selection,
self.optimal_feature_selection_hyperparameters,
self.optimal_classifier,
self.optimal_classifier_hyperparameters,
self.model_name,
)
return self
def load_model(self, _X, _y):
with open(self.model_name) as f:
optimal_setting = f.readlines()
# remove change line signs
optimal_setting = [item.replace("\n", "") for item in optimal_setting]
# remove blank spaces
while "" in optimal_setting:
optimal_setting.remove("")
self.optimal_encoder = optimal_setting[0]
self.optimal_encoder_hyperparameters = ast.literal_eval(optimal_setting[1])
self.optimal_imputer = optimal_setting[2]
self.optimal_imputer_hyperparameters = ast.literal_eval(optimal_setting[3])
self.optimal_balancing = optimal_setting[4]
self.optimal_balancing_hyperparameters = ast.literal_eval(optimal_setting[5])
self.optimal_scaling = optimal_setting[6]
self.optimal_scaling_hyperparameters = ast.literal_eval(optimal_setting[7])
self.optimal_feature_selection = optimal_setting[8]
self.optimal_feature_selection_hyperparameters = ast.literal_eval(
optimal_setting[9]
)
self.optimal_classifier = optimal_setting[10]
self.optimal_classifier_hyperparameters = ast.literal_eval(optimal_setting[11])
# encoding
self._fit_encoder = self._all_encoders[self.optimal_encoder](
**self.optimal_encoder_hyperparameters
)
_X = self._fit_encoder.fit(_X)
# imputer
self._fit_imputer = self._all_imputers[self.optimal_imputer](
**self.optimal_imputer_hyperparameters
)
_X = self._fit_imputer.fill(_X)
# balancing
self._fit_balancing = self._all_balancings[self.optimal_balancing](
**self.optimal_balancing_hyperparameters
)
_X, _y = self._fit_balancing.fit_transform(_X, _y)
# make sure the classes are integers (belongs to certain classes)
_y = _y.astype(int)
_y = _y.astype(int)
# scaling
self._fit_scaling = self._all_scalings[self.optimal_scaling](
**self.optimal_scaling_hyperparameters
)
self._fit_scaling.fit(_X, _y)
_X = self._fit_scaling.transform(_X)
# feature selection
self._fit_feature_selection = self._all_feature_selection[
self.optimal_feature_selection
](**self.optimal_feature_selection_hyperparameters)
self._fit_feature_selection.fit(_X, _y)
_X = self._fit_feature_selection.transform(_X)
# classification
self._fit_classifier = self._all_models[self.optimal_classifier](
**self.optimal_classifier_hyperparameters
)
self._fit_classifier.fit(_X, _y.values.ravel())
return self
def fit(self, X, y):
if self.ignore_warning: # ignore all warnings to generate clearer outputs
warnings.filterwarnings("ignore")
_X = X.copy()
_y = y.copy()
(
encoder,
imputer,
balancing,
scaling,
feature_selection,
models,
) = self.get_hyperparameter_space(_X, _y)
# if the model is already trained, read the setting
if os.path.exists(self.model_name):
print("Stored model found, load previous model.")
self.load_model(_X, _y)
return self
# initialize temp directory
# check if temp directory exists, if exists, empty it
if os.path.isdir(self.temp_directory):
shutil.rmtree(self.temp_directory)
os.makedirs(self.temp_directory)
# write basic information to init.txt
with open(self.temp_directory + "/init.txt", "w") as f:
f.write("Features of the dataset: {}\n".format(list(_X.columns)))
f.write(
"Shape of the design matrix: {} * {}\n".format(_X.shape[0], _X.shape[1])
)
f.write("Response of the dataset: {}\n".format(list(_y.columns)))
f.write(
"Shape of the response vector: {} * {}\n".format(
_y.shape[0], _y.shape[1]
)
)
f.write("Type of the task: Classification.\n")
if self.validation: # only perform train_test_split when validation
# train test split so the performance of model selection and
# hyperparameter optimization can be evaluated
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
_X, _y, test_size=self.valid_size, random_state=self.seed
)
# the objective function of Bayesian Optimization tries to minimize
# use accuracy score
@ignore_warnings(category=ConvergenceWarning)
def _objective(params):
# evaluation for predictions
if self.objective == "accuracy":
from sklearn.metrics import accuracy_score
_obj = accuracy_score
elif self.objective == "precision":
from sklearn.metrics import precision_score
_obj = precision_score
elif self.objective == "auc":
from sklearn.metrics import roc_auc_score
_obj = roc_auc_score
elif self.objective == "hinge":
from sklearn.metrics import hinge_loss
_obj = hinge_loss
elif self.objective == "f1":
from sklearn.metrics import f1_score
_obj = f1_score
else:
raise ValueError(
'Only support ["accuracy", "precision", "auc", "hinge", "f1"], get{}'.format(
self.objective
)
)
# pipeline of objective, [encoder, imputer, balancing, scaling, feature_selection, model]
# select encoder and set hyperparameters
# must have encoder
_encoder_hyper = params["encoder"]
_encoder = _encoder_hyper["encoder"]
del _encoder_hyper["encoder"]
enc = encoder[_encoder](**_encoder_hyper)
# select imputer and set hyperparameters
_imputer_hyper = params["imputer"]
_imputer = _imputer_hyper["imputer"]
del _imputer_hyper["imputer"]
imp = imputer[_imputer](**_imputer_hyper)
# select balancing and set hyperparameters
# must have balancing, since no_preprocessing is included
_balancing_hyper = params["balancing"]
_balancing = _balancing_hyper["balancing"]
del _balancing_hyper["balancing"]
blc = balancing[_balancing](**_balancing_hyper)
# select scaling and set hyperparameters
# must have scaling, since no_preprocessing is included
_scaling_hyper = params["scaling"]
_scaling = _scaling_hyper["scaling"]
del _scaling_hyper["scaling"]
scl = scaling[_scaling](**_scaling_hyper)
# select feature selection and set hyperparameters
# must have feature selection, since no_preprocessing is included
_feature_selection_hyper = params["feature_selection"]
_feature_selection = _feature_selection_hyper["feature_selection"]
del _feature_selection_hyper["feature_selection"]
fts = feature_selection[_feature_selection](**_feature_selection_hyper)
# select classifier model and set hyperparameters
# must have a classifier
_classifier_hyper = params["classification"]
_classifier = _classifier_hyper["model"]
del _classifier_hyper["model"]
clf = models[_classifier](
**_classifier_hyper
) # call the model using passed parameters
obj_tmp_directory = self.temp_directory + "/iter_" + str(self._iter + 1)
if not os.path.isdir(obj_tmp_directory):
os.makedirs(obj_tmp_directory)
with open(obj_tmp_directory + "/hyperparameter_settings.txt", "w") as f:
f.write("Encoding method: {}\n".format(_encoder))
f.write("Encoding Hyperparameters:")
print(_encoder_hyper, file=f, end="\n\n")
f.write("Imputation method: {}\n".format(_imputer))
f.write("Imputation Hyperparameters:")
print(_imputer_hyper, file=f, end="\n\n")
f.write("Balancing method: {}\n".format(_balancing))
f.write("Balancing Hyperparameters:")
print(_balancing_hyper, file=f, end="\n\n")
f.write("Scaling method: {}\n".format(_scaling))
f.write("Scaling Hyperparameters:")
print(_scaling_hyper, file=f, end="\n\n")
f.write("Feature Selection method: {}\n".format(_feature_selection))
f.write("Feature Selection Hyperparameters:")
print(_feature_selection_hyper, file=f, end="\n\n")
f.write("Classification model: {}\n".format(_classifier))
f.write("Classifier Hyperparameters:")
print(_classifier_hyper, file=f, end="\n\n")
if self.validation:
_X_train_obj, _X_test_obj = X_train.copy(), X_test.copy()
_y_train_obj, _y_test_obj = y_train.copy(), y_test.copy()
# encoding
_X_train_obj = enc.fit(_X_train_obj)
_X_test_obj = enc.refit(_X_test_obj)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Encoding finished, in imputation process.")
# imputer
_X_train_obj = imp.fill(_X_train_obj)
_X_test_obj = imp.fill(_X_test_obj)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Imputation finished, in scaling process.")
# balancing
_X_train_obj, _y_train_obj = blc.fit_transform(
_X_train_obj, _y_train_obj
)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Balancing finished, in scaling process.")
# make sure the classes are integers (belongs to certain classes)
_y_train_obj = _y_train_obj.astype(int)
_y_test_obj = _y_test_obj.astype(int)
# scaling
scl.fit(_X_train_obj, _y_train_obj)
_X_train_obj = scl.transform(_X_train_obj)
_X_test_obj = scl.transform(_X_test_obj)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Scaling finished, in feature selection process.")
# feature selection
fts.fit(_X_train_obj, _y_train_obj)
_X_train_obj = fts.transform(_X_train_obj)
_X_test_obj = fts.transform(_X_test_obj)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Feature selection finished, in classification model.")
if scipy.sparse.issparse(
_X_train_obj
): # check if returns sparse matrix
_X_train_obj = _X_train_obj.toarray()
if scipy.sparse.issparse(_X_test_obj):
_X_test_obj = _X_test_obj.toarray()
# classification
# store preprocessed train/test datasets
if isinstance(
_X_train_obj, np.ndarray
): # in case numpy array is returned
pd.concat(
[pd.DataFrame(_X_train_obj), _y_train_obj],
axis=1,
ignore_index=True,
).to_csv(obj_tmp_directory + "/train_preprocessed.csv", index=False)
elif isinstance(_X_train_obj, pd.DataFrame):
pd.concat([_X_train_obj, _y_train_obj], axis=1).to_csv(
obj_tmp_directory + "/train_preprocessed.csv", index=False
)
else:
raise TypeError("Only accept numpy array or pandas dataframe!")
if isinstance(_X_test_obj, np.ndarray):
pd.concat(
[pd.DataFrame(_X_test_obj), _y_test_obj],
axis=1,
ignore_index=True,
).to_csv(obj_tmp_directory + "/test_preprocessed.csv", index=False)
elif isinstance(_X_test_obj, pd.DataFrame):
pd.concat([_X_test_obj, _y_test_obj], axis=1).to_csv(
obj_tmp_directory + "/test_preprocessed.csv", index=False
)
else:
raise TypeError("Only accept numpy array or pandas dataframe!")
clf.fit(_X_train_obj, _y_train_obj.values.ravel())
os.remove(obj_tmp_directory + "/objective_process.txt")
y_pred = clf.predict(_X_test_obj)
_loss = -_obj(y_pred, _y_test_obj.values)
with open(obj_tmp_directory + "/testing_objective.txt", "w") as f:
f.write("Loss from objective function is: {:.6f}\n".format(_loss))
f.write("Loss is calculate using {}.".format(self.objective))
self._iter += 1
# since fmin of Hyperopt tries to minimize the objective function, take negative accuracy here
return {"loss": _loss, "status": STATUS_OK}
else:
_X_obj = _X.copy()
_y_obj = _y.copy()
# encoding
_X_obj = enc.fit(_X_obj)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Encoding finished, in imputation process.")
# imputer
_X_obj = imp.fill(_X_obj)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Imputation finished, in scaling process.")
# balancing
_X_obj = blc.fit_transform(_X_obj)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Balancing finished, in feature selection process.")
# scaling
scl.fit(_X_obj, _y_obj)
_X_obj = scl.transform(_X_obj)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Scaling finished, in balancing process.")
# feature selection
fts.fit(_X_obj, _y_obj)
_X_obj = fts.transform(_X_obj)
with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
f.write("Feature selection finished, in classification model.")
# classification
clf.fit(_X_obj.values, _y_obj.values.ravel())
pd.concat([_X_obj, _y_obj], axis=1).to_csv(
obj_tmp_directory + "/data_preprocessed.csv", index=False
)
os.remove(obj_tmp_directory + "/objective_process.txt")
y_pred = clf.predict(_X_obj.values)
_loss = -_obj(y_pred, _y_obj.values)
with open(obj_tmp_directory + "/testing_objective.txt", "w") as f:
f.write("Loss from objective function is: {.6f}\n".format(_loss))
f.write("Loss is calculate using {}.".format(self.objective))
self._iter += 1
return {"loss": _loss, "status": STATUS_OK}
# call hyperopt to use Bayesian Optimization for Model Selection and Hyperparameter Selection
# search algorithm
if self.algo == "rand":
algo = rand.suggest
elif self.algo == "tpe":
algo = tpe.suggest
elif self.algo == "atpe":
algo = atpe.suggest
# Storage for evaluation points
if self.spark_trials:
trials = SparkTrials(parallelism=8)
else:
trials = Trials()
# run fmin to search for optimal hyperparameter settings
with mlflow.start_run():
best_results = fmin(
fn=_objective,
space=self.hyperparameter_space,
algo=algo,
max_evals=self.max_evals,
timeout=self.timeout,
trials=trials,
show_progressbar=self.progressbar,
rstate=np.random.RandomState(seed=self.seed),
)
# select optimal settings and fit optimal pipeline
self._fit_optimal(best_results, _X, _y)
# whether to retain temp files
if self.delete_temp_after_terminate:
shutil.rmtree(self.temp_directory)
return self
def predict(self, X):
_X = X.copy()
# may need preprocessing for test data, the preprocessing should be the same as in fit part
# Encoding
# convert string types to numerical type
_X = self._fit_encoder.refit(_X)
# Imputer
# fill missing values
_X = self._fit_imputer.fill(_X)
# Balancing
# deal with imbalanced dataset, using over-/under-sampling methods
# No need to balance on test data
# Scaling
_X = self._fit_scaling.transform(_X)
# Feature selection
# Remove redundant features, reduce dimensionality
_X = self._fit_feature_selection.transform(_X)
return self._fit_classifier.predict(_X)
"""
Regressors/Hyperparameters from sklearn:
1. AdaBoost: n_estimators, learning_rate, loss, max_depth
2. Ard regression: n_iter, tol, alpha_1, alpha_2, lambda_1, lambda_2,
threshold_lambda, fit_intercept
3. Decision tree: criterion, max_features, max_depth_factor,
min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
max_leaf_nodes, min_impurity_decrease
4. extra trees: criterion, min_samples_leaf, min_samples_split,
max_features, bootstrap, max_leaf_nodes, max_depth,
min_weight_fraction_leaf, min_impurity_decrease
5. Gaussian Process: alpha, thetaL, thetaU
6. Gradient boosting: loss, learning_rate, min_samples_leaf, max_depth,
max_leaf_nodes, max_bins, l2_regularization, early_stop, tol, scoring
7. KNN: n_neighbors, weights, p
8. Linear SVR (LibLinear): loss, epsilon, dual, tol, C, fit_intercept,
intercept_scaling
9. Kernel SVR (LibSVM): kernel, C, epsilon, tol, shrinking
10. Random forest: criterion, max_features, max_depth, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf, bootstrap,
max_leaf_nodes, min_impurity_decrease
11. SGD (Stochastic Gradient Descent): loss, penalty, alpha, fit_intercept, tol,
learning_rate
12. MLP (Multilayer Perceptron): hidden_layer_depth, num_nodes_per_layer,
activation, alpha, learning_rate_init, early_stopping, solver,
batch_size, n_iter_no_change, tol, shuffle, beta_1, beta_2, epsilon
"""
class AutoTabularRegressor:
"""
Perform model selection and hyperparameter optimization for regression tasks
using sklearn models, predefine hyperparameters
Parameters
----------
timeout: Total time limit for the job in seconds, default = 360
    max_evals: Maximum number of function evaluations allowed, default = 64
temp_directory: folder path to store temporary model, default = 'tmp'
delete_temp_after_terminate: whether to delete temporary information, default = False
save: whether to save model after training, default = True
model_name: saved model name, default = 'model'
ignore_warning: whether to ignore warning, default = True
encoder: Encoders selected for the job, default = 'auto'
support ('DataEncoding')
'auto' will select all default encoders, or use a list to select
imputer: Imputers selected for the job, default = 'auto'
support ('SimpleImputer', 'JointImputer', 'ExpectationMaximization', 'KNNImputer',
'MissForestImputer', 'MICE', 'GAIN')
'auto' will select all default imputers, or use a list to select
balancing: Balancings selected for the job, default = 'auto'
support ('no_processing', 'SimpleRandomOverSampling', 'SimpleRandomUnderSampling',
'TomekLink', 'EditedNearestNeighbor', 'CondensedNearestNeighbor', 'OneSidedSelection',
'CNN_TomekLink', 'Smote', 'Smote_TomekLink', 'Smote_ENN')
'auto' will select all default balancings, or use a list to select
scaling: Scalings selected for the job, default = 'auto'
support ('no_processing', 'MinMaxScale', 'Standardize', 'Normalize', 'RobustScale',
'PowerTransformer', 'QuantileTransformer', 'Winsorization')
'auto' will select all default scalings, or use a list to select
feature_selection: Feature selections selected for the job, default = 'auto'
support ('no_processing', 'LDASelection', 'PCA_FeatureSelection', 'RBFSampler',
'FeatureFilter', 'ASFFS', 'GeneticAlgorithm', 'extra_trees_preproc_for_regression',
'fast_ica', 'feature_agglomeration', 'kernel_pca', 'kitchen_sinks',
'liblinear_svc_preprocessor', 'nystroem_sampler', 'pca', 'polynomial',
'random_trees_embedding', 'select_percentile_regression','select_rates_regression',
'truncatedSVD')
'auto' will select all default feature selections, or use a list to select
models: Models selected for the job, default = 'auto'
support ("AdaboostRegressor", "ARDRegression", "DecisionTree", "ExtraTreesRegressor",
"GaussianProcess", "GradientBoosting", "KNearestNeighborsRegressor", "LibLinear_SVR",
"LibSVM_SVR", "MLPRegressor", "RandomForest", "SGD")
'auto' will select all default models, or use a list to select
validation: Whether to use train_test_split to test performance on test set, default = True
valid_size: Test percentage used to evaluate the performance, default = 0.15
only effective when validation = True
objective: Objective function to test performance, default = 'MSE'
support ("MSE", "MAE", "MSLE", "R2", "MAX")
method: Model selection/hyperparameter optimization methods, default = 'Bayesian'
algo: Search algorithm, default = 'tpe'
support (rand, tpe, atpe)
spark_trials: Whether to use SparkTrials, default = False
    progressbar: Whether to show progress bar, default = True
seed: Random seed, default = 1
"""
def __init__(
self,
timeout=360,
max_evals=64,
temp_directory="tmp",
delete_temp_after_terminate=False,
save=True,
model_name="model",
ignore_warning=True,
encoder="auto",
imputer="auto",
balancing="auto",
scaling="auto",
feature_selection="auto",
models="auto",
validation=True,
valid_size=0.15,
objective="MSE",
method="Bayesian",
algo="tpe",
spark_trials=False,
progressbar=True,
seed=1,
):
self.timeout = timeout
self.max_evals = max_evals
self.temp_directory = temp_directory
self.delete_temp_after_terminate = delete_temp_after_terminate
self.save = save
self.model_name = model_name
self.ignore_warning = ignore_warning
self.encoder = encoder
self.imputer = imputer
self.balancing = balancing
self.scaling = scaling
self.feature_selection = feature_selection
self.models = models
self.validation = validation
self.valid_size = valid_size
self.objective = objective
self.method = method
self.algo = algo
self.spark_trials = spark_trials
self.progressbar = progressbar
self.seed = seed
self._iter = 0 # record iteration number
# create hyperparameter space using Hyperopt.hp.choice
# the pipeline of AutoClassifier is [encoder, imputer, scaling, balancing, feature_selection, model]
# only chosen ones will be added to hyperparameter space
def _get_hyperparameter_space(
    self,
    X,
    encoders_hyperparameters,
    encoder,
    imputers_hyperparameters,
    imputer,
    balancings_hyperparameters,
    balancing,
    scalings_hyperparameters,
    scaling,
    feature_selection_hyperparameters,
    feature_selection,
    models_hyperparameters,
    models,
):
    """Build the Hyperopt search space for the whole pipeline.

    The pipeline is [encoder, imputer, balancing, scaling,
    feature_selection, model]; only the methods selected in each stage
    contribute their hyperparameter dicts to the space.

    Returns a ``pyll`` expression mapping stage name -> ``hp.choice``.
    """

    def _stage_space(selected, hyperparameter_list, key, label):
        # keep only hyperparameter dicts whose ``key`` entry is among the
        # selected method names; ordering follows the selection order first,
        # then the hyperparameter-list order (same as the original loops)
        candidates = [
            item
            for name in [*selected]
            for item in hyperparameter_list
            if item[key] == name
        ]
        return hp.choice(label, candidates)

    # encoding space
    _encoding_hyperparameter = _stage_space(
        encoder, encoders_hyperparameters, "encoder", "regression_encoders"
    )
    # imputation space: if no missing value, imputation is a fixed no-op
    if not X.isnull().values.any():
        _imputer_hyperparameter = hp.choice(
            "regression_imputers", [{"imputer": "no_processing"}]
        )
    else:
        _imputer_hyperparameter = _stage_space(
            imputer, imputers_hyperparameters, "imputer", "regression_imputers"
        )
    # balancing space
    _balancing_hyperparameter = _stage_space(
        balancing, balancings_hyperparameters, "balancing", "regression_balancing"
    )
    # scaling space
    _scaling_hyperparameter = _stage_space(
        scaling, scalings_hyperparameters, "scaling", "regression_scaling"
    )
    # feature selection space
    _feature_selection_hyperparameter = _stage_space(
        feature_selection,
        feature_selection_hyperparameters,
        "feature_selection",
        "regression_feature_selection",
    )
    # model selection and hyperparameter optimization space
    # (models were already validated against the default space by the caller)
    _model_hyperparameter = _stage_space(
        models, models_hyperparameters, "model", "regression_models"
    )
    # the pipeline search space
    return pyll.as_apply(
        {
            "encoder": _encoding_hyperparameter,
            "imputer": _imputer_hyperparameter,
            "balancing": _balancing_hyperparameter,
            "scaling": _scaling_hyperparameter,
            "feature_selection": _feature_selection_hyperparameter,
            "regression": _model_hyperparameter,
        }
    )
# initialize and get hyperparameter search space
def get_hyperparameter_space(self, X, y=None):
    """Initialize the default method registries, validate user selections,
    and build the Hyperopt search space.

    Returns the (encoder, imputer, balancing, scaling, feature_selection,
    models) dicts actually admitted to the search; also sets
    ``self.hyperparameter_space``.
    """
    # initialize default search options
    # use copy to allow multiple manipulation
    self._all_encoders = encoders.copy()
    self._all_encoders_hyperparameters = encoder_hyperparameter.copy()
    self._all_imputers = imputers.copy()
    self._all_imputers_hyperparameters = imputer_hyperparameter.copy()
    self._all_scalings = scalings.copy()
    self._all_balancings = balancings.copy()
    self._all_balancings_hyperparameters = balancing_hyperparameter.copy()
    self._all_scalings_hyperparameters = scaling_hyperparameter.copy()
    self._all_feature_selection = feature_selections.copy()
    # classification-only feature selectors do not apply to regression
    del self._all_feature_selection["extra_trees_preproc_for_classification"]
    del self._all_feature_selection["select_percentile_classification"]
    del self._all_feature_selection["select_rates_classification"]
    # SVM-based feature selection is too slow for large datasets
    if X.shape[0] * X.shape[1] > 10000:
        del self._all_feature_selection["liblinear_svc_preprocessor"]
    self._all_feature_selection_hyperparameters = (
        feature_selection_hyperparameter.copy()
    )
    self._all_models = regressors.copy()
    # SVM suffers from O(n_samples^2 * n_features) complexity,
    # which is time-consuming for large datasets
    if X.shape[0] * X.shape[1] > 10000:
        del self._all_models["LibLinear_SVR"]
        del self._all_models["LibSVM_SVR"]
    self._all_models_hyperparameters = regressor_hyperparameter.copy()
    self.hyperparameter_space = None

    def _resolve(selection, available, label):
        # 'auto' admits every default method; otherwise validate each
        # user-specified name against the default registry
        if selection == "auto":
            return available.copy()
        chosen = {}
        for name in selection:
            if name not in [*available]:
                raise ValueError(
                    "Only supported {} are {}, get {}.".format(
                        label, [*available], name
                    )
                )
            chosen[name] = available[name]
        return chosen

    # Encoding: convert string types to numerical type
    encoder = _resolve(self.encoder, self._all_encoders, "encoders")
    # Imputer: fill missing values; skip imputation when nothing is missing
    if not X.isnull().values.any():
        imputer = {"no_processing": no_processing}
        self._all_imputers = imputer  # limit default imputer space
    else:
        imputer = _resolve(self.imputer, self._all_imputers, "imputers")
    # Balancing: deal with imbalanced dataset via over-/under-sampling
    balancing = _resolve(self.balancing, self._all_balancings, "balancings")
    # Scaling
    scaling = _resolve(self.scaling, self._all_scalings, "scalings")
    # Feature selection: remove redundant features, reduce dimensionality
    feature_selection = _resolve(
        self.feature_selection, self._all_feature_selection, "feature selections"
    )
    # Model selection/hyperparameter optimization via Bayesian Optimization
    models = _resolve(self.models, self._all_models, "models")
    # generate the hyperparameter space
    if self.hyperparameter_space is None:
        self.hyperparameter_space = self._get_hyperparameter_space(
            X,
            self._all_encoders_hyperparameters.copy(),
            encoder,
            self._all_imputers_hyperparameters.copy(),
            imputer,
            self._all_balancings_hyperparameters.copy(),
            balancing,
            self._all_scalings_hyperparameters.copy(),
            scaling,
            self._all_feature_selection_hyperparameters.copy(),
            feature_selection,
            self._all_models_hyperparameters.copy(),
            models,
        )  # _X to choose whether include imputer
    # others are the combinations of default hyperparameter space & methods selected
    return encoder, imputer, balancing, scaling, feature_selection, models
# select optimal settings and fit on optimal hyperparameters
def _fit_optimal(self, best_results, _X, _y):
    """Map the Hyperopt result back to concrete pipeline components, refit
    the whole pipeline on (_X, _y), and optionally persist the settings.

    Fix: the original repeated ``_y = _y.astype(int)`` twice; the redundant
    second cast was removed.
    """
    # mapping the optimal model and hyperparameters selected
    optimal_point = space_eval(self.hyperparameter_space, best_results)
    # optimal encoder
    self.optimal_encoder_hyperparameters = optimal_point["encoder"]
    self.optimal_encoder = self.optimal_encoder_hyperparameters["encoder"]
    del self.optimal_encoder_hyperparameters["encoder"]
    # optimal imputer
    self.optimal_imputer_hyperparameters = optimal_point["imputer"]
    self.optimal_imputer = self.optimal_imputer_hyperparameters["imputer"]
    del self.optimal_imputer_hyperparameters["imputer"]
    # optimal balancing
    self.optimal_balancing_hyperparameters = optimal_point["balancing"]
    self.optimal_balancing = self.optimal_balancing_hyperparameters["balancing"]
    del self.optimal_balancing_hyperparameters["balancing"]
    # optimal scaling
    self.optimal_scaling_hyperparameters = optimal_point["scaling"]
    self.optimal_scaling = self.optimal_scaling_hyperparameters["scaling"]
    del self.optimal_scaling_hyperparameters["scaling"]
    # optimal feature selection
    self.optimal_feature_selection_hyperparameters = optimal_point[
        "feature_selection"
    ]
    self.optimal_feature_selection = self.optimal_feature_selection_hyperparameters[
        "feature_selection"
    ]
    del self.optimal_feature_selection_hyperparameters["feature_selection"]
    # optimal regressor and its hyperparameter settings
    self.optimal_regressor_hyperparameters = optimal_point["regression"]
    self.optimal_regressor = self.optimal_regressor_hyperparameters["model"]
    del self.optimal_regressor_hyperparameters["model"]
    # record optimal settings
    with open(self.temp_directory + "/optimal_setting.txt", "w") as f:
        f.write("Optimal encoding method is: {}\n".format(self.optimal_encoder))
        f.write("Optimal encoding hyperparameters:")
        print(self.optimal_encoder_hyperparameters, file=f, end="\n\n")
        f.write("Optimal imputation method is: {}\n".format(self.optimal_imputer))
        f.write("Optimal imputation hyperparameters:")
        print(self.optimal_imputer_hyperparameters, file=f, end="\n\n")
        f.write("Optimal balancing method is: {}\n".format(self.optimal_balancing))
        f.write("Optimal balancing hyperparamters:")
        print(self.optimal_balancing_hyperparameters, file=f, end="\n\n")
        f.write("Optimal scaling method is: {}\n".format(self.optimal_scaling))
        f.write("Optimal scaling hyperparameters:")
        print(self.optimal_scaling_hyperparameters, file=f, end="\n\n")
        f.write(
            "Optimal feature selection method is: {}\n".format(
                self.optimal_feature_selection
            )
        )
        f.write("Optimal feature selection hyperparameters:")
        print(self.optimal_feature_selection_hyperparameters, file=f, end="\n\n")
        f.write("Optimal regression model is: {}\n".format(self.optimal_regressor))
        f.write("Optimal regression hyperparameters:")
        print(self.optimal_regressor_hyperparameters, file=f, end="\n\n")
    # encoding
    self._fit_encoder = self._all_encoders[self.optimal_encoder](
        **self.optimal_encoder_hyperparameters
    )
    _X = self._fit_encoder.fit(_X)
    # imputer
    self._fit_imputer = self._all_imputers[self.optimal_imputer](
        **self.optimal_imputer_hyperparameters
    )
    _X = self._fit_imputer.fill(_X)
    # balancing
    self._fit_balancing = self._all_balancings[self.optimal_balancing](
        **self.optimal_balancing_hyperparameters
    )
    _X, _y = self._fit_balancing.fit_transform(_X, _y)
    # NOTE(review): the int cast ("classes") looks copied from the classifier
    # counterpart; for continuous regression targets it truncates values.
    # Kept for behavior compatibility -- TODO confirm and likely remove.
    _y = _y.astype(int)
    # scaling
    self._fit_scaling = self._all_scalings[self.optimal_scaling](
        **self.optimal_scaling_hyperparameters
    )
    self._fit_scaling.fit(_X, _y)
    _X = self._fit_scaling.transform(_X)
    # feature selection
    self._fit_feature_selection = self._all_feature_selection[
        self.optimal_feature_selection
    ](**self.optimal_feature_selection_hyperparameters)
    self._fit_feature_selection.fit(_X, _y)
    _X = self._fit_feature_selection.transform(_X)
    # regression
    self._fit_regressor = self._all_models[self.optimal_regressor](
        **self.optimal_regressor_hyperparameters
    )
    self._fit_regressor.fit(_X, _y.values.ravel())
    # save the model settings so a later fit() can reload them
    if self.save:
        save_model(
            self.optimal_encoder,
            self.optimal_encoder_hyperparameters,
            self.optimal_imputer,
            self.optimal_imputer_hyperparameters,
            self.optimal_balancing,
            self.optimal_balancing_hyperparameters,
            self.optimal_scaling,
            self.optimal_scaling_hyperparameters,
            self.optimal_feature_selection,
            self.optimal_feature_selection_hyperparameters,
            self.optimal_regressor,
            self.optimal_regressor_hyperparameters,
            self.model_name,
        )
    return self
def load_model(self, _X, _y):
    """Recover a previously saved pipeline from ``self.model_name`` and
    refit every component on the provided data.

    The settings file stores, in fixed order, alternating lines of
    (method name, hyperparameter dict literal) for each pipeline stage.

    Fix: the original repeated ``_y = _y.astype(int)`` twice; the redundant
    second cast was removed. The final stage comment wrongly said
    "classification"; this is the regression model.
    """
    with open(self.model_name) as f:
        optimal_setting = f.readlines()
    # strip newline characters
    optimal_setting = [item.replace("\n", "") for item in optimal_setting]
    # drop blank lines
    while "" in optimal_setting:
        optimal_setting.remove("")
    self.optimal_encoder = optimal_setting[0]
    self.optimal_encoder_hyperparameters = ast.literal_eval(optimal_setting[1])
    self.optimal_imputer = optimal_setting[2]
    self.optimal_imputer_hyperparameters = ast.literal_eval(optimal_setting[3])
    self.optimal_balancing = optimal_setting[4]
    self.optimal_balancing_hyperparameters = ast.literal_eval(optimal_setting[5])
    self.optimal_scaling = optimal_setting[6]
    self.optimal_scaling_hyperparameters = ast.literal_eval(optimal_setting[7])
    self.optimal_feature_selection = optimal_setting[8]
    self.optimal_feature_selection_hyperparameters = ast.literal_eval(
        optimal_setting[9]
    )
    self.optimal_regressor = optimal_setting[10]
    self.optimal_regressor_hyperparameters = ast.literal_eval(optimal_setting[11])
    # encoding
    self._fit_encoder = self._all_encoders[self.optimal_encoder](
        **self.optimal_encoder_hyperparameters
    )
    _X = self._fit_encoder.fit(_X)
    # imputer
    self._fit_imputer = self._all_imputers[self.optimal_imputer](
        **self.optimal_imputer_hyperparameters
    )
    _X = self._fit_imputer.fill(_X)
    # balancing
    self._fit_balancing = self._all_balancings[self.optimal_balancing](
        **self.optimal_balancing_hyperparameters
    )
    _X, _y = self._fit_balancing.fit_transform(_X, _y)
    # NOTE(review): int cast looks copied from the classifier counterpart and
    # truncates continuous regression targets; kept for behavior
    # compatibility -- TODO confirm and likely remove.
    _y = _y.astype(int)
    # scaling
    self._fit_scaling = self._all_scalings[self.optimal_scaling](
        **self.optimal_scaling_hyperparameters
    )
    self._fit_scaling.fit(_X, _y)
    _X = self._fit_scaling.transform(_X)
    # feature selection
    self._fit_feature_selection = self._all_feature_selection[
        self.optimal_feature_selection
    ](**self.optimal_feature_selection_hyperparameters)
    self._fit_feature_selection.fit(_X, _y)
    _X = self._fit_feature_selection.transform(_X)
    # regression
    self._fit_regressor = self._all_models[self.optimal_regressor](
        **self.optimal_regressor_hyperparameters
    )
    self._fit_regressor.fit(_X, _y.values.ravel())
    return self
def fit(self, X, y):
    """Search preprocessing/model pipelines with Bayesian Optimization and
    fit the best pipeline on (X, y).

    Parameters
    ----------
    X : pandas DataFrame-like (must expose .columns/.shape/.copy)
        Design matrix.
    y : pandas DataFrame-like
        Response.

    Returns
    -------
    self

    Fixes over the original:
    - ``"{.6f}"`` in the no-validation branch raised AttributeError at
      format time; corrected to ``"{:.6f}"``.
    - metrics were called as ``_obj(y_pred, y_true)``; sklearn metrics take
      ``(y_true, y_pred)`` — this mattered for the asymmetric ``r2_score``.
    """
    if self.ignore_warning:  # ignore all warnings to generate clearer outputs
        warnings.filterwarnings("ignore")
    _X = X.copy()
    _y = y.copy()
    (
        encoder,
        imputer,
        balancing,
        scaling,
        feature_selection,
        models,
    ) = self.get_hyperparameter_space(_X, _y)
    # if a stored model exists, load it instead of searching again
    if os.path.exists(self.model_name):
        print("Stored model found, load previous model.")
        self.load_model(_X, _y)
        return self
    # initialize temp directory; if it already exists, empty it
    if os.path.isdir(self.temp_directory):
        shutil.rmtree(self.temp_directory)
    os.makedirs(self.temp_directory)
    # write basic information to init.txt
    with open(self.temp_directory + "/init.txt", "w") as f:
        f.write("Features of the dataset: {}\n".format(list(_X.columns)))
        f.write(
            "Shape of the design matrix: {} * {}\n".format(_X.shape[0], _X.shape[1])
        )
        f.write("Response of the dataset: {}\n".format(list(_y.columns)))
        f.write(
            "Shape of the response vector: {} * {}\n".format(
                _y.shape[0], _y.shape[1]
            )
        )
        f.write("Type of the task: Regression.\n")
    if self.validation:  # only perform train_test_split when validation
        # train test split so the performance of model selection and
        # hyperparameter optimization can be evaluated
        from sklearn.model_selection import train_test_split

        X_train, X_test, y_train, y_test = train_test_split(
            _X, _y, test_size=self.valid_size, random_state=self.seed
        )

    # the objective function that Bayesian Optimization tries to minimize
    @ignore_warnings(category=ConvergenceWarning)
    def _objective(params):
        # select the evaluation metric for predictions
        if self.objective == "MSE":
            from sklearn.metrics import mean_squared_error

            _obj = mean_squared_error
        elif self.objective == "MAE":
            from sklearn.metrics import mean_absolute_error

            _obj = mean_absolute_error
        elif self.objective == "MSLE":
            from sklearn.metrics import mean_squared_log_error

            _obj = mean_squared_log_error
        elif self.objective == "R2":
            from sklearn.metrics import r2_score

            _obj = r2_score
        elif self.objective == "MAX":
            from sklearn.metrics import (
                max_error,
            )  # focus on reducing extreme losses

            _obj = max_error
        else:
            raise ValueError(
                'Only support ["MSE", "MAE", "MSLE", "R2", "MAX"], get{}'.format(
                    self.objective
                )
            )
        # pipeline of objective: [encoder, imputer, balancing, scaling, feature_selection, model]
        # select encoder and set hyperparameters (must have encoder)
        _encoder_hyper = params["encoder"]
        _encoder = _encoder_hyper["encoder"]
        del _encoder_hyper["encoder"]
        enc = encoder[_encoder](**_encoder_hyper)
        # select imputer and set hyperparameters
        _imputer_hyper = params["imputer"]
        _imputer = _imputer_hyper["imputer"]
        del _imputer_hyper["imputer"]
        imp = imputer[_imputer](**_imputer_hyper)
        # select balancing and set hyperparameters
        # (always present, since no_preprocessing is included)
        _balancing_hyper = params["balancing"]
        _balancing = _balancing_hyper["balancing"]
        del _balancing_hyper["balancing"]
        blc = balancing[_balancing](**_balancing_hyper)
        # select scaling and set hyperparameters
        _scaling_hyper = params["scaling"]
        _scaling = _scaling_hyper["scaling"]
        del _scaling_hyper["scaling"]
        scl = scaling[_scaling](**_scaling_hyper)
        # select feature selection and set hyperparameters
        _feature_selection_hyper = params["feature_selection"]
        _feature_selection = _feature_selection_hyper["feature_selection"]
        del _feature_selection_hyper["feature_selection"]
        fts = feature_selection[_feature_selection](**_feature_selection_hyper)
        # select regressor model and set hyperparameters (must have a regressor)
        _regressor_hyper = params["regression"]
        _regressor = _regressor_hyper["model"]
        del _regressor_hyper["model"]
        reg = models[_regressor](
            **_regressor_hyper
        )  # call the model using passed parameters
        # per-iteration report directory
        obj_tmp_directory = self.temp_directory + "/iter_" + str(self._iter + 1)
        if not os.path.isdir(obj_tmp_directory):
            os.makedirs(obj_tmp_directory)
        with open(obj_tmp_directory + "/hyperparameter_settings.txt", "w") as f:
            f.write("Encoding method: {}\n".format(_encoder))
            f.write("Encoding Hyperparameters:")
            print(_encoder_hyper, file=f, end="\n\n")
            f.write("Imputation method: {}\n".format(_imputer))
            f.write("Imputation Hyperparameters:")
            print(_imputer_hyper, file=f, end="\n\n")
            f.write("Balancing method: {}\n".format(_balancing))
            f.write("Balancing Hyperparameters:")
            print(_balancing_hyper, file=f, end="\n\n")
            f.write("Scaling method: {}\n".format(_scaling))
            f.write("Scaling Hyperparameters:")
            print(_scaling_hyper, file=f, end="\n\n")
            f.write("Feature Selection method: {}\n".format(_feature_selection))
            f.write("Feature Selection Hyperparameters:")
            print(_feature_selection_hyper, file=f, end="\n\n")
            f.write("Regression model: {}\n".format(_regressor))
            f.write("Regression Hyperparameters:")
            print(_regressor_hyper, file=f, end="\n\n")
        if self.validation:
            _X_train_obj, _X_test_obj = X_train.copy(), X_test.copy()
            _y_train_obj, _y_test_obj = y_train.copy(), y_test.copy()
            # encoding
            _X_train_obj = enc.fit(_X_train_obj)
            _X_test_obj = enc.refit(_X_test_obj)
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Encoding finished, in imputation process.")
            # imputer
            _X_train_obj = imp.fill(_X_train_obj)
            _X_test_obj = imp.fill(_X_test_obj)
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Imputation finished, in scaling process.")
            # balancing (train only; never resample the test split)
            _X_train_obj, _y_train_obj = blc.fit_transform(
                _X_train_obj, _y_train_obj
            )
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Balancing finished, in scaling process.")
            # NOTE(review): int-casting responses looks inherited from the
            # classifier; truncates continuous targets -- TODO confirm.
            _y_train_obj = _y_train_obj.astype(int)
            _y_test_obj = _y_test_obj.astype(int)
            # scaling
            scl.fit(_X_train_obj, _y_train_obj)
            _X_train_obj = scl.transform(_X_train_obj)
            _X_test_obj = scl.transform(_X_test_obj)
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Scaling finished, in feature selection process.")
            # feature selection
            fts.fit(_X_train_obj, _y_train_obj)
            _X_train_obj = fts.transform(_X_train_obj)
            _X_test_obj = fts.transform(_X_test_obj)
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Feature selection finished, in regression model.")
            # densify in case a transformer returned a sparse matrix
            if scipy.sparse.issparse(_X_train_obj):
                _X_train_obj = _X_train_obj.toarray()
            if scipy.sparse.issparse(_X_test_obj):
                _X_test_obj = _X_test_obj.toarray()
            # store the preprocessed train/test datasets
            if isinstance(
                _X_train_obj, np.ndarray
            ):  # in case numpy array is returned
                pd.concat(
                    [pd.DataFrame(_X_train_obj), _y_train_obj],
                    axis=1,
                    ignore_index=True,
                ).to_csv(obj_tmp_directory + "/train_preprocessed.csv", index=False)
            elif isinstance(_X_train_obj, pd.DataFrame):
                pd.concat([_X_train_obj, _y_train_obj], axis=1).to_csv(
                    obj_tmp_directory + "/train_preprocessed.csv", index=False
                )
            else:
                raise TypeError("Only accept numpy array or pandas dataframe!")
            if isinstance(_X_test_obj, np.ndarray):
                pd.concat(
                    [pd.DataFrame(_X_test_obj), _y_test_obj],
                    axis=1,
                    ignore_index=True,
                ).to_csv(obj_tmp_directory + "/test_preprocessed.csv", index=False)
            elif isinstance(_X_test_obj, pd.DataFrame):
                pd.concat([_X_test_obj, _y_test_obj], axis=1).to_csv(
                    obj_tmp_directory + "/test_preprocessed.csv", index=False
                )
            else:
                raise TypeError("Only accept numpy array or pandas dataframe!")
            # regression
            reg.fit(_X_train_obj, _y_train_obj.values.ravel())
            os.remove(obj_tmp_directory + "/objective_process.txt")
            y_pred = reg.predict(_X_test_obj)
            # sklearn metrics take (y_true, y_pred); matters for r2_score
            if self.objective == "R2":  # R2 is maximized, so negate
                _loss = -_obj(_y_test_obj.values, y_pred)
            else:
                _loss = _obj(_y_test_obj.values, y_pred)
            with open(obj_tmp_directory + "/testing_objective.txt", "w") as f:
                f.write("Loss from objective function is: {:.6f}\n".format(_loss))
                f.write("Loss is calculate using {}.".format(self.objective))
            self._iter += 1
            # fmin minimizes the objective, so the loss is returned as-is
            return {"loss": _loss, "status": STATUS_OK}
        else:
            _X_obj = _X.copy()
            _y_obj = _y.copy()
            # encoding
            _X_obj = enc.fit(_X_obj)
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Encoding finished, in imputation process.")
            # imputer
            _X_obj = imp.fill(_X_obj)
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Imputation finished, in scaling process.")
            # balancing
            # NOTE(review): unlike the validation branch, only one value is
            # unpacked here and _y_obj is not passed -- verify the balancing
            # API; this looks inconsistent.
            _X_obj = blc.fit_transform(_X_obj)
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Balancing finished, in feature selection process.")
            # scaling
            scl.fit(_X_obj, _y_obj)
            _X_obj = scl.transform(_X_obj)
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Scaling finished, in balancing process.")
            # feature selection
            fts.fit(_X_obj, _y_obj)
            _X_obj = fts.transform(_X_obj)
            with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
                f.write("Feature selection finished, in regression model.")
            # regression
            reg.fit(_X_obj.values, _y_obj.values.ravel())
            pd.concat([_X_obj, _y_obj], axis=1).to_csv(
                obj_tmp_directory + "/data_preprocessed.csv", index=False
            )
            os.remove(obj_tmp_directory + "/objective_process.txt")
            y_pred = reg.predict(_X_obj.values)
            # sklearn metrics take (y_true, y_pred); matters for r2_score
            if self.objective == "R2":  # R2 is maximized, so negate
                _loss = -_obj(_y_obj.values, y_pred)
            else:
                _loss = _obj(_y_obj.values, y_pred)
            with open(obj_tmp_directory + "/testing_objective.txt", "w") as f:
                # fixed: "{.6f}" raised AttributeError at format time
                f.write("Loss from objective function is: {:.6f}\n".format(_loss))
                f.write("Loss is calculate using {}.".format(self.objective))
            self._iter += 1
            return {"loss": _loss, "status": STATUS_OK}

    # call hyperopt for Bayesian Optimization over models/hyperparameters
    # search algorithm
    if self.algo == "rand":
        algo = rand.suggest
    elif self.algo == "tpe":
        algo = tpe.suggest
    elif self.algo == "atpe":
        algo = atpe.suggest
    # storage for evaluation points
    if self.spark_trials:
        trials = SparkTrials(parallelism=8)
    else:
        trials = Trials()
    # run fmin to search for optimal hyperparameter settings
    with mlflow.start_run():
        best_results = fmin(
            fn=_objective,
            space=self.hyperparameter_space,
            algo=algo,
            max_evals=self.max_evals,
            timeout=self.timeout,
            trials=trials,
            show_progressbar=self.progressbar,
            rstate=np.random.RandomState(seed=self.seed),
        )
    # select optimal settings and fit optimal pipeline
    self._fit_optimal(best_results, _X, _y)
    # whether to retain temp files
    if self.delete_temp_after_terminate:
        shutil.rmtree(self.temp_directory)
    return self
def predict(self, X):
    """Predict responses for X by replaying the fitted preprocessing
    pipeline (same order as training) and calling the fitted regressor.

    Balancing is deliberately skipped: resampling applies to training
    data only.
    """
    transformed = X.copy()
    # encoding: convert string types to numerical type
    transformed = self._fit_encoder.refit(transformed)
    # imputation: fill missing values
    transformed = self._fit_imputer.fill(transformed)
    # scaling
    transformed = self._fit_scaling.transform(transformed)
    # feature selection: drop redundant features / reduce dimensionality
    transformed = self._fit_feature_selection.transform(transformed)
    return self._fit_regressor.predict(transformed)
class AutoTabular(AutoTabularClassifier, AutoTabularRegressor):
    """
    Automatically assign to AutoTabularClassifier or AutoTabularRegressor
    based on the type of the response ``y`` passed to ``fit``.
    All constructor arguments are forwarded unchanged to the chosen model;
    ``objective`` defaults to "accuracy" (classification) or "MSE"
    (regression) when left as None.
    """

    def __init__(
        self,
        timeout=360,
        max_evals=64,
        temp_directory="tmp",
        delete_temp_after_terminate=False,
        save=True,
        model_name="model",
        ignore_warning=True,
        encoder="auto",
        imputer="auto",
        balancing="auto",
        scaling="auto",
        feature_selection="auto",
        models="auto",
        validation=True,
        valid_size=0.15,
        objective=None,
        method="Bayesian",
        algo="tpe",
        spark_trials=False,
        progressbar=True,
        seed=1,
    ):
        self.timeout = timeout
        self.max_evals = max_evals
        self.temp_directory = temp_directory
        self.delete_temp_after_terminate = delete_temp_after_terminate
        self.save = save
        self.model_name = model_name
        self.ignore_warning = ignore_warning
        self.encoder = encoder
        self.imputer = imputer
        self.balancing = balancing
        self.scaling = scaling
        self.feature_selection = feature_selection
        self.models = models
        self.validation = validation
        self.valid_size = valid_size
        self.objective = objective
        self.method = method
        self.algo = algo
        self.spark_trials = spark_trials
        self.progressbar = progressbar
        self.seed = seed
        # set by fit(); predict() checks it so calling predict before fit
        # raises the intended ValueError instead of an AttributeError
        self.model = None

    def fit(self, X, y=None):
        """Infer the task type from ``y`` and fit the matching model."""
        # determine task type; pd.Series accepted alongside DataFrame/ndarray
        # (previously a Series fell through to ``y == None`` which is
        # element-wise and crashed, or left self._type unset)
        if isinstance(y, (pd.DataFrame, pd.Series, np.ndarray)):
            self._type = type_of_task(y)
        elif y is None:  # fixed: was ``y == None``
            self._type = "Unsupervised"
        else:
            # other array-likes (e.g. plain lists) previously left
            # self._type unset and crashed below with AttributeError
            self._type = type_of_task(y)
        if self._type in ["binary", "multiclass"]:  # assign classification tasks
            self.model = AutoTabularClassifier(
                timeout=self.timeout,
                max_evals=self.max_evals,
                temp_directory=self.temp_directory,
                delete_temp_after_terminate=self.delete_temp_after_terminate,
                save=self.save,
                model_name=self.model_name,
                ignore_warning=self.ignore_warning,
                encoder=self.encoder,
                imputer=self.imputer,
                balancing=self.balancing,
                scaling=self.scaling,
                feature_selection=self.feature_selection,
                models=self.models,
                validation=self.validation,
                valid_size=self.valid_size,
                objective="accuracy" if not self.objective else self.objective,
                method=self.method,
                algo=self.algo,
                spark_trials=self.spark_trials,
                progressbar=self.progressbar,
                seed=self.seed,
            )
        elif self._type in ["integer", "continuous"]:  # assign regression tasks
            self.model = AutoTabularRegressor(
                timeout=self.timeout,
                max_evals=self.max_evals,
                temp_directory=self.temp_directory,
                delete_temp_after_terminate=self.delete_temp_after_terminate,
                save=self.save,
                model_name=self.model_name,
                ignore_warning=self.ignore_warning,
                encoder=self.encoder,
                imputer=self.imputer,
                balancing=self.balancing,
                scaling=self.scaling,
                feature_selection=self.feature_selection,
                models=self.models,
                validation=self.validation,
                valid_size=self.valid_size,
                objective="MSE" if not self.objective else self.objective,
                method=self.method,
                algo=self.algo,
                spark_trials=self.spark_trials,
                progressbar=self.progressbar,
                seed=self.seed,
            )
        else:
            raise ValueError(
                'Not recognizing type, only ["binary", "multiclass", "integer", "continuous"] accepted, get {}!'.format(
                    self._type
                )
            )
        self.model.fit(X, y)
        return self

    def predict(self, X):
        """Predict with the fitted underlying model.

        Raises ValueError if fit() has not been called yet.
        """
        if getattr(self, "model", None) is not None:
            return self.model.predict(X)
        else:
            raise ValueError("No tasks found! Need to fit first.")
|
PanyiDong/AutoML | My_AutoML/__init__.py | """
File: __init__.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/__init__.py
File Created: Friday, 25th February 2022 6:13:42 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 29th April 2022 2:25:44 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = "0.2.0"
from ._base import no_processing, load_data
from ._utils import (
# random_guess,
# random_index,
# random_list,
# is_date,
# feature_rounding,
train_test_split,
# minloc,
# maxloc,
# True_index,
# nan_cov,
# class_means,
# empirical_covariance,
# class_cov,
# Pearson_Corr,
# MI,
# t_score,
# ANOVA,
# as_dataframe,
type_of_task,
# formatting,
# Timer,
# unify_nan,
# remove_index_columns,
# get_missing_matrix,
)
# from ._encoding import DataEncoding
# from ._imputation import (
# SimpleImputer,
# DummyImputer,
# JointImputer,
# ExpectationMaximization,
# KNNImputer,
# MissForestImputer,
# MICE,
# GAIN,
# AAI_kNN,
# KMI,
# CMI,
# k_Prototype_NN,
# )
# from ._balancing import (
# SimpleRandomOverSampling,
# SimpleRandomUnderSampling,
# TomekLink,
# EditedNearestNeighbor,
# CondensedNearestNeighbor,
# OneSidedSelection,
# CNN_TomekLink,
# Smote,
# Smote_TomekLink,
# Smote_ENN,
# )
# from ._scaling import (
# MinMaxScale,
# Standardize,
# Normalize,
# RobustScale,
# PowerTransformer,
# QuantileTransformer,
# Winsorization,
# Feature_Manipulation,
# Feature_Truncation,
# )
# from ._feature_selection import (
# PCA_FeatureSelection,
# LDASelection,
# FeatureFilter,
# RBFSampler,
# ASFFS,
# GeneticAlgorithm,
# )
# extracted from autosklearn
# not all used in the pipeline
# from ._feature_selection import (
# Densifier,
# ExtraTreesPreprocessorClassification,
# ExtraTreesPreprocessorRegression,
# FastICA,
# FeatureAgglomeration,
# KernelPCA,
# RandomKitchenSinks,
# LibLinear_Preprocessor,
# Nystroem,
# PCA,
# PolynomialFeatures,
# RandomTreesEmbedding,
# SelectPercentileClassification,
# SelectPercentileRegression,
# SelectClassificationRates,
# SelectRegressionRates,
# TruncatedSVD,
# )
from ._hpo import (
AutoTabular,
AutoTabularClassifier,
AutoTabularRegressor,
AutoTextClassifier,
AutoNextWordPrediction,
)
# from ._model import classifiers, regressors
# base = {"load_data": load_data}
# encoders = {"DataEncoding": DataEncoding}
# model_selection = {
# "AutoTabular": AutoTabular,
# "AutoTabularClassifier": AutoTabularClassifier,
# "AutoTabularRegressor": AutoTabularRegressor,
# "AutoTextClassifier": AutoTextClassifier,
# "AutoNextWordPrediction": AutoNextWordPrediction,
# }
# Public API of the package.
#
# NOTE: __all__ must list only names that are actually bound by the active
# imports above.  The previous version also listed names whose imports are
# commented out (e.g. the imputers, scalers, balancers and feature
# selectors), which makes ``from My_AutoML import *`` raise AttributeError
# for every missing name.  Re-add entries here as their imports are
# re-enabled.
__all__ = [
    "load_data",  # _base
    "no_processing",
    "train_test_split",  # _utils
    "type_of_task",
    "AutoTabular",  # _hpo
    "AutoTabularClassifier",
    "AutoTabularRegressor",
    "AutoTextClassifier",
    "AutoNextWordPrediction",
]
|
PanyiDong/AutoML | My_AutoML/_balancing/_under_sampling.py | """
File: _under_sampling.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_balancing/_under_sampling.py
File Created: Wednesday, 6th April 2022 12:21:04 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 15th April 2022 11:10:10 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
import warnings
import sklearn
import sklearn.utils
from My_AutoML._utils._data import is_imbalance, LinkTable
"""
Reference for: Simple Random Over Sampling, Simple Random Under Sampling, Tomek Link, \
Edited Nearest Neighbor, Condensed Nearest Neighbor, One Sided Selection, CNN_TomekLink, \
Smote, Smote_TomekLink, Smote_ENN
Batista, G.E.A.P.A., Prati, R.C. and Monard, M.C., 2004. A study of the behavior of several methods for
balancing machine learning training data. ACM SIGKDD explorations newsletter, 6(1), pp.20-29.
"""
class SimpleRandomUnderSampling:
    """
    Simple Random Under-Sampling

    Randomly eliminate samples from majority class

    Parameters
    ----------
    imbalance_threshold: determine to what extent will the data be considered as imbalanced data, default = 0.9

    all: whether to stop until all features are balanced, default = False

    max_iter: Maximum number of iterations for over-/under-sampling, default = 1000

    seed: random seed, default = 1
    every random draw from the majority class will increase the random seed by 1
    """

    def __init__(
        self,
        imbalance_threshold=0.9,
        all=False,
        max_iter=1000,
        seed=1,
    ):
        self.imbalance_threshold = imbalance_threshold
        self.all = all
        self.max_iter = max_iter
        self.seed = seed

        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        """Balance X (and optionally y) by random under-sampling.

        Returns (X, y) when a response DataFrame y is given, otherwise
        the balanced X alone.
        """
        # y may be None or a DataFrame; DataFrame.empty distinguishes an
        # empty frame from a missing response
        try:
            _empty = y.empty
        except AttributeError:
            _empty = y is None  # idiomatic None test (was ``y == None``, E711)

        if not _empty:
            # combine X and y so the response column participates in balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X

        _data = data.copy(deep=True)
        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            if self.all:
                # keep resampling until every feature is balanced
                while is_imbalance(_data, self.imbalance_threshold):
                    _data = self._fit_transform(_data)
            else:
                _data = self._fit_transform(_data)

        self._fitted = True

        if not _empty:
            return _data[features], _data[response]
        else:
            return _data

    def _fit_transform(self, X):
        """Randomly drop majority-class rows for the first imbalanced
        feature until it is balanced or max_iter draws are exhausted."""
        _imbalanced_feature, _majority = is_imbalance(
            X, self.imbalance_threshold, value=True
        )
        _seed = self.seed
        _iter = 0
        while (
            is_imbalance(X[[_imbalanced_feature]], self.imbalance_threshold)
            and _iter <= self.max_iter
        ):
            _majority_class = X.loc[X[_imbalanced_feature] == _majority]
            # draw a single majority row and remove it; bump the seed so the
            # next draw differs
            sample = _majority_class.sample(n=1, random_state=_seed)
            X = X.drop(sample.index)
            _seed += 1
            _iter += 1
        # shuffle to remove ordering artifacts introduced by the removals
        X = sklearn.utils.shuffle(X.reset_index(drop=True)).reset_index(drop=True)

        return X
class TomekLink:
    """
    Use Tomek Links to remove noisy or border significant majority class sample

    Tomek links define as nearest neighbors with different classification

    Parameters
    ----------
    imbalance_threshold: determine to what extent will the data be considered as imbalanced data, default = 0.9

    norm: how the distance between different samples calculated, default = 'l2'
    all supported norm ['l1', 'l2']

    all: whether to stop until all features are balanced, default = False

    max_iter: Maximum number of iterations for over-/under-sampling, default = 1000

    seed: random seed, default = 1
    every random draw from the majority class will increase the random seed by 1
    """

    def __init__(
        self, imbalance_threshold=0.9, norm="l2", all=False, max_iter=1000, seed=1
    ):
        self.imbalance_threshold = imbalance_threshold
        self.norm = norm
        self.all = all
        self.max_iter = max_iter
        self.seed = seed

        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        """Balance X (and optionally y) by removing majority-class members
        of Tomek links.

        Returns (X, y) when a response DataFrame y is given, otherwise
        the balanced X alone.
        """
        # y may be None or a DataFrame; DataFrame.empty distinguishes an
        # empty frame from a missing response
        try:
            _empty = y.empty
        except AttributeError:
            _empty = y is None  # idiomatic None test (was ``y == None``, E711)

        if not _empty:
            # combine X and y so the response column participates in balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X

        _data = data.copy(deep=True)
        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            if self.all:
                # keep resampling until every feature is balanced
                while is_imbalance(_data, self.imbalance_threshold):
                    _data = self._fit_transform(_data)
            else:
                _data = self._fit_transform(_data)

        self._fitted = True

        if not _empty:
            return _data[features], _data[response]
        else:
            return _data

    def _fit_transform(self, X):
        """Drop majority-class nearest neighbors of sampled minority rows
        (Tomek links) for the first imbalanced feature."""
        _imbalanced_feature, _majority = is_imbalance(
            X, self.imbalance_threshold, value=True
        )
        _seed = self.seed
        _iter = 0
        while (
            is_imbalance(X[[_imbalanced_feature]], self.imbalance_threshold)
            and _iter <= self.max_iter
        ):
            _minority_class = X.loc[X[_imbalanced_feature] != _majority]
            # probe ~1% of the minority class (at least one row) per pass
            _minority_sample = _minority_class.sample(
                n=max(int(len(_minority_class) / 100), 1), random_state=_seed
            )
            _link_table = LinkTable(_minority_sample, X, self.norm)
            drop_index = []
            for _link_item in _link_table:
                _nearest = _link_item.index(
                    sorted(_link_item)[1]
                )  # since the closest will always be the sample itself
                # if nearest is the majority class, add to drop_index
                if X.loc[_nearest, _imbalanced_feature] == _majority:
                    drop_index.append(_nearest)
            drop_index = list(set(drop_index))  # get unique drop indexes
            X = X.drop(index=drop_index, axis=0).reset_index(drop=True)
            _seed += 1
            _iter += 1

        return X
class EditedNearestNeighbor:
    """
    Edited Nearest Neighbor (ENN)
    Under-sampling method, drop samples where majority of k nearest neighbors belong to different class

    Parameters
    ----------
    imbalance_threshold: determine to what extent will the data be considered as imbalanced data, default = 0.9

    norm: how the distance between different samples calculated, default = 'l2'
    all supported norm ['l1', 'l2']

    all: whether to stop until all features are balanced, default = False

    max_iter: Maximum number of iterations for over-/under-sampling, default = 1000

    seed: random seed, default = 1
    every random draw from the majority class will increase the random seed by 1

    k: nearest neighbors to find, default = 3
    """

    def __init__(
        self,
        imbalance_threshold=0.9,
        norm="l2",
        all=False,
        max_iter=1000,
        seed=1,
        k=3,
    ):
        self.imbalance_threshold = imbalance_threshold
        self.norm = norm
        self.all = all
        self.max_iter = max_iter
        self.seed = seed
        self.k = k

        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        """Balance X (and optionally y) with the ENN rule.

        Returns (X, y) when a response DataFrame y is given, otherwise
        the balanced X alone.
        """
        # y may be None or a DataFrame; DataFrame.empty distinguishes an
        # empty frame from a missing response
        try:
            _empty = y.empty
        except AttributeError:
            _empty = y is None  # idiomatic None test (was ``y == None``, E711)

        if not _empty:
            # combine X and y so the response column participates in balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X

        _data = data.copy(deep=True)

        # majority voting over k neighbors is only unambiguous for odd k
        if (self.k % 2) == 0:
            warnings.warn(
                "Criteria of majority better select odd k nearest neighbors, get {}.".format(
                    self.k
                )
            )

        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            if self.all:
                # keep resampling until every feature is balanced
                while is_imbalance(_data, self.imbalance_threshold):
                    _data = self._fit_transform(_data)
            else:
                _data = self._fit_transform(_data)

        self._fitted = True

        if not _empty:
            return _data[features], _data[response]
        else:
            return _data

    def _fit_transform(self, X):
        """Drop one random sample (or its nearest neighbor) per pass when
        fewer than half of its k nearest neighbors share its class."""
        _imbalanced_feature, _majority = is_imbalance(
            X, self.imbalance_threshold, value=True
        )
        _seed = self.seed
        _iter = 0
        while (
            is_imbalance(X[[_imbalanced_feature]], self.imbalance_threshold)
            and _iter <= self.max_iter
        ):
            _majority_class = X.loc[X[_imbalanced_feature] == _majority]
            _majority_index = _majority_class.index
            _sample = X.sample(n=1, random_state=_seed)
            _sample_type = (
                "majority" if (_sample.index[0] in _majority_index) else "minority"
            )
            _link_table = LinkTable(_sample, X, self.norm)
            for _link_item in _link_table:
                # positions of the k closest rows, skipping the sample itself
                # (NOTE(review): list.index maps distances back to positions,
                # which assumes distances are unique — confirm upstream)
                _k_nearest = [
                    _link_item.index(item)
                    for item in sorted(_link_item)[1 : (self.k + 1)]
                ]
                count = 0
                for _index in _k_nearest:
                    _class = "majority" if (_index in _majority_index) else "minority"
                    if _class == _sample_type:
                        count += 1
                if count < (self.k + 1) / 2:
                    # if sample belongs to majority, remove the sample; else, remove the nearest neighbor
                    if _sample_type == "majority":
                        X = X.drop(_sample.index).reset_index(drop=True)
                    else:
                        X = X.drop(_link_item.index(sorted(_link_item)[1])).reset_index(
                            drop=True
                        )
            _seed += 1
            _iter += 1
            if len(_majority_class) == len(X):
                warnings.warn("No minority class left!")
                break
            if len(X) < 1:
                warnings.warn("No sample left!")

        return X
class CondensedNearestNeighbor:
    """
    Condensed Nearest Neighbor Rule (CNN)
    get subset of E that can predict the same as E using 1-NN
    algorithm: build the subset with all minority class and one of random majority class,
    build a 1-NN model and predict on all samples, put all misclassified data to the subset

    Parameters
    ----------
    imbalance_threshold: determine to what extent will the data be considered as imbalanced data, default = 0.9

    all: whether to stop until all features are balanced, default = False

    max_iter: Maximum number of iterations for over-/under-sampling, default = 1000

    seed: random seed, default = 1
    every random draw from the majority class will increase the random seed by 1
    """

    def __init__(self, imbalance_threshold=0.9, all=False, max_iter=1000, seed=1):
        self.imbalance_threshold = imbalance_threshold
        self.all = all
        self.max_iter = max_iter
        self.seed = seed

        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        """Balance X (and optionally y) with the CNN rule.

        Returns (X, y) when a response DataFrame y is given, otherwise
        the balanced X alone.
        """
        # y may be None or a DataFrame; DataFrame.empty distinguishes an
        # empty frame from a missing response
        try:
            _empty = y.empty
        except AttributeError:
            _empty = y is None  # idiomatic None test (was ``y == None``, E711)

        if not _empty:
            # combine X and y so the response column participates in balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X

        _data = data.copy(deep=True)
        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            if self.all:
                # keep resampling until every feature is balanced
                while is_imbalance(_data, self.imbalance_threshold):
                    _data = self._fit_transform(_data)
            else:
                _data = self._fit_transform(_data)

        self._fitted = True

        if not _empty:
            return _data[features], _data[response]
        else:
            return _data

    def _fit_transform(self, X):
        """Condense X: keep the minority class plus one random majority row,
        then re-add every row a 1-NN fit on that subset misclassifies."""
        # local import keeps sklearn optional until the method is used
        from sklearn.neighbors import KNeighborsClassifier

        _imbalanced_feature, _majority = is_imbalance(
            X, self.imbalance_threshold, value=True
        )
        _seed = self.seed
        _iter = 0
        while (
            is_imbalance(X[[_imbalanced_feature]], self.imbalance_threshold)
            and _iter <= self.max_iter
        ):
            _minority_class = X.loc[X[_imbalanced_feature] != _majority]
            _majority_class = X.loc[X[_imbalanced_feature] == _majority]
            _subset = pd.concat(
                [_minority_class, _majority_class.sample(n=1, random_state=_seed)]
            ).reset_index(drop=True)
            neigh = KNeighborsClassifier(n_neighbors=1)
            neigh.fit(
                _subset.loc[:, _subset.columns != _imbalanced_feature],
                _subset[_imbalanced_feature],
            )
            # score the rows *outside* the subset with the 1-NN model
            y_predict = neigh.predict(
                X.loc[
                    ~X.index.isin(list(_subset.index)), X.columns != _imbalanced_feature
                ]
            )
            y_true = X.loc[
                ~X.index.isin(list(_subset.index)), X.columns == _imbalanced_feature
            ].values.T[0]
            # misclassified rows are informative — add them back to the subset
            _not_matching_index = np.where((np.array(y_predict) != np.array(y_true)))[0]
            X = pd.concat([_subset, X.iloc[_not_matching_index, :]]).reset_index(
                drop=True
            )
            _seed += 1
            _iter += 1

        return X
class OneSidedSelection(TomekLink, CondensedNearestNeighbor):
    """
    One Sided Selection (OSS)
    employs Tomek Link to remove noisy and border majority class samples, then use CNN to remove majority
    samples that are distinct to decision boundary

    Parameters
    ----------
    imbalance_threshold: determine to what extent will the data be considered as imbalanced data, default = 0.9

    norm: how the distance between different samples calculated, default = 'l2'
    all supported norm ['l1', 'l2']

    all: whether to stop until all features are balanced, default = False

    max_iter: Maximum number of iterations for over-/under-sampling, default = 1000

    seed: random seed, default = 1
    every random draw from the majority class will increase the random seed by 1
    """

    def __init__(
        self, imbalance_threshold=0.9, norm="l2", all=False, max_iter=1000, seed=1
    ):
        self.imbalance_threshold = imbalance_threshold
        self.norm = norm
        self.all = all
        self.max_iter = max_iter
        self.seed = seed

        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        """Run TomekLink (relaxed threshold) then CNN (full threshold) in sequence.

        Per the MRO (TomekLink, CondensedNearestNeighbor): ``super()`` resolves
        to TomekLink and ``super(TomekLink, self)`` to CondensedNearestNeighbor.
        Returns (X, y) when a response DataFrame y is given, otherwise
        the balanced X alone.
        """
        # y may be None or a DataFrame; DataFrame.empty distinguishes an
        # empty frame from a missing response
        try:
            _empty = y.empty
        except AttributeError:
            _empty = y is None  # idiomatic None test (was ``y == None``, E711)

        if not _empty:
            # combine X and y so the response column participates in balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X

        _data = data.copy(deep=True)
        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            # phase 1: TomekLink with a relaxed (midpoint) threshold removes
            # noisy/borderline majority samples
            super().__init__(
                imbalance_threshold=(1.0 + self.imbalance_threshold) / 2,
                norm=self.norm,
                all=self.all,
                max_iter=self.max_iter,
                seed=self.seed,
            )
            _data = super().fit_transform(_data)
            # phase 2: CNN at the full threshold removes majority samples far
            # from the decision boundary
            super(TomekLink, self).__init__(
                imbalance_threshold=self.imbalance_threshold,
                all=self.all,
                max_iter=self.max_iter,
                seed=self.seed,
            )
            _data = super(TomekLink, self).fit_transform(_data)

        self._fitted = True

        if not _empty:
            return _data[features], _data[response]
        else:
            return _data
class CNN_TomekLink(CondensedNearestNeighbor, TomekLink):
    """
    CNN_Tomek Link
    employs CNN first and Tomek Link to reduce the calculation for Tomek Link (large calculation for distance
    between each sample points, especially for large sample size)

    Parameters
    ----------
    imbalance_threshold: determine to what extent will the data be considered as imbalanced data, default = 0.9

    norm: how the distance between different samples calculated, default = 'l2'
    all supported norm ['l1', 'l2']

    all: whether to stop until all features are balanced, default = False

    max_iter: Maximum number of iterations for over-/under-sampling, default = 1000

    seed: random seed, default = 1
    every random draw from the majority class will increase the random seed by 1
    """

    def __init__(
        self, imbalance_threshold=0.9, norm="l2", all=False, max_iter=1000, seed=1
    ):
        self.imbalance_threshold = imbalance_threshold
        self.norm = norm
        self.all = all
        self.max_iter = max_iter
        self.seed = seed

        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        """Run CNN (relaxed threshold) then TomekLink (full threshold) in sequence.

        Per the MRO (CondensedNearestNeighbor, TomekLink): ``super()`` resolves
        to CondensedNearestNeighbor and ``super(CondensedNearestNeighbor, self)``
        to TomekLink.  Returns (X, y) when a response DataFrame y is given,
        otherwise the balanced X alone.
        """
        # y may be None or a DataFrame; DataFrame.empty distinguishes an
        # empty frame from a missing response
        try:
            _empty = y.empty
        except AttributeError:
            _empty = y is None  # idiomatic None test (was ``y == None``, E711)

        if not _empty:
            # combine X and y so the response column participates in balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X

        _data = data.copy(deep=True)
        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            # phase 1: CNN with a relaxed (midpoint) threshold shrinks the
            # dataset cheaply before the expensive pairwise-distance phase
            super().__init__(
                imbalance_threshold=(1.0 + self.imbalance_threshold) / 2,
                all=self.all,
                max_iter=self.max_iter,
                seed=self.seed,
            )
            _data = super().fit_transform(_data)
            # phase 2: TomekLink at the full threshold on the reduced data
            super(CondensedNearestNeighbor, self).__init__(
                imbalance_threshold=self.imbalance_threshold,
                norm=self.norm,
                all=self.all,
                max_iter=self.max_iter,
                seed=self.seed,
            )
            _data = super(CondensedNearestNeighbor, self).fit_transform(_data)

        self._fitted = True

        if not _empty:
            return _data[features], _data[response]
        else:
            return _data
|
PanyiDong/AutoML | tests/test_model/test_model.py | """
File: test_model.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_model/test_model.py
File Created: Friday, 15th April 2022 11:13:40 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 8:23:29 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pandas as pd
from My_AutoML._utils import formatting
def test_classifiers():
    """Smoke-test every registered classifier on the heart dataset.

    Models with dedicated tests later in this file are skipped here.
    """
    from My_AutoML._model import classifiers

    # covered by their own dedicated tests below
    individually_tested = (
        "LightGBM_Classifier",
        "XGBoost_Classifier",
        "GAM_Classifier",
        "MLP_Classifier",
        "RNN_Classifier",
    )

    for method_name, method in classifiers.items():
        if method_name in individually_tested:
            continue

        data = pd.read_csv("example/example_data/heart.csv")

        # encoding categorical features
        encoder = formatting()
        encoder.fit(data)

        # X/y split
        X = data.iloc[:, :-1]
        y = data.iloc[:, -1]

        mol = method()
        mol.fit(X, y)
        y_pred = mol.predict(X)
        y_prob = mol.predict_proba(X)

        assert mol._fitted == True, "Model {} has not been fitted.".format(
            method_name
        )

    # exercise the autosklearn-backed wrappers only when autosklearn exists
    import importlib

    if importlib.util.find_spec("autosklearn") is not None:
        from My_AutoML._model._sklearn import (
            AdaboostClassifier,
            BernoulliNB,
            DecisionTreeClassifier,
            ExtraTreesClassifier,
            GaussianNB,
            HistGradientBoostingClassifier,
            KNearestNeighborsClassifier,
            LDA,
            LibLinear_SVC,
            LibSVM_SVC,
            MLPClassifier,
            MultinomialNB,
            PassiveAggressive,
            QDA,
            RandomForestClassifier,
            SGDClassifier,
        )

        sklearn_classifiers = {
            "AdaboostClassifier": AdaboostClassifier,
            "BernoulliNB": BernoulliNB,
            "DecisionTreeClassifier": DecisionTreeClassifier,
            "ExtraTreesClassifier": ExtraTreesClassifier,
            "GaussianNB": GaussianNB,
            "HistGradientBoostingClassifier": HistGradientBoostingClassifier,
            "KNearestNeighborsClassifier": KNearestNeighborsClassifier,
            "LDA": LDA,
            "LibLinear_SVC": LibLinear_SVC,
            "LibSVM_SVC": LibSVM_SVC,
            "MLPClassifier": MLPClassifier,
            "MultinomialNB": MultinomialNB,
            "PassiveAggressive": PassiveAggressive,
            "QDA": QDA,
            "RandomForestClassifier": RandomForestClassifier,
            "SGDClassifier": SGDClassifier,
        }

        for method_name, method in sklearn_classifiers.items():
            data = pd.read_csv("example/example_data/heart.csv")

            # encoding categorical features
            encoder = formatting()
            encoder.fit(data)

            # X/y split
            X = data.iloc[:, :-1]
            y = data.iloc[:, -1]

            mol = method()
            mol.fit(X, y)
            y_pred = mol.predict(X)
            y_prob = mol.predict_proba(X)

            assert mol._fitted == True, "Model {} has not been fitted.".format(
                method_name
            )
def test_regressors():
    """Smoke-test every registered regressor on the insurance dataset.

    Models with dedicated tests later in this file are skipped here.
    """
    from My_AutoML._model import regressors

    # covered by their own dedicated tests below
    individually_tested = (
        "LightGBM_Regressor",
        "XGBoost_Regressor",
        "GAM_Regressor",
        "MLP_Regressor",
        "RNN_Regressor",
    )

    for method_name, method in regressors.items():
        if method_name in individually_tested:
            continue

        data = pd.read_csv("example/example_data/insurance.csv")

        # encoding categorical features
        encoder = formatting()
        encoder.fit(data)

        # X/y split
        X = data.iloc[:, :-1]
        y = data.iloc[:, -1]

        mol = method()
        mol.fit(X, y)
        y_pred = mol.predict(X)
        # regressors may legitimately not implement predict_proba
        try:
            y_prob = mol.predict_proba(X)
        except NotImplementedError:
            pass

        assert mol._fitted == True, "Model {} has not been fitted.".format(
            method_name
        )

    # exercise the autosklearn-backed wrappers only when autosklearn exists
    import importlib

    if importlib.util.find_spec("autosklearn") is not None:
        from My_AutoML._model._sklearn import (
            AdaboostRegressor,
            ARDRegression,
            DecisionTreeRegressor,
            ExtraTreesRegressor,
            GaussianProcess,
            HistGradientBoostingRegressor,
            KNearestNeighborsRegressor,
            LibLinear_SVR,
            LibSVM_SVR,
            MLPRegressor,
            RandomForestRegressor,
            SGDRegressor,
        )

        sklearn_regressors = {
            "AdaboostRegressor": AdaboostRegressor,
            "ARDRegression": ARDRegression,
            "DecisionTreeRegressor": DecisionTreeRegressor,
            "ExtraTreesRegressor": ExtraTreesRegressor,
            "GaussianProcess": GaussianProcess,
            "HistGradientBoostingRegressor": HistGradientBoostingRegressor,
            "KNearestNeighborsRegressor": KNearestNeighborsRegressor,
            "LibLinear_SVR": LibLinear_SVR,
            "LibSVM_SVR": LibSVM_SVR,
            "MLPRegressor": MLPRegressor,
            "RandomForestRegressor": RandomForestRegressor,
            "SGDRegressor": SGDRegressor,
        }

        for method_name, method in sklearn_regressors.items():
            data = pd.read_csv("example/example_data/insurance.csv")

            # encoding categorical features
            encoder = formatting()
            encoder.fit(data)

            # X/y split
            X = data.iloc[:, :-1]
            y = data.iloc[:, -1]

            mol = method()
            mol.fit(X, y)
            y_pred = mol.predict(X)
            try:
                y_prob = mol.predict_proba(X)
            except NotImplementedError:
                pass

            assert mol._fitted == True, "Model {} has not been fitted.".format(
                method_name
            )
def test_add_classifier():
    """Fit ComplementNB on the heart dataset; it needs non-negative features."""
    from My_AutoML._model._sklearn import ComplementNB

    data = pd.read_csv("example/example_data/heart.csv")

    # encoding categorical features
    encoder = formatting()
    encoder.fit(data)

    # X/y split
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    mol = ComplementNB()
    mol.fit(X.abs(), y)  # make sure no negative values
    y_pred = mol.predict(X)
    y_prob = mol.predict_proba(X)

    assert mol._fitted == True, "Model ComplementNB has not been fitted."
def test_add_regressor():
    """Fit LibSVM_SVR, MLPRegressor and SGDRegressor on the insurance data."""
    import importlib

    # prefer the autosklearn-backed implementations when autosklearn is
    # installed; otherwise fall back to the plain sklearn wrappers
    if importlib.util.find_spec("autosklearn") is None:
        from My_AutoML._model._sklearn import LibSVM_SVR, MLPRegressor, SGDRegressor
    else:
        from My_AutoML._model._autosklearn import LibSVM_SVR, MLPRegressor, SGDRegressor

    data = pd.read_csv("example/example_data/insurance.csv")

    # encoding categorical features
    encoder = formatting()
    encoder.fit(data)

    # X/y split
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    for name, cls in (
        ("LibSVM_SVR", LibSVM_SVR),
        ("MLPRegressor", MLPRegressor),
        ("SGDRegressor", SGDRegressor),
    ):
        model = cls()
        model.fit(X, y)
        y_pred = model.predict(X)
        # regressors may legitimately not implement predict_proba
        try:
            y_prob = model.predict_proba(X)
        except NotImplementedError:
            pass
        assert model._fitted == True, "Model {} has not been fitted.".format(name)
def test_lightgbm_classifier():
    """Fit LightGBM_Classifier on the heart dataset with explicit settings."""
    from My_AutoML._model import LightGBM_Classifier

    data = pd.read_csv("example/example_data/heart.csv")

    # encoding categorical features
    encoder = formatting()
    encoder.fit(data)

    # X/y split
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    clf = LightGBM_Classifier(
        objective="binary",
        boosting="gbdt",
        n_estimators=100,
        max_depth=-1,
        num_leaves=31,
        min_data_in_leaf=20,
        learning_rate=0.1,
        tree_learner="serial",
        num_iterations=100,
        seed=1,
    )
    clf.fit(X, y)
    y_pred = clf.predict(X)
    y_prob = clf.predict_proba(X)

    assert clf._fitted == True, "Model has not been fitted."
def test_lightgbm_regressor():
    """Fit LightGBM_Regressor on the insurance dataset with explicit settings."""
    from My_AutoML._model import LightGBM_Regressor

    data = pd.read_csv("example/example_data/insurance.csv")

    # encoding categorical features
    encoder = formatting()
    encoder.fit(data)

    # X/y split
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    reg = LightGBM_Regressor(
        objective="regression",
        boosting="gbdt",
        n_estimators=100,
        max_depth=-1,
        num_leaves=31,
        min_data_in_leaf=20,
        learning_rate=0.1,
        tree_learner="serial",
        num_iterations=100,
        seed=1,
    )
    reg.fit(X, y)
    y_pred = reg.predict(X)
    # regressors may legitimately not implement predict_proba
    try:
        y_prob = reg.predict_proba(X)
    except NotImplementedError:
        pass

    assert reg._fitted == True, "Model has not been fitted."
def test_xgboost_classifier():
    """Fit XGBoost_Classifier on the heart dataset with explicit settings."""
    from My_AutoML._model import XGBoost_Classifier

    data = pd.read_csv("example/example_data/heart.csv")

    # encoding categorical features
    encoder = formatting()
    encoder.fit(data)

    # X/y split
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    clf = XGBoost_Classifier(
        eta=0.3,
        gamma=0,
        max_depth=6,
        min_child_weight=1,
        max_delta_step=0,
        reg_lambda=1,
        reg_alpha=0,
    )
    clf.fit(X, y)
    y_pred = clf.predict(X)
    y_prob = clf.predict_proba(X)

    assert clf._fitted == True, "Model has not been fitted."
def test_xgboost_regressor():
    """Fit XGBoost_Regressor on the insurance dataset with explicit settings."""
    from My_AutoML._model import XGBoost_Regressor

    data = pd.read_csv("example/example_data/insurance.csv")

    # encoding categorical features
    encoder = formatting()
    encoder.fit(data)

    # X/y split
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    reg = XGBoost_Regressor(
        eta=0.3,
        gamma=0,
        max_depth=6,
        min_child_weight=1,
        max_delta_step=0,
        reg_lambda=1,
        reg_alpha=0,
    )
    reg.fit(X, y)
    y_pred = reg.predict(X)
    # regressors may legitimately not implement predict_proba
    try:
        y_prob = reg.predict_proba(X)
    except NotImplementedError:
        pass

    assert reg._fitted == True, "Model has not been fitted."
def test_gam_classifier():
    """Fit GAM_Classifier (logistic link) on the heart dataset."""
    from My_AutoML._model import GAM_Classifier

    data = pd.read_csv("example/example_data/heart.csv")

    # encoding categorical features
    encoder = formatting()
    encoder.fit(data)

    # X/y split
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    clf = GAM_Classifier(
        type="logistic",
        tol=1e-4,
    )
    clf.fit(X, y)
    y_pred = clf.predict(X)
    y_prob = clf.predict_proba(X)

    assert clf._fitted == True, "Model has not been fitted."
def test_gam_regressor():
    """Fit GAM_Regressor with every supported family on the insurance data."""
    from My_AutoML._model import GAM_Regressor

    data = pd.read_csv("example/example_data/insurance.csv")

    # encoding categorical features
    encoder = formatting()
    encoder.fit(data)

    # X/y split
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    # (constructor argument, human-readable label used in assert messages)
    for family, label in (
        ("linear", "Linear"),
        ("gamma", "Gamma"),
        ("poisson", "Poisson"),
        ("inverse_gaussian", "Inverse Gaussian"),
    ):
        model = GAM_Regressor(
            type=family,
            tol=1e-4,
        )
        model.fit(X, y)
        y_pred = model.predict(X)
        # regressors may legitimately not implement predict_proba
        try:
            y_prob = model.predict_proba(X)
        except NotImplementedError:
            pass
        assert (
            model._fitted == True
        ), "Model GAM_Regressor {} has not been fitted.".format(label)
|
PanyiDong/AutoML | My_AutoML/_utils/_stat.py | """
File: _stat.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_utils/_stat.py
File Created: Wednesday, 6th April 2022 12:02:53 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 23rd April 2022 10:15:51 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import warnings
import numpy as np
import pandas as pd
import scipy.stats
import copy
# return non-nan covariance matrix between X and y, (return covariance of X if y = None)
# default calculate at columns (axis = 0), axis = 1 at rows
def nan_cov(X, y=None, axis=0):
    """Return the NaN-aware covariance matrix between X and y.

    If y is None (or contains no usable data), the covariance of X with
    itself is returned.  By default covariance is computed across columns
    (axis=0); pass axis=1 to compute across rows.

    Parameters
    ----------
    X : array-like
        First set of variables.

    y : array-like or None, default None
        Second set of variables; falls back to X when None/all-missing.

    axis : int, default 0
        0 computes covariance between columns, 1 between rows.

    Returns
    -------
    numpy.ndarray
        Matrix of shape (n_X_variables, n_y_variables).
    """
    # treat y as "empty" when it is None or carries only missing values
    # (the original compared `y == None`, which only ever matched y=None
    # in the fallback branch; `y is None` makes the intent explicit)
    if y is None:
        _empty = True
    elif isinstance(y, pd.DataFrame):
        _empty = y.isnull().all().all()
    elif isinstance(y, pd.Series):
        _empty = y.isnull().all()
    elif isinstance(y, np.ndarray):
        _empty = np.all(np.isnan(y))
    else:
        _empty = False

    if _empty:
        # fall back to the covariance of X with itself
        y = copy.deepcopy(X)

    if axis == 0:
        if len(X) != len(y):
            raise ValueError("X and y must have same length of rows!")
    elif axis == 1:
        if len(X[0]) != len(y[0]):
            raise ValueError("X and y must have same length of columns!")

    X = np.array(X)
    y = np.array(y)

    # promote 1-D inputs to single-column 2-D arrays
    if X.ndim == 1:
        X = X.reshape(len(X), 1)
    if y.ndim == 1:
        y = y.reshape(len(y), 1)

    _x_mean = np.nanmean(X, axis=axis)
    _y_mean = np.nanmean(y, axis=axis)

    # initialize covariance matrix: rows follow X variables, columns follow y
    _cov = np.zeros((X.shape[1 - axis], y.shape[1 - axis]))

    for i in range(_cov.shape[0]):
        for j in range(_cov.shape[1]):
            if axis == 0:
                _cov[i, j] = np.nansum(
                    (X[:, i] - _x_mean[i]) * (y[:, j] - _y_mean[j])
                ) / (len(X) - 1)
            elif axis == 1:
                _cov[i, j] = np.nansum(
                    (X[i, :] - _x_mean[i]) * (y[j, :] - _y_mean[j])
                ) / (len(X[0]) - 1)

    return _cov
# return class (unique in y) mean of X
def class_means(X, y):
    """Per-class feature means.

    For every unique label in y, computes the column means of the rows of X
    belonging to that class; returns one mean vector per class.
    """
    return [
        np.mean(X.loc[y.values == label], axis=0).values
        for label in np.unique(y)
    ]
# return maximum likelihood estimate for covariance
def empirical_covariance(X, *, assume_centered=False):
    """Maximum-likelihood covariance estimate of X.

    With assume_centered, X is taken as already centered and X^T X / n is
    returned; otherwise the biased (divide-by-n) sample covariance is used.
    Always returns a 2-D array, even for a single feature.
    """
    X = np.asarray(X)
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))

    if X.shape[0] == 1:
        warnings.warn("Only one data sample available!")

    if assume_centered:
        result = np.dot(X.T, X) / X.shape[0]
    else:
        result = np.cov(X.T, bias=1)

    # a single feature yields a 0-d array; promote it to shape (1, 1)
    return np.array([[result]]) if result.ndim == 0 else result
# return weighted within-class covariance matrix
def class_cov(X, y, priors):
    """Weighted within-class covariance matrix.

    The empirical covariance of each class in y is weighted by the matching
    entry of ``priors`` and summed into a single (n_features, n_features)
    matrix.
    """
    X = X if isinstance(X, pd.DataFrame) else pd.DataFrame(X)
    y = y if isinstance(y, pd.Series) else pd.Series(y)

    total = np.zeros((X.shape[1], X.shape[1]))
    for idx, label in enumerate(np.unique(y)):
        subset = X.loc[y.values == label, :]
        total += priors[idx] * empirical_covariance(subset)
    return total
# return Pearson Correlation Coefficients
def Pearson_Corr(X, y):
    """Pearson correlation coefficient between every column of X and y."""
    corr = []
    for name in list(X.columns):
        # cov(x, y) / sqrt(var(x) * var(y)), all NaN-aware
        numerator = nan_cov(X[name], y)
        denominator = np.sqrt(nan_cov(X[name]) * nan_cov(y))
        corr.append((numerator / denominator)[0][0])
    return corr
# return Mutual Information
def MI(X, y):
    """Mutual information between each column of X and the response y.

    Inputs are treated as categorical; all probabilities are empirical
    frequencies.  Returns one MI value per feature, computed as
    H(y) - H(y | x).
    """
    if len(X) != len(y):
        raise ValueError("X and y not same size!")

    if not isinstance(X, pd.DataFrame):
        X = pd.DataFrame(X)
    if not isinstance(y, pd.DataFrame):
        y = pd.DataFrame(y)

    n = len(X)
    response = y.columns[0]

    # marginal entropy H(y)
    y_prob = y.groupby(response).size().div(n).values
    entropy_y = -sum(p * np.log(p) for p in y_prob)

    scores = []
    for feature in X.columns:
        # joint distribution p(x, y) as a MultiIndex series
        joint = (
            pd.concat([X[feature], y], axis=1)
            .groupby([feature, response])
            .size()
            .div(n)
        )
        joint_vals = joint.values
        # marginal distribution p(x)
        marginal = X[[feature]].groupby(feature).size().div(n)

        # conditional entropy H(y | x) = -sum p(x,y) * log(p(x,y) / p(x))
        cond_entropy = 0.0
        for i in range(len(joint)):
            p_x = marginal.loc[marginal.index == joint.index[i][0]].values[0]
            cond_entropy -= joint_vals[i] * np.log(joint_vals[i] / p_x)

        scores.append(entropy_y - cond_entropy)
    return scores
# return t-statistics of dataset, only two groups dataset are suitable
def t_score(X, y, fvalue=True, pvalue=False):
    """Two-sample t-statistics for every feature in X.

    y must contain exactly two classes.  Returns the t statistics and/or
    the p-values depending on the ``fvalue``/``pvalue`` flags.
    """
    if len(X) != len(y):
        raise ValueError("X and y not same size!")

    if len(y.shape) > 1:
        y = y.iloc[:, 0]  # only the first response column is used

    labels = y.unique()
    if len(labels) != 2:
        raise ValueError(
            "Only 2 group datasets are acceptable, get {}.".format(len(labels))
        )

    stats, pvals = [], []
    for feature in list(X.columns):
        t_stat, p_val = scipy.stats.ttest_ind(
            X.loc[y == labels[0], feature],
            X.loc[y == labels[1], feature],
        )
        if fvalue:
            stats.append(t_stat)
        if pvalue:
            pvals.append(p_val)

    if fvalue and pvalue:
        return stats, pvals
    if fvalue:
        return stats
    if pvalue:
        return pvals
# return ANOVA of dataset, more than two groups dataset are suitable
def ANOVA(X, y, fvalue=True, pvalue=False):
    """One-way ANOVA F-test of every feature in X across the groups of y.

    Supports any number of groups.  Returns the F statistics and/or the
    p-values depending on the ``fvalue``/``pvalue`` flags.
    """
    if len(X) != len(y):
        raise ValueError("X and y not same size!")

    if len(y.shape) > 1:
        y = y.iloc[:, 0]  # only the first response column is used

    labels = y.unique()

    stats, pvals = [], []
    for feature in list(X.columns):
        samples = [X.loc[y == label, feature] for label in labels]
        f_stat, p_val = scipy.stats.f_oneway(*samples)
        if fvalue:
            stats.append(f_stat)
        if pvalue:
            pvals.append(p_val)

    if fvalue and pvalue:
        return stats, pvals
    if fvalue:
        return stats
    if pvalue:
        return pvals
# convert metrics to minimize the error (as a loss function)
# add negative sign to make maximization to minimize
def neg_R2(y_true, y_pred):
    """Negated R^2 score, so maximizing R^2 becomes a minimization problem."""
    from sklearn.metrics import r2_score

    return -1 * r2_score(y_true, y_pred)
def neg_accuracy(y_true, y_pred):
    """Negated accuracy, so maximizing accuracy becomes a minimization problem."""
    from sklearn.metrics import accuracy_score

    return -1 * accuracy_score(y_true, y_pred)
def neg_precision(y_true, y_pred):
    """Negated precision, so maximizing precision becomes a minimization problem."""
    from sklearn.metrics import precision_score

    return -1 * precision_score(y_true, y_pred)
def neg_auc(y_true, y_pred):
    """Negated ROC AUC, so maximizing AUC becomes a minimization problem."""
    from sklearn.metrics import roc_auc_score

    return -1 * roc_auc_score(y_true, y_pred)
def neg_hinge(y_true, y_pred):
    """Negated hinge loss (note: hinge loss is already a loss to minimize)."""
    from sklearn.metrics import hinge_loss

    return -1 * hinge_loss(y_true, y_pred)
def neg_f1(y_true, y_pred):
    """Negated F1 score, so maximizing F1 becomes a minimization problem."""
    from sklearn.metrics import f1_score

    return -1 * f1_score(y_true, y_pred)
|
PanyiDong/AutoML | My_AutoML/_feature_selection/_autosklearn.py | <reponame>PanyiDong/AutoML<filename>My_AutoML/_feature_selection/_autosklearn.py<gh_stars>0
"""
File: _autosklearn.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: \My_AutoML\_feature_selection\_autosklearn.py
File Created: Friday, 15th April 2022 2:55:10 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 11:45:12 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import autosklearn.pipeline.components.feature_preprocessing as askfs
########################################################################################
# wrap autosklearn feature selection methods
# can be initialized without specifying hyperparameters
class densifier(askfs.densifier.Densifier):
    """autosklearn Densifier wrapper with a ``_fitted`` state flag."""

    def __init__(self):
        super(densifier, self).__init__()
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(densifier, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(densifier, self).transform(X)
class extra_trees_preproc_for_classification(
    askfs.extra_trees_preproc_for_classification.ExtraTreesPreprocessorClassification
):
    """autosklearn ExtraTreesPreprocessorClassification wrapper with a ``_fitted`` flag."""

    def __init__(
        self,
        n_estimators=5,
        criterion="entropy",
        min_samples_leaf=5,
        min_samples_split=5,
        max_features=0.5,
        bootstrap=False,
        max_leaf_nodes=None,
        max_depth=None,
        min_weight_fraction_leaf=0.0,
        min_impurity_decrease=0.0,
    ):
        # forward every hyperparameter straight to the autosklearn component
        super(extra_trees_preproc_for_classification, self).__init__(
            n_estimators=n_estimators,
            criterion=criterion,
            min_samples_leaf=min_samples_leaf,
            min_samples_split=min_samples_split,
            max_features=max_features,
            bootstrap=bootstrap,
            max_leaf_nodes=max_leaf_nodes,
            max_depth=max_depth,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            min_impurity_decrease=min_impurity_decrease,
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(extra_trees_preproc_for_classification, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(extra_trees_preproc_for_classification, self).transform(X)
class extra_trees_preproc_for_regression(
    askfs.extra_trees_preproc_for_regression.ExtraTreesPreprocessorRegression
):
    """autosklearn ExtraTreesPreprocessorRegression wrapper with a ``_fitted`` flag."""

    def __init__(
        self,
        n_estimators=5,
        criterion="mse",
        min_samples_leaf=5,
        min_samples_split=5,
        max_features=0.5,
        bootstrap=False,
        max_leaf_nodes=None,
        max_depth=None,
        min_weight_fraction_leaf=0.0,
    ):
        # forward every hyperparameter straight to the autosklearn component
        super(extra_trees_preproc_for_regression, self).__init__(
            n_estimators=n_estimators,
            criterion=criterion,
            min_samples_leaf=min_samples_leaf,
            min_samples_split=min_samples_split,
            max_features=max_features,
            bootstrap=bootstrap,
            max_leaf_nodes=max_leaf_nodes,
            max_depth=max_depth,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(extra_trees_preproc_for_regression, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(extra_trees_preproc_for_regression, self).transform(X)
class fast_ica(askfs.fast_ica.FastICA):
    """autosklearn FastICA wrapper with a ``_fitted`` state flag."""

    def __init__(self, algorithm="parallel", whiten=False, fun="logcosh", n_components=5):
        super(fast_ica, self).__init__(
            algorithm=algorithm, whiten=whiten, fun=fun, n_components=n_components
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(fast_ica, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(fast_ica, self).transform(X)
class feature_agglomeration(askfs.feature_agglomeration.FeatureAgglomeration):
    """autosklearn FeatureAgglomeration wrapper with a ``_fitted`` state flag."""

    def __init__(
        self, n_clusters=5, affinity="euclidean", linkage="ward", pooling_func="mean"
    ):
        super(feature_agglomeration, self).__init__(
            n_clusters=n_clusters,
            affinity=affinity,
            linkage=linkage,
            pooling_func=pooling_func,
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(feature_agglomeration, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(feature_agglomeration, self).transform(X)
class kernel_pca(askfs.kernel_pca.KernelPCA):
    """autosklearn KernelPCA wrapper with a ``_fitted`` state flag."""

    def __init__(self, n_components=5, kernel="rbf", gamma=0.1, degree=3, coef0=0.5):
        super(kernel_pca, self).__init__(
            n_components=n_components,
            kernel=kernel,
            gamma=gamma,
            degree=degree,
            coef0=coef0,
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(kernel_pca, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(kernel_pca, self).transform(X)
class kitchen_sinks(askfs.kitchen_sinks.RandomKitchenSinks):
    """autosklearn RandomKitchenSinks wrapper with a ``_fitted`` state flag."""

    def __init__(self, gamma=0.1, n_components=50):
        super(kitchen_sinks, self).__init__(gamma=gamma, n_components=n_components)
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(kitchen_sinks, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(kitchen_sinks, self).transform(X)
class liblinear_svc_preprocessor(
    askfs.liblinear_svc_preprocessor.LibLinear_Preprocessor
):
    """autosklearn LibLinear_Preprocessor wrapper with a ``_fitted`` state flag."""

    def __init__(
        self,
        penalty="l1",
        loss="squared_hinge",
        dual=False,
        tol=0.0001,
        C=1.0,
        multi_class="ovr",
        fit_intercept=True,
        intercept_scaling=1,
    ):
        # forward every hyperparameter straight to the autosklearn component
        super(liblinear_svc_preprocessor, self).__init__(
            penalty=penalty,
            loss=loss,
            dual=dual,
            tol=tol,
            C=C,
            multi_class=multi_class,
            fit_intercept=fit_intercept,
            intercept_scaling=intercept_scaling,
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(liblinear_svc_preprocessor, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(liblinear_svc_preprocessor, self).transform(X)
class nystroem_sampler(askfs.nystroem_sampler.Nystroem):
    """autosklearn Nystroem sampler wrapper with a ``_fitted`` state flag."""

    def __init__(self, kernel="rbf", n_components=50, gamma=0.1, degree=3, coef0=0.5):
        super(nystroem_sampler, self).__init__(
            kernel=kernel,
            n_components=n_components,
            gamma=gamma,
            degree=degree,
            coef0=coef0,
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(nystroem_sampler, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(nystroem_sampler, self).transform(X)
class pca(askfs.pca.PCA):
    """autosklearn PCA wrapper with a ``_fitted`` state flag."""

    def __init__(self, keep_variance=0.5, whiten=True):
        super(pca, self).__init__(keep_variance=keep_variance, whiten=whiten)
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(pca, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(pca, self).transform(X)
class polynomial(askfs.polynomial.PolynomialFeatures):
    """autosklearn PolynomialFeatures wrapper with a ``_fitted`` state flag."""

    def __init__(self, degree=3, interaction_only=False, include_bias=True):
        super(polynomial, self).__init__(
            degree=degree,
            interaction_only=interaction_only,
            include_bias=include_bias,
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(polynomial, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(polynomial, self).transform(X)
class random_trees_embedding(askfs.random_trees_embedding.RandomTreesEmbedding):
    """autosklearn RandomTreesEmbedding wrapper with a ``_fitted`` state flag."""

    def __init__(
        self,
        n_estimators=5,
        max_depth=3,
        min_samples_split=5,
        min_samples_leaf=5,
        min_weight_fraction_leaf=1.0,
        max_leaf_nodes=None,
        bootstrap=True,
    ):
        # forward every hyperparameter straight to the autosklearn component
        super(random_trees_embedding, self).__init__(
            n_estimators=n_estimators,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_leaf_nodes=max_leaf_nodes,
            bootstrap=bootstrap,
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(random_trees_embedding, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(random_trees_embedding, self).transform(X)
class select_percentile_classification(
    askfs.select_percentile_classification.SelectPercentileClassification
):
    """autosklearn SelectPercentileClassification wrapper with a ``_fitted`` flag."""

    def __init__(self, percentile=90, score_func="chi2"):
        super(select_percentile_classification, self).__init__(
            percentile=percentile, score_func=score_func
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped selector and record the fitted state."""
        super(select_percentile_classification, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted selection to X."""
        return super(select_percentile_classification, self).transform(X)
class select_percentile_regression(
    askfs.select_percentile_regression.SelectPercentileRegression
):
    """autosklearn SelectPercentileRegression wrapper with a ``_fitted`` flag."""

    def __init__(self, percentile=90, score_func="f_regression"):
        super(select_percentile_regression, self).__init__(
            percentile=percentile, score_func=score_func
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped selector and record the fitted state."""
        super(select_percentile_regression, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted selection to X."""
        return super(select_percentile_regression, self).transform(X)
class select_rates_classification(
    askfs.select_rates_classification.SelectClassificationRates
):
    """autosklearn SelectClassificationRates wrapper with a ``_fitted`` flag."""

    def __init__(self, alpha=0.3, score_func="chi2", mode="fpr"):
        # sklearn/autosklearn name this scorer "mutual_info_classif" while the
        # rest of the project uses "mutual_info"; translate to smooth over the
        # naming inconsistency
        resolved = (
            "mutual_info_classif" if score_func == "mutual_info" else score_func
        )
        super(select_rates_classification, self).__init__(
            alpha=alpha, score_func=resolved, mode=mode
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped selector and record the fitted state."""
        super(select_rates_classification, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted selection to X."""
        return super(select_rates_classification, self).transform(X)
class select_rates_regression(askfs.select_rates_regression.SelectRegressionRates):
    """autosklearn SelectRegressionRates wrapper with a ``_fitted`` flag."""

    def __init__(self, alpha=0.3, score_func="f_regression", mode="fpr"):
        super(select_rates_regression, self).__init__(
            alpha=alpha, score_func=score_func, mode=mode
        )
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped selector and record the fitted state."""
        super(select_rates_regression, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted selection to X."""
        return super(select_rates_regression, self).transform(X)
class truncatedSVD(askfs.truncatedSVD.TruncatedSVD):
    """autosklearn TruncatedSVD wrapper with a ``_fitted`` state flag."""

    def __init__(self, target_dim=5):
        super(truncatedSVD, self).__init__(target_dim=target_dim)
        self._fitted = False  # set to True once fit() completes

    def fit(self, X, y=None):
        """Fit the wrapped preprocessor and record the fitted state."""
        super(truncatedSVD, self).fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the fitted transformation to X."""
        return super(truncatedSVD, self).transform(X)
|
PanyiDong/AutoML | My_AutoML/_imputation/_nn.py | <gh_stars>1-10
"""
File: _nn.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_imputation/_nn.py
File Created: Tuesday, 5th April 2022 11:50:10 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Sunday, 24th April 2022 10:50:35 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from time import sleep
from tqdm import tqdm
import numpy as np
import pandas as pd
import warnings
# check if tensorflow exists
# if exists, import tensorflow
import importlib
tensorflow_spec = importlib.util.find_spec("tensorflow")
if tensorflow_spec is not None:
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
# tf.compat.v1.disable_v2_behavior() # use tf < 2.0 functions
torch_spec = importlib.util.find_spec("torch")
if torch_spec is not None:
import torch
import torch.optim
from torch import nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from My_AutoML._utils import (
random_index,
feature_rounding,
get_missing_matrix,
formatting,
)
from My_AutoML._scaling import MinMaxScale
class GAIN_tf(formatting, MinMaxScale):
    """
    Generative Adversarial Imputation Nets tensorflow version (GAIN)

    train Generator (G) and Discriminator (D) to impute missing values [1]

    [1] <NAME>., <NAME>. and <NAME>., 2018, July. Gain: Missing data imputation using
    generative adversarial nets. In International Conference on Machine Learning (pp. 5689-5698). PMLR.
    github.com/jsyooon0823/GAIN

    Parameters
    ----------
    batch_size: mini-batch sample size, default = 128

    hint_rate: hint rate, default = 0.9

    alpha: penalty in optimizing Generator, default = 100

    optim: not supported for tensorflow version

    lr: not supported for tensorflow version

    max_iter: maximum number of iterations, default = 100

    delta: not supported for tensorflow version

    scaling: whether scale the dataset before imputation, default = True

    progressbar: whether to display a tqdm progress bar, default = False

    deep_copy: whether to deep copy dataframe, default = False

    seed: random seed, default = 1
    """

    def __init__(
        self,
        batch_size=128,
        hint_rate=0.9,
        alpha=100,
        optim=None,
        lr=None,
        max_iter=100,
        delta=1e-8,
        scaling=True,
        progressbar=False,
        deep_copy=False,
        seed=1,
    ):
        self.batch_size = batch_size
        self.hint_rate = hint_rate
        self.alpha = alpha
        self.max_iter = max_iter
        self.delta = delta
        self.scaling = scaling
        self.progressbar = progressbar
        self.deep_copy = deep_copy
        self.seed = seed

        # flipped to True after training in _fill
        # (added for consistency with the pytorch implementation)
        self._fitted = False

    def mask_matrix(self, X):
        """
        mask matrix, m_{ij} = 1 where x_{ij} exists; m_{ij} = 0 otherwise
        """
        return 1 - X.isnull().astype(int)

    # initialize normal tensor by size
    def normal_initial(self, size):
        _dim = size[0]
        return tf.random.normal(shape=size, stddev=1 / tf.sqrt(_dim / 2))

    # return random binary array by size
    def binary_sampler(self, p=0.5, size=(1, 1)):
        # allows only change row size with (n, )
        # cannot handle (, n)
        try:
            # BUGFIX: original used "==" (a no-op comparison) where a
            # missing dimension should default to 1
            if size[0] is None:
                size = (1, size[1])
            elif size[1] is None:
                size = (size[0], 1)
        except IndexError:
            size = (size[0], 1)
        _random_unit = np.random.uniform(low=0, high=1, size=size)
        return 1 * (_random_unit < p)

    # return random uniform array by size
    def uniform_sampler(self, low=0, high=1, size=(1, 1)):
        # allows only change row size with (n, )
        # cannot handle (, n)
        try:
            # BUGFIX: same no-op "==" comparison as in binary_sampler
            if size[0] is None:
                size = (1, size[1])
            elif size[1] is None:
                size = (size[0], 1)
        except IndexError:
            size = (size[0], 1)
        return np.random.uniform(low=low, high=high, size=size)

    # Generator
    def Generator(self, data, mask):
        G_W1, G_W2, G_W3, G_b1, G_b2, G_b3 = self.theta_G
        _input = tf.concat(values=[data, mask], axis=1)  # concate data with mask
        G_h1 = tf.nn.relu(tf.matmul(_input, G_W1) + G_b1)
        G_h2 = tf.nn.relu(tf.matmul(G_h1, G_W2) + G_b2)
        G_pro = tf.nn.sigmoid(tf.matmul(G_h2, G_W3) + G_b3)  # MinMax normalization

        return G_pro

    # Discriminator
    def Discriminator(self, data, hint):
        D_W1, D_W2, D_W3, D_b1, D_b2, D_b3 = self.theta_D
        _input = tf.concat(values=[data, hint], axis=1)  # concate data with hint
        D_h1 = tf.nn.relu(tf.matmul(_input, D_W1) + D_b1)
        D_h2 = tf.nn.relu(tf.matmul(D_h1, D_W2) + D_b2)
        D_pro = tf.nn.sigmoid(tf.matmul(D_h2, D_W3) + D_b3)  # MinMax normalization

        return D_pro

    def fill(self, X):
        """Impute missing values in X; warn and return a copy when X has no NaN."""
        _X = X.copy(deep=True)

        if _X.isnull().values.any():
            _X = self._fill(_X)
        else:
            warnings.warn("No nan values found, no change.")

        return _X

    def _fill(self, data):
        _data = data.copy(deep=self.deep_copy)

        n, p = _data.shape

        _h_dim = int(p)  # Hidden state dimensions

        _mask = self.mask_matrix(_data).values

        # convert categorical to numerical
        # BUGFIX: original called formatter.fit(_X) on the undefined name _X
        formatter = formatting(inplace=True)
        formatter.fit(_data)

        # if scaling, use MinMaxScale to scale the features
        # BUGFIX: original scaled the undefined _X and never defined
        # _data_scaled when scaling was disabled
        if self.scaling:
            scaling = MinMaxScale()
            _data_scaled = scaling.fit_transform(_data)
        else:
            _data_scaled = _data

        # initialization: fill missing entries with 0
        _data_scaled = _data_scaled.fillna(0)

        # divide dataframe to np array for values and features names list
        _features = list(_data_scaled.columns)
        _data_scaled = _data_scaled.values

        # GAIN architecture
        _X = tf.compat.v1.placeholder(tf.float32, shape=[None, p])  # data
        _M = tf.compat.v1.placeholder(tf.float32, shape=[None, p])  # mask vector
        _H = tf.compat.v1.placeholder(tf.float32, shape=[None, p])  # hint vector

        # Generator Variables
        G_W1 = tf.Variable(self.normal_initial([p * 2, _h_dim]))
        G_b1 = tf.Variable(tf.zeros(shape=[_h_dim]))
        G_W2 = tf.Variable(self.normal_initial([_h_dim, _h_dim]))
        G_b2 = tf.Variable(tf.zeros(shape=[_h_dim]))
        G_W3 = tf.Variable(self.normal_initial([_h_dim, p]))
        G_b3 = tf.Variable(tf.zeros(shape=[p]))

        self.theta_G = [G_W1, G_W2, G_W3, G_b1, G_b2, G_b3]

        # Discriminator Variables
        D_W1 = tf.Variable(self.normal_initial([p * 2, _h_dim]))
        D_b1 = tf.Variable(tf.zeros(shape=[_h_dim]))
        D_W2 = tf.Variable(self.normal_initial([_h_dim, _h_dim]))
        D_b2 = tf.Variable(tf.zeros(shape=[_h_dim]))
        D_W3 = tf.Variable(self.normal_initial([_h_dim, p]))
        D_b3 = tf.Variable(tf.zeros(shape=[p]))

        self.theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]

        # GAIN structure
        _G = self.Generator(_X, _M)  # Generator
        _hat_X = _X * _M + _G * (1 - _M)  # combine mask with observed data
        _D = self.Discriminator(_hat_X, _H)  # Discriminator

        _D_loss_tmp = -tf.reduce_mean(
            _M * tf.compat.v1.log(_D + 1e-8)
            + (1 - _M) * tf.compat.v1.log(1.0 - _D + 1e-8)
        )  # Discriminator loss
        _G_loss_tmp = -tf.reduce_mean(
            (1 - _M) * tf.compat.v1.log(_D + 1e-8)
        )  # Generator loss
        _MSE_loss = tf.reduce_mean((_M * _X - _M * _G) ** 2) / tf.reduce_mean(_M)
        _D_loss = _D_loss_tmp
        _G_loss = _G_loss_tmp + self.alpha * _MSE_loss

        # GAIN solver
        # BUGFIX: the solvers were swapped — the generator solver minimized
        # the discriminator loss over theta_G and vice versa
        _G_solver = tf.compat.v1.train.AdamOptimizer().minimize(
            _G_loss, var_list=self.theta_G
        )
        _D_solver = tf.compat.v1.train.AdamOptimizer().minimize(
            _D_loss, var_list=self.theta_D
        )

        # Iterations
        sess = tf.compat.v1.Session()
        sess.run(tf.compat.v1.global_variables_initializer())

        _seed = self.seed  # initialize random seed

        # training step
        iterator = (
            tqdm(range(self.max_iter)) if self.progressbar else range(self.max_iter)
        )
        for _run in iterator:
            batch_index = random_index(
                self.batch_size, n, seed=_seed
            )  # random sample batch
            _X_mb = _data_scaled[batch_index, :]
            _M_mb = _mask[batch_index, :]
            _Z_mb = self.uniform_sampler(
                low=0, high=0.01, size=(self.batch_size, p)
            )  # random sample vector
            _H_mb_1 = self.binary_sampler(p=self.hint_rate, size=(self.batch_size, p))
            _H_mb = _M_mb * _H_mb_1  # sample hint vectors

            # combine random sample vector with observed data
            _X_mb = _M_mb * _X_mb + (1 - _M_mb) * _Z_mb

            _, _D_loss_now = sess.run(
                [_D_solver, _D_loss_tmp], feed_dict={_M: _M_mb, _X: _X_mb, _H: _H_mb}
            )
            _, _G_loss_now, _MSE_loss_now = sess.run(
                [_G_solver, _G_loss_tmp, _MSE_loss],
                feed_dict={_M: _M_mb, _X: _X_mb, _H: _H_mb},
            )
            _seed += 1

        # return imputed data
        _Z_mb = self.uniform_sampler(low=0, high=0.01, size=(n, p))
        _M_mb = _mask
        _X_mb = _data_scaled
        _X_mb = _M_mb * _X_mb + (1 - _M_mb) * _Z_mb

        _imputed_data = sess.run([_G], feed_dict={_X: _X_mb, _M: _M_mb})[0]
        _imputed_data = _mask * _data_scaled + (1 - _mask) * _imputed_data

        # combine data with column names to dataframe
        _imputed_data = pd.DataFrame(_imputed_data, columns=_features)

        # mark the networks as trained
        self._fitted = True

        # if scaling, scale back
        # BUGFIX: original inverse-transformed and refit the tf placeholder
        # _X instead of the imputed dataframe
        if self.scaling:
            _imputed_data = scaling.inverse_transform(_imputed_data)

        # convert numerical back to categorical
        formatter.refit(_imputed_data)

        return _imputed_data
class GAIN_torch(formatting, MinMaxScale):
"""
Generative Adversarial Imputation Nets (GAIN) pytorch version [1]
train Generator (G) and Discriminator (D) to impute missing values
[1] <NAME>., <NAME>. and <NAME>., 2018, July. Gain: Missing data imputation using
generative adversarial nets. In International Conference on Machine Learning (pp. 5689-5698).
PMLR. github.com/jsyooon0823/GAIN
Parameters
----------
batch_size: mini-batch sample size, default = 128
hint_rate: hint rate, default = 0.9
alpha: penalty in optimizing Generator, default = 100
optim: optimization algorithms, default = 'Adam'
support ["Adam", "SGD", "Adagrad", "LBFGS", "RMSprop"]
lr: learning rate, default: None
default lr will depend on optimizer
for 'LBFGS', default lr = 1
for 'Adam', default lr = 0.001
else, default lr = 0.01
max_iter: maximum number of iterations, default = 100
delta: training early stopping criteria, default = 1e-8
if changing percentage not significant, early stop training
scaling: whether scale the dataset before imputation, default = True
deep_copy: whether to deep copy dataframe, default = False
seed: random seed, default = 1
"""
def __init__(
self,
batch_size=128,
hint_rate=0.9,
alpha=100,
optim="Adam",
lr=None,
max_iter=100,
delta=1e-8,
scaling=True,
progressbar=False,
deep_copy=False,
seed=1,
):
self.batch_size = batch_size
self.hint_rate = hint_rate
self.alpha = alpha
self.optim = optim
# default learning rate dependent on optimizer
if self.optim == "LBFGS" and not lr:
self.lr = 1
elif self.optim == "Adam" and not lr:
self.lr = 0.001
elif not lr:
self.lr = 0.01
else:
self.lr = lr
self.max_iter = max_iter
self.delta = delta
self.scaling = scaling
self.progressbar = progressbar
self.deep_copy = deep_copy
self.seed = seed
np.random.seed(self.seed)
self._fitted = False # whether fitted on train set
# get random m integer number in range [0, n - 1]
def random_index(self, n, m):
return np.random.permutation(n)[:m]
# initialize Generator/Discriminator variables
def _initialization(self, p, h_dim):
# W with random normal initialization and b with zero initialization
# initialize Generator variables
G_W1 = nn.init.normal_(
torch.empty(2 * p, h_dim, dtype=torch.double, requires_grad=True)
)
G_b1 = torch.zeros(h_dim, dtype=torch.double, requires_grad=True)
G_W2 = nn.init.normal_(
torch.empty(h_dim, h_dim, dtype=torch.double, requires_grad=True)
)
G_b2 = torch.zeros(h_dim, dtype=torch.double, requires_grad=True)
G_W3 = nn.init.normal_(
torch.empty(h_dim, p, dtype=torch.double, requires_grad=True)
)
G_b3 = torch.zeros(p, dtype=torch.double, requires_grad=True)
self.theta_G = [G_W1, G_W2, G_W3, G_b1, G_b2, G_b3]
# initialize Discriminator variables
D_W1 = nn.init.normal_(
torch.empty(2 * p, h_dim, dtype=torch.double, requires_grad=True)
)
D_b1 = torch.zeros(h_dim, dtype=torch.double, requires_grad=True)
D_W2 = nn.init.normal_(
torch.empty(h_dim, h_dim, dtype=torch.double, requires_grad=True)
)
D_b2 = torch.zeros(h_dim, dtype=torch.double, requires_grad=True)
D_W3 = nn.init.normal_(
torch.empty(h_dim, p, dtype=torch.double, requires_grad=True)
)
D_b3 = torch.zeros(p, dtype=torch.double, requires_grad=True)
self.theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]
# Generator network structure
def Generator(self, data, mask):
G_W1, G_W2, G_W3, G_b1, G_b2, G_b3 = self.theta_G
_input = torch.cat(tensors=[data, mask], dim=1) # concate data with mask
G_h1 = F.relu(torch.matmul(_input, G_W1) + G_b1)
G_h2 = F.relu(torch.matmul(G_h1, G_W2) + G_b2)
G_pro = torch.sigmoid(
torch.matmul(G_h2, G_W3) + G_b3
) # normalize to probability
return G_pro
# Discriminator network structure
def Discriminator(self, data, hint):
D_W1, D_W2, D_W3, D_b1, D_b2, D_b3 = self.theta_D
_input = torch.cat(tensors=[data, hint], dim=1) # concate data with hint matrix
D_h1 = F.relu(torch.matmul(_input, D_W1) + D_b1)
D_h2 = F.relu(torch.matmul(D_h1, D_W2) + D_b2)
D_pro = torch.sigmoid(
torch.matmul(D_h2, D_W3) + D_b3
) # normalize to probability
return D_pro
# Generator loss
def network_loss(self, X, M, H):
_G = self.Generator(X, M)
_hat_X = X * M + _G * (1 - M)
_D = self.Discriminator(_hat_X, H)
# Discriminator loss
_D_loss = -torch.mean(
M * torch.log(_D + 1e-8) + (1 - M) * torch.log(1.0 - _D + 1e-8)
)
# Generator loss
_G_loss_1 = -torch.mean((1 - M) * torch.log(_D + 1e-8))
_G_loss_2 = torch.mean((M * X - M * _G) ** 2) / torch.mean(M)
_G_loss = _G_loss_1 + self.alpha * _G_loss_2
return _G_loss, _D_loss
def fill(self, X):
# make sure input is a dataframe
if not isinstance(X, pd.DataFrame):
try:
X = pd.DataFrame(X)
except:
raise TypeError("Expect a dataframe, get {}.".format(type(X)))
_X = X.copy(deep=self.deep_copy)
if _X.isnull().values.any():
_X = self._fill(_X)
else:
warnings.warn("No missing values found, no change.")
return _X
    def _fill(self, X):
        """Train (first call only) and apply the GAIN networks to impute ``X``.

        Pipeline: encode categoricals, optionally min-max scale, zero-fill
        missing entries, adversarially train Generator/Discriminator on
        mini-batches with early stopping, then impute missing cells with the
        trained Generator and undo the scaling/encoding.

        Parameters
        ----------
        X : pandas.DataFrame
            Dataset containing missing values.

        Returns
        -------
        pandas.DataFrame
            Copy of ``X`` with missing entries imputed.
        """
        _X = X.copy(deep=self.deep_copy)
        n, p = _X.shape # get shape of dataset
        h_dim = int(p) # get hidden state dimensions
        # make sure batch size is smaller than number of observations
        self.batch_size = min(self.batch_size, n)
        # convert categorical to numerical
        # NOTE(review): formatting/MinMaxScale/get_missing_matrix/device are
        # module-level helpers defined elsewhere in this file — semantics
        # assumed from usage; verify against their definitions
        formatter = formatting(inplace=True)
        formatter.fit(_X)
        # if scaling, use MinMaxScale to scale the features
        if self.scaling:
            scaling = MinMaxScale()
            _X = scaling.fit_transform(_X)
        # GAIN imputation
        # initialization: fill missing with 0
        _X = _X.fillna(0)
        # divide dataframe to list of features and array of values
        _features = list(_X.columns)
        _X = _X.values
        # get mask matrix
        # NOTE(review): fillna(0) runs before the mask is built, so genuine
        # zero values could be flagged as missing — confirm get_missing_matrix
        # semantics for missing=0
        _M = get_missing_matrix(_X, missing=0)
        # if not fitted, train the networks
        if not self._fitted:
            # initialize Generator/Discriminator variables
            self._initialization(p, h_dim)
            # network optimizer
            # both networks share one optimizer over the combined loss below
            if self.optim == "Adam":
                optimizer = torch.optim.Adam(
                    params=self.theta_D + self.theta_G, lr=self.lr
                )
            elif self.optim == "SGD":
                optimizer = torch.optim.SGD(
                    params=self.theta_D + self.theta_G, lr=self.lr
                )
            elif self.optim == "Adagrad":
                optimizer = torch.optim.Adagrad(
                    params=self.theta_D + self.theta_G, lr=self.lr
                )
            elif self.optim == "LBFGS":
                optimizer = torch.optim.LBFGS(
                    params=self.theta_D + self.theta_G, lr=self.lr
                )
            elif self.optim == "RMSprop":
                optimizer = torch.optim.RMSprop(
                    params=self.theta_D + self.theta_G, lr=self.lr
                )
            else:
                raise KeyError(
                    'Get unknown optimizer {}, should be one of ["Adam", "SGD", \
                    "Adagrad", "LBFGS", "RMSprop"].'.format(
                        self.optim
                    )
                )
            # initialize parameters to device
            self.theta_D = [item.to(device) for item in self.theta_D]
            self.theta_G = [item.to(device) for item in self.theta_G]
            # store the losses for early_stopping
            self.losses = []
            # training step
            iterator = (
                tqdm(range(self.max_iter)) if self.progressbar else range(self.max_iter)
            )
            for _ in iterator:
                # get mini-batch data
                batch_index = self.random_index(n, self.batch_size)
                _X_mb = _X[batch_index, :]
                _M_mb = _M[batch_index, :] # mini-batch mask matrix
                # mini-batch random imputation
                _Z_mb = np.random.uniform(low=0, high=0.01, size=(self.batch_size, p))
                _H_mb_1 = 1 * (
                    np.random.uniform(0, 1, size=(self.batch_size, p)) < self.hint_rate
                )
                _H_mb = _M_mb * _H_mb_1 # mini-batch hint matrix
                # combine random imputation with data
                _X_mb = _M_mb * _X_mb + (1 - _M_mb) * _Z_mb
                # matrix to tensor
                _X_mb = torch.tensor(_X_mb, dtype=torch.double, device=device)
                _M_mb = torch.tensor(_M_mb, dtype=torch.double, device=device)
                _H_mb = torch.tensor(_H_mb, dtype=torch.double, device=device)
                # get the losses
                # combine two losses as one
                G_loss, D_loss = self.network_loss(_X_mb, _M_mb, _H_mb)
                loss = G_loss + D_loss
                # optimization step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # early_stopping
                self.losses.append(loss.item())
                if len(self.losses) > 1:
                    # if losses changing not significant,
                    # early stop
                    if (
                        np.abs(self.losses[-1] - self.losses[-2]) / self.losses[-2]
                        < self.delta
                    ):
                        # for tqdm, to break, need manual close
                        if self.progressbar:
                            iterator.close()
                        break
                # display loss
                if self.progressbar:
                    iterator.set_postfix({"loss": loss.item()})
                    iterator.refresh() # to show immediately the update
                    sleep(0.01)
        # impute the missing data
        # _X or _M not tensor, convert to tensor
        if not torch.is_tensor(_X):
            _X = torch.tensor(_X, dtype=torch.double, device=device)
        if not torch.is_tensor(_M):
            _M = torch.tensor(_M, dtype=torch.double, device=device)
        # impute using trained Generator
        # observed cells pass through; missing cells come from the Generator
        with torch.no_grad():
            _X = _M * _X + (1 - _M) * self.Generator(_X, _M)
        # if tensor, detach to numpy array
        if torch.is_tensor(_X):
            _X = _X.cpu().detach().numpy()
        # convert back to dataframe
        _X = pd.DataFrame(_X, columns=_features)
        # convert self._fitted and store self.train
        self._fitted = True
        # if scaling, scale back
        # NOTE(review): on a second call with _fitted=True, this scaler was
        # fit on the NEW data while the networks were trained on the old
        # scaling — confirm this is intended
        if self.scaling:
            _X = scaling.inverse_transform(_X)
        # convert numerical back to categorical
        formatter.refit(_X)
        return _X
class GAIN(GAIN_tf, GAIN_torch):
    """
    Generative Adversarial Imputation Nets (GAIN) [1]

    Thin dispatcher: constructs the pytorch backend when torch is available,
    otherwise the tensorflow backend, and raises when neither is installed.
    A Generator (G) and a Discriminator (D) are trained adversarially to
    impute missing values.

    [1] Yoon, Jordon and van der Schaar, 2018, July. GAIN: Missing data
    imputation using generative adversarial nets. In International Conference
    on Machine Learning (pp. 5689-5698). PMLR. github.com/jsyoon0823/GAIN

    Parameters
    ----------
    batch_size: mini-batch sample size, default = 128

    hint_rate: hint rate, default = 0.9

    alpha: penalty in optimizing Generator, default = 100

    optim: optimization algorithms, default = 'Adam'
        support ["Adam", "SGD", "Adagrad", "LBFGS", "RMSprop"]

    lr: learning rate, default: None
        default lr will depend on optimizer
        for 'LBFGS', default lr = 1
        for 'Adam', default lr = 0.001
        else, default lr = 0.01

    max_iter: maximum number of iterations, default = 100

    delta: training early stopping criteria, default = 1e-8
        if changing percentage not significant, early stop training

    scaling: whether scale the dataset before imputation, default = True

    progressbar: whether to display a training progress bar, default = False

    deep_copy: whether to deep copy dataframe, default = False

    seed: random seed, default = 1
    """

    def __init__(
        self,
        batch_size=128,
        hint_rate=0.9,
        alpha=100,
        optim="Adam",
        lr=None,
        max_iter=100,
        delta=1e-8,
        scaling=True,
        progressbar=False,
        deep_copy=False,
        seed=1,
    ):
        self.batch_size = batch_size
        self.hint_rate = hint_rate
        self.alpha = alpha
        self.optim = optim
        self.lr = lr
        self.max_iter = max_iter
        self.delta = delta
        self.scaling = scaling
        self.progressbar = progressbar
        self.deep_copy = deep_copy
        self.seed = seed
        np.random.seed(self.seed)
        self._fitted = False  # whether fitted on train set

        # hand every setting through to whichever backend is installed
        settings = dict(
            batch_size=self.batch_size,
            hint_rate=self.hint_rate,
            alpha=self.alpha,
            optim=self.optim,
            lr=self.lr,
            max_iter=self.max_iter,
            delta=self.delta,
            scaling=self.scaling,
            progressbar=self.progressbar,
            deep_copy=self.deep_copy,
            seed=self.seed,
        )
        if torch_spec is not None:
            # prefer the pytorch implementation when torch is importable
            self.model = GAIN_torch(**settings)
        elif tensorflow_spec is not None:
            self.model = GAIN_tf(**settings)
        else:
            raise Exception(
                "No tensorflow or torch installed. This method is not supported."
            )

    def fill(self, X):
        """Impute missing values in ``X`` via the selected backend."""
        self._fitted = True
        return self.model.fill(X)
|
PanyiDong/AutoML | My_AutoML/_hpo/_ML.py | """
File: _ML.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hpo/_ML.py
File Created: Tuesday, 5th April 2022 10:50:27 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 11:31:28 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
from ._base import AutoTabularBase
from My_AutoML._utils._base import type_of_task
class AutoTabularRegressor(AutoTabularBase):
    """
    AutoTabular for regression tasks built on top of AutoTabularBase.

    Parameters
    ----------
    n_estimators: top k pipelines used to create the ensemble, default: 5

    timeout: Total time limit for the job in seconds, default = 360

    max_evals: Maximum number of function evaluations allowed, default = 64

    allow_error_prop: proportion of tasks allows failure, default = 0.1
        allowed number of failures is int(max_evals * allow_error_prop)

    temp_directory: folder path to store temporary model, default = 'tmp'

    delete_temp_after_terminate: whether to delete temporary information, default = False

    save: whether to save model after training, default = True

    model_name: saved model name, default = 'model'

    ignore_warning: whether to ignore warning, default = True

    encoder: Encoders selected for the job, default = 'auto'
        support ('DataEncoding')
        'auto' will select all default encoders, or use a list to select

    imputer: Imputers selected for the job, default = 'auto'
        support ('SimpleImputer', 'JointImputer', 'ExpectationMaximization', 'KNNImputer',
        'MissForestImputer', 'MICE', 'GAIN')
        'auto' will select all default imputers, or use a list to select

    balancing: Balancings selected for the job, default = 'auto'
        support ('no_processing', 'SimpleRandomOverSampling', 'SimpleRandomUnderSampling',
        'TomekLink', 'EditedNearestNeighbor', 'CondensedNearestNeighbor', 'OneSidedSelection',
        'CNN_TomekLink', 'Smote', 'Smote_TomekLink', 'Smote_ENN')
        'auto' will select all default balancings, or use a list to select

    scaling: Scalings selected for the job, default = 'auto'
        support ('no_processing', 'MinMaxScale', 'Standardize', 'Normalize', 'RobustScale',
        'PowerTransformer', 'QuantileTransformer', 'Winsorization')
        'auto' will select all default scalings, or use a list to select

    feature_selection: Feature selections selected for the job, default = 'auto'
        support ('no_processing', 'LDASelection', 'PCA_FeatureSelection', 'RBFSampler',
        'FeatureFilter', 'ASFFS', 'GeneticAlgorithm', 'extra_trees_preproc_for_classification',
        'fast_ica', 'feature_agglomeration', 'kernel_pca', 'kitchen_sinks',
        'liblinear_svc_preprocessor', 'nystroem_sampler', 'pca', 'polynomial',
        'random_trees_embedding', 'select_percentile_classification', 'select_rates_classification',
        'truncatedSVD')
        'auto' will select all default feature selections, or use a list to select

    models: Models selected for the job, default = 'auto'
        support regressors ("AdaboostRegressor", "ARDRegression", "DecisionTree",
        "ExtraTreesRegressor", "GaussianProcess", "GradientBoosting",
        "KNearestNeighborsRegressor", "LibLinear_SVR", "LibSVM_SVR",
        "MLPRegressor", "RandomForest", "SGD")
        'auto' will select all default models, or use a list to select

    validation: Whether to use train_test_split to test performance on test set, default = True

    valid_size: Test percentage used to evaluate the performance, default = 0.15
        only effective when validation = True

    objective: Objective function to test performance, default = 'MSE'
        support metrics for regression ("MSE", "MAE", "MSLE", "R2", "MAX")

    search_algo: search algorithm used for hyperparameter optimization, default = "HyperOpt"
        support ("RandomSearch", "GridSearch", "BayesOptSearch", "AxSearch", "BOHB",
        "BlendSearch", "CFO", "DragonflySearch", "HEBO", "HyperOpt", "Nevergrad",
        "Optuna", "SigOpt", "Scikit-Optimize", "ZOOpt", "Reapter",
        "ConcurrencyLimiter", callable)

    search_algo_settings: search algorithm settings, default = None (treated as {})
        need manual configuration for each search algorithm

    search_scheduler: search scheduler used, default = "FIFOScheduler"
        support ("FIFOScheduler", "ASHAScheduler", "HyperBandScheduler", "MedianStoppingRule"
        "PopulationBasedTraining", "PopulationBasedTrainingReplay", "PB2",
        "HyperBandForBOHB", callable)

    search_scheduler_settings: search scheduler settings, default = None (treated as {})
        need manual configuration for each search scheduler

    logger: callback logger, default = None (treated as ["Logger"])
        list of supported callbacks, support ("Logger", "TBX", "JSON", "CSV", "MLflow", "Wandb")

    progress_reporter: progress reporter, default = "CLIReporter"
        support ("CLIReporter", "JupyterNotebookReporter")

    full_status: whether to print full status, default = False

    verbose: display for output, default = 1
        support (0, 1, 2, 3)

    cpu_threads: number of cpu threads to use, default = None
        if None, get all available cpu threads

    use_gpu: whether to use gpu, default = None
        if None, will use gpu if available, otherwise False (not to use gpu)

    reset_index: whether to reset index during training, default = True
        there are methods that are index independent (ignore index, resetted, e.g. GAIN)
        if you wish to use these methods and set reset_index = False, please make sure
        all input index are ordered and starting from 0

    seed: random seed, default = 1
    """

    def __init__(
        self,
        n_estimators=5,
        timeout=360,
        max_evals=64,
        allow_error_prop=0.1,
        temp_directory="tmp",
        delete_temp_after_terminate=False,
        save=True,
        model_name="model",
        ignore_warning=True,
        encoder="auto",
        imputer="auto",
        balancing="auto",
        scaling="auto",
        feature_selection="auto",
        models="auto",
        validation=True,
        valid_size=0.15,
        objective="MSE",
        search_algo="HyperOpt",
        search_algo_settings=None,
        search_scheduler="FIFOScheduler",
        search_scheduler_settings=None,
        logger=None,
        progress_reporter="CLIReporter",
        full_status=False,
        verbose=1,
        cpu_threads=None,
        use_gpu=None,
        reset_index=True,
        seed=1,
    ):
        self.n_estimators = n_estimators
        self.timeout = timeout
        self.max_evals = max_evals
        self.allow_error_prop = allow_error_prop
        self.temp_directory = temp_directory
        self.delete_temp_after_terminate = delete_temp_after_terminate
        self.save = save
        self.model_name = model_name
        self.ignore_warning = ignore_warning
        self.encoder = encoder
        self.imputer = imputer
        self.balancing = balancing
        self.scaling = scaling
        self.feature_selection = feature_selection
        self.models = models
        self.validation = validation
        self.valid_size = valid_size
        self.objective = objective
        self.search_algo = search_algo
        # None sentinels replace the old mutable defaults ({} / ["Logger"])
        # while keeping the same effective values for callers
        self.search_algo_settings = (
            {} if search_algo_settings is None else search_algo_settings
        )
        self.search_scheduler = search_scheduler
        self.search_scheduler_settings = (
            {} if search_scheduler_settings is None else search_scheduler_settings
        )
        self.logger = ["Logger"] if logger is None else logger
        self.progress_reporter = progress_reporter
        self.full_status = full_status
        self.verbose = verbose
        self.cpu_threads = cpu_threads
        self.use_gpu = use_gpu
        self.reset_index = reset_index
        self.seed = seed

        self._fitted = False  # whether the model has been fitted

        super().__init__(
            task_mode="regression",
            n_estimators=self.n_estimators,
            timeout=self.timeout,
            max_evals=self.max_evals,
            allow_error_prop=self.allow_error_prop,
            temp_directory=self.temp_directory,
            delete_temp_after_terminate=self.delete_temp_after_terminate,
            save=self.save,
            model_name=self.model_name,
            ignore_warning=self.ignore_warning,
            encoder=self.encoder,
            imputer=self.imputer,
            balancing=self.balancing,
            scaling=self.scaling,
            feature_selection=self.feature_selection,
            models=self.models,
            validation=self.validation,
            valid_size=self.valid_size,
            objective=self.objective,
            search_algo=self.search_algo,
            search_algo_settings=self.search_algo_settings,
            search_scheduler=self.search_scheduler,
            search_scheduler_settings=self.search_scheduler_settings,
            logger=self.logger,
            progress_reporter=self.progress_reporter,
            full_status=self.full_status,
            verbose=self.verbose,
            cpu_threads=self.cpu_threads,
            use_gpu=self.use_gpu,
            reset_index=self.reset_index,
            seed=self.seed,
        )

    def fit(self, X, y):
        """Fit the regression pipeline on (X, y); marks the model fitted."""
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Predict regression targets for X with the fitted pipeline."""
        return super().predict(X)
class AutoTabularClassifier(AutoTabularBase):
    """
    AutoTabular for classification tasks built on top of AutoTabularBase.

    Parameters
    ----------
    n_estimators: top k pipelines used to create the ensemble, default: 5

    timeout: Total time limit for the job in seconds, default = 360

    max_evals: Maximum number of function evaluations allowed, default = 64

    allow_error_prop: proportion of tasks allows failure, default = 0.1
        allowed number of failures is int(max_evals * allow_error_prop)

    temp_directory: folder path to store temporary model, default = 'tmp'

    delete_temp_after_terminate: whether to delete temporary information, default = False

    save: whether to save model after training, default = True

    model_name: saved model name, default = 'model'

    ignore_warning: whether to ignore warning, default = True

    encoder: Encoders selected for the job, default = 'auto'
        support ('DataEncoding')
        'auto' will select all default encoders, or use a list to select

    imputer: Imputers selected for the job, default = 'auto'
        support ('SimpleImputer', 'JointImputer', 'ExpectationMaximization', 'KNNImputer',
        'MissForestImputer', 'MICE', 'GAIN')
        'auto' will select all default imputers, or use a list to select

    balancing: Balancings selected for the job, default = 'auto'
        support ('no_processing', 'SimpleRandomOverSampling', 'SimpleRandomUnderSampling',
        'TomekLink', 'EditedNearestNeighbor', 'CondensedNearestNeighbor', 'OneSidedSelection',
        'CNN_TomekLink', 'Smote', 'Smote_TomekLink', 'Smote_ENN')
        'auto' will select all default balancings, or use a list to select

    scaling: Scalings selected for the job, default = 'auto'
        support ('no_processing', 'MinMaxScale', 'Standardize', 'Normalize', 'RobustScale',
        'PowerTransformer', 'QuantileTransformer', 'Winsorization')
        'auto' will select all default scalings, or use a list to select

    feature_selection: Feature selections selected for the job, default = 'auto'
        support ('no_processing', 'LDASelection', 'PCA_FeatureSelection', 'RBFSampler',
        'FeatureFilter', 'ASFFS', 'GeneticAlgorithm', 'extra_trees_preproc_for_classification',
        'fast_ica', 'feature_agglomeration', 'kernel_pca', 'kitchen_sinks',
        'liblinear_svc_preprocessor', 'nystroem_sampler', 'pca', 'polynomial',
        'random_trees_embedding', 'select_percentile_classification', 'select_rates_classification',
        'truncatedSVD')
        'auto' will select all default feature selections, or use a list to select

    models: Models selected for the job, default = 'auto'
        support classifiers ('AdaboostClassifier', 'BernoulliNB', 'DecisionTree',
        'ExtraTreesClassifier', 'GaussianNB', 'GradientBoostingClassifier',
        'KNearestNeighborsClassifier', 'LDA', 'LibLinear_SVC', 'LibSVM_SVC',
        'MLPClassifier', 'MultinomialNB', 'PassiveAggressive', 'QDA',
        'RandomForest', 'SGD')
        'auto' will select all default models, or use a list to select

    validation: Whether to use train_test_split to test performance on test set, default = True

    valid_size: Test percentage used to evaluate the performance, default = 0.15
        only effective when validation = True

    objective: Objective function to test performance, default = 'accuracy'
        support metrics for classification ("accuracy", "precision", "auc", "hinge", "f1")

    search_algo: search algorithm used for hyperparameter optimization, default = "HyperOpt"
        support ("RandomSearch", "GridSearch", "BayesOptSearch", "AxSearch", "BOHB",
        "BlendSearch", "CFO", "DragonflySearch", "HEBO", "HyperOpt", "Nevergrad",
        "Optuna", "SigOpt", "Scikit-Optimize", "ZOOpt", "Reapter",
        "ConcurrencyLimiter", callable)

    search_algo_settings: search algorithm settings, default = None (treated as {})
        need manual configuration for each search algorithm

    search_scheduler: search scheduler used, default = "FIFOScheduler"
        support ("FIFOScheduler", "ASHAScheduler", "HyperBandScheduler", "MedianStoppingRule"
        "PopulationBasedTraining", "PopulationBasedTrainingReplay", "PB2",
        "HyperBandForBOHB", callable)

    search_scheduler_settings: search scheduler settings, default = None (treated as {})
        need manual configuration for each search scheduler

    logger: callback logger, default = None (treated as ["Logger"])
        list of supported callbacks, support ("Logger", "TBX", "JSON", "CSV", "MLflow", "Wandb")

    progress_reporter: progress reporter, default = "CLIReporter"
        support ("CLIReporter", "JupyterNotebookReporter")

    full_status: whether to print full status, default = False

    verbose: display for output, default = 1
        support (0, 1, 2, 3)

    cpu_threads: number of cpu threads to use, default = None
        if None, get all available cpu threads

    use_gpu: whether to use gpu, default = None
        if None, will use gpu if available, otherwise False (not to use gpu)

    reset_index: whether to reset index during training, default = True
        there are methods that are index independent (ignore index, resetted, e.g. GAIN)
        if you wish to use these methods and set reset_index = False, please make sure
        all input index are ordered and starting from 0

    seed: random seed, default = 1
    """

    def __init__(
        self,
        n_estimators=5,
        timeout=360,
        max_evals=64,
        allow_error_prop=0.1,
        temp_directory="tmp",
        delete_temp_after_terminate=False,
        save=True,
        model_name="model",
        ignore_warning=True,
        encoder="auto",
        imputer="auto",
        balancing="auto",
        scaling="auto",
        feature_selection="auto",
        models="auto",
        validation=True,
        valid_size=0.15,
        objective="accuracy",
        search_algo="HyperOpt",
        search_algo_settings=None,
        search_scheduler="FIFOScheduler",
        search_scheduler_settings=None,
        logger=None,
        progress_reporter="CLIReporter",
        full_status=False,
        verbose=1,
        cpu_threads=None,
        use_gpu=None,
        reset_index=True,
        seed=1,
    ):
        self.n_estimators = n_estimators
        self.timeout = timeout
        self.max_evals = max_evals
        self.allow_error_prop = allow_error_prop
        self.temp_directory = temp_directory
        self.delete_temp_after_terminate = delete_temp_after_terminate
        self.save = save
        self.model_name = model_name
        self.ignore_warning = ignore_warning
        self.encoder = encoder
        self.imputer = imputer
        self.balancing = balancing
        self.scaling = scaling
        self.feature_selection = feature_selection
        self.models = models
        self.validation = validation
        self.valid_size = valid_size
        self.objective = objective
        self.search_algo = search_algo
        # None sentinels replace the old mutable defaults ({} / ["Logger"])
        # while keeping the same effective values for callers
        self.search_algo_settings = (
            {} if search_algo_settings is None else search_algo_settings
        )
        self.search_scheduler = search_scheduler
        self.search_scheduler_settings = (
            {} if search_scheduler_settings is None else search_scheduler_settings
        )
        self.logger = ["Logger"] if logger is None else logger
        self.progress_reporter = progress_reporter
        self.full_status = full_status
        self.verbose = verbose
        self.cpu_threads = cpu_threads
        self.use_gpu = use_gpu
        self.reset_index = reset_index
        self.seed = seed

        self._fitted = False  # whether the model has been fitted

        super().__init__(
            task_mode="classification",
            n_estimators=self.n_estimators,
            timeout=self.timeout,
            max_evals=self.max_evals,
            allow_error_prop=self.allow_error_prop,
            temp_directory=self.temp_directory,
            delete_temp_after_terminate=self.delete_temp_after_terminate,
            save=self.save,
            model_name=self.model_name,
            ignore_warning=self.ignore_warning,
            encoder=self.encoder,
            imputer=self.imputer,
            balancing=self.balancing,
            scaling=self.scaling,
            feature_selection=self.feature_selection,
            models=self.models,
            validation=self.validation,
            valid_size=self.valid_size,
            objective=self.objective,
            search_algo=self.search_algo,
            search_algo_settings=self.search_algo_settings,
            search_scheduler=self.search_scheduler,
            search_scheduler_settings=self.search_scheduler_settings,
            logger=self.logger,
            progress_reporter=self.progress_reporter,
            full_status=self.full_status,
            verbose=self.verbose,
            cpu_threads=self.cpu_threads,
            use_gpu=self.use_gpu,
            reset_index=self.reset_index,
            seed=self.seed,
        )

    def fit(self, X, y):
        """Fit the classification pipeline on (X, y); marks the model fitted."""
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Predict class labels for X with the fitted pipeline."""
        return super().predict(X)
class AutoTabular(AutoTabularClassifier, AutoTabularRegressor):
    """
    AutoTabular that automatically dispatches to AutoTabularClassifier or
    AutoTabularRegressor based on the detected task type of ``y``.

    Parameters
    ----------
    n_estimators: top k pipelines used to create the ensemble, default: 5

    timeout: Total time limit for the job in seconds, default = 360

    max_evals: Maximum number of function evaluations allowed, default = 64

    allow_error_prop: proportion of tasks allows failure, default = 0.1
        allowed number of failures is int(max_evals * allow_error_prop)

    temp_directory: folder path to store temporary model, default = 'tmp'

    delete_temp_after_terminate: whether to delete temporary information, default = False

    save: whether to save model after training, default = True

    model_name: saved model name, default = 'model'

    ignore_warning: whether to ignore warning, default = True

    encoder: Encoders selected for the job, default = 'auto'
        support ('DataEncoding')
        'auto' will select all default encoders, or use a list to select

    imputer: Imputers selected for the job, default = 'auto'
        support ('SimpleImputer', 'JointImputer', 'ExpectationMaximization', 'KNNImputer',
        'MissForestImputer', 'MICE', 'GAIN')
        'auto' will select all default imputers, or use a list to select

    balancing: Balancings selected for the job, default = 'auto'
        support ('no_processing', 'SimpleRandomOverSampling', 'SimpleRandomUnderSampling',
        'TomekLink', 'EditedNearestNeighbor', 'CondensedNearestNeighbor', 'OneSidedSelection',
        'CNN_TomekLink', 'Smote', 'Smote_TomekLink', 'Smote_ENN')
        'auto' will select all default balancings, or use a list to select

    scaling: Scalings selected for the job, default = 'auto'
        support ('no_processing', 'MinMaxScale', 'Standardize', 'Normalize', 'RobustScale',
        'PowerTransformer', 'QuantileTransformer', 'Winsorization')
        'auto' will select all default scalings, or use a list to select

    feature_selection: Feature selections selected for the job, default = 'auto'
        support ('no_processing', 'LDASelection', 'PCA_FeatureSelection', 'RBFSampler',
        'FeatureFilter', 'ASFFS', 'GeneticAlgorithm', 'extra_trees_preproc_for_classification',
        'fast_ica', 'feature_agglomeration', 'kernel_pca', 'kitchen_sinks',
        'liblinear_svc_preprocessor', 'nystroem_sampler', 'pca', 'polynomial',
        'random_trees_embedding', 'select_percentile_classification', 'select_rates_classification',
        'truncatedSVD')
        'auto' will select all default feature selections, or use a list to select

    models: Models selected for the job, default = 'auto'
        support classifiers ('AdaboostClassifier', 'BernoulliNB', 'DecisionTree',
        'ExtraTreesClassifier', 'GaussianNB', 'GradientBoostingClassifier',
        'KNearestNeighborsClassifier', 'LDA', 'LibLinear_SVC', 'LibSVM_SVC',
        'MLPClassifier', 'MultinomialNB', 'PassiveAggressive', 'QDA',
        'RandomForest', 'SGD')
        support regressors ("AdaboostRegressor", "ARDRegression", "DecisionTree",
        "ExtraTreesRegressor", "GaussianProcess", "GradientBoosting",
        "KNearestNeighborsRegressor", "LibLinear_SVR", "LibSVM_SVR",
        "MLPRegressor", "RandomForest", "SGD")
        'auto' will select all default models, or use a list to select

    validation: Whether to use train_test_split to test performance on test set, default = True

    valid_size: Test percentage used to evaluate the performance, default = 0.15
        only effective when validation = True

    objective: Objective function to test performance, default = None
        falls back to 'accuracy' for classification and 'MSE' for regression
        support metrics for regression ("MSE", "MAE", "MSLE", "R2", "MAX")
        support metrics for classification ("accuracy", "precision", "auc", "hinge", "f1")

    search_algo: search algorithm used for hyperparameter optimization, default = "HyperOpt"
        support ("RandomSearch", "GridSearch", "BayesOptSearch", "AxSearch", "BOHB",
        "BlendSearch", "CFO", "DragonflySearch", "HEBO", "HyperOpt", "Nevergrad",
        "Optuna", "SigOpt", "Scikit-Optimize", "ZOOpt", "Reapter",
        "ConcurrencyLimiter", callable)

    search_algo_settings: search algorithm settings, default = None (treated as {})
        need manual configuration for each search algorithm

    search_scheduler: search scheduler used, default = "FIFOScheduler"
        support ("FIFOScheduler", "ASHAScheduler", "HyperBandScheduler", "MedianStoppingRule"
        "PopulationBasedTraining", "PopulationBasedTrainingReplay", "PB2",
        "HyperBandForBOHB", callable)

    search_scheduler_settings: search scheduler settings, default = None (treated as {})
        need manual configuration for each search scheduler

    logger: callback logger, default = None (treated as ["Logger"])
        list of supported callbacks, support ("Logger", "TBX", "JSON", "CSV", "MLflow", "Wandb")

    progress_reporter: progress reporter, default = "CLIReporter"
        support ("CLIReporter", "JupyterNotebookReporter")

    full_status: whether to print full status, default = False

    verbose: display for output, default = 1
        support (0, 1, 2, 3)

    cpu_threads: number of cpu threads to use, default = None
        if None, get all available cpu threads

    use_gpu: whether to use gpu, default = None
        if None, will use gpu if available, otherwise False (not to use gpu)

    reset_index: whether to reset index during training, default = True
        there are methods that are index independent (ignore index, resetted, e.g. GAIN)
        if you wish to use these methods and set reset_index = False, please make sure
        all input index are ordered and starting from 0

    seed: random seed, default = 1
    """

    def __init__(
        self,
        n_estimators=5,
        timeout=360,
        max_evals=64,
        allow_error_prop=0.1,
        temp_directory="tmp",
        delete_temp_after_terminate=False,
        save=True,
        model_name="model",
        ignore_warning=True,
        encoder="auto",
        imputer="auto",
        balancing="auto",
        scaling="auto",
        feature_selection="auto",
        models="auto",
        validation=True,
        valid_size=0.15,
        objective=None,
        search_algo="HyperOpt",
        search_algo_settings=None,
        search_scheduler="FIFOScheduler",
        search_scheduler_settings=None,
        logger=None,
        progress_reporter="CLIReporter",
        full_status=False,
        verbose=1,
        cpu_threads=None,
        use_gpu=None,
        reset_index=True,
        seed=1,
    ):
        self.n_estimators = n_estimators
        self.timeout = timeout
        self.max_evals = max_evals
        self.allow_error_prop = allow_error_prop
        self.temp_directory = temp_directory
        self.delete_temp_after_terminate = delete_temp_after_terminate
        self.save = save
        self.model_name = model_name
        self.ignore_warning = ignore_warning
        self.encoder = encoder
        self.imputer = imputer
        self.balancing = balancing
        self.scaling = scaling
        self.feature_selection = feature_selection
        self.models = models
        self.validation = validation
        self.valid_size = valid_size
        self.objective = objective
        self.search_algo = search_algo
        # None sentinels replace the old mutable defaults ({} / ["Logger"])
        # while keeping the same effective values for callers
        self.search_algo_settings = (
            {} if search_algo_settings is None else search_algo_settings
        )
        self.search_scheduler = search_scheduler
        self.search_scheduler_settings = (
            {} if search_scheduler_settings is None else search_scheduler_settings
        )
        self.logger = ["Logger"] if logger is None else logger
        self.progress_reporter = progress_reporter
        self.full_status = full_status
        self.verbose = verbose
        self.cpu_threads = cpu_threads
        self.use_gpu = use_gpu
        self.reset_index = reset_index
        self.seed = seed

        self.model = None  # set by fit(); lets predict() detect an unfitted model
        self._fitted = False  # whether the model has been fitted

    def fit(self, X, y=None):
        """Detect the task type of ``y``, build the matching pipeline and fit it.

        Raises ValueError when the detected type is not one of
        ["binary", "multiclass", "integer", "continuous"].
        """
        # ``y == None`` raised on pandas/numpy targets (elementwise compare);
        # ``is None`` is the correct identity test, and any non-None target is
        # handed to type_of_task regardless of its container type
        if y is None:
            self._type = "Unsupervised"
        else:
            self._type = type_of_task(y)

        if self._type in ["binary", "multiclass"]:  # assign classification tasks
            self.model = AutoTabularClassifier(
                n_estimators=self.n_estimators,
                timeout=self.timeout,
                max_evals=self.max_evals,
                allow_error_prop=self.allow_error_prop,
                temp_directory=self.temp_directory,
                delete_temp_after_terminate=self.delete_temp_after_terminate,
                save=self.save,
                model_name=self.model_name,
                ignore_warning=self.ignore_warning,
                encoder=self.encoder,
                imputer=self.imputer,
                balancing=self.balancing,
                scaling=self.scaling,
                feature_selection=self.feature_selection,
                models=self.models,
                validation=self.validation,
                valid_size=self.valid_size,
                objective="accuracy" if not self.objective else self.objective,
                search_algo=self.search_algo,
                search_algo_settings=self.search_algo_settings,
                search_scheduler=self.search_scheduler,
                search_scheduler_settings=self.search_scheduler_settings,
                logger=self.logger,
                progress_reporter=self.progress_reporter,
                full_status=self.full_status,
                verbose=self.verbose,
                cpu_threads=self.cpu_threads,
                use_gpu=self.use_gpu,
                reset_index=self.reset_index,
                seed=self.seed,
            )
        elif self._type in ["integer", "continuous"]:  # assign regression tasks
            self.model = AutoTabularRegressor(
                n_estimators=self.n_estimators,
                timeout=self.timeout,
                max_evals=self.max_evals,
                allow_error_prop=self.allow_error_prop,
                temp_directory=self.temp_directory,
                delete_temp_after_terminate=self.delete_temp_after_terminate,
                save=self.save,
                model_name=self.model_name,
                ignore_warning=self.ignore_warning,
                encoder=self.encoder,
                imputer=self.imputer,
                balancing=self.balancing,
                scaling=self.scaling,
                feature_selection=self.feature_selection,
                models=self.models,
                validation=self.validation,
                valid_size=self.valid_size,
                objective="MSE" if not self.objective else self.objective,
                search_algo=self.search_algo,
                search_algo_settings=self.search_algo_settings,
                search_scheduler=self.search_scheduler,
                search_scheduler_settings=self.search_scheduler_settings,
                # logger was previously dropped only on this branch — pass it
                # through for consistency with the classifier branch
                logger=self.logger,
                progress_reporter=self.progress_reporter,
                full_status=self.full_status,
                verbose=self.verbose,
                cpu_threads=self.cpu_threads,
                use_gpu=self.use_gpu,
                reset_index=self.reset_index,
                seed=self.seed,
            )
        else:
            raise ValueError(
                'Not recognizing type, only ["binary", "multiclass", "integer", "continuous"] accepted, get {}!'.format(
                    self._type
                )
            )
        self.model.fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Predict with the dispatched pipeline; raise if fit() was never called."""
        # getattr guard: an unfitted instance previously died with
        # AttributeError instead of the intended ValueError
        if getattr(self, "model", None) is not None:
            return self.model.predict(X)
        raise ValueError("No tasks found! Need to fit first.")
|
PanyiDong/AutoML | My_AutoML/_feature_selection/_sklearn.py | <filename>My_AutoML/_feature_selection/_sklearn.py
"""
File: _sklearn.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_feature_selection/_sklearn.py
File Created: Friday, 29th April 2022 10:38:02 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 3:52:56 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import scipy
import numpy as np
import sklearn.feature_selection
import sklearn.decomposition
import sklearn.cluster
import sklearn.kernel_approximation
import sklearn.preprocessing
import sklearn.ensemble
import warnings
from My_AutoML._utils._base import is_none
###################################################################################################################
# sklearn replacement of feature selection
class densifier:
    """Convert sparse feature matrices to dense ndarrays.

    Dense input is passed through unchanged; the no-op fit() exists only to
    satisfy the sklearn-style fit/transform pipeline protocol.
    """

    def __init__(self):
        self._fitted = False  # mirrors the protocol; nothing is learned

    def fit(self, X, y=None):
        """No-op fit kept for pipeline compatibility; returns self."""
        self._fitted = True
        return self

    def transform(self, X):
        """Return a dense ndarray for sparse X, otherwise X unchanged."""
        from scipy import sparse

        return X.todense().getA() if sparse.issparse(X) else X
class extra_trees_preproc_for_classification:
    """Feature selection via ExtraTreesClassifier importances.

    An extra-trees ensemble is fit on (X, y) and SelectFromModel keeps the
    features whose importance exceeds the mean importance.

    Parameters mirror sklearn.ensemble.ExtraTreesClassifier, except that
    ``max_features`` is a *fraction* of the number of columns of X, resolved
    at fit time.
    """

    def __init__(
        self,
        n_estimators=5,
        criterion="entropy",
        min_samples_leaf=5,
        min_samples_split=5,
        max_features=0.5,
        bootstrap=False,
        max_leaf_nodes=None,
        max_depth=None,
        min_weight_fraction_leaf=0.0,
        min_impurity_decrease=0.0,
    ):
        # defensive casts: hyperparameter tuners may hand these over as
        # floats or strings
        self.n_estimators = int(n_estimators)
        self.criterion = criterion
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_samples_split = int(min_samples_split)
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.max_leaf_nodes = None if is_none(max_leaf_nodes) else int(max_leaf_nodes)
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.min_impurity_decrease = float(min_impurity_decrease)

        self._fitted = False  # set True by fit()

    def fit(self, X, y=None):
        """Fit the ensemble on (X, y) and build the importance-based selector."""
        from sklearn.ensemble import ExtraTreesClassifier
        from sklearn.feature_selection import SelectFromModel

        estimator = ExtraTreesClassifier(
            n_estimators=self.n_estimators,
            criterion=self.criterion,
            min_samples_leaf=self.min_samples_leaf,
            min_samples_split=self.min_samples_split,
            # BUGFIX: int(frac * n_cols) could evaluate to 0 for narrow X
            # (e.g. a single column with frac < 1), which the estimator
            # rejects; always consider at least one feature per split
            max_features=max(1, int(X.shape[1] * self.max_features)),
            bootstrap=self.bootstrap,
            max_leaf_nodes=self.max_leaf_nodes,
            max_depth=self.max_depth,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            min_impurity_decrease=self.min_impurity_decrease,
        )
        estimator.fit(X, y)
        self.selector = SelectFromModel(
            estimator,
            threshold="mean",
            prefit=True,
        )
        self._fitted = True
        return self

    def transform(self, X):
        """Reduce X to the selected feature subset; requires fit() first."""
        if not self._fitted:
            raise NotImplementedError("The model has not been fitted yet!")
        return self.selector.transform(X)
class extra_trees_preproc_for_regression:
    """Feature selection via ExtraTreesRegressor importances.

    An extra-trees ensemble is fit on (X, y) and SelectFromModel keeps the
    features whose importance exceeds the mean importance.

    Parameters mirror sklearn.ensemble.ExtraTreesRegressor; the effective
    ``max_features`` is ``log(n_cols + 1) * max_features``, resolved at fit
    time.  NOTE(review): criterion default "mse" is a legacy sklearn alias
    ("squared_error" in sklearn >= 1.0) — kept for search-space compatibility.
    """

    def __init__(
        self,
        n_estimators=5,
        criterion="mse",
        min_samples_leaf=5,
        min_samples_split=5,
        max_features=0.5,
        bootstrap=False,
        max_leaf_nodes=None,
        max_depth=None,
        min_weight_fraction_leaf=0.0,
    ):
        # defensive casts: hyperparameter tuners may hand these over as
        # floats or strings
        self.n_estimators = int(n_estimators)
        self.criterion = criterion
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_samples_split = int(min_samples_split)
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.max_leaf_nodes = None if is_none(max_leaf_nodes) else int(max_leaf_nodes)
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)

        self._fitted = False  # set True by fit()

    def fit(self, X, y=None):
        """Fit the ensemble on (X, y) and build the importance-based selector."""
        from sklearn.ensemble import ExtraTreesRegressor
        from sklearn.feature_selection import SelectFromModel

        estimator = ExtraTreesRegressor(
            n_estimators=self.n_estimators,
            criterion=self.criterion,
            min_samples_leaf=self.min_samples_leaf,
            min_samples_split=self.min_samples_split,
            # BUGFIX: int(log(n_cols + 1) * frac) is 0 for narrow X
            # (e.g. one column with frac = 0.5), which the estimator
            # rejects; always consider at least one feature per split
            max_features=max(1, int(np.log(X.shape[1] + 1) * self.max_features)),
            bootstrap=self.bootstrap,
            max_leaf_nodes=self.max_leaf_nodes,
            max_depth=self.max_depth,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
        )
        estimator.fit(X, y)
        self.selector = SelectFromModel(
            estimator,
            threshold="mean",
            prefit=True,
        )
        self._fitted = True
        return self

    def transform(self, X):
        """Reduce X to the selected feature subset; requires fit() first."""
        if not self._fitted:
            raise NotImplementedError("The model has not been fitted yet!")
        return self.selector.transform(X)
class fast_ica(sklearn.decomposition.FastICA):
    """FastICA wrapper with tuner-friendly (string/None-like) hyperparameters."""

    def __init__(
        self,
        algorithm="parallel",
        whiten=False,
        fun="logcosh",
        n_components=5,
    ):
        # normalize hyperparameters once, store them as attributes, and
        # forward the same values to the sklearn base class
        params = {
            "algorithm": algorithm,
            "whiten": whiten,
            "fun": fun,
            "n_components": None if is_none(n_components) else int(n_components),
        }
        for name, value in params.items():
            setattr(self, name, value)
        super().__init__(**params)
        self._fitted = False

    def fit(self, X, y=None):
        """Estimate the independent components of X; y is ignored."""
        super().fit(X)
        self._fitted = True
        return self

    def transform(self, X):
        """Project X onto the learned components; requires fit() first."""
        if self._fitted:
            return super().transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
class feature_agglomeration(sklearn.cluster.FeatureAgglomeration):
    """FeatureAgglomeration wrapper that defers base construction to fit time.

    super().__init__ is intentionally postponed to fit() so the effective
    number of clusters can be capped by the width of X.
    """

    def __init__(
        self,
        n_clusters=5,
        affinity="euclidean",
        linkage="ward",
        pooling_func="mean",
    ):
        self.n_clusters = int(n_clusters)
        self.affinity = affinity
        self.linkage = linkage
        self.pooling_func = pooling_func
        # string aliases accepted for pooling_func
        self.pooling_func_dict = {
            "mean": np.mean,
            "median": np.median,
            "max": np.max,
        }
        self._fitted = False

    def fit(self, X, y=None):
        """Construct the base estimator (capped at X.shape[1] clusters) and fit."""
        # resolve a string alias into the actual aggregation callable;
        # idempotent across repeated fits because callables pass through
        if not callable(self.pooling_func):
            self.pooling_func = self.pooling_func_dict[self.pooling_func]
        super().__init__(
            n_clusters=min(self.n_clusters, X.shape[1]),
            affinity=self.affinity,
            linkage=self.linkage,
            pooling_func=self.pooling_func,
        )
        super().fit(X)
        self._fitted = True
        return self

    def transform(self, X):
        """Pool X's features into the learned clusters; requires fit() first."""
        if self._fitted:
            return super().transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
class kernel_pca(sklearn.decomposition.KernelPCA):
    """Kernel PCA wrapper with tuner-friendly (string/float) hyperparameters.

    Warnings raised by the underlying solver are escalated to errors so a
    degenerate kernel matrix fails loudly instead of silently producing bad
    components.
    """

    def __init__(
        self,
        n_components=5,
        kernel="rbf",
        gamma=0.1,
        degree=3,
        coef0=0.5,
    ):
        # "None"-like values disable the corresponding option
        self.n_components = None if is_none(n_components) else int(n_components)
        self.kernel = kernel
        self.gamma = None if is_none(gamma) else float(gamma)
        self.degree = int(degree)
        self.coef0 = float(coef0)
        super().__init__(
            n_components=self.n_components,
            kernel=self.kernel,
            gamma=self.gamma,
            degree=self.degree,
            coef0=self.coef0,
        )
        self._fitted = False

    def fit(self, X, y=None):
        """Fit Kernel PCA on X; y is ignored. Sparse input is cast to float64."""
        if scipy.sparse.issparse(X):
            # BUGFIX: was ``X.stype(np.float64)`` — a typo for ``astype`` that
            # raised AttributeError on every sparse input
            X = X.astype(np.float64)
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            super().fit(X)
        self._fitted = True
        return self

    def transform(self, X):
        """Project X onto the learned components; requires fit() first."""
        if not self._fitted:
            raise NotImplementedError("The model has not been fitted yet!")
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            return super().transform(X)
class kitchen_sinks(sklearn.kernel_approximation.RBFSampler):
    """Random kitchen sinks: RBF kernel feature-map approximation wrapper."""

    def __init__(self, gamma=0.1, n_components=50):
        # normalize once, store, and forward identical values to the base class
        params = {
            "gamma": float(gamma),
            "n_components": int(n_components),
        }
        for name, value in params.items():
            setattr(self, name, value)
        super().__init__(**params)
        self._fitted = False

    def fit(self, X, y=None):
        """Draw the random projection weights; y is ignored."""
        super().fit(X)
        self._fitted = True
        return self

    def transform(self, X):
        """Apply the random feature map to X; requires fit() first."""
        if self._fitted:
            return super().transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
class liblinear_svc_preprocessor:
    """Select features by the coefficients of a LinearSVC (L1 penalty by default).

    SelectFromModel keeps the features whose absolute coefficient exceeds the
    mean absolute coefficient of the fitted SVC.
    """

    def __init__(
        self,
        penalty="l1",
        loss="squared_hinge",
        dual=False,
        tol=0.0001,
        C=1.0,
        multi_class="ovr",
        fit_intercept=True,
        intercept_scaling=1,
    ):
        self.penalty = penalty
        self.loss = loss
        self.dual = dual
        self.tol = float(tol)
        self.C = float(C)
        self.multi_class = multi_class
        self.fit_intercept = fit_intercept
        self.intercept_scaling = float(intercept_scaling)
        self._fitted = False

    def fit(self, X, y=None):
        """Train the linear SVC on (X, y) and build the coefficient selector."""
        from sklearn.feature_selection import SelectFromModel
        from sklearn.svm import LinearSVC

        svc = LinearSVC(
            penalty=self.penalty,
            loss=self.loss,
            dual=self.dual,
            tol=self.tol,
            C=self.C,
            multi_class=self.multi_class,
            fit_intercept=self.fit_intercept,
            intercept_scaling=self.intercept_scaling,
        ).fit(X, y)
        self.selector = SelectFromModel(svc, threshold="mean", prefit=True)
        self._fitted = True
        return self

    def transform(self, X):
        """Reduce X to the selected feature subset; requires fit() first."""
        if self._fitted:
            return self.selector.transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
class nystroem_sampler(sklearn.kernel_approximation.Nystroem):
    """Nystroem kernel feature-map approximation wrapper."""

    def __init__(
        self,
        kernel="rbf",
        n_components=50,
        gamma=0.1,
        degree=3,
        coef0=0.5,
    ):
        self.kernel = kernel
        self.n_components = int(n_components)
        # "None"-like gamma falls back to the sklearn default
        self.gamma = None if is_none(gamma) else float(gamma)
        self.degree = int(degree)
        self.coef0 = float(coef0)
        super().__init__(
            kernel=self.kernel,
            n_components=self.n_components,
            gamma=self.gamma,
            degree=self.degree,
            coef0=self.coef0,
        )
        self._fitted = False

    @staticmethod
    def _non_negative(X):
        # chi2 kernels require non-negative input; note the clamp happens
        # in place on the caller's array / sparse data (as in the original)
        if scipy.sparse.issparse(X):
            X.data[X.data < 0] = 0
        else:
            X[X < 0] = 0
        return X

    def fit(self, X, y=None):
        """Sample the Nystroem basis from X; y is ignored."""
        if self.kernel == "chi2":
            X = self._non_negative(X)
        super().fit(X)
        self._fitted = True
        return self

    def transform(self, X):
        """Map X through the approximate kernel feature space; requires fit()."""
        if not self._fitted:
            raise NotImplementedError("The model has not been fitted yet!")
        if self.kernel == "chi2":
            X = self._non_negative(X)
        return super().transform(X)
class pca(sklearn.decomposition.PCA):
    """PCA keeping enough components to explain ``keep_variance`` of variance."""

    def __init__(self, keep_variance=0.5, whiten=True):
        self.keep_variance = float(keep_variance)
        self.with_whiten = whiten
        # a float n_components in (0, 1) tells sklearn to keep the smallest
        # number of components explaining that fraction of variance
        super().__init__(
            n_components=self.keep_variance,
            whiten=self.with_whiten,
        )
        self._fitted = False

    def fit(self, X, y=None):
        """Fit the principal components of X; y is ignored."""
        super().fit(X)
        self._fitted = True
        return self

    def transform(self, X):
        """Project X onto the kept components; requires fit() first."""
        if self._fitted:
            return super().transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
class polynomial(sklearn.preprocessing.PolynomialFeatures):
    """Polynomial feature expansion wrapper (sklearn PolynomialFeatures)."""

    def __init__(self, degree=3, interaction_only=False, include_bias=True):
        # normalize once, store, and forward identical values to the base class
        params = {
            "degree": int(degree),
            "interaction_only": interaction_only,
            "include_bias": include_bias,
        }
        for name, value in params.items():
            setattr(self, name, value)
        super().__init__(**params)
        self._fitted = False

    def fit(self, X, y=None):
        """Compute the output feature combinations for X's width."""
        super().fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Expand X into polynomial features; requires fit() first."""
        if self._fitted:
            return super().transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
class random_trees_embedding(sklearn.ensemble.RandomTreesEmbedding):
    """Unsupervised random-trees embedding wrapper."""

    def __init__(
        self,
        n_estimators=5,
        max_depth=3,
        min_samples_split=5,
        min_samples_leaf=5,
        min_weight_fraction_leaf=0.0,
        max_leaf_nodes=None,
        bootstrap=True,
    ):
        self.n_estimators = int(n_estimators)
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.min_samples_split = int(min_samples_split)
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.max_leaf_nodes = None if is_none(max_leaf_nodes) else int(max_leaf_nodes)
        # NOTE(review): `bootstrap` is accepted and stored for search-space
        # compatibility but deliberately NOT forwarded to the base class
        # (it was commented out upstream) — confirm before wiring it through.
        self.bootstrap = bootstrap
        super().__init__(
            n_estimators=self.n_estimators,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            max_leaf_nodes=self.max_leaf_nodes,
        )
        self._fitted = False

    def fit(self, X, y=None):
        """Fit the forest of random trees on X."""
        super().fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Encode X as sparse leaf-membership indicators; requires fit()."""
        if self._fitted:
            return super().transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
class select_percentile_classification(sklearn.feature_selection.SelectPercentile):
    """SelectPercentile for classification with string-named score functions."""

    def __init__(self, percentile=90, score_func="chi2"):
        self.percentile = int(percentile)
        # resolve the score function from its name
        scorers = {
            "chi2": sklearn.feature_selection.chi2,
            "f_classif": sklearn.feature_selection.f_classif,
            "mutual_info": sklearn.feature_selection.mutual_info_classif,
        }
        if score_func not in scorers:
            raise ValueError(
                "score_func must be one of 'chi2', 'f_classif', 'mutual_info', but got {}".format(
                    score_func
                )
            )
        self.score_func = scorers[score_func]
        super().__init__(
            percentile=self.percentile,
            score_func=self.score_func,
        )
        self._fitted = False

    @staticmethod
    def _clip_negative(X):
        # chi2 requires non-negative features; note this clamps in place
        # on the caller's array / sparse data (as in the original)
        if scipy.sparse.issparse(X):
            X.data[X.data < 0] = 0
        else:
            X[X < 0] = 0
        return X

    def fit(self, X, y=None):
        """Score the features of (X, y) and mark the top percentile."""
        if self.score_func is sklearn.feature_selection.chi2:
            X = self._clip_negative(X)
        super().fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Keep the selected top-percentile features; requires fit() first."""
        if not self._fitted:
            raise NotImplementedError("The model has not been fitted yet!")
        if self.score_func is sklearn.feature_selection.chi2:
            X = self._clip_negative(X)
        return super().transform(X)
class select_percentile_regression(sklearn.feature_selection.SelectPercentile):
    """SelectPercentile for regression with string-named score functions."""

    def __init__(self, percentile=90, score_func="f_regression"):
        self.percentile = int(percentile)
        # resolve the score function from its name
        scorers = {
            "f_regression": sklearn.feature_selection.f_regression,
            "mutual_info": sklearn.feature_selection.mutual_info_regression,
        }
        if score_func not in scorers:
            raise ValueError(
                "score_func must be one of 'f_regression', 'mutual_info', but got {}".format(
                    score_func
                )
            )
        self.score_func = scorers[score_func]
        super().__init__(
            percentile=self.percentile,
            score_func=self.score_func,
        )
        self._fitted = False

    def fit(self, X, y=None):
        """Score the features of (X, y) and mark the top percentile."""
        super().fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Keep the selected top-percentile features; requires fit() first."""
        if self._fitted:
            return super().transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
class select_rates_classification(sklearn.feature_selection.GenericUnivariateSelect):
    """Univariate selection for classification via GenericUnivariateSelect.

    ``alpha`` is forwarded as the selection parameter of the chosen ``mode``
    (e.g. the false-positive rate for mode="fpr").
    """

    def __init__(self, alpha=0.3, score_func="chi2", mode="fpr"):
        self.alpha = float(alpha)
        # resolve the score function from its name
        scorers = {
            "chi2": sklearn.feature_selection.chi2,
            "f_classif": sklearn.feature_selection.f_classif,
            "mutual_info": sklearn.feature_selection.mutual_info_classif,
        }
        if score_func not in scorers:
            raise ValueError(
                "score_func must be one of 'chi2', 'f_classif', 'mutual_info', but got {}".format(
                    score_func
                )
            )
        self.score_func = scorers[score_func]
        self.mode = mode
        super().__init__(
            param=self.alpha,
            score_func=self.score_func,
            mode=self.mode,
        )
        self._fitted = False

    @staticmethod
    def _clip_negative(X):
        # chi2 requires non-negative features; note this clamps in place
        # on the caller's array / sparse data (as in the original)
        if scipy.sparse.issparse(X):
            X.data[X.data < 0] = 0
        else:
            X[X < 0] = 0
        return X

    def fit(self, X, y=None):
        """Score the features of (X, y) under the configured mode."""
        if self.score_func is sklearn.feature_selection.chi2:
            X = self._clip_negative(X)
        super().fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Keep the selected features; requires fit() first."""
        if not self._fitted:
            raise NotImplementedError("The model has not been fitted yet!")
        if self.score_func is sklearn.feature_selection.chi2:
            X = self._clip_negative(X)
        return super().transform(X)
class select_rates_regression(sklearn.feature_selection.GenericUnivariateSelect):
    """Univariate selection for regression via GenericUnivariateSelect.

    ``alpha`` is forwarded as the selection parameter of the chosen ``mode``
    (e.g. the false-positive rate for mode="fpr").
    """

    def __init__(self, alpha=0.3, score_func="f_regression", mode="fpr"):
        self.alpha = float(alpha)
        # resolve the score function from its name
        scorers = {
            "f_regression": sklearn.feature_selection.f_regression,
            "mutual_info": sklearn.feature_selection.mutual_info_regression,
        }
        if score_func not in scorers:
            raise ValueError(
                "score_func must be one of 'f_regression', 'mutual_info', but got {}".format(
                    score_func
                )
            )
        self.score_func = scorers[score_func]
        self.mode = mode
        super().__init__(
            param=self.alpha,
            score_func=self.score_func,
            mode=self.mode,
        )
        self._fitted = False

    def fit(self, X, y=None):
        """Score the features of (X, y) under the configured mode."""
        super().fit(X, y)
        self._fitted = True
        return self

    def transform(self, X):
        """Keep the selected features; requires fit() first."""
        if self._fitted:
            return super().transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
class truncatedSVD(sklearn.decomposition.TruncatedSVD):
    """TruncatedSVD whose component count is capped at X.shape[1] - 1.

    super().__init__ is intentionally deferred to fit() because the cap
    depends on the width of X.
    """

    def __init__(self, target_dim=5):
        self.target_dim = int(target_dim)
        self._fitted = False

    def fit(self, X, y=None):
        """Construct the base estimator for X's width and fit it."""
        super().__init__(
            n_components=min(self.target_dim, X.shape[1] - 1),
            algorithm="randomized",
        )
        super().fit(X)
        self._fitted = True
        return self

    def transform(self, X):
        """Project X onto the truncated singular vectors; requires fit()."""
        if self._fitted:
            return super().transform(X)
        raise NotImplementedError("The model has not been fitted yet!")
|
PanyiDong/AutoML | archive/_model.py | """
File: _model.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Relative Path: /archive/_model.py
File Created: Friday, 25th February 2022 6:13:42 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Wednesday, 6th April 2022 12:31:28 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import math
import copy
import numpy as np
import pandas as pd
import scipy
import scipy.stats
from scipy.optimize import fsolve
import sympy as sym
from sympy import *
from sympy import Abs, solveset, symbols, S
from sympy.functions import sign
import matplotlib
import matplotlib.pyplot as plt
# Linear Regression Model
# Using Matrix form calculation for least square
# Symbolic derivatives for gradient descent
class My_LinearRegression(object):
    """
    Linear Regression Models

    Parameters
    ----------
    solution: how to solve linear regression, default = 'LS',
    allows 'LS' (least square) and 'GS' (gradient descent)

    backtracking: whether using backtracking to select gradient descent steps, default = False

    method: how to achieve gradient descent, default = 'linear'
    'linear' uses pre-calculated gradient, 'symbolic' uses symbolic formula (takes time)

    ini_beta: initial linear regression coefficients, default = None
    None gives a 0 vector, or use list/array as initial coefficients

    plot: whether to plot loss during gradient descent, default = False

    delta: initial/fixed gradient descent step size, default = 0.05

    max_iter: maximum number of iterations for gradient descent, contains computational time, default = 1000

    loss_limit: loss-change threshold to stop iteration, default = 10 ** (-5)

    backtracking_c: constant for the Armijo-Goldstein test in backtracking, default = 0.5

    backtracking_tau: multiplicative shrink applied to delta during backtracking, default = 0.5
    """

    def __init__(
        self,
        solution="LS",
        backtracking=False,
        method="linear",
        ini_beta=None,
        plot=False,
        delta=0.05,
        max_iter=1000,
        loss_limit=10 ** (-5),
        backtracking_c=0.5,
        backtracking_tau=0.5,
    ):
        self.solution = solution
        self.backtracking = backtracking
        self.method = method
        self.ini_beta = ini_beta
        self.plot = plot
        self.delta = delta
        self.max_iter = max_iter
        self.loss_limit = loss_limit
        self.backtracking_c = backtracking_c
        self.backtracking_tau = backtracking_tau

    def Loss_Cal(self, x, y, beta, include_gradient=False):
        """Mean squared error of x @ beta against y; optionally also its gradient."""
        hat_y = np.dot(x, beta)
        err = y - hat_y
        loss = np.mean(err**2)
        if include_gradient:
            # d(MSE)/d(beta) = -2 * mean(err * x), one row per coefficient
            gradient = -2 * np.mean(err.T * x.T, axis=1).reshape((len(beta), 1))
            return (loss, gradient)
        else:
            return loss

    def Metric_Cal(self, x, y, beta):
        """Mean absolute error of x @ beta against y."""
        hat_y = np.dot(x, beta)
        err = y - hat_y
        metric = np.mean(np.abs(err))
        return metric

    def fit(self, x, y):
        """Fit the model by least squares ('LS') or gradient descent ('GS')."""
        if self.solution == "LS":
            self.Least_Square(x, y)
        elif self.solution == "GS":
            # BUGFIX: Gradient_Descent's signature is (x, y) only — the
            # options (backtracking, method, ini_beta, plot) are read from
            # self inside it. The previous call passed them positionally and
            # raised TypeError on every 'GS' fit.
            self.Gradient_Descent(x, y)
        else:
            raise ValueError("Not recognizing solution!")

    def Least_Square(self, x, y):
        """Closed-form least squares fit; stores coefficients and fit statistics.

        Expects x as an (m, n) array-like of features (no intercept column;
        one is prepended internally) and y as an (m, 1) column of responses.
        """
        if len(y) != len(x):
            raise ValueError("Arrays must have same sizes.")

        m = len(x)
        n = len(x[0])
        dof = m - n - 2  # degree of freedom

        # Calculate the properties of the Linear Model
        # BUGFIX: was np.mean(x[:][i]) — x[:][i] is *row* i, not column i;
        # column means are intended here
        x_mean = [np.mean(np.array(x)[:, i]) for i in range(n)]
        y_mean = np.mean(y)

        # Calculate estimates of regression coefficients with matrix calculation
        # b is estimates of true regression parameters \beta
        # (X' * X) * b = X' * y  # X' for transpose of X
        x = np.array(np.insert(x, 0, 1, axis=1), dtype=np.float64).tolist()
        A = np.array(np.dot(np.array(x).T, x), dtype=np.float64)
        b = np.dot(np.array(x).T, y)
        if np.linalg.det(A) != 0:
            beta = np.linalg.solve(A, b)
        else:
            # singular normal equations: fall back to the minimum-norm solution
            beta = np.linalg.lstsq(A, b, rcond=None)[0]

        regress = np.dot(x, beta)
        residual = y - regress
        residual_mean = np.mean(residual)
        loss = sum(item**2 for item in residual) / len(residual)
        metric = sum(np.abs(item) for item in residual) / len(residual)
        RSE = np.sqrt(
            sum(i**2 for i in residual) / dof
        )  # Residual Standard Error, unbiased

        # 95 percent Confidence Interval on regression parameters
        # Standard error matrix
        # SE = s^{2} * (X' * X)^{-1}
        variance_matrix = (RSE**2) * np.array(np.linalg.inv(A), dtype=np.float64)
        SE = []
        for i in range(n + 1):
            SE.append(np.sqrt(variance_matrix[i][i]))

        # 95% Confidence interval of estimated coefficients
        # with Normal distribution
        # stored as 2D tuple, [min, max] form
        CI = []
        for i in range(n + 1):
            CI.append(
                [
                    beta[i] - scipy.stats.norm.ppf(0.975) * SE[i],
                    beta[i] + scipy.stats.norm.ppf(0.975) * SE[i],
                ]
            )

        # Sum of Squares
        TSS = sum((y[i] - y_mean) ** 2 for i in range(m))  # Total Sum of Squares
        RSS = sum((y[i] - regress[i]) ** 2 for i in range(m))  # Residual Sum of Squares
        ESS = sum(
            (regress[i] - y_mean) ** 2 for i in range(m)
        )  # Explained Sum of Squares
        R_squared = ESS / TSS

        # Estimate for test error
        # C_{p}
        hat_sigma_squared = sum(
            (residual[i] - residual_mean) ** 2 for i in range(m)
        ) / (m - 1)
        C_P = (RSS + 2 * n * hat_sigma_squared) / m
        # AIC
        AIC = (RSS + 2 * n * hat_sigma_squared) / (m * hat_sigma_squared)
        # BIC
        BIC = (RSS + math.log(m) * n * hat_sigma_squared) / m
        # Adjusted R^{2}
        Adjusted_R_squared = 1 - (RSS / (m - n - 1)) / (TSS / (m - 1))

        # Save these properties as attributes
        self.dof = dof
        self.x_mean = [round(item, 4) for item in x_mean]
        self.y_mean = round(y_mean, 4)
        self.beta = [item for item in beta]
        self.loss = loss
        self.metric = metric
        self.variance_matrix = variance_matrix
        self.CI = CI
        self.RSE = RSE
        self.TSS = TSS
        self.RSS = RSS
        self.ESS = ESS
        self.R_squared = R_squared
        self.C_P = C_P
        self.AIC = AIC
        self.BIC = BIC
        self.Adjusted_R_squared = Adjusted_R_squared

    def Gradient_Descent(self, x, y):
        """Iterative fit by gradient descent on the MSE loss.

        Reads backtracking/method/ini_beta/plot options from self; stores
        the final coefficients, loss and metric as attributes.
        """
        x = np.array(x)
        y = np.array(y)

        # Basic of the data to fit
        if len(y) != len(x):
            raise ValueError("Arrays must have same sizes.")

        m = len(x)
        n = len(x[0])
        dof = m - n - 2  # degree of freedom

        # Calculate the properties of the Linear Model
        # BUGFIX: was x[:][i] (row i) — column means are intended
        x_mean = [np.mean(x[:, i]) for i in range(n)]
        y_mean = np.mean(y)

        # Calculate estimates of regression coefficients with matrix calculation
        # b is estimates of true regression parameters \beta
        x = np.array(np.insert(x, 0, 1, axis=1), dtype=np.float64)

        # initial states
        # BUGFIX: identity check (`is None`), not `== None` — the latter is
        # ambiguous when ini_beta is an array
        if self.ini_beta is None:
            beta = [[0] for i in range(n + 1)]
        else:
            beta = self.ini_beta
        iter_time = 0
        delta_loss = np.inf  # np.Infinity was removed in NumPy 2.0
        _delta = self.delta
        time_series = []
        loss_series = []
        metric_series = []

        if (
            self.method == "symbolic"
        ):  # symbolic differential calculation, much slower but more compatible
            beta_vector = [[symbols("beta" + str(i))] for i in range(n + 1)]

            # loss function and differential in symbolic form
            hat_y = np.dot(x, beta_vector)
            loss = np.mean([(y[i][0] - hat_y[i][0]) ** 2 for i in range(len(hat_y))])
            loss = sym.simplify(loss)
            partial_loss = [[diff(loss, item[0])] for item in beta_vector]
            eval_partial_loss = lambdify(
                [item[0] for item in beta_vector], partial_loss
            )

        while iter_time < self.max_iter and delta_loss > self.loss_limit:
            loss, gradient = self.Loss_Cal(x, y, beta, include_gradient=True)
            if self.method == "symbolic":
                grad = eval_partial_loss(*[item[0] for item in beta])
                new_beta = beta - _delta * np.array(grad) / m
            elif self.method == "linear":
                new_beta = beta - _delta * gradient
            new_loss = self.Loss_Cal(x, y, new_beta, include_gradient=False)

            if not self.backtracking:
                # fixed-step update
                beta = new_beta
                new_metric = self.Metric_Cal(x, y, beta)
                delta_loss = np.abs(new_loss - loss)
                _delta = copy.deepcopy(self.delta)  # reset delta
                iter_time = iter_time + 1
                time_series.append(iter_time)
                loss_series.append(new_loss)
                metric_series.append(new_metric)
            else:
                # backtracking line search: accept the step only if it passes
                # the Armijo-Goldstein test, otherwise shrink delta and retry
                if new_loss <= loss - self.backtracking_c * _delta * np.sum(
                    gradient**2
                ):
                    beta = new_beta
                    new_metric = self.Metric_Cal(x, y, beta)
                    delta_loss = np.abs(new_loss - loss)
                    _delta = copy.deepcopy(self.delta)  # reset delta
                    iter_time = iter_time + 1
                    time_series.append(iter_time)
                    loss_series.append(new_loss)
                    metric_series.append(new_metric)
                else:
                    _delta = _delta * self.backtracking_tau

        if self.plot:
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
            ax1.scatter(time_series, loss_series, color="red")
            ax1.plot(time_series, loss_series, color="red", label="Loss")
            ax1.set_xlabel("Iteration Times")
            ax1.set_ylabel("Loss")
            ax1.legend()
            ax2.scatter(time_series, metric_series, color="red")
            ax2.plot(time_series, metric_series, color="red", label="Metric")
            ax2.set_xlabel("Iteration Times")
            ax2.set_ylabel("Metric")
            ax2.legend()
            plt.show()

        if delta_loss > self.loss_limit and iter_time >= self.max_iter:
            print(
                "Maximum Iteration reached, last change in loss function is", delta_loss
            )
        self.beta = beta
        self.loss = new_loss
        self.metric = new_metric

    def predict(self, x):
        """Predict responses for new feature rows x (no intercept column).

        Returns a list of predictions rounded to 4 decimals.
        """
        if len(x[0]) != len(self.beta) - 1:
            raise ValueError("Length of inputs not matching regression model!")
        x0 = []
        for row in x:
            # prepend the intercept term to match the fitted coefficients
            x0.append(np.insert(row, 0, 1).tolist())
        return [round(np.dot(np.array(item).T, self.beta)[0], 4) for item in x0]
from sklearn.linear_model import (
LinearRegression,
LogisticRegression,
Lasso,
Ridge,
LassoCV,
RidgeCV,
)
from sklearn.ensemble import (
RandomForestClassifier,
RandomForestRegressor,
ExtraTreesClassifier,
ExtraTreesRegressor,
)
|
PanyiDong/AutoML | My_AutoML/_hpo/_utils.py | <gh_stars>0
"""
File: _utils.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Last Version: 0.2.1
Relative Path: /My_AutoML/_hpo/_utils.py
File Created: Tuesday, 10th May 2022 10:27:56 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Wednesday, 11th May 2022 12:14:37 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import warnings
from typing import Callable
from inspect import getfullargspec
import scipy
import numpy as np
import pandas as pd
# from logging import warning
import os
import json
from ray import tune
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from My_AutoML._utils._data import formatting
from My_AutoML._utils._file import save_methods
from My_AutoML._utils._data import train_test_split
class Pipeline:
    """End-to-end AutoML pipeline: encoder -> imputer -> balancing ->
    scaling -> feature_selection -> model.

    Every stage is optional (``None`` skips it).  Stages use this project's
    own interfaces rather than sklearn's: ``encoder.fit`` returns the
    *encoded data* (not self), ``imputer.fill`` fills missing values, and at
    predict time encoders re-apply via ``refit`` — see the component classes.
    """

    def __init__(
        self,
        encoder=None,
        imputer=None,
        balancing=None,
        scaling=None,
        feature_selection=None,
        model=None,
    ):
        self.encoder = encoder
        self.imputer = imputer
        self.balancing = balancing
        self.scaling = scaling
        self.feature_selection = feature_selection
        self.model = model

        self._fitted = False  # whether the pipeline is fitted

    def fit(self, X, y=None):
        """Fit every unfitted stage in order, feeding each stage's output to
        the next, then fit the final model.

        NOTE(review): when a stage is already fitted it is skipped and X is
        passed on *untransformed* — presumably callers guarantee that
        pre-fitted stages received already-transformed data; confirm.
        """
        # loop all components, make sure they are fitted
        # if they are not fitted, fit them
        if self.encoder is not None:
            if self.encoder._fitted:
                pass
            else:
                # encoder.fit returns the encoded data, not self
                X = self.encoder.fit(X)
        if self.imputer is not None:
            if self.imputer._fitted:
                pass
            else:
                X = self.imputer.fill(X)
        if self.balancing is not None:
            if self.balancing._fitted:
                pass
            else:
                # balancing may resample rows, so y changes alongside X
                X, y = self.balancing.fit_transform(X, y)
        if self.scaling is not None:
            if self.scaling._fitted:
                pass
            else:
                self.scaling.fit(X, y)
                X = self.scaling.transform(X)
        if self.feature_selection is not None:
            if self.feature_selection._fitted:
                pass
            else:
                self.feature_selection.fit(X, y)
                X = self.feature_selection.transform(X)

        if scipy.sparse.issparse(X):  # check if returns sparse matrix
            X = X.toarray()

        if self.model is None:
            raise ValueError("model is not defined!")
        if self.model._fitted:
            pass
        else:
            self.model.fit(X, y)

        self._fitted = True

        return self

    def predict(self, X):
        """Run X through the fitted preprocessing stages (balancing is
        train-only and skipped) and return the model's predictions."""
        if not self._fitted:
            raise ValueError("Pipeline is not fitted!")

        if self.encoder is not None:
            # refit re-applies the already-learned encoding to new data
            X = self.encoder.refit(X)
        if self.imputer is not None:
            X = self.imputer.fill(X)
        # no need for balancing
        if self.scaling is not None:
            X = self.scaling.transform(X)
        if self.feature_selection is not None:
            X = self.feature_selection.transform(X)

        return self.model.predict(X)

    def predict_proba(self, X):
        """Same preprocessing as predict(), but return class probabilities."""
        if not self._fitted:
            raise ValueError("Pipeline is not fitted!")

        if self.encoder is not None:
            X = self.encoder.refit(X)
        if self.imputer is not None:
            X = self.imputer.fill(X)
        # no need for balancing
        if self.scaling is not None:
            X = self.scaling.transform(X)
        if self.feature_selection is not None:
            X = self.feature_selection.transform(X)

        return self.model.predict_proba(X)
class ClassifierEnsemble(formatting):
    """Voting ensemble of fitted Pipelines for classification.

    ``estimators`` is a list of ``(name, Pipeline)`` tuples.  Inherits from
    the project's ``formatting`` helper, which records the label set at fit
    time and maps integer votes back to original labels on predict.
    """

    def __init__(
        self,
        estimators,
        voting="hard",
        weights=None,
    ):
        self.estimators = estimators
        self.voting = voting  # "hard" = majority vote, "soft" = mean probability
        self.weights = weights  # optional per-estimator weights

        # initialize the formatting
        super(ClassifierEnsemble, self).__init__(
            inplace=False,
        )

        self._fitted = False

    def fit(self, X, y):
        """Validate the configuration, remember label formatting from y, and
        fit any member Pipeline that is not fitted yet."""
        # check for voting type
        if not self.voting in ["hard", "soft"]:
            raise ValueError("voting must be either 'hard' or 'soft'")
        # format the weights
        # NOTE(review): this zip only truncates weights to the number of
        # estimators (values pass through unchanged) — presumably intended
        # to align the two lists; confirm.
        self.weights = (
            [w for est, w in zip(self.estimators, self.weights)]
            if self.weights is not None
            else None
        )

        # remember all unique labels
        super(ClassifierEnsemble, self).fit(y)

        # remember the name of response
        if isinstance(y, pd.Series):
            self._response = [y.name]
        elif isinstance(y, pd.DataFrame):
            self._response = list(y.columns)
        elif isinstance(y, np.ndarray):
            # wrap the array so downstream formatting has a column name
            y = pd.DataFrame(y, columns=["response"])
            self._response = ["response"]

        # check for estimators type
        if not isinstance(self.estimators, list):
            raise TypeError("estimators must be a list")
        for item in self.estimators:
            if not isinstance(item, tuple):
                raise TypeError("estimators must be a list of tuples.")
            if not isinstance(item[1], Pipeline):
                raise TypeError(
                    "estimators must be a list of tuples of (name, Pipeline)."
                )
            # make sure all estimators are fitted
            if not item[1]._fitted:
                item[1].fit(X, y)

        self._fitted = True

        return self

    def predict(self, X):
        """Combine member predictions by hard (weighted majority) or soft
        (weighted mean probability) voting and map results back to the
        original label values via ``formatting.refit``."""
        if not self._fitted:
            raise ValueError("Ensemble is not fitted!")

        if self.voting == "hard":
            # calculate predictions for all pipelines
            # shape after .T: (n_samples, n_estimators)
            pred_list = np.asarray(
                [pipeline.predict(X) for (name, pipeline) in self.estimators]
            ).T
            # per-sample weighted majority vote over estimator outputs
            pred = np.apply_along_axis(
                lambda x: np.argmax(np.bincount(x, weights=self.weights)),
                axis=1,
                arr=pred_list,
            )
        elif self.voting == "soft":
            # calculate probabilities for all pipelines
            # shape: (n_estimators, n_samples, n_classes)
            prob_list = np.asarray(
                [pipeline.predict_proba(X) for (name, pipeline) in self.estimators]
            )
            # class with the highest weighted average probability
            pred = np.argmax(
                np.average(prob_list, axis=0, weights=self.weights), axis=1
            )

        # make sure all predictions are seen
        if isinstance(pred, pd.DataFrame):
            return super(ClassifierEnsemble, self).refit(pred)
        # if not dataframe, convert to dataframe for formatting
        else:
            return super(ClassifierEnsemble, self).refit(
                pd.DataFrame(pred, columns=self._response)
            )
class RegressorEnsemble(formatting):
    """Voting ensemble of fitted regression pipelines.

    Member predictions are aggregated by a named strategy ("mean", "median",
    "max", "min") or by a user-supplied callable. Weights are forwarded to the
    aggregation function only when it accepts a ``weights`` argument.
    """

    def __init__(
        self,
        estimators,
        voting="mean",
        weights=None,
    ):
        self.estimators = estimators
        self.voting = voting
        self.weights = weights
        self._fitted = False
        # named aggregation strategies, resolved to callables at fit time
        self._voting_methods = {
            "mean": np.average,
            "median": np.median,
            "max": np.max,
            "min": np.min,
        }

    def fit(self, X, y):
        """Resolve the voting strategy, align weights, and fit unfitted members."""
        # resolve the voting strategy: user callable, named method, or error
        if isinstance(self.voting, Callable):
            pass  # use the supplied aggregation function as-is
        elif self.voting in ("mean", "median", "max", "min"):
            self.voting = self._voting_methods[self.voting]
        else:
            raise ValueError(
                "voting must be either 'mean', 'median', 'max', 'min' or a callable"
            )

        # align/truncate the weight list to the estimator list
        if self.weights is not None:
            self.weights = [w for _, w in zip(self.estimators, self.weights)]

        # validate the estimator container shape
        if not isinstance(self.estimators, list):
            raise TypeError("estimators must be a list")
        for entry in self.estimators:
            if not isinstance(entry, tuple):
                raise TypeError("estimators must be a list of tuples.")
            member = entry[1]
            if not isinstance(member, Pipeline):
                raise TypeError(
                    "estimators must be a list of tuples of (name, Pipeline)."
                )
            # fit any member pipeline that has not been fitted yet
            if not member._fitted:
                member.fit(X, y)

        self._fitted = True
        return self

    def predict(self, X):
        """Aggregate member predictions with the resolved voting function."""
        if not self._fitted:
            raise ValueError("Ensemble is not fitted!")

        # stack predictions so rows are samples, columns are estimators
        per_model = [member.predict(X) for _, member in self.estimators]
        stacked = np.asarray(per_model).T

        # forward weights only when the aggregation function accepts them
        if "weights" in getfullargspec(self.voting).args:
            return self.voting(stacked, axis=1, weights=self.weights)
        # weights were supplied but the voting function cannot use them
        if self.weights is not None:
            warnings.warn("weights are not used in voting method")
        return self.voting(stacked, axis=1)
class TabularObjective(tune.Trainable):
    """Ray Tune trainable that evaluates one full preprocessing+model pipeline.

    ``setup`` stores the component pools and objective settings and, when
    training data is present, instantiates the concrete components encoded in
    the tune ``config``. ``step`` runs ``_objective`` once and reports a
    status dict (including ``loss``). Checkpointing persists that status dict
    as JSON. Side effects: writes ``hyperparameter_settings.txt``,
    ``objective_process.txt``, ``train/test_preprocessed.csv``,
    ``testing_objective.txt`` and the saved model file into the working
    directory.
    """

    def setup(
        self,
        config,
        _X=None,
        _y=None,
        encoder=None,
        imputer=None,
        balancing=None,
        scaling=None,
        feature_selection=None,
        models=None,
        model_name="model",
        task_mode="classification",
        objective="accuracy",
        validation=True,
        valid_size=0.15,
        full_status=False,
        reset_index=True,
        timeout=36,
        _iter=1,
        seed=1,
    ):
        """Store component pools / objective settings; build components from config.

        ``encoder`` .. ``models`` are name->class mappings (pools) that
        ``dict2config`` indexes into. ``timeout`` is stored but not enforced
        here — see the commented ``wrap_timeout`` decorator below.
        """
        # assign hyperparameter arguments (component pools)
        self.encoder = encoder
        self.imputer = imputer
        self.balancing = balancing
        self.scaling = scaling
        self.feature_selection = feature_selection
        self.models = models

        # assign objective parameters
        self._X = _X
        self._y = _y
        self.model_name = model_name
        self.task_mode = task_mode
        self.objective = objective
        self.validation = validation
        self.valid_size = valid_size
        self.full_status = full_status
        self.reset_index = reset_index
        self.timeout = timeout
        self._iter = _iter
        self.seed = seed

        # only materialize components when real training data is attached
        if isinstance(self._X, pd.DataFrame):
            self.dict2config(config)

    def step(self):
        """Run the objective once and return its status dict to Ray Tune."""
        # try:
        #     self.status_dict = self._objective()
        # except:
        #     warnings.warn("Objective not finished due to timeout.")
        #     if self.full_status:
        #         self.status_dict = {
        #             "encoder": self._encoder,
        #             "encoder_hyperparameter": self._encoder_hyper,
        #             "imputer": self._imputer,
        #             "imputer_hyperparameter": self._imputer_hyper,
        #             "balancing": self._balancing,
        #             "balancing_hyperparameter": self._balancing_hyper,
        #             "scaling": self._scaling,
        #             "scaling_hyperparameter": self._scaling_hyper,
        #             "feature_selection": self._feature_selection,
        #             "feature_selection_hyperparameter": self._feature_selection_hyper,
        #             "model": self._model,
        #             "model_hyperparameter": self._model_hyper,
        #             "training_status": "not fitted",
        #             "status": "TIMEOUT",
        #         }
        #     else:
        #         self.status_dict = {
        #             "training_status": "not fitted",
        #             "status": "TIMEOUT",
        #         }
        self.status_dict = self._objective()

        return self.status_dict

    def reset_config(self, new_config):
        """Rebuild components for a new config (enables trainable reuse)."""
        self.dict2config(new_config)

        return True

    # convert dict hyperparameter to actual classes
    def dict2config(self, params):
        """Instantiate encoder/imputer/balancing/scaling/feature-selection/model.

        ``params`` maps each stage name to a hyperparameter dict that contains
        one ``<stage>_...`` key naming the chosen method plus that method's
        hyperparameters prefixed with the method name; the prefixes are
        stripped before the component classes are constructed. Also appends
        the chosen settings to ``hyperparameter_settings.txt``.
        """
        # pipeline of objective, [encoder, imputer, balancing, scaling, feature_selection, model]
        # select encoder and set hyperparameters

        # issue 1: https://github.com/PanyiDong/My_AutoML/issues/1
        # HyperOpt hyperparameter space conflicts with ray.tune
        # while setting hyperparameters space,
        # the method name is injected into the hyperparameter space
        # so, before fitting, these indications are removed

        # must have encoder
        self._encoder_hyper = params["encoder"].copy()
        # find corresponding encoder key
        for key in self._encoder_hyper.keys():
            if "encoder_" in key:
                _encoder_key = key
                break
        self._encoder = self._encoder_hyper[_encoder_key]
        del self._encoder_hyper[_encoder_key]
        # remove indications
        self._encoder_hyper = {
            k.replace(self._encoder + "_", ""): self._encoder_hyper[k]
            for k in self._encoder_hyper
        }
        self.enc = self.encoder[self._encoder](**self._encoder_hyper)

        # select imputer and set hyperparameters
        self._imputer_hyper = params["imputer"].copy()
        # find corresponding imputer key
        for key in self._imputer_hyper.keys():
            if "imputer_" in key:
                _imputer_key = key
                break
        self._imputer = self._imputer_hyper[_imputer_key]
        del self._imputer_hyper[_imputer_key]
        # remove indications
        self._imputer_hyper = {
            k.replace(self._imputer + "_", ""): self._imputer_hyper[k]
            for k in self._imputer_hyper
        }
        self.imp = self.imputer[self._imputer](**self._imputer_hyper)

        # select balancing and set hyperparameters
        # must have balancing, since no_preprocessing is included
        self._balancing_hyper = params["balancing"].copy()
        # find corresponding balancing key
        for key in self._balancing_hyper.keys():
            if "balancing_" in key:
                _balancing_key = key
                break
        self._balancing = self._balancing_hyper[_balancing_key]
        del self._balancing_hyper[_balancing_key]
        # remove indications
        self._balancing_hyper = {
            k.replace(self._balancing + "_", ""): self._balancing_hyper[k]
            for k in self._balancing_hyper
        }
        self.blc = self.balancing[self._balancing](**self._balancing_hyper)

        # select scaling and set hyperparameters
        # must have scaling, since no_preprocessing is included
        self._scaling_hyper = params["scaling"].copy()
        # find corresponding scaling key
        for key in self._scaling_hyper.keys():
            if "scaling_" in key:
                _scaling_key = key
                break
        self._scaling = self._scaling_hyper[_scaling_key]
        del self._scaling_hyper[_scaling_key]
        # remove indications
        self._scaling_hyper = {
            k.replace(self._scaling + "_", ""): self._scaling_hyper[k]
            for k in self._scaling_hyper
        }
        self.scl = self.scaling[self._scaling](**self._scaling_hyper)

        # select feature selection and set hyperparameters
        # must have feature selection, since no_preprocessing is included
        self._feature_selection_hyper = params["feature_selection"].copy()
        # find corresponding feature_selection key
        for key in self._feature_selection_hyper.keys():
            if "feature_selection_" in key:
                _feature_selection_key = key
                break
        self._feature_selection = self._feature_selection_hyper[_feature_selection_key]
        del self._feature_selection_hyper[_feature_selection_key]
        # remove indications
        self._feature_selection_hyper = {
            k.replace(self._feature_selection + "_", ""): self._feature_selection_hyper[
                k
            ]
            for k in self._feature_selection_hyper
        }
        self.fts = self.feature_selection[self._feature_selection](
            **self._feature_selection_hyper
        )

        # select model model and set hyperparameters
        # must have a model
        self._model_hyper = params["model"].copy()
        # find corresponding model key
        for key in self._model_hyper.keys():
            if "model_" in key:
                _model_key = key
                break
        self._model = self._model_hyper[_model_key]
        del self._model_hyper[_model_key]
        # remove indications
        self._model_hyper = {
            k.replace(self._model + "_", ""): self._model_hyper[k]
            for k in self._model_hyper
        }
        self.mol = self.models[self._model](
            **self._model_hyper
        )  # call the model using passed parameters

        # obj_tmp_directory = self.temp_directory  # + "/iter_" + str(self._iter + 1)
        # if not os.path.isdir(obj_tmp_directory):
        #     os.makedirs(obj_tmp_directory)
        # with open(obj_tmp_directory + "/hyperparameter_settings.txt", "w") as f:

        # if already exists, use append mode
        # else, write mode
        if not os.path.exists("hyperparameter_settings.txt"):
            write_type = "w"
        else:
            write_type = "a"

        # log the chosen method + hyperparameters of every stage for debugging
        with open("hyperparameter_settings.txt", write_type) as f:
            f.write("Encoding method: {}\n".format(self._encoder))
            f.write("Encoding Hyperparameters:")
            print(self._encoder_hyper, file=f, end="\n\n")
            f.write("Imputation method: {}\n".format(self._imputer))
            f.write("Imputation Hyperparameters:")
            print(self._imputer_hyper, file=f, end="\n\n")
            f.write("Balancing method: {}\n".format(self._balancing))
            f.write("Balancing Hyperparameters:")
            print(self._balancing_hyper, file=f, end="\n\n")
            f.write("Scaling method: {}\n".format(self._scaling))
            f.write("Scaling Hyperparameters:")
            print(self._scaling_hyper, file=f, end="\n\n")
            f.write("Feature Selection method: {}\n".format(self._feature_selection))
            f.write("Feature Selection Hyperparameters:")
            print(self._feature_selection_hyper, file=f, end="\n\n")
            f.write("Model: {}\n".format(self._model))
            f.write("Model Hyperparameters:")
            print(self._model_hyper, file=f, end="\n\n")

    def save_checkpoint(self, tmp_checkpoint_dir):
        """Persist the latest status dict as JSON in the checkpoint directory."""
        checkpoint_path = os.path.join(tmp_checkpoint_dir, "status.json")
        with open(checkpoint_path, "w") as out_f:
            json.dump(self.status_dict, out_f)

        return tmp_checkpoint_dir

    def load_checkpoint(self, tmp_checkpoint_dir):
        """Restore the status dict written by ``save_checkpoint``."""
        checkpoint_path = os.path.join(tmp_checkpoint_dir, "status.json")
        with open(checkpoint_path, "r") as inp_f:
            self.status_dict = json.load(inp_f)

    # # wrapped timeout decorator
    # def wrap_timeout(f):
    #     def wrapper(*args):
    #         timeout(args[0].timeout)
    #         return f(*args)

    #     return wrapper

    # # actual objective function
    # @wrap_timeout
    @ignore_warnings(category=ConvergenceWarning)
    def _objective(
        self,
    ):
        """Fit the configured pipeline and return a status dict with its loss.

        With ``self.validation`` a train/valid split is evaluated; otherwise
        the loss is computed on the training data itself. Metrics where
        larger is better are negated so the tuner always minimizes ``loss``.
        Writes progress/result files and saves the fitted components via
        ``save_methods``.
        """
        # different evaluation metrics for classification and regression
        # notice: if add metrics that is larger the better, need to add - sign
        # at actual fitting process below (since try to minimize the loss)
        if self.task_mode == "regression":
            # evaluation for predictions
            if self.objective == "MSE":
                from sklearn.metrics import mean_squared_error

                _obj = mean_squared_error
            elif self.objective == "MAE":
                from sklearn.metrics import mean_absolute_error

                _obj = mean_absolute_error
            elif self.objective == "MSLE":
                from sklearn.metrics import mean_squared_log_error

                _obj = mean_squared_log_error
            elif self.objective == "R2":
                from sklearn.metrics import r2_score

                _obj = r2_score
            elif self.objective == "MAX":
                from sklearn.metrics import (
                    max_error,
                )  # focus on reducing extreme losses

                _obj = max_error
            elif isinstance(self.objective, Callable):
                # if callable, use the callable
                _obj = self.objective
            else:
                raise ValueError(
                    'Mode {} only support ["MSE", "MAE", "MSLE", "R2", "MAX", callable], get{}'.format(
                        self.task_mode, self.objective
                    )
                )
        elif self.task_mode == "classification":
            # evaluation for predictions
            if self.objective == "accuracy":
                from sklearn.metrics import accuracy_score

                _obj = accuracy_score
            elif self.objective == "precision":
                from sklearn.metrics import precision_score

                _obj = precision_score
            elif self.objective == "auc":
                from sklearn.metrics import roc_auc_score

                _obj = roc_auc_score
            elif self.objective == "hinge":
                from sklearn.metrics import hinge_loss

                _obj = hinge_loss
            elif self.objective == "f1":
                from sklearn.metrics import f1_score

                _obj = f1_score
            elif isinstance(self.objective, Callable):
                # if callable, use the callable
                _obj = self.objective
            else:
                raise ValueError(
                    'Mode {} only support ["accuracy", "precision", "auc", "hinge", "f1", callable], get{}'.format(
                        self.task_mode, self.objective
                    )
                )

        if self.validation:
            # only perform train_test_split when validation
            # train test split so the performance of model selection and
            # hyperparameter optimization can be evaluated
            X_train, X_test, y_train, y_test = train_test_split(
                self._X, self._y, test_perc=self.valid_size, seed=self.seed
            )

            if self.reset_index:
                # reset index to avoid indexing order error
                X_train.reset_index(drop=True, inplace=True)
                X_test.reset_index(drop=True, inplace=True)
                y_train.reset_index(drop=True, inplace=True)
                y_test.reset_index(drop=True, inplace=True)

            # work on copies so self._X / self._y stay untouched
            _X_train_obj, _X_test_obj = X_train.copy(), X_test.copy()
            _y_train_obj, _y_test_obj = y_train.copy(), y_test.copy()

            # encoding
            _X_train_obj = self.enc.fit(_X_train_obj)
            _X_test_obj = self.enc.refit(_X_test_obj)
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write("Encoding finished, in imputation process.")
            # imputer
            _X_train_obj = self.imp.fill(_X_train_obj)
            _X_test_obj = self.imp.fill(_X_test_obj)
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write("Imputation finished, in scaling process.")
            # balancing (train split only — resampling test data would bias eval)
            _X_train_obj, _y_train_obj = self.blc.fit_transform(
                _X_train_obj, _y_train_obj
            )
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write("Balancing finished, in scaling process.")
            # make sure the classes are integers (belongs to certain classes)
            if self.task_mode == "classification":
                _y_train_obj = _y_train_obj.astype(int)
                _y_test_obj = _y_test_obj.astype(int)
            # scaling
            self.scl.fit(_X_train_obj, _y_train_obj)
            _X_train_obj = self.scl.transform(_X_train_obj)
            _X_test_obj = self.scl.transform(_X_test_obj)
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write("Scaling finished, in feature selection process.")
            # feature selection
            self.fts.fit(_X_train_obj, _y_train_obj)
            _X_train_obj = self.fts.transform(_X_train_obj)
            _X_test_obj = self.fts.transform(_X_test_obj)
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write(
                    "Feature selection finished, in {} model.".format(self.task_mode)
                )
            # fit model
            if scipy.sparse.issparse(_X_train_obj):  # check if returns sparse matrix
                _X_train_obj = _X_train_obj.toarray()
            if scipy.sparse.issparse(_X_test_obj):
                _X_test_obj = _X_test_obj.toarray()

            # store the preprocessed train/test datasets
            if isinstance(_X_train_obj, np.ndarray):  # in case numpy array is returned
                pd.concat(
                    [pd.DataFrame(_X_train_obj), _y_train_obj],
                    axis=1,
                    ignore_index=True,
                ).to_csv("train_preprocessed.csv", index=False)
            elif isinstance(_X_train_obj, pd.DataFrame):
                pd.concat([_X_train_obj, _y_train_obj], axis=1).to_csv(
                    "train_preprocessed.csv", index=False
                )
            else:
                raise TypeError("Only accept numpy array or pandas dataframe!")

            if isinstance(_X_test_obj, np.ndarray):
                pd.concat(
                    [pd.DataFrame(_X_test_obj), _y_test_obj],
                    axis=1,
                    ignore_index=True,
                ).to_csv("test_preprocessed.csv", index=False)
            elif isinstance(_X_test_obj, pd.DataFrame):
                pd.concat([_X_test_obj, _y_test_obj], axis=1).to_csv(
                    "test_preprocessed.csv", index=False
                )
            else:
                raise TypeError("Only accept numpy array or pandas dataframe!")

            self.mol.fit(_X_train_obj, _y_train_obj.values.ravel())
            # progress marker no longer needed once the model is fitted
            os.remove("objective_process.txt")

            y_pred = self.mol.predict(_X_test_obj)
            if self.objective in [
                "R2",
                "accuracy",
                "precision",
                "auc",
                "hinge",
                "f1",
            ]:
                # special treatment for ["R2", "accuracy", "precision", "auc", "hinge", "f1"]
                # larger the better, since to minimize, add negative sign
                _loss = -_obj(_y_test_obj.values, y_pred)
            else:
                _loss = _obj(_y_test_obj.values, y_pred)

            # save the fitted model objects
            save_methods(
                self.model_name,
                [self.enc, self.imp, self.blc, self.scl, self.fts, self.mol],
            )

            with open("testing_objective.txt", "w") as f:
                f.write("Loss from objective function is: {:.6f}\n".format(_loss))
                f.write("Loss is calculate using {}.".format(self.objective))
            self._iter += 1

            # since we tries to minimize the objective function, take negative accuracy here
            if self.full_status:
                # tune.report(
                #     encoder=_encoder,
                #     encoder_hyperparameter=_encoder_hyper,
                #     imputer=_imputer,
                #     imputer_hyperparameter=_imputer_hyper,
                #     balancing=_balancing,
                #     balancing_hyperparameter=_balancing_hyper,
                #     scaling=_scaling,
                #     scaling_hyperparameter=_scaling_hyper,
                #     feature_selection=_feature_selection,
                #     feature_selection_hyperparameter=_feature_selection_hyper,
                #     model=_model,
                #     model_hyperparameter=_model_hyper,
                #     fitted_model=_model,
                #     training_status="fitted",
                #     loss=_loss,
                # )
                # only for possible checks
                return {
                    "encoder": self._encoder,
                    "encoder_hyperparameter": self._encoder_hyper,
                    "imputer": self._imputer,
                    "imputer_hyperparameter": self._imputer_hyper,
                    "balancing": self._balancing,
                    "balancing_hyperparameter": self._balancing_hyper,
                    "scaling": self._scaling,
                    "scaling_hyperparameter": self._scaling_hyper,
                    "feature_selection": self._feature_selection,
                    "feature_selection_hyperparameter": self._feature_selection_hyper,
                    "model": self._model,
                    "model_hyperparameter": self._model_hyper,
                    "fitted_model": self._model,
                    "training_status": "fitted",
                    "loss": _loss,
                }
            else:
                # tune.report(
                #     fitted_model=_model,
                #     training_status="fitted",
                #     loss=_loss,
                # )
                # only for possible checks
                return {
                    "fitted_model": self._model,
                    "training_status": "fitted",
                    "loss": _loss,
                }
        else:
            # no validation split: fit and evaluate on the full training data
            _X_obj = self._X.copy()
            _y_obj = self._y.copy()

            # encoding
            _X_obj = self.enc.fit(_X_obj)
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write("Encoding finished, in imputation process.")
            # imputer
            _X_obj = self.imp.fill(_X_obj)
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write("Imputation finished, in scaling process.")
            # balancing
            _X_obj, _y_obj = self.blc.fit_transform(_X_obj, _y_obj)
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write("Balancing finished, in feature selection process.")
            # scaling
            self.scl.fit(_X_obj, _y_obj)
            _X_obj = self.scl.transform(_X_obj)
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write("Scaling finished, in balancing process.")
            # feature selection
            self.fts.fit(_X_obj, _y_obj)
            _X_obj = self.fts.transform(_X_obj)
            # with open(obj_tmp_directory + "/objective_process.txt", "w") as f:
            with open("objective_process.txt", "w") as f:
                f.write(
                    "Feature selection finished, in {} model.".format(self.task_mode)
                )
            # fit model
            if scipy.sparse.issparse(_X_obj):  # check if returns sparse matrix
                _X_obj = _X_obj.toarray()

            # store the preprocessed train/test datasets
            if isinstance(_X_obj, np.ndarray):  # in case numpy array is returned
                pd.concat(
                    [pd.DataFrame(_X_obj), _y_obj],
                    axis=1,
                    ignore_index=True,
                ).to_csv("train_preprocessed.csv", index=False)
            elif isinstance(_X_obj, pd.DataFrame):
                pd.concat([_X_obj, _y_obj], axis=1).to_csv(
                    "train_preprocessed.csv", index=False
                )
            else:
                raise TypeError("Only accept numpy array or pandas dataframe!")

            self.mol.fit(_X_obj, _y_obj.values.ravel())
            # progress marker no longer needed once the model is fitted
            os.remove("objective_process.txt")

            # NOTE(review): evaluation reuses the training data here, so this
            # loss measures in-sample fit rather than generalization
            y_pred = self.mol.predict(_X_obj)

            if self.objective in [
                "R2",
                "accuracy",
                "precision",
                "auc",
                "hinge",
                "f1",
            ]:
                # special treatment for ["R2", "accuracy", "precision", "auc", "hinge", "f1"]
                # larger the better, since to minimize, add negative sign
                _loss = -_obj(_y_obj.values, y_pred)
            else:
                _loss = _obj(_y_obj.values, y_pred)

            # save the fitted model objects
            save_methods(
                self.model_name,
                [self.enc, self.imp, self.blc, self.scl, self.fts, self.mol],
            )

            # with open(obj_tmp_directory + "/testing_objective.txt", "w") as f:
            with open("testing_objective.txt", "w") as f:
                f.write("Loss from objective function is: {:.6f}\n".format(_loss))
                f.write("Loss is calculate using {}.".format(self.objective))
            self._iter += 1

            if self.full_status:
                # tune.report(
                #     encoder=_encoder,
                #     encoder_hyperparameter=_encoder_hyper,
                #     imputer=_imputer,
                #     imputer_hyperparameter=_imputer_hyper,
                #     balancing=_balancing,
                #     balancing_hyperparameter=_balancing_hyper,
                #     scaling=_scaling,
                #     scaling_hyperparameter=_scaling_hyper,
                #     feature_selection=_feature_selection,
                #     feature_selection_hyperparameter=_feature_selection_hyper,
                #     model=_model,
                #     model_hyperparameter=_model_hyper,
                #     fitted_model=_model,
                #     training_status="fitted",
                #     loss=_loss,
                # )
                # only for possible checks
                return {
                    "encoder": self._encoder,
                    "encoder_hyperparameter": self._encoder_hyper,
                    "imputer": self._imputer,
                    "imputer_hyperparameter": self._imputer_hyper,
                    "balancing": self._balancing,
                    "balancing_hyperparameter": self._balancing_hyper,
                    "scaling": self._scaling,
                    "scaling_hyperparameter": self._scaling_hyper,
                    "feature_selection": self._feature_selection,
                    "feature_selection_hyperparameter": self._feature_selection_hyper,
                    "model": self._model,
                    "model_hyperparameter": self._model_hyper,
                    "fitted_model": self._model,
                    "training_status": "fitted",
                    "loss": _loss,
                }
            else:
                # tune.report(
                #     fitted_model=_model,
                #     training_status="fitted",
                #     loss=_loss,
                # )
                # only for possible checks
                return {
                    "fitted_model": self._model,
                    "training_status": "fitted",
                    "loss": _loss,
                }
|
PanyiDong/AutoML | tests/test_utils/test_utils.py | """
File: test_utils.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_utils/test_utils.py
File Created: Friday, 15th April 2022 7:42:15 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 8:10:40 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import numpy as np
import pandas as pd
def test_load_data():
    """load_data should return a dict database whose values are DataFrames."""
    from My_AutoML import load_data

    database = load_data().load("Appendix", "insurance")

    assert isinstance(
        database, dict
    ), "load_data should return a dict database, get {}".format(type(database))
    assert isinstance(
        database["insurance"], pd.DataFrame
    ), "load_data should return a dict database containing dataframes, get {}".format(
        type(database["insurance"])
    )
def test_random_guess():
    """random_guess(p) must return 1 for p=1, 0 for p=0, and 0 or 1 otherwise."""
    from My_AutoML._utils._base import random_guess

    assert random_guess(1) == 1, "random_guess(1) should be 1, get {}".format(
        random_guess(1)
    )
    assert random_guess(0) == 0, "random_guess(0) should be 0, get {}".format(
        random_guess(0)
    )
    # BUG FIX: the original expression called random_guess(0.5) twice, so two
    # independent random draws were compared (first call == 0 OR second call == 1),
    # which fails whenever the draws come out 1 then 0. Draw once and reuse.
    guess = random_guess(0.5)
    assert guess in (0, 1), "random_guess(0.5) should be either 0 or 1, get {}".format(
        guess
    )
def test_random_index():
    """random_index(n) should return a permutation of 0..n-1."""
    from My_AutoML._utils._base import random_index

    # draw once and reuse: the original failure message re-called
    # random_index(5), so it would have reported a different permutation
    # than the one that failed the assertion
    shuffled = random_index(5)
    assert (
        np.sort(shuffled) == np.array([0, 1, 2, 3, 4])
    ).all(), "random_index(5) should contain [0, 1, 2, 3, 4], get {}".format(shuffled)
def test_random_list():
    """random_list(xs) should return a permutation of xs."""
    from My_AutoML._utils._base import random_list

    # draw once and reuse (the original message re-drew a fresh permutation),
    # and name the function under test correctly in the failure message
    # (the original said "random_index(5)")
    shuffled = random_list([0, 1, 2, 3, 4])
    assert (
        np.sort(shuffled) == np.array([0, 1, 2, 3, 4])
    ).all(), "random_list([0, 1, 2, 3, 4]) should contain [0, 1, 2, 3, 4], get {}".format(
        shuffled
    )
def test_is_date():
    """is_date should recognize a column of ISO date strings."""
    from My_AutoML._utils._base import is_date

    frame = pd.DataFrame(
        {
            "col_1": [1, 2, 3, 4, 5],
            "col_2": [
                "2020-01-01",
                "2020-01-02",
                "2020-01-03",
                "2020-01-04",
                "2020-01-05",
            ],
        }
    )

    assert is_date(frame, rule="all"), "The is_date method is not correctly done."
def test_feature_rounding():
    """feature_rounding should round float feature columns to whole values."""
    from My_AutoML._utils._base import feature_rounding

    test = pd.DataFrame(
        {
            "col_1": [1, 2, 3, 4, 5],
            "col_2": [1.2, 2.2, 3.2, 4.2, 5.2],
        }
    )

    target_data = pd.DataFrame(
        {
            "col_1": [1, 2, 3, 4, 5],
            "col_2": [1.0, 2.0, 3.0, 4.0, 5.0],
        }
    )

    # idiom fix: comparing the reduced boolean to True was redundant;
    # assert the truthy value directly
    assert (
        (feature_rounding(test) == target_data).all().all()
    ), "The feature_rounding method is not correctly done."
def test_timer():
    """Timer sum/avg/cumsum must be mutually consistent over recorded intervals."""
    from My_AutoML._utils._base import Timer
    import time

    timer = Timer()
    timer.start()
    # PERF: the original slept 4s + 3s for a 7-second test; the assertions
    # below are duration-independent (sum/avg equals the interval count for
    # any positive durations), so short sleeps keep the test fast
    time.sleep(0.2)
    timer.stop()

    timer.start()
    time.sleep(0.1)
    timer.stop()

    # two recorded intervals -> sum / avg is exactly 2
    assert timer.sum() / timer.avg() == 2.0, "The timer is not correctly done."
    assert timer.cumsum()[-1] == timer.sum(), "The timer is not correctly done."
def test_minloc():
    """minloc should return the index of the minimum element."""
    from My_AutoML._utils._base import minloc

    result = minloc([4, 2, 6, 2, 1])
    # bug fix: the failure message claimed "should be 5" while the assertion
    # checks for 4 (index of the minimum value 1)
    assert result == 4, "minloc([4, 2, 6, 2, 1]) should be 4, get {}".format(result)
def test_maxloc():
    """maxloc should return the index of the maximum element."""
    from My_AutoML._utils._base import maxloc

    result = maxloc([4, 2, 6, 2, 1])
    # bug fix: the failure message claimed "should be 5" while the assertion
    # checks for 2 (index of the maximum value 6)
    assert result == 2, "maxloc([4, 2, 6, 2, 1]) should be 2, get {}".format(result)
def test_True_index():
    """True_index should return the positions the utility treats as True."""
    from My_AutoML._utils._base import True_index

    sample = [True, False, 1, 0, "hello", 5]
    assert True_index(sample) == [
        0,
        2,
    ], "True_index([True, False, 1, 0, 'hello', 5]) should be [0, 2], get {}".format(
        True_index(sample)
    )
def test_type_of_script():
    """Under a pytest run, the environment should be detected as a terminal."""
    from My_AutoML._utils._base import type_of_script

    detected = type_of_script()
    assert (
        detected == "terminal"
    ), "type_of_script() should be 'terminal', get {}".format(detected)
def test_as_dataframe():
    """as_dataframe should round-trip between DataFrame and ndarray."""
    from My_AutoML._utils._data import as_dataframe

    converter = as_dataframe()
    as_array = converter.to_array(pd.DataFrame([1, 2, 3, 4]))
    as_frame = converter.to_df(as_array)

    assert isinstance(
        as_array, np.ndarray
    ), "as_dataframe.to_array should return a np.ndarray, get {}".format(type(as_array))
    assert isinstance(
        as_frame, pd.DataFrame
    ), "as_dataframe.to_df should return a pd.DataFrame, get {}".format(type(as_frame))
def test_unify_nan():
    """unify_nan should append *_useNA columns with np.nan for missing markers."""
    from My_AutoML._utils._data import unify_nan

    data = pd.DataFrame(
        np.arange(15).reshape(5, 3),
        columns=["column_1", "column_2", "column_3"],
    )
    data.loc[:, "column_1"] = "novalue"
    data.loc[3, "column_2"] = "None"

    target_data = pd.DataFrame(
        {
            "column_1": ["novalue", "novalue", "novalue", "novalue", "novalue"],
            "column_2": [1, 4, 7, "None", 13],
            "column_3": [2, 5, 8, 11, 14],
            "column_1_useNA": [np.nan, np.nan, np.nan, np.nan, np.nan],
            "column_2_useNA": [1, 4, 7, np.nan, 13],
        }
    )

    converted = unify_nan(data)
    # string-compare to sidestep NaN != NaN semantics
    assert (
        (converted.astype(str) == target_data.astype(str)).all().all()
    ), "unify_nan should return target dataframe {}, get {}".format(
        target_data, converted
    )
def test_remove_index_columns():
    """remove_index_columns should return DataFrames for scalar and list thresholds."""
    from My_AutoML._utils._data import remove_index_columns

    data = pd.DataFrame(
        {
            "col_1": [1, 1, 1, 1, 1],
            "col_2": [1, 2, 3, 4, 5],
            "col_3": [1, 2, 3, 4, 5],
            "col_4": [1, 2, 3, 4, 5],
            "col_5": [1, 2, 3, 4, 5],
        }
    )

    results = [
        # scalar threshold; axis=0 with the raw ndarray exercises the array input path
        remove_index_columns(data.values, axis=0, threshold=0.8),
        remove_index_columns(data, axis=1, threshold=0.8, save=True),
        # one threshold per row/column
        remove_index_columns(data, axis=0, threshold=[0.8, 0.8, 0.8, 0.8, 0.8]),
        remove_index_columns(
            data, axis=1, threshold=[0.8, 0.8, 0.8, 0.8, 0.8], save=True
        ),
    ]
    for result in results:
        assert isinstance(
            result, pd.DataFrame
        ), "remove_index_columns should return a pd.DataFrame, get {}".format(
            type(result)
        )
def test_nan_cov():
    """nan_cov should skip NaN entries when computing covariance."""
    from My_AutoML._utils._stat import nan_cov

    cov = nan_cov(pd.DataFrame([4, 5, 6, np.nan, 1, np.nan]))
    assert cov[0, 0] == 2.8, "nan_cov returns not as expected."
def test_class_means():
    """class_means should return per-class feature means as a list."""
    from My_AutoML._utils._stat import class_means

    X = pd.DataFrame(
        {
            "col_1": [1, 2, 3, 4, 5],
            "col_2": [1, 2, 3, 4, 5],
        }
    )
    y = pd.Series([1, 1, 1, 0, 0])

    means = class_means(X, y)
    assert isinstance(means, list), "class_means should return a list, get {}".format(
        type(means)
    )
def test_empirical_covariance():
    """empirical_covariance should produce a numpy covariance matrix."""
    from My_AutoML._utils import empirical_covariance

    sample = 10 * np.random.random(size=(10, 10))
    cov = empirical_covariance(sample)
    assert isinstance(
        cov, np.ndarray
    ), "empirical_covariance should return a np.ndarray, get {}".format(type(cov))
def test_class_cov():
    """class_cov should return a prior-weighted covariance as a numpy array."""
    from My_AutoML._utils._stat import class_cov

    features = np.arange(10)
    labels = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 1])

    cov = class_cov(features, labels, priors=[0.2, 0.8])
    assert isinstance(
        cov, np.ndarray
    ), "class_cov should return a numpy array, get {}".format(type(cov))
def test_MI():
    """MI should return one mutual-information value per response column."""
    from My_AutoML._utils._stat import MI

    X = pd.DataFrame(np.arange(20).reshape(10, 2), columns=["X_1", "X_2"])
    y = pd.DataFrame(np.random.randint(0, 2, size=(10, 2)), columns=["y_1", "y_2"])

    scores = MI(X, y)
    assert len(scores) == 2, "MI should return a list of length 2, get {}".format(
        len(scores)
    )
def test_t_score():
    """t_score should return one score per response column (with/without p-values)."""
    from My_AutoML._utils._stat import t_score

    X = pd.DataFrame(np.arange(20).reshape(10, 2), columns=["X_1", "X_2"])
    y = pd.DataFrame(np.random.randint(0, 2, size=(10, 2)), columns=["y_1", "y_2"])

    score = t_score(X, y)
    # exercise the pvalue=True code path; the original bound the results to
    # unused locals (fvalue, pvalue), so the call alone is sufficient
    t_score(X, y, pvalue=True)
    assert len(score) == 2, "t_score should return a list of length 2, get {}".format(
        len(score)
    )
def test_ANOVA():
    """ANOVA should return one score per response column (with/without p-values)."""
    from My_AutoML._utils._stat import ANOVA

    X = pd.DataFrame(np.arange(20).reshape(10, 2), columns=["X_1", "X_2"])
    y = pd.DataFrame(np.random.randint(0, 5, size=(10, 2)), columns=["y_1", "y_2"])

    score = ANOVA(X, y)
    # exercise the pvalue=True code path; the original bound the results to
    # unused locals (fvalue, pvalue), so the call alone is sufficient
    ANOVA(X, y, pvalue=True)
    assert len(score) == 2, "ANOVA should return a list of length 2, get {}".format(
        len(score)
    )
def test_get_algo():
    """get_algo should resolve known search-algorithm names and accept callables."""
    from My_AutoML._utils._optimize import get_algo

    # always-available search algorithms
    for name in ("GridSearch", "HyperOpt", "Repeater", "ConcurrencyLimiter"):
        get_algo(name)

    # optional backends: skip silently when the extra dependency is missing
    for name in ("AxSearch", "BlendSearch", "CFO", "HEBO", "Nevergrad"):
        try:
            get_algo(name)
        except ImportError:
            pass

    # a callable should be accepted as-is
    get_algo(get_algo)

    assert True, "The get_algo method is not correctly done."
def test_get_scheduler():
    """get_scheduler should resolve known names and pass callables through."""
    from My_AutoML._utils._optimize import get_scheduler

    # always-available schedulers
    for required in (
        "FIFOScheduler",
        "ASHAScheduler",
        "HyperBandScheduler",
        "MedianStoppingRule",
        "PopulationBasedTraining",
        "PopulationBasedTrainingReplay",
    ):
        get_scheduler(required)

    # these need optional third-party packages; absence is acceptable
    for optional in ("PB2", "HyperBandForBOHB"):
        try:
            get_scheduler(optional)
        except ImportError:
            pass

    # a raw callable should be accepted as-is
    get_scheduler(get_scheduler)

    assert True, "The get_scheduler method is not correctly done."
def test_get_progress_reporter():
    """Both supported progress-reporter types should be constructible."""
    from My_AutoML._utils._optimize import get_progress_reporter

    for reporter in ("CLIReporter", "JupyterNotebookReporter"):
        get_progress_reporter(reporter, max_evals=64, max_error=4)
def test_get_logger():
    """All supported logger names should be accepted in a single call."""
    from My_AutoML._utils._optimize import get_logger

    logger_names = ["Logger", "TBX", "JSON", "CSV", "MLflow"]
    get_logger(logger_names)
def test_save_model():
    """save_model should write a model file with the requested name."""
    from My_AutoML._utils._file import save_model

    save_model(
        "encoder",
        "encoder_hyperparameters",
        "imputer",
        "imputer_hyperparameters",
        "balancing",
        "balancing_hyperparameters",
        "scaling",
        "scaling_hyperparameters",
        "feature_selection",
        "feature_selection_hyperparameters",
        "model",
        "model_hyperparameters",
        "model_name",
    )

    # truthiness check instead of the `== True` anti-idiom
    assert os.path.exists("model_name"), "The model is not saved."
def test_formatting():
    """formatting should fit on a train split and refit on train/test splits."""
    from My_AutoML._utils._data import formatting

    data = pd.read_csv("Appendix/insurance.csv")
    test = data.iloc[:100, :]
    train = data.iloc[100:, :]

    formatter = formatting()
    formatter.fit(train)
    formatter.refit(train)
    formatter.refit(test)

    assert True, "The formatting is not correctly done."
def test_get_missing_matrix():
    """get_missing_matrix should flag NaN-like entries as 1 and valid ones as 0."""
    from My_AutoML._utils._data import get_missing_matrix

    frame = pd.DataFrame(
        {
            "col_1": [1, 2, 3, np.nan, 4, "NA"],
            "col_2": [7, "novalue", "none", 10, 11, None],
            "col_3": [
                np.nan,
                "3/12/2000",
                "3/13/2000",
                np.nan,
                "3/12/2000",
                "3/13/2000",
            ],
        }
    )
    frame["col_3"] = pd.to_datetime(frame["col_3"])

    # expected indicator matrix: 1 marks a missing-like entry
    expected = pd.DataFrame(
        {
            "col_1": [0, 0, 0, 1, 0, 1],
            "col_2": [0, 1, 1, 0, 0, 1],
            "col_3": [1, 0, 0, 1, 0, 0],
        }
    )

    matches = get_missing_matrix(frame) == expected
    assert matches.all().all(), "The missing matrix is not correct."
def test_extremeclass():
    """ExtremeClass.cut should run cleanly on a random integer frame."""
    from My_AutoML._utils._data import ExtremeClass

    frame = pd.DataFrame(
        np.random.randint(0, 10, size=(100, 10)),
        columns=["col_" + str(i) for i in range(10)],
    )

    cutter = ExtremeClass(extreme_threshold=0.9)
    frame = cutter.cut(frame)

    assert True, "The extreme class is not correctly done."
def test_assign_classes():
    """assign_classes should pick the argmax class for every row."""
    from My_AutoML._utils._data import assign_classes

    probabilities = [[0.9, 0.1], [0.2, 0.8]]
    expected = np.array([0, 1])

    assert (
        assign_classes(probabilities) == expected
    ).all(), "The classes are not correctly assigned."
def test_has_method():
    """has_method should detect an existing method and reject a missing one."""
    from My_AutoML._utils._base import has_method
    from sklearn.linear_model import LogisticRegression

    mol = LogisticRegression()
    # truthiness checks instead of `== True` / `== False` anti-idioms
    assert has_method(mol, "fit"), "The has_method function is not correct."
    assert not has_method(mol, "__fit"), "The has_method function is not correct."
def test_neg_metrics():
    """Each neg_* metric must equal the negated sklearn counterpart."""
    from My_AutoML._utils._stat import (
        neg_R2,
        neg_accuracy,
        neg_precision,
        neg_auc,
        neg_hinge,
        neg_f1,
    )
    from sklearn.metrics import (
        r2_score,
        accuracy_score,
        precision_score,
        roc_auc_score,
        hinge_loss,
        f1_score,
    )

    y_true = np.random.randint(0, 2, size=(100,))
    y_pred = np.random.randint(0, 2, size=(100,))

    # (negated metric, sklearn reference, name used in the failure message)
    pairs = [
        (neg_R2, r2_score, "neg_R2"),
        (neg_accuracy, accuracy_score, "neg_accuracy"),
        (neg_precision, precision_score, "neg_precision"),
        (neg_auc, roc_auc_score, "neg_auc"),
        (neg_hinge, hinge_loss, "neg_hinge"),
        (neg_f1, f1_score, "neg_f1"),
    ]
    for negated, reference, name in pairs:
        assert negated(y_true, y_pred) == -1 * reference(
            y_true, y_pred
        ), "The {} function is not correct.".format(name)
def test_get_estimator():
    """Every estimator name (and a raw class) should resolve to a fit/predict object."""
    from My_AutoML._utils._optimize import get_estimator
    from My_AutoML._utils._base import has_method
    from sklearn.linear_model import LinearRegression

    candidates = (
        "Lasso",
        "Ridge",
        "ExtraTreeRegressor",
        "RandomForestRegressor",
        "LogisticRegression",
        "ExtraTreeClassifier",
        "RandomForestClassifier",
        LinearRegression,
    )
    for candidate in candidates:
        estimator = get_estimator(candidate)
        assert has_method(estimator, "fit") and has_method(
            estimator, "predict"
        ), "The estimator is not correctly called."
def test_get_metrics():
    """Every metric name (and a raw callable) should resolve to a callable."""
    from My_AutoML._utils._optimize import get_metrics
    from sklearn.metrics import accuracy_score
    from typing import Callable

    candidates = (
        "neg_accuracy",
        "accuracy",
        "neg_precision",
        "precision",
        "neg_auc",
        "auc",
        "neg_hinge",
        "hinge",
        "neg_f1",
        "f1",
        "MSE",
        "MAE",
        "MSLE",
        "neg_R2",
        "R2",
        "MAX",
        accuracy_score,
    )
    for candidate in candidates:
        resolved = get_metrics(candidate)
        assert isinstance(resolved, Callable), "The metrics are not correctly called."
def test_is_none():
    """is_none should be True only for None."""
    from My_AutoML._utils._base import is_none

    # truthiness checks instead of `== True` / `== False` anti-idioms
    assert is_none(None), "The is_none function is not correct."
    assert not is_none("not none"), "The is_none function is not correct."
def test_softmax():
    """softmax output rows should sum to one for 1-D and 2-D inputs."""
    from My_AutoML._utils._data import softmax

    a = np.array([0.1, -0.1, 1])
    # np.allclose instead of exact float equality: softmax row sums are
    # only equal to 1 up to floating-point rounding
    assert np.allclose(
        softmax(a).sum(axis=1), np.ones(3)
    ), "The softmax function is not correct."

    a = np.array([[0.1, 0.2], [0.1, -0.2], [1, 0]])
    assert np.allclose(
        softmax(a).sum(axis=1), np.ones(3)
    ), "The softmax function is not correct."
|
PanyiDong/AutoML | My_AutoML/_base.py | """
File: _base.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_base.py
File Created: Friday, 8th April 2022 12:15:11 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 30th April 2022 12:49:24 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import glob
import numpy as np
import pandas as pd
import warnings
# R environment
# check if rpy2 available
# if not, can not read R type data
import importlib
rpy2_spec = importlib.util.find_spec("rpy2")
if rpy2_spec is not None:
import rpy2
import rpy2.robjects as ro
from rpy2.robjects import Formula, pandas2ri
from rpy2.robjects.conversion import localconverter
from rpy2.robjects.packages import importr
class no_processing:
    """
    No-op processing step, used as a baseline for comparison.

    Implements the common fit/fill/transform interface but returns every
    input unchanged.
    """

    def __init__(self):
        self._fitted = False  # record whether the method has been fitted

    def fit(self, X, y=None):
        """Mark the step as fitted; X and y are ignored."""
        self._fitted = True
        return self

    def fill(self, X):
        """Imputation-style entry point; returns X unchanged."""
        self._fitted = True
        return X

    def transform(self, X):
        """Return X unchanged."""
        return X

    def fit_transform(self, X, y=None):
        """
        Return X unchanged; also return y when a non-empty y is given.

        y counts as empty when it is None or an all-NaN pandas/numpy
        container.
        """
        self._fitted = True

        if isinstance(y, pd.DataFrame):
            _empty = y.isnull().all().all()
        elif isinstance(y, pd.Series):
            _empty = y.isnull().all()
        elif isinstance(y, np.ndarray):
            _empty = np.all(np.isnan(y))
        else:
            # identity check instead of `y == None`, which triggers an
            # elementwise comparison on array-like inputs
            _empty = y is None

        if _empty:
            return X
        else:
            return X, y

    def inverse_transform(self, X):
        """Return X unchanged; resets the fitted flag (original behavior)."""
        self._fitted = False
        return X
class load_data:
    """
    Load all matching data files under one or more paths into a dict.

    .csv/.data/.asc files are read with pandas; .rda/.rdata files are
    read through rpy2 when that package is installed.

    Parameters
    ----------
    data_type: matching data file types, default = 'all'
        supported types ('all', '.csv', '.asc', '.data', '.rda', '.rdata')
    """

    def __init__(self, data_type="all"):
        self.data_type = data_type
        self.database = {}  # mapping of file stem -> loaded DataFrame

    def load(self, path, filename=None):
        """
        Search `path` (one path string or a list of them) and load every
        matching file into self.database.

        filename optionally restricts the search to one stem or a list
        of stems (without extension).
        """
        if isinstance(path, list):  # ensure every path ends with '/'
            path = [
                (_path if (_path == "" or _path[-1] == "/") else _path + "/")
                for _path in path
            ]
        else:
            path = [(path if (path == "" or path[-1] == "/") else path + "/")]

        for _path in path:
            self._main(_path, filename)
        return self.database

    def _main(self, path, filename):
        # initialize path sets
        _csv_files = []
        _data_files = []
        _rda_files = []
        _rdata_files = []

        # load .csv/.data/.asc files in the path
        if (
            self.data_type == ".csv"
            or self.data_type == ".data"
            or self.data_type == ".asc"
            or self.data_type == "all"
        ):
            if self.data_type == ".csv" or self.data_type == "all":
                if filename is None:
                    _csv_files = glob.glob(path + "*.csv")
                elif isinstance(filename, list):
                    _csv_files = []
                    for _filename in filename:
                        _csv_files += glob.glob(path + _filename + ".csv")
                else:
                    _csv_files = glob.glob(path + filename + ".csv")

            if self.data_type == ".data" or self.data_type == "all":
                if filename is None:
                    _data_files = glob.glob(path + "*.data")
                elif isinstance(filename, list):
                    _data_files = []
                    for _filename in filename:
                        _data_files += glob.glob(path + _filename + ".data")
                else:
                    _data_files = glob.glob(path + filename + ".data")

            if self.data_type == ".asc" or self.data_type == "all":
                # accumulate with += so the .asc matches do not clobber
                # the .data matches found just above when data_type == "all"
                # (the previous assignment here discarded the .data files)
                if filename is None:
                    _data_files += glob.glob(path + "*.asc")
                elif isinstance(filename, list):
                    for _filename in filename:
                        _data_files += glob.glob(path + _filename + ".asc")
                else:
                    _data_files += glob.glob(path + filename + ".asc")

            if not _csv_files and self.data_type == ".csv":
                warnings.warn("No .csv files found!")
            elif not _data_files and self.data_type == ".data":
                warnings.warn("No .data file found!")
            elif not _data_files and self.data_type == ".asc":
                warnings.warn("No .asc file found!")
            elif _csv_files + _data_files:
                for _data_path in _csv_files + _data_files:
                    # os.path.split handles both '/' and '\\' separators
                    _filename = os.path.split(_data_path)[-1]
                    self.database[_filename.split(".")[0]] = pd.read_csv(_data_path)

        # load .rda/.rdata files in the path
        # will not read any files if rpy2 is not available
        if rpy2_spec is None:
            if self.data_type == ".rda" or self.data_type == ".rdata":
                raise ImportError("Require rpy2 package, package not found!")
            if self.data_type == "all":
                pass
        else:
            if (
                self.data_type == ".rda"
                or self.data_type == ".rdata"
                or self.data_type == "all"
            ):
                if self.data_type == ".rda" or self.data_type == "all":
                    if filename is None:
                        _rda_files = glob.glob(path + "*.rda")
                    elif isinstance(filename, list):
                        _rda_files = []
                        for _filename in filename:
                            _rda_files += glob.glob(path + _filename + ".rda")
                    else:
                        _rda_files = glob.glob(path + filename + ".rda")

                if self.data_type == ".rdata" or self.data_type == "all":
                    if filename is None:
                        _rdata_files = glob.glob(path + "*.rdata")
                    elif isinstance(filename, list):
                        _rdata_files = []
                        for _filename in filename:
                            _rdata_files += glob.glob(path + _filename + ".rdata")
                    else:
                        _rdata_files = glob.glob(path + filename + ".rdata")

                if not _rda_files and self.data_type == ".rda":
                    warnings.warn("No .rda file found!")
                elif not _rdata_files and self.data_type == ".rdata":
                    warnings.warn("No .rdata file found!")
                elif _rda_files + _rdata_files:
                    for _data_path in _rda_files + _rdata_files:
                        # os.path.split handles both '/' and '\\' separators
                        _filename = os.path.split(_data_path)[-1]
                        # load the R object into the R session, then convert
                        # it to a pandas DataFrame
                        ro.r('load("' + _data_path + '")')
                        ro.r("rdata = " + _filename.split(".")[0])
                        with localconverter(ro.default_converter + pandas2ri.converter):
                            self.database[
                                _filename.split(".")[0]
                            ] = ro.conversion.rpy2py(ro.r.rdata)

        if self.data_type == "all" and not self.database:
            warnings.warn("No file found!")
|
PanyiDong/AutoML | tests/test_base.py | """
File: test_base.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_base.py
File Created: Sunday, 17th April 2022 6:46:02 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 30th April 2022 11:03:27 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def test_load_data():
    """load_data should pick up every dataset shipped in Appendix/."""
    from My_AutoML._base import load_data
    import importlib

    rpy2_spec = importlib.util.find_spec("rpy2")

    database = load_data().load("Appendix")

    database_names = [
        "heart_2020_cleaned",
        "Employee",
        "insurance",
        "Medicalpremium",
        "TravelInsurancePrediction",
        "healthcare-dataset-stroke-data",
        "heart",
        "hurricanehist",
    ]
    # the .rda credit dataset is only readable when rpy2 is installed;
    # both branches previously duplicated an identical assert
    if rpy2_spec is not None:
        database_names.append("credit")

    assert set(database.keys()) == set(
        database_names
    ), "Not all databases are loaded."
|
PanyiDong/AutoML | My_AutoML/_balancing/_over_sampling.py | """
File: _over_sampling.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_balancing/_over_sampling.py
File Created: Wednesday, 6th April 2022 12:20:56 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 9th April 2022 11:01:48 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
import warnings
import sklearn
import sklearn.utils
from My_AutoML._utils._data import is_imbalance, LinkTable
"""
Reference for: Simple Random Over Sampling, Simple Random Under Sampling, Tomek Link, \
Edited Nearest Neighbor, Condensed Nearest Neighbor, One Sided Selection, CNN_TomekLink, \
Smote, Smote_TomekLink, Smote_ENN
<NAME>., <NAME>. and <NAME>., 2004. A study of the behavior of several methods for
balancing machine learning training data. ACM SIGKDD explorations newsletter, 6(1), pp.20-29.
"""
class SimpleRandomOverSampling:
    """
    Simple Random Over-Sampling

    Randomly draw samples from the minority class and replicate them
    until the class distribution is no longer considered imbalanced.

    Parameters
    ----------
    imbalance_threshold: determine to what extent will the data be considered as imbalanced data, default = 0.9

    all: whether to stop until all features are balanced, default = False

    max_iter: Maximum number of iterations for over-/under-sampling, default = 1000

    seed: random seed, default = 1
        every random draw from the minority class will increase the random seed by 1
    """

    def __init__(
        self,
        imbalance_threshold=0.9,
        all=False,
        max_iter=1000,
        seed=1,
    ):
        self.imbalance_threshold = imbalance_threshold
        self.all = all
        self.max_iter = max_iter
        self.seed = seed

        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        """
        Balance X (and y, when given) by replicating minority rows.

        Returns the balanced X, or (balanced X, balanced y) when a
        non-empty y is provided.
        """
        # y is either missing (None) or a dataframe, whose .empty
        # attribute marks an empty frame
        try:
            _empty = y.empty
        except AttributeError:
            # identity check instead of `y == None`, which would do an
            # elementwise comparison on array-like inputs
            _empty = y is None

        if (
            not _empty
        ):  # if no y input, only convert X; or, combine X and y to consider balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X

        _data = data.copy(deep=True)
        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            if self.all:
                # keep resampling until no feature is imbalanced
                while is_imbalance(_data, self.imbalance_threshold):
                    _data = self._fit_transform(_data)
            else:
                _data = self._fit_transform(_data)

        self._fitted = True

        if not _empty:  # return balanced X and y if y is also inputted
            return _data[features], _data[response]
        else:
            return _data

    def _fit_transform(self, X):
        """Random over-sampling to balance the first imbalanced feature."""
        _imbalanced_feature, _majority = is_imbalance(
            X, self.imbalance_threshold, value=True
        )
        _seed = self.seed
        _iter = 0
        while (
            is_imbalance(X[[_imbalanced_feature]], self.imbalance_threshold)
            and _iter <= self.max_iter
        ):
            # duplicate one random minority row per iteration
            _minority_class = X.loc[X[_imbalanced_feature] != _majority]
            X = pd.concat([X, _minority_class.sample(n=1, random_state=_seed)])
            _seed += 1
            _iter += 1

        # shuffle so replicated rows are not clustered at the end
        X = sklearn.utils.shuffle(X.reset_index(drop=True)).reset_index(drop=True)

        return X
class Smote:
    """
    Synthetic Minority Over-sampling Technique (Smote)

    use over-sampling to generate minority class points using nearest neighbors

    Parameters
    ----------
    imbalance_threshold: determine to what extent will the data be considered as imbalanced data, default = 0.9

    norm: how the distance between different samples calculated, default = 'l2'
        all supported norm ['l1', 'l2']

    all: whether to stop until all features are balanced, default = False

    max_iter: Maximum number of iterations for over-/under-sampling, default = 1000

    seed: random seed, default = 1
        every random draw from the minority class will increase the random seed by 1

    k: number of nearest neighbors to choose from, default = 5
        the link sample will be chosen from these k nearest neighbors

    generation: how to generation new sample, default = 'mean'
        use link sample and random sample to generate the new sample
    """

    def __init__(
        self,
        imbalance_threshold=0.9,
        norm="l2",
        all=False,
        max_iter=1000,
        seed=1,
        k=5,
        generation="mean",
    ):
        self.imbalance_threshold = imbalance_threshold
        self.norm = norm
        self.all = all
        self.max_iter = max_iter
        self.seed = seed
        self.k = k
        self.generation = generation

        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        """
        Balance X (and y, when given) by synthesizing minority rows.

        Returns the balanced X, or (balanced X, balanced y) when a
        non-empty y is provided.
        """
        # y is either missing (None) or a dataframe, whose .empty
        # attribute marks an empty frame
        try:
            _empty = y.empty
        except AttributeError:
            # identity check instead of `y == None`, which would do an
            # elementwise comparison on array-like inputs
            _empty = y is None

        if (
            not _empty
        ):  # if no y input, only convert X; or, combine X and y to consider balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X

        _data = data.copy(deep=True)
        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            if self.all:
                # keep resampling until no feature is imbalanced
                while is_imbalance(_data, self.imbalance_threshold):
                    _data = self._fit_transform(_data)
            else:
                _data = self._fit_transform(_data)

        self._fitted = True

        if not _empty:
            return _data[features], _data[response]
        else:
            return _data

    def _fit_transform(self, X):
        """Synthesize rows for the first imbalanced feature until balanced."""
        _imbalanced_feature, _majority = is_imbalance(
            X, self.imbalance_threshold, value=True
        )
        _seed = self.seed
        _iter = 0
        while (
            is_imbalance(X[[_imbalanced_feature]], self.imbalance_threshold)
            and _iter <= self.max_iter
        ):
            # draw one random minority sample and link it to one of its
            # k nearest neighbors
            _minority_class = X.loc[X[_imbalanced_feature] != _majority]
            _sample = _minority_class.sample(n=1, random_state=_seed)
            _link_table = LinkTable(_sample, X, self.norm)
            for _link_item in _link_table:
                _k_nearest = [
                    _link_item.index(item)
                    for item in sorted(_link_item)[1 : (self.k + 1)]
                ]
                _link = _k_nearest[np.random.randint(0, len(_k_nearest))]
                if self.generation == "mean":
                    X.loc[len(X), :] = X.loc[
                        [_sample.index[0], X.index[_link]], :
                    ].mean()
                elif self.generation == "random":
                    # bug fix: `X.lox` -> `X.loc` (the typo raised
                    # AttributeError whenever generation == "random")
                    X.loc[len(X), :] = X.loc[_sample.index, :] + np.random.rand() * (
                        X.loc[X.index[_link], :] - X.loc[_sample.index, :]
                    )
                else:
                    raise ValueError(
                        'Not recognizing generation method! Should be in \
                        ["mean", "random"], get {}'.format(
                            self.generation
                        )
                    )
            _seed += 1
            _iter += 1

        return X
|
PanyiDong/AutoML | My_AutoML/_model/_xgboost.py | """
File: _xgboost.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_model/_xgboost.py
File Created: Friday, 15th April 2022 12:19:22 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 7:13:33 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import xgboost as xgb
from xgboost import XGBClassifier, XGBRegressor
#####################################################################################################################
# XGBoost support
class XGBoost_Base:
    """
    XGBoost Base model

    Parameters
    ----------
    task_type: task type, take either "classification" or "regression", default = "classification"

    eta: step size shrinkage used in update to prevents overfitting, default = 0.3
        alias: learning_rate

    gamma: minimum loss reduction required to make a further partition, default = 0

    max_depth: maximum depth of a tree, default = 6

    min_child_weight: minimum sum of instance weight (hessian) needed in a child, default = 1

    max_delta_step: maximum delta step we allow each leaf output to be, default = 0

    reg_lambda: L2 regularization term on weights, default = 1

    reg_alpha: L1 regularization term on weights, default = 0
    """

    def __init__(
        self,
        task_type="classification",
        eta=0.3,
        gamma=0,
        max_depth=6,
        min_child_weight=1,
        max_delta_step=0,
        reg_lambda=1,
        reg_alpha=0,
    ):
        self.task_type = task_type
        self.eta = eta
        self.gamma = gamma
        self.max_depth = max_depth
        self.min_child_weight = min_child_weight
        self.max_delta_step = max_delta_step
        self.reg_lambda = reg_lambda
        self.reg_alpha = reg_alpha

        self._fitted = False

    def fit(self, X, y):
        """Build the underlying XGBoost model for task_type and fit it."""
        hyperparameters = dict(
            eta=self.eta,
            gamma=self.gamma,
            max_depth=self.max_depth,
            min_child_weight=self.min_child_weight,
            max_delta_step=self.max_delta_step,
            reg_lambda=self.reg_lambda,
            reg_alpha=self.reg_alpha,
        )
        if self.task_type == "classification":
            self.model = XGBClassifier(**hyperparameters)
        elif self.task_type == "regression":
            self.model = XGBRegressor(**hyperparameters)
        else:
            # previously an unknown task_type fell through silently and
            # failed later with AttributeError on self.model
            raise ValueError(
                'task_type must be "classification" or "regression", get {}'.format(
                    self.task_type
                )
            )

        self.model.fit(X, y)

        self._fitted = True

        return self

    def predict(self, X):
        """Predict with the fitted model."""
        return self.model.predict(X)

    def predict_proba(self, X):
        """Predict class probabilities with the fitted model."""
        return self.model.predict_proba(X)
class XGBoost_Classifier(XGBoost_Base):
    """
    XGBoost Classification model

    Thin wrapper around XGBoost_Base with task_type fixed to
    "classification".

    Parameters
    ----------
    eta: step size shrinkage used in update to prevents overfitting, default = 0.3
        alias: learning_rate

    gamma: minimum loss reduction required to make a further partition, default = 0

    max_depth: maximum depth of a tree, default = 6

    min_child_weight: minimum sum of instance weight (hessian) needed in a child, default = 1

    max_delta_step: maximum delta step we allow each leaf output to be, default = 0

    reg_lambda: L2 regularization term on weights, default = 1

    reg_alpha: L1 regularization term on weights, default = 0
    """

    def __init__(
        self,
        eta=0.3,
        gamma=0,
        max_depth=6,
        min_child_weight=1,
        max_delta_step=0,
        reg_lambda=1,
        reg_alpha=0,
    ):
        # the base class stores every hyperparameter and the fitted flag
        super().__init__(
            task_type="classification",
            eta=eta,
            gamma=gamma,
            max_depth=max_depth,
            min_child_weight=min_child_weight,
            max_delta_step=max_delta_step,
            reg_lambda=reg_lambda,
            reg_alpha=reg_alpha,
        )

    def fit(self, X, y):
        # base fit builds the XGBClassifier, sets _fitted and returns self
        return super().fit(X, y)

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        return super().predict_proba(X)
class XGBoost_Regressor(XGBoost_Base):
    """
    XGBoost Regression model

    Thin wrapper around XGBoost_Base with task_type fixed to
    "regression".

    Parameters
    ----------
    eta: step size shrinkage used in update to prevents overfitting, default = 0.3
        alias: learning_rate

    gamma: minimum loss reduction required to make a further partition, default = 0

    max_depth: maximum depth of a tree, default = 6

    min_child_weight: minimum sum of instance weight (hessian) needed in a child, default = 1

    max_delta_step: maximum delta step we allow each leaf output to be, default = 0

    reg_lambda: L2 regularization term on weights, default = 1

    reg_alpha: L1 regularization term on weights, default = 0
    """

    def __init__(
        self,
        eta=0.3,
        gamma=0,
        max_depth=6,
        min_child_weight=1,
        max_delta_step=0,
        reg_lambda=1,
        reg_alpha=0,
    ):
        # the base class stores every hyperparameter and the fitted flag
        super().__init__(
            task_type="regression",
            eta=eta,
            gamma=gamma,
            max_depth=max_depth,
            min_child_weight=min_child_weight,
            max_delta_step=max_delta_step,
            reg_lambda=reg_lambda,
            reg_alpha=reg_alpha,
        )

    def fit(self, X, y):
        # base fit builds the XGBRegressor, sets _fitted and returns self
        return super().fit(X, y)

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        # probability predictions are undefined for regression targets
        raise NotImplementedError("predict_proba is not implemented for regression.")
|
PanyiDong/AutoML | My_AutoML/_imputation/_clustering.py | <reponame>PanyiDong/AutoML<filename>My_AutoML/_imputation/_clustering.py
"""
File: _clustering.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_imputation/_clustering.py
File Created: Tuesday, 5th April 2022 11:50:19 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 16th April 2022 2:55:25 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
import warnings
from functools import partial
import multiprocessing
from multiprocessing import Pool
from My_AutoML._utils import formatting
from My_AutoML._scaling import MinMaxScale
class AAI_kNN(formatting, MinMaxScale):
"""
kNN Imputation/Neighborhood-based Collaborative Filtering with
Auto-adaptive Imputation/AutAI [1]
AutAI's main idea is to distinguish what part of the dataset may
be important for imputation.
----
[1] <NAME>., <NAME>., <NAME>. and <NAME>., 2012, October. The
efficient imputation method for neighborhood-based collaborative
filtering. In Proceedings of the 21st ACM international conference
on Information and knowledge management (pp. 684-693).
Parameters
----------
k: k nearest neighbors selected, default = 3
Odd k preferred
scaling: whether to perform scaling on the features, default = True
similarity: how to calculate similarity among rows, default = 'PCC'
support ['PCC', 'COS']
AutAI: whether to use AutAI, default = True
AutAI_tmp: whether AutAI is temporary imputation or permanent
imputation, default = True
if True (temporary), AutAI imputation will not be preserved, but
can take much longer
threads: number of threads to use, default = -1
if -1, use all threads
deep_copy: whether to deep_copy dataframe, default = False
"""
    def __init__(
        self,
        k=3,
        scaling=True,
        similarity="PCC",
        AutAI=True,
        AutAI_tmp=True,
        threads=-1,
        deep_copy=False,
    ):
        # number of nearest neighbors used for imputation (odd k preferred)
        self.k = k
        # whether to min-max scale features before similarity computation
        self.scaling = scaling
        # row-similarity measure: 'PCC' or 'COS'
        self.similarity = similarity
        # whether to run the auto-adaptive (AutAI) pre-imputation step
        self.AutAI = AutAI
        # if True, AutAI imputations are temporary (not kept in the result)
        self.AutAI_tmp = AutAI_tmp
        # worker threads; -1 means use all available
        self.threads = threads
        # whether to deep-copy dataframes before modifying them
        self.deep_copy = deep_copy

        self._fitted = False  # whether fitted on train set
        self.train = pd.DataFrame()  # store the imputed train set
# calculate Pearson Correlation Coefficient/PCC
# PCC = \sum_{i}(x_{i}-\mu_{x})(y_{i}-\mu_{y}) /
# \sqrt{\sum_{i}(x_{i}-\mu_{x})^{2}\sum_{i}(y_{i}-\mu_{y})^{2}}
def Pearson_Correlation_Coefficient(self, x, y):
# convert to numpy array
x = np.array(x)
y = np.array(y)
# get mean of x and y
u_x = np.nanmean(x)
u_y = np.nanmean(y)
# get numerator and denominator
numerator = np.nansum((x - u_x) * (y - u_y))
denominator = np.sqrt(np.nansum((x - u_x) ** 2) * np.nansum((y - u_y) ** 2))
# special case of denominator being 0
if denominator == 0:
return 1
else:
return numerator / denominator
# calculate Cosine-based similarity/COS
# COS = x * y / (|x|*|y|)
def Cosine_based_similarity(self, x, y):
# convert to numpy array
x = np.array(x)
y = np.array(y)
# get numerator and denominator
numerator = np.nansum(x * y)
denominator = np.sqrt(np.nansum(x**2) * np.nansum(y**2))
# special case of denominator being 0
if denominator == 0:
return 1
else:
return numerator / denominator
# get column values from k nearest neighbors
def _get_k_neighbors(self, test, train, column):
similarity_list = []
for index in list(train.index):
# get similarity between test and all rows in train
if self.similarity == "PCC":
similarity_list.append(
self.Pearson_Correlation_Coefficient(
test.values, train.loc[index].values
)
)
elif self.similarity == "COS":
similarity_list.append(
self.Cosine_based_similarity(test.values, train.loc[index].values)
)
# get index of k largest similarity in list
k_order = np.argsort(similarity_list)[-self.k :]
# convert similarity list order to data index
k_index = [list(train.index)[i] for i in k_order]
# get k largest similarity
k_similarity = [similarity_list[_index] for _index in k_order]
# get the k largest values in the list
k_values = [train.loc[_index, column] for _index in k_index]
return k_values, k_index, k_similarity
    # AutAI imputation
    def _AAI_impute(self, X, index, column):
        """Impute the 'important' neighborhood of missing cell (index, column).

        Following AutAI: collect the rows U_a with an observed value in
        ``column``, and the union T_s of features those rows share with row
        ``index``; then fill the missing cells inside the (U_a, T_s) range
        with similarity-weighted kNN and return the imputed copy of ``X``.
        """
        _X = X.copy(deep=self.deep_copy)
        # (index, column) gives a location of missing value
        # the goal is to find and impute relatively important data
        # get indexes where column values are not missing
        U_a = list(_X.loc[~_X[column].isnull()].index.astype(int))
        # find union of columns where both non_missing (each from above)
        # and missing rows have data
        T_s = [] # ultimate union of features
        loc_non_missing_column = set(_X.columns[~_X.loc[index, :].isnull()])
        for _index in U_a:
            # get all intersection from U_a rows
            non_missing_columns = set(_X.columns[~_X.loc[_index, :].isnull()])
            intersection_columns = loc_non_missing_column.intersection(
                non_missing_columns
            )
            if not T_s: # if empty
                T_s = intersection_columns
            else:
                T_s = T_s.union(intersection_columns)
        T_s = list(T_s) # convert to list
        # range (U_a, T_s) considered important data
        # use kNN with weight of similarity for imputation
        for _column in T_s:
            # NOTE(review): mixes the original argument X and the copy _X;
            # with deep_copy=False these can alias — confirm intended
            for _index in list(set(X[X[_column].isnull()].index) & set(U_a)):
                # get kNN column values, index, and similarity
                k_values, k_index, k_similarity = self._get_k_neighbors(
                    _X.loc[_index, :], _X.loc[~_X[_column].isnull()], _column
                )
                # normalize k_similarity
                k_similarity = [item / sum(k_similarity) for item in k_similarity]
                # get kNN row mean
                k_means = [np.nanmean(_X.loc[_index, :]) for _index in k_index]
                # calculate impute value
                # NOTE(review): the baseline uses the row mean of the *outer*
                # row ``index`` rather than the row ``_index`` being filled —
                # verify against the AutAI paper
                _impute = np.nanmean(_X.loc[index, :])
                for i in range(self.k):
                    _impute += k_similarity[i] * (k_values[i] - k_means[i])
                _X.loc[_index, _column] = _impute
        return _X
    # pool tasks on the index chunks
    # every pool task works on part of the chunks
    def Pool_task(self, X, index_list):
        """Impute all missing cells for the rows in ``index_list``.

        Runs (optionally AutAI-assisted) kNN imputation column by column and
        returns only the chunk of rows this worker is responsible for.
        NOTE(review): ``self.k`` is clamped here; inside a multiprocessing
        worker that mutation affects only the worker's copy of self — confirm
        that is acceptable.
        """
        _X = X.copy(deep=self.deep_copy)
        for _column in self.columns:
            # get missing rows
            # select in index_list and get missing rows
            # NOTE(review): the boolean mask is built on the full frame but
            # applied to the chunk — relies on pandas index alignment; confirm
            missing = _X.loc[index_list].loc[_X[_column].isnull()]
            # make sure k is at least not larger than rows of non_missing
            self.k = min(self.k, len(_X) - len(missing))
            if missing.empty: # if no missing found in the column, skip
                pass
            else:
                for _index in list(missing.index):
                    # if need AutAI, perform AutAI imputation first
                    # if fitted, no need for AutAI, directly run kNN imputation
                    if self.AutAI and not self._fitted:
                        if self.AutAI_tmp:
                            # temporary: AutAI result used only as the neighbor
                            # pool, not written back to _X
                            _X_tmp = self._AAI_impute(_X, _index, _column)
                            # get non-missing (determined by _column) rows
                            non_missing = _X_tmp.loc[~_X_tmp[_column].isnull()]
                        else:
                            # permanent: keep the AutAI-imputed frame
                            _X = self._AAI_impute(_X, _index, _column)
                            # get non-missing (determined by _column) rows
                            non_missing = _X.loc[~_X[_column].isnull()]
                    elif not self._fitted:
                        # get non-missing (determined by _column) rows
                        non_missing = _X.loc[~_X[_column].isnull()]
                    # use kNN imputation for (_index, _column)
                    # if fitted, use imputed dataset for imputation
                    if not self._fitted:
                        k_values, _, _ = self._get_k_neighbors(
                            _X.loc[_index, :], non_missing, _column
                        )
                        _X.loc[_index, _column] = np.mean(k_values)
                    else:
                        k_values, _, _ = self._get_k_neighbors(
                            _X.loc[_index, :], self.train, _column
                        )
                        _X.loc[_index, _column] = np.mean(k_values)
        # return only the working part
        return _X.loc[index_list, :]
def fill(self, X):
# make sure input is a dataframe
if not isinstance(X, pd.DataFrame):
try:
X = pd.DataFrame(X)
except:
raise TypeError("Expect a dataframe, get {}.".format(X))
_X = X.copy(deep=self.deep_copy)
# initialize columns
self.columns = list(_X.columns)
# initialize number of working threads
self.threads = (
multiprocessing.cpu_count() if self.threads == -1 else int(self.threads)
)
if _X[self.columns].isnull().values.any():
_X = self._fill(_X)
else:
warnings.warn("No missing values found, no change.")
return _X
    def _fill(self, X):
        """Internal imputation routine for a frame known to contain NaNs.

        Encodes categoricals (``formatting``), optionally min-max scales,
        runs the kNN imputation in a parallel pool over row chunks, caches
        the first imputed frame as ``self.train``, then undoes scaling and
        encoding before returning.
        """
        _X = X.copy(deep=self.deep_copy)
        # convert categorical to numerical
        formatter = formatting(columns=self.columns, inplace=True)
        formatter.fit(_X)
        # if scaling, use MinMaxScale to scale the features
        if self.scaling:
            scaling = MinMaxScale()
            _X = scaling.fit_transform(_X)
        # kNN imputation
        # parallelized pool workflow
        pool = Pool(processes=self.threads)
        # divide indexes to evenly sized chunks
        divide_index = np.array_split(list(_X.index), self.threads)
        # parallelized work
        pool_data = pool.map(partial(self.Pool_task, _X), divide_index)
        pool.close()
        pool.join()
        # concat the chunks of the dataset
        _X = pd.concat(pool_data).sort_index()
        # convert self._fitted and store self.train
        self._fitted = True
        # only when empty need to store
        # stored train is imputed, formatted, scaled dataset
        self.train = _X.copy() if self.train.empty else self.train
        # if scaling, scale back
        if self.scaling:
            _X = scaling.inverse_transform(_X)
        # convert numerical back to categorical
        formatter.refit(_X)
        return _X
"""
For clustering methods, if get warning of empty mean (all cluster
have nan for the feature) or reduction in number of clusters (get
empty cluster), number of cluster is too large compared to the
data size and should be decreased. However, no error will be
raised, and all observations will be correctly assigned.
"""
class KMI(formatting, MinMaxScale):
    """
    KMI/K-Means Imputation[1], the idea is to incorporate K-Means
    Clustering with kNN imputation.
    NOTE: placeholder — instantiation always raises NotImplementedError.
    ----
    [1] <NAME>., <NAME>. and <NAME>., 2004, December.
    Towards efficient imputation by nearest-neighbors: A clustering-based
    approach. In Australasian Joint Conference on Artificial Intelligence
    (pp. 513-525). Springer, Berlin, Heidelberg.
    Parameters
    ----------
    scaling: whether to perform scaling on the features, default = True
    """
    def __init__(
        self,
        scaling=True,
    ):
        # stored for interface parity with the other imputers
        self.scaling = scaling
        # implementation pending
        raise NotImplementedError("Not implemented!")
class CMI(formatting, MinMaxScale):
    """
    Clustering-based Missing Value Imputation/CMI[1], introduces the idea
    of clustering observations into groups and use the kernel statistics
    to impute the missing values for the groups.
    ----
    [1] <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2008. Missing
    value imputation based on data clustering. In Transactions on computational
    science I (pp. 128-138). Springer, Berlin, Heidelberg.
    Parameters
    ----------
    k: number of cluster groups, default = 10
    distance: metrics of calculating the distance between two rows, default = 'l2'
        used for selecting clustering groups,
        support ['l1', 'l2']
    delta: threshold to stop k Means Clustering, default = 0
        delta defined as number of group assignment changes for clustering
        0 stands for best k Means Clustering
    scaling: whether to use scaling before imputation, default = True
    seed: random seed, default = 1
        used for k group initialization
    threads: number of threads to use, default = -1
        if -1, use all threads
    deep_copy: whether to use deep copy, default = False
    """
    def __init__(
        self,
        k=10,
        distance="l2",
        delta=0,
        scaling=True,
        seed=1,
        threads=-1,
        deep_copy=False,
    ):
        self.k = k
        self.distance = distance
        self.delta = delta
        self.scaling = scaling
        self.seed = seed
        self.threads = threads
        self.deep_copy = deep_copy
        # fix the global numpy RNG for reproducible cluster initialization
        np.random.seed(seed=self.seed)
        self._fitted = False # whether fitted on train set
        self.train = pd.DataFrame() # store the imputed train set
    # calculate distance between row and k group mean
    # 'l1' or 'l2' Euclidean distance
    def _distance(self, row, k):
        """NaN-aware distance from ``row`` to the mean of cluster ``k``."""
        if self.distance == "l2":
            return np.sqrt(np.nansum((row - self.k_means[k]) ** 2))
        elif self.distance == "l1":
            return np.nansum(np.abs(row - self.k_means[k]))
    # get the Gaussian kernel values
    def _kernel(self, row1, row2):
        """Product Gaussian kernel of two rows, using ``self.bandwidth``."""
        return np.prod(
            np.exp(-(((row1 - row2) / self.bandwidth) ** 2) / 2) / np.sqrt(2 * np.pi)
        )
    # get k_means for group k
    def _get_k_means(self, data, k):
        """Column-wise NaN-mean of the rows assigned to cluster ``k``.

        Returns np.array([None]) as an "empty cluster" sentinel, which the
        caller filters out via ``item.all()``.
        """
        # get observations in _k group
        group_data = data.loc[np.where(self.group_assign == k)[0], :]
        if not group_data.empty:
            return np.nanmean(group_data, axis=0)
        else:
            # return None array
            return np.array([None])
    # get group assign for the chunk of data (by index_list)
    def _get_group_assign(self, data, index_list):
        """Assign each row in ``index_list`` to its nearest cluster."""
        result = []
        for _index in index_list:
            # get distance between row and every k groups
            distance = [self._distance(data.loc[_index, :], _k) for _k in range(self.k)]
            # assign the row to closest range group
            result.append(np.argsort(distance)[0])
        return result
    # assign clustering groups according to Euclidean distance
    # only support numerical features
    def _k_means_clustering(self, X):
        """Cluster the rows of X into (at most) ``self.k`` groups.

        First call (not fitted) runs full parallel k-Means iterations and
        stores ``self.k_means``/``self.group_assign``; later calls reuse the
        trained means and only assign the new rows.
        """
        _X = X.copy(deep=self.deep_copy)
        n, p = _X.shape # number of observations
        # make sure self.k is smaller than n
        self.k = min(self.k, n)
        # if not fitted (on train dataset), run complete k Means clustering
        # else, use train k_means for clustering
        if not self._fitted:
            # get bandwidth for the kernel function
            self.bandwidth = np.sum(
                np.abs(_X[self.columns].max() - _X[self.columns].min())
            )
            # initialize k groups
            self.group_assign = np.random.randint(0, high=self.k, size=n)
            # initialize k means
            self.k_means = np.empty([self.k, p])
            # parallelized k means calculation
            pool = Pool(processes=min(int(self.k), self.threads))
            self.k_means = pool.map(partial(self._get_k_means, _X), list(range(self.k)))
            pool.close()
            pool.join()
            # if group empty, raise warning and set new k
            # NOTE(review): item.all() drops the np.array([None]) sentinel,
            # but would also drop a real cluster whose mean vector contains a
            # 0 — confirm this filter is intended
            self.k_means = [item for item in self.k_means if item.all()]
            if len(self.k_means) < self.k:
                warnings.warn("Empty cluster found and removed.")
                self.k = len(self.k_means)
            while True:
                # store the group assignment
                # need deep copy, or the stored assignment will change accordingly
                previous_group_assign = self.group_assign.copy()
                # assign each observation to new group based on k_means
                pool = Pool(processes=self.threads)
                divide_index = np.array_split(list(_X.index), self.threads)
                self.group_assign = pool.map(
                    partial(self._get_group_assign, _X), divide_index
                )
                # flatten 2d list to 1d
                self.group_assign = np.array(np.concatenate(self.group_assign).flat)
                pool.close()
                pool.join()
                # calculate the new k_means
                # parallelized k means calculation
                pool = Pool(processes=min(int(self.k), self.threads))
                self.k_means = pool.map(
                    partial(self._get_k_means, _X), list(range(self.k))
                )
                pool.close()
                pool.join()
                # if group empty, raise warning and set new k
                self.k_means = [item for item in self.k_means if item.all()]
                if len(self.k_means) < self.k:
                    warnings.warn("Empty cluster found and removed.")
                    self.k = len(self.k_means)
                # if k Means constructed, break the loop
                # NOTE(review): this sums |label differences|, not the count
                # of changed assignments that the ``delta`` docstring
                # describes; the two agree only at delta = 0 — confirm
                if (
                    np.sum(np.abs(previous_group_assign - self.group_assign))
                    <= self.delta
                ):
                    break
        else:
            # copy the train group assignment
            self.group_assign_train = self.group_assign.copy()
            self.group_assign = np.zeros(
                n
            ) # re-initialize the group_assign (n may change)
            # assign each observation to new group based on k_means
            pool = Pool(processes=self.threads)
            divide_index = np.array_split(list(_X.index), self.threads)
            self.group_assign = pool.map(
                partial(self._get_group_assign, _X), divide_index
            )
            # flatten 2d list to 1d
            self.group_assign = np.array(np.concatenate(self.group_assign).flat)
            pool.close()
            pool.join()
    # pool tasks on the column chunks
    # every pool task works on part of the chunks
    def Pool_task(self, X, _column, non_missing_index, n, index_list):
        """Impute ``_column`` for the missing rows in ``index_list``.

        Each missing cell becomes a kernel-weighted average of the observed
        values of ``_column`` (taken from X when unfitted, else from the
        cached train set); the ``n ** (-2)`` term regularizes an all-zero
        kernel sum.
        """
        _X = X.copy(deep=self.deep_copy)
        # find missing indexes belongs to _k group
        for _index in index_list:
            if not self._fitted:
                # get the kernel values
                kernel = np.array(
                    [
                        self._kernel(
                            _X.loc[_index, _X.columns != _column],
                            _X.loc[__index, _X.columns != _column],
                        )
                        for __index in non_missing_index
                    ]
                )
                # impute the missing_values
                _X.loc[_index, _column] = np.sum(
                    _X.loc[non_missing_index, _column] * kernel
                ) / (np.sum(kernel) + n ** (-2))
            else:
                # get the kernel values
                kernel = np.array(
                    [
                        self._kernel(
                            _X.loc[_index, _X.columns != _column],
                            self.train.loc[__index, self.train.columns != _column],
                        )
                        for __index in non_missing_index
                    ]
                )
                # impute the missing_values
                _X.loc[_index, _column] = np.sum(
                    self.train.loc[non_missing_index, _column] * kernel
                ) / (np.sum(kernel) + n ** (-2))
        # return group data
        return _X.loc[index_list, _column]
    def fill(self, X):
        """Impute every missing value in X via cluster-local kernel averaging.

        Raises TypeError if X cannot be converted to a dataframe; warns and
        returns an unchanged copy when no missing values are present.
        """
        # make sure input is a dataframe
        if not isinstance(X, pd.DataFrame):
            try:
                X = pd.DataFrame(X)
            except:
                raise TypeError("Expect a dataframe, get {}.".format(type(X)))
        _X = X.copy(deep=self.deep_copy)
        # initialize columns
        self.columns = list(_X.columns)
        # initialize number of working threads
        self.threads = (
            multiprocessing.cpu_count() if self.threads == -1 else int(self.threads)
        )
        if _X[self.columns].isnull().values.any():
            _X = self._fill(_X)
        else:
            warnings.warn("No missing values found, no change.")
        return _X
    def _fill(self, X):
        """Internal routine: encode, scale, cluster, then kernel-impute each
        column per cluster in a parallel pool, caching the first result as
        ``self.train`` before undoing scaling and encoding."""
        _X = X.copy(deep=self.deep_copy)
        # convert categorical to numerical
        formatter = formatting(columns=self.columns, inplace=True)
        formatter.fit(_X)
        # if scaling, use MinMaxScale to scale the features
        if self.scaling:
            scaling = MinMaxScale()
            _X = scaling.fit_transform(_X)
        # imputation on formatted, scaled datasets
        # assign observations to self.k groups
        # get self.group_assign and self.k_means
        # if already fitted (working on test data now), get
        # k_means from train dataset and the group assignment
        # for train dataset
        self._k_means_clustering(_X)
        for _column in self.columns:
            for _k in range(self.k):
                group_index = np.where(self.group_assign == _k)[0]
                n = len(group_index) # number of observations in the group
                # get the missing/non-missing indexes
                missing_index = list(
                    set(_X[_X[_column].isnull()].index) & set(group_index)
                )
                if not self._fitted: # if not fitted, use _X
                    non_missing_index = list(
                        set(_X[~_X[_column].isnull()].index) & set(group_index)
                    )
                else: # if fitted, use self.train
                    # after imputation, there should be no missing in train
                    # so take all of them
                    non_missing_index = np.where(self.group_assign_train == _k)[0]
                # parallelize imputation
                divide_missing_index = np.array_split(missing_index, self.threads)
                pool = Pool(self.threads)
                imputation = pool.map(
                    partial(self.Pool_task, _X, _column, non_missing_index, n),
                    divide_missing_index,
                )
                pool.close()
                pool.join()
                imputation = pd.concat(imputation).sort_index()
                _X.loc[missing_index, _column] = imputation
        # convert self._fitted and store self.train
        self._fitted = True
        # only when empty need to store
        # stored train is imputed, formatted, scaled dataset
        self.train = _X.copy() if self.train.empty else self.train
        # if scaling, scale back
        if self.scaling:
            _X = scaling.inverse_transform(_X)
        # convert numerical back to categorical
        formatter.refit(_X)
        return _X
class k_Prototype_NN(formatting, MinMaxScale):
    """
    Three clustering models are provided: k-Means Paradigm, k-Modes Paradigm,
    k-Prototypes Paradigm [1]
    Parameters
    ----------
    k: number of cluster groups, default = 10
    distance: metrics of calculating the distance between two rows, default = 'l2'
        used for selecting clustering groups,
        support ['l1', 'l2']
    dissimilarity: how to calculate the dissimilarity for categorical columns, default = 'weighted'
        support ['simple', 'weighted']
    scaling: whether to use scaling before imputation, default = True
    numerics: dtypes treated as numerical columns
    seed: random seed, default = 1
        used for k group initialization
    threads: number of threads to use, default = -1
        if -1, use all threads
    deep_copy: whether to use deep copy, default = False
    ----
    [1] <NAME>., <NAME>., <NAME>., <NAME>. and Satapathy,
    S.C., 2014. Cluster analysis on different data sets using K-modes and
    K-prototype algorithms. In ICT and Critical Infrastructure: Proceedings
    of the 48th Annual Convention of Computer Society of India-Vol II
    (pp. 137-144). Springer, Cham.
    """
    def __init__(
        self,
        k=10,
        distance="l2",
        dissimilarity="weighted",
        scaling=True,
        numerics=["int16", "int32", "int64", "float16", "float32", "float64"],
        threads=-1,
        deep_copy=False,
        seed=1,
    ):
        # NOTE(review): mutable default for ``numerics`` — harmless while it
        # is only read, but confirm it is never mutated
        self.k = k
        self.distance = distance
        self.dissimilarity = dissimilarity
        self.scaling = scaling
        self.numerics = numerics
        self.threads = threads
        self.deep_copy = deep_copy
        self.seed = seed
        # fix the global numpy RNG for reproducible initialization
        np.random.seed(self.seed)
        self._fitted = False # check whether fitted on train data
        self.models = {} # save fit models
    # calculate distance between row and k group mean
    # 'l1' or 'l2' Euclidean distance
    def _distance(self, row, k_centroids):
        """Vector of NaN-aware distances from ``row`` to each centroid
        (numerical part only)."""
        # if not defining np.float64, may get float and
        # raise Error not able of iterating
        if self.distance == "l2":
            return np.sqrt(
                np.nansum((row - k_centroids) ** 2, axis=1, dtype=np.float64)
            )
        elif self.distance == "l1":
            return np.nansum(np.abs(row - k_centroids), axis=1, dtype=np.float64)
    # calculate dissimilarity difference between row
    # and k group
    def _dissimilarity(self, row, k_centroids):
        """Vector of categorical dissimilarities from ``row`` to each centroid."""
        k, p = k_centroids.shape
        # simple dissimilarity, number of different categories
        if self.dissimilarity == "simple":
            return np.sum((row.values != k_centroids.values).astype(int), axis=1)
        # weighted dissimilarity, weighted based on number of unique categories
        elif self.dissimilarity == "weighted":
            # set difference between row and k_centroids
            different_matrix = (row.values != k_centroids.values).astype(int)
            # initialize number counts
            row_count = np.empty(p)
            centroids_count = np.empty([k, p])
            for idx, _column in enumerate(list(k_centroids.columns)):
                # get the category
                cate = row[_column]
                # find the corresponding count
                # if get missing value, count as 0
                # NOTE(review): ``not cate`` looks inverted — any truthy
                # category value takes the else branch and gets count 0, and
                # NaN (which is truthy) also gets 0; likely a missing-value
                # check such as pd.isnull(cate) was intended — confirm
                row_count[idx] = (
                    self.categorical_table[_column][cate] if not cate else 0
                )
                for _k in range(k):
                    # get the category
                    cate = k_centroids.loc[_k, _column]
                    # find the corresponding count
                    centroids_count[_k, idx] = self.categorical_table[_column][cate]
            # calculate the weights based on number of categories
            weight = np.empty([k, p])
            # in case get denominator of 0
            for _p in range(p):
                for _k in range(k):
                    weight[_k, _p] = (
                        (row_count[_p] + centroids_count[_k, _p])
                        / (row_count[_p] * centroids_count[_k, _p])
                        if row_count[_p] != 0
                        else 0
                    )
            return np.nansum(np.multiply(weight, different_matrix), axis=1)
    # calculate the measurement for given index
    def _get_group_assign(
        self, data, numerical_columns, categorical_columns, index_list
    ):
        """Assign each row in ``index_list`` to the centroid minimizing
        distance (numerical) + dissimilarity (categorical)."""
        group_assign = []
        for _index in index_list:
            measurement = self._distance(
                data.loc[_index, numerical_columns], self.k_centroids[numerical_columns]
            ) + self._dissimilarity(
                data.loc[_index, categorical_columns],
                self.k_centroids[categorical_columns],
            )
            # assign the observations to closest centroids
            group_assign.append(np.argsort(measurement)[0])
        return group_assign
    # calculate the k_centroids for group k
    def _get_k_centroids(self, data, numerical_columns, categorical_columns, k):
        """Centroid of cluster ``k``: feature means for numerical columns,
        modes for categorical columns; stays all-NaN when the cluster is
        empty (later removed via dropna)."""
        k_centroids = pd.DataFrame(index=[k], columns=data.columns)
        group_data = data.loc[np.where(self.group_assign == k)[0], :]
        # get column means of the group
        # if get empty group_data, no return
        if not group_data.empty:
            # centroids for numerical columns are mean of the features
            k_centroids[numerical_columns] = np.nanmean(
                group_data[numerical_columns], axis=0
            )
            # centroids for categorical columns are modes of the features
            # in case multiple modes occur, get the first one
            k_centroids[categorical_columns] = (
                group_data[categorical_columns].mode(dropna=True).values[0]
            )
        return k_centroids
    # assign clustering groups according to dissimilarity
    # compared to modes
    # best support categorical features
    def _k_modes_clustering(self, X):
        """Placeholder: only clamps ``self.k``; no clustering happens yet."""
        _X = X.copy(deep=self.deep_copy)
        n, p = _X.shape # number of observations
        # make sure self.k is smaller than n
        self.k = min(self.k, n)
    # combine k_Means and k_Modes clustering for mixed
    # numerical/categorical datasets
    # numerical columns will use k_means with distance
    # categorical columns will use k_modes with dissimilarity
    def _k_prototypes_clustering(self, X, numerical_columns, categorical_columns):
        """k-Prototypes clustering of the rows of X.

        First call iterates assignment/centroid updates in parallel pools
        until the centroids stop changing (empty clusters are dropped and
        ``self.k`` shrinks accordingly); later calls reuse the trained
        centroids to assign new rows.
        """
        _X = X.copy(deep=self.deep_copy)
        n, p = _X.shape # number of observations
        # make sure self.k is smaller than n
        self.k = min(self.k, n)
        if not self._fitted:
            # create categorical count table
            # unique values descending according to number of observations
            self.categorical_table = {}
            for _column in categorical_columns:
                self.categorical_table[_column] = (
                    _X[_column]
                    .value_counts(sort=True, ascending=False, dropna=True)
                    .to_dict()
                )
            # initialize clustering group assignments
            self.group_assign = np.zeros(n)
            # initialize the corresponding centroids
            # use dataframe to match the column names
            self.k_centroids = pd.DataFrame(index=range(self.k), columns=_X.columns)
            # random initialization
            for _column in list(_X.columns):
                self.k_centroids[_column] = (
                    _X.loc[~_X[_column].isnull(), _column]
                    .sample(n=self.k, replace=True, random_state=self.seed)
                    .values
                )
            while True:
                # calculate sum of Euclidean distance (numerical features) and
                # dissimilarity difference (categorical features) for every
                # observations among all centroids
                # parallelized calculation for group assignment
                pool = Pool(processes=self.threads)
                divide_list = np.array_split(list(_X.index), self.threads)
                self.group_assign = pool.map(
                    partial(
                        self._get_group_assign,
                        _X,
                        numerical_columns,
                        categorical_columns,
                    ),
                    divide_list,
                )
                # flatten 2d list to 1d
                self.group_assign = np.array(np.concatenate(self.group_assign).flat)
                pool.close()
                pool.join()
                # save k_centroids for comparison
                previous_k_centroids = self.k_centroids.copy()
                # recalculate the k_centroids
                # calculate the new k_means
                pool = Pool(processes=min(int(self.k), self.threads))
                self.k_centroids = pool.map(
                    partial(
                        self._get_k_centroids,
                        _X,
                        numerical_columns,
                        categorical_columns,
                    ),
                    list(range(self.k)),
                )
                # concat the k centroids to one dataframe
                self.k_centroids = pd.concat(self.k_centroids).sort_index()
                pool.close()
                pool.join()
                # if get empty cluster, sort index and renew k
                self.k_centroids.dropna(inplace=True)
                if len(self.k_centroids) < self.k:
                    self.k_centroids.reset_index(drop=True, inplace=True)
                    self.k = len(self.k_centroids)
                # stopping criteria
                # check whether same k (in case delete centroids in the process)
                if len(previous_k_centroids) == len(self.k_centroids):
                    if np.all(previous_k_centroids.values == self.k_centroids.values):
                        break
        # if fitted, use the trained k_centroids assigning groups
        else:
            # parallelized calculation for group assignment
            pool = Pool(processes=self.threads)
            divide_list = np.array_split(list(_X.index), self.threads)
            self.group_assign = pool.map(
                partial(
                    self._get_group_assign, _X, numerical_columns, categorical_columns
                ),
                divide_list,
            )
            # flatten 2d list to 1d
            self.group_assign = np.array(np.concatenate(self.group_assign).flat)
            pool.close()
            pool.join()
    # impute on cluster k
    def _kNN_impute(self, data, k):
        """1-NN impute the rows of cluster ``k``; the per-cluster imputer is
        fit once and cached in ``self.models`` for reuse."""
        # NOTE: local third-party import — sklearn is required only here
        from sklearn.impute import KNNImputer
        # use 1-NN imputer with clustered groups
        # on train dataset, fit the models
        if k not in self.models.keys():
            self.models[k] = KNNImputer(n_neighbors=1)
            self.models[k].fit(data.loc[np.where(self.group_assign == k)[0], :])
        # impute the missing values
        data.loc[np.where(self.group_assign == k)[0], :] = self.models[k].transform(
            data.loc[np.where(self.group_assign == k)[0], :]
        )
        return data.loc[np.where(self.group_assign == k)[0], :]
    def fill(self, X):
        """Impute every missing value in X via k-Prototypes + 1-NN.

        Raises TypeError if X cannot be converted to a dataframe; warns and
        returns an unchanged copy when no missing values are present.
        """
        # make sure input is a dataframe
        if not isinstance(X, pd.DataFrame):
            try:
                X = pd.DataFrame(X)
            except:
                raise TypeError("Expect a dataframe, get {}.".format(type(X)))
        _X = X.copy(deep=self.deep_copy)
        # initialize columns
        self.columns = list(_X.columns)
        # initialize number of working threads
        self.threads = (
            multiprocessing.cpu_count() if self.threads == -1 else int(self.threads)
        )
        if _X[self.columns].isnull().values.any():
            _X = self._fill(_X)
        else:
            warnings.warn("No missing values found, no change.")
        return _X
    def _fill(self, X):
        """Internal routine: split columns by dtype, encode and optionally
        scale, run k-Prototypes clustering, then 1-NN impute each cluster in
        parallel before undoing scaling and encoding."""
        _X = X.copy(deep=self.deep_copy)
        # all numerical columns
        numeric_columns = list(_X.select_dtypes(include=self.numerics).columns)
        # select numerical columns in self.columns
        numeric_columns = list(set(_X.columns) & set(numeric_columns))
        # select categorical columns in self.columns
        categorical_columns = list(set(_X.columns) - set(numeric_columns))
        # format columns
        # convert categorical to numerical,
        # but no numerical manipulation
        formatter = formatting(columns=list(_X.columns), inplace=True)
        formatter.fit(_X)
        # if scaling, scaling the numerical columns
        if self.scaling:
            scaling = MinMaxScale()
            _X = scaling.fit_transform(_X)
        # imputation procedure
        # assign observations to clustering groups using
        # k_Prototypes clustering
        self._k_prototypes_clustering(_X, numeric_columns, categorical_columns)
        # use the clustered groups to impute
        # parallelized the imputation process according to clusters
        pool = Pool(processes=min(int(self.k), self.threads))
        pool_data = pool.map(partial(self._kNN_impute, _X), list(range(self.k)))
        # concat pool_data and order according to index
        _X = pd.concat(pool_data).sort_index()
        pool.close()
        pool.join()
        # set fitted to true
        self._fitted = True
        # if scaling, scale back
        if self.scaling:
            _X = scaling.inverse_transform(_X)
        # make sure column types retains
        formatter.refit(_X)
        return _X
|
PanyiDong/AutoML | My_AutoML/_hyperparameters/_ray/_regressor_hyperparameter.py | <gh_stars>0
"""
File: _regressor_hyperparameter.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/_ray/_regressor_hyperparameter.py
File Created: Friday, 8th April 2022 9:04:05 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 3rd May 2022 7:21:33 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# NOTE:
# As sklearn enters version 1.0, some of the losses have changed its name,
# hyperparameters will change accordingly
import sklearn
# NOTE(review): lexicographic string comparison of dotted version strings —
# misorders e.g. "1.10.0" vs "1.2.0"; also the bound is inclusive, so 1.0.0
# itself selects the pre-1.0 hyperparameter names — confirm both are intended.
sklearn_1_0_0 = sklearn.__version__ <= "1.0.0"
from ray import tune
from My_AutoML._constant import (
LIGHTGBM_REGRESSION,
LIGHTGBM_BOOSTING,
LIGHTGBM_TREE_LEARNER,
)
# regressor hyperparameters
# Search space consumed by Ray Tune: one dict per candidate model. Each dict
# carries a unique "model_<k>" key naming the model plus "<Model>_<param>"
# entries giving the tune sampling domain for each tunable hyperparameter.
# "if sklearn_1_0_0" expressions pick the legacy loss/criterion names for
# old sklearn versions and the renamed ones otherwise (see NOTE above).
regressor_hyperparameter = [
    # extract from autosklearn
    {
        "model_1": "AdaboostRegressor",
        "AdaboostRegressor_n_estimators": tune.qrandint(50, 500, 1),
        "AdaboostRegressor_learning_rate": tune.loguniform(0.01, 2),
        "AdaboostRegressor_loss": tune.choice(["linear", "square", "exponential"]),
        # for base_estimator of Decision Tree
        "AdaboostRegressor_max_depth": tune.qrandint(1, 10, 1),
    },
    {
        "model_2": "ARDRegression",
        "ARDRegression_n_iter": tune.choice([300]),
        "ARDRegression_tol": tune.loguniform(1e-5, 1e-1),
        "ARDRegression_alpha_1": tune.loguniform(1e-10, 1e-3),
        "ARDRegression_alpha_2": tune.loguniform(1e-10, 1e-3),
        "ARDRegression_lambda_1": tune.loguniform(1e-10, 1e-3),
        "ARDRegression_lambda_2": tune.loguniform(1e-10, 1e-3),
        "ARDRegression_threshold_lambda": tune.loguniform(1e3, 1e5),
        "ARDRegression_fit_intercept": tune.choice([True]),
    },
    {
        "model_3": "DecisionTree",
        "DecisionTree_criterion": tune.choice(["mse", "friedman_mse", "mae"])
        if sklearn_1_0_0
        else tune.choice(
            ["squared_error", "friedman_mse", "absolute_error", "poisson"]
        ),
        "DecisionTree_max_features": tune.choice([1.0]),
        "DecisionTree_max_depth_factor": tune.uniform(0.0, 2.0),
        "DecisionTree_min_samples_split": tune.qrandint(2, 20, 1),
        "DecisionTree_min_samples_leaf": tune.qrandint(1, 20, 1),
        "DecisionTree_min_weight_fraction_leaf": tune.choice([0.0]),
        "DecisionTree_max_leaf_nodes": tune.choice([None]),
        "DecisionTree_min_impurity_decrease": tune.choice([0.0]),
    },
    {
        "model_4": "ExtraTreesRegressor",
        "ExtraTreesRegressor_criterion": tune.choice(["mse", "friedman_mse", "mae"])
        if sklearn_1_0_0
        else tune.choice(["squared_error", "absolute_error"]),
        "ExtraTreesRegressor_min_samples_leaf": tune.qrandint(1, 20, 1),
        "ExtraTreesRegressor_min_samples_split": tune.qrandint(2, 20, 1),
        "ExtraTreesRegressor_max_features": tune.uniform(0.0, 1.0),
        "ExtraTreesRegressor_bootstrap": tune.choice([True, False]),
        "ExtraTreesRegressor_max_leaf_nodes": tune.choice([None]),
        "ExtraTreesRegressor_max_depth": tune.choice([None]),
        "ExtraTreesRegressor_min_weight_fraction_leaf": tune.choice([0.0]),
        "ExtraTreesRegressor_min_impurity_decrease": tune.choice([0.0]),
    },
    {
        "model_5": "GaussianProcess",
        "GaussianProcess_alpha": tune.loguniform(1e-14, 1),
        "GaussianProcess_thetaL": tune.loguniform(1e-10, 1e-3),
        "GaussianProcess_thetaU": tune.loguniform(1, 1e5),
    },
    {
        "model_6": "HistGradientBoostingRegressor",
        # n_iter_no_change only selected for early_stop in ['valid', 'train']
        # validation_fraction only selected for early_stop = 'valid'
        "HistGradientBoostingRegressor_loss": tune.choice(["least_squares"])
        if sklearn_1_0_0
        else tune.choice(["squared_error"]),
        "HistGradientBoostingRegressor_learning_rate": tune.loguniform(0.01, 1),
        "HistGradientBoostingRegressor_min_samples_leaf": tune.qlograndint(1, 200, 1),
        "HistGradientBoostingRegressor_max_depth": tune.choice([None]),
        "HistGradientBoostingRegressor_max_leaf_nodes": tune.qlograndint(3, 2047, 1),
        "HistGradientBoostingRegressor_max_bins": tune.choice([255]),
        "HistGradientBoostingRegressor_l2_regularization": tune.loguniform(1e-10, 1),
        "HistGradientBoostingRegressor_early_stop": tune.choice(
            ["off", "train", "valid"]
        ),
        "HistGradientBoostingRegressor_tol": tune.choice([1e-7]),
        "HistGradientBoostingRegressor_scoring": tune.choice(["loss"]),
        "HistGradientBoostingRegressor_n_iter_no_change": tune.qrandint(1, 20, 1),
        "HistGradientBoostingRegressor_validation_fraction": tune.uniform(0.01, 0.4),
    },
    {
        "model_7": "KNearestNeighborsRegressor",
        "KNearestNeighborsRegressor_n_neighbors": tune.qrandint(1, 100, 1),
        "KNearestNeighborsRegressor_weights": tune.choice(["uniform", "distance"]),
        "KNearestNeighborsRegressor_p": tune.choice([1, 2]),
    },
    {
        "model_8": "LibLinear_SVR",
        # forbid loss = 'epsilon_insensitive' and dual = False
        "LibLinear_SVR_epsilon": tune.loguniform(0.001, 1),
        "LibLinear_SVR_loss": tune.choice(
            ["squared_epsilon_insensitive"],
        ),
        "LibLinear_SVR_dual": tune.choice([False]),
        "LibLinear_SVR_tol": tune.loguniform(1e-5, 1e-1),
        "LibLinear_SVR_C": tune.loguniform(0.03125, 32768),
        "LibLinear_SVR_fit_intercept": tune.choice([True]),
        "LibLinear_SVR_intercept_scaling": tune.choice([1]),
    },
    {
        "model_9": "LibSVM_SVR",
        # degree only selected for kernel in ['poly', 'rbf', 'sigmoid']
        # gamma only selected for kernel in ['poly', 'rbf']
        # coef0 only selected for kernel in ['poly', 'sigmoid']
        "LibSVM_SVR_kernel": tune.choice(["linear", "poly", "rbf", "sigmoid"]),
        "LibSVM_SVR_C": tune.loguniform(0.03125, 32768),
        "LibSVM_SVR_epsilon": tune.uniform(1e-5, 1),
        "LibSVM_SVR_tol": tune.loguniform(1e-5, 1e-1),
        "LibSVM_SVR_shrinking": tune.choice([True, False]),
        "LibSVM_SVR_degree": tune.qrandint(2, 5, 1),
        "LibSVM_SVR_gamma": tune.loguniform(3.0517578125e-5, 8),
        "LibSVM_SVR_coef0": tune.uniform(-1, 1),
        "LibSVM_SVR_max_iter": tune.choice([-1]),
    },
    {
        "model_10": "MLPRegressor",
        # validation_fraction only selected for early_stopping = 'valid'
        "MLPRegressor_hidden_layer_depth": tune.qrandint(1, 3, 1),
        "MLPRegressor_num_nodes_per_layer": tune.qlograndint(16, 264, 1),
        "MLPRegressor_activation": tune.choice(["tanh", "relu"]),
        "MLPRegressor_alpha": tune.loguniform(1e-7, 1e-1),
        "MLPRegressor_learning_rate_init": tune.loguniform(1e-4, 0.5),
        "MLPRegressor_early_stopping": tune.choice(["valid", "train"]),
        "MLPRegressor_solver": tune.choice(["adam"]),
        "MLPRegressor_batch_size": tune.choice(["auto"]),
        "MLPRegressor_n_iter_no_change": tune.choice([32]),
        "MLPRegressor_tol": tune.choice([1e-4]),
        "MLPRegressor_shuffle": tune.choice([True]),
        "MLPRegressor_beta_1": tune.choice([0.9]),
        "MLPRegressor_beta_2": tune.choice([0.999]),
        "MLPRegressor_epsilon": tune.choice([1e-8]),
        "MLPRegressor_validation_fraction": tune.choice([0.1]),
    },
    {
        "model_11": "RandomForest",
        "RandomForest_criterion": tune.choice(["mse", "friedman_mse", "mae"])
        if sklearn_1_0_0
        else tune.choice(["squared_error", "absolute_error", "poisson"]),
        "RandomForest_max_features": tune.uniform(0.1, 1.0),
        "RandomForest_max_depth": tune.choice([None]),
        "RandomForest_min_samples_split": tune.qrandint(2, 20, 1),
        "RandomForest_min_samples_leaf": tune.qrandint(1, 20, 1),
        "RandomForest_min_weight_fraction_leaf": tune.choice([0.0]),
        "RandomForest_bootstrap": tune.choice([True, False]),
        "RandomForest_max_leaf_nodes": tune.choice([None]),
        "RandomForest_min_impurity_decrease": tune.choice([0.0]),
    },
    {
        "model_12": "SGD",
        # l1_ratio only selected for penalty = 'elasticnet'
        # epsilon only selected for loss in ['huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']
        # eta0 only selected for learning_rate in ['constant', 'invscaling']
        # power_t only selected for learning_rate = 'invscaling'
        "SGD_loss": tune.choice(
            [
                "squared_loss",
                "huber",
                "epsilon_insensitive",
                "squared_epsilon_insensitive",
            ],
        )
        if sklearn_1_0_0
        else tune.choice(
            [
                "squared_error",
                "huber",
                "epsilon_insensitive",
                "squared_epsilon_insensitive",
            ]
        ),
        "SGD_penalty": tune.choice(["l1", "l2", "elasticnet"]),
        "SGD_alpha": tune.loguniform(1e-7, 1e-1),
        "SGD_fit_intercept": tune.choice([True]),
        "SGD_tol": tune.loguniform(1e-5, 1e-1),
        "SGD_learning_rate": tune.choice(["constant", "optimal", "invscaling"]),
        "SGD_l1_ratio": tune.loguniform(1e-9, 1.0),
        "SGD_epsilon": tune.loguniform(1e-5, 1e-1),
        "SGD_power_t": tune.uniform(1e-5, 1),
        "SGD_average": tune.choice([True, False]),
    },
    {
        "model_13": "LinearRegression",
    },
    {
        "model_14": "Lasso",
        "Lasso_alpha": tune.loguniform(1e-7, 1e3),
        "Lasso_tol": tune.loguniform(1e-5, 1e-1),
    },
    {
        "model_15": "RidgeRegression",
        "RidgeRegression_alpha": tune.loguniform(1e-7, 1e3),
        "RidgeRegression_tol": tune.loguniform(1e-5, 1e-1),
        "RidgeRegression_solver": tune.choice(
            ["auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga"]
        ),
    },
    {
        "model_16": "ElasticNet",
        "ElasticNet_alpha": tune.loguniform(1e-7, 1e3),
        "ElasticNet_l1_ratio": tune.uniform(0, 1.0),
        "ElasticNet_tol": tune.loguniform(1e-5, 1e-1),
        "ElasticNet_selection": tune.choice(["cyclic", "random"]),
    },
    {
        "model_17": "BayesianRidge",
        "BayesianRidge_tol": tune.loguniform(1e-5, 1e-1),
        "BayesianRidge_alpha_1": tune.loguniform(1e-7, 1e-1),
        "BayesianRidge_alpha_2": tune.loguniform(1e-7, 1e-1),
        "BayesianRidge_lambda_1": tune.loguniform(1e-7, 1e-1),
        "BayesianRidge_lambda_2": tune.loguniform(1e-7, 1e-1),
    },
    # {
    #     "model_18": "HistGradientBoostingRegressor",
    #     # "HistGradientBoostingRegressor_loss": tune.choice(
    #     #     ["squared_error", "absolute_error", "poisson"]
    #     # ),
    #     "HistGradientBoostingRegressor_loss": tune.choice(
    #         ["least_squares", "least_absolute_deviation", "poisson"]
    #     ),
    #     "HistGradientBoostingRegressor_learning_rate": tune.loguniform(1e-7, 1e-1),
    #     "HistGradientBoostingRegressor_max_leaf_nodes": tune.choice([None]),
    #     "HistGradientBoostingRegressor_max_depth": tune.choice([None]),
    #     "HistGradientBoostingRegressor_min_samples_leaf": tune.qrandint(1, 20, 1),
    #     "HistGradientBoostingRegressor_l2_regularization": tune.uniform(0, 1),
    #     "HistGradientBoostingRegressor_tol": tune.loguniform(1e-5, 1e-1),
    # },
    {
        "model_18": "GradientBoostingRegressor",
        "GradientBoostingRegressor_loss": tune.choice(
            ["ls", "lad", "huber", "quantile"]
        )
        if sklearn_1_0_0
        else tune.choice(["squared_error", "absolute_error", "huber", "quantile"]),
        "GradientBoostingRegressor_learning_rate": tune.loguniform(0.01, 1),
        "GradientBoostingRegressor_n_estimators": tune.qlograndint(10, 500, 1),
        "GradientBoostingRegressor_subsample": tune.uniform(0.1, 1),
        "GradientBoostingRegressor_criterion": tune.choice(["mse", "mae"])
        if sklearn_1_0_0
        else tune.choice(["friedman_mse", "squared_error"]),
        "GradientBoostingRegressor_min_samples_split": tune.qrandint(2, 20, 1),
        "GradientBoostingRegressor_min_samples_leaf": tune.qlograndint(1, 200, 1),
        "GradientBoostingRegressor_min_weight_fraction_leaf": tune.uniform(0.0, 0.5),
        "GradientBoostingRegressor_max_depth": tune.randint(1, 31),
        "GradientBoostingRegressor_min_impurity_decrease": tune.uniform(0.0, 1.0),
        # NOTE(review): nesting a tune.uniform domain inside tune.choice may
        # not be resampled by all Ray Tune search algorithms; confirm the
        # float option is actually drawn rather than passed through as a
        # Domain object to max_features.
        "GradientBoostingRegressor_max_features": tune.choice(
            ["sqrt", "log2", "auto", tune.uniform(0.0, 1.0)]
        ),
        "GradientBoostingRegressor_max_leaf_nodes": tune.qlograndint(3, 2047, 1),
        "GradientBoostingRegressor_validation_fraction": tune.uniform(0.01, 0.4),
        "GradientBoostingRegressor_n_iter_no_change": tune.qrandint(1, 20, 1),
        "GradientBoostingRegressor_tol": tune.choice([1e-7]),
    },
    # self-defined models
    {
        "model_19": "MLP_Regressor",
        "MLP_Regressor_hidden_layer": tune.qrandint(1, 5, 1),
        "MLP_Regressor_hidden_size": tune.qrandint(1, 10, 1),
        "MLP_Regressor_activation": tune.choice(["ReLU"]),
        "MLP_Regressor_learning_rate": tune.uniform(1e-5, 1),
        "MLP_Regressor_optimizer": tune.choice(["Adam", "SGD"]),
        "MLP_Regressor_criteria": tune.choice(["MSE", "MAE"]),
        "MLP_Regressor_batch_size": tune.choice([16, 32, 64]),
        "MLP_Regressor_num_epochs": tune.qrandint(5, 30, 1),
    },
    {
        "model_20": "RNN_Regressor",
        "RNN_Regressor_hidden_size": tune.choice([16, 32, 64, 128, 256]),
        "RNN_Regressor_n_layers": tune.qrandint(1, 5, 1),
        "RNN_Regressor_RNN_unit": tune.choice(["RNN", "LSTM", "GRU"]),
        "RNN_Regressor_activation": tune.choice(["ReLU"]),
        "RNN_Regressor_dropout": tune.loguniform(1e-7, 0.8),
        "RNN_Regressor_learning_rate": tune.loguniform(1e-7, 1),
        "RNN_Regressor_optimizer": tune.choice(["Adam", "SGD"]),
        "RNN_Regressor_criteria": tune.choice(["MSE", "MAE"]),
        "RNN_Regressor_batch_size": tune.choice([16, 32, 64]),
        "RNN_Regressor_num_epochs": tune.qrandint(5, 30, 1),
    },
    {
        "model_21": "LightGBM_Regressor",
        "LightGBM_Regressor_objective": tune.choice(LIGHTGBM_REGRESSION),
        "LightGBM_Regressor_boosting": tune.choice(LIGHTGBM_BOOSTING),
        "LightGBM_Regressor_n_estimators": tune.qlograndint(50, 500, 1),
        # max_depth == -1 for no limit
        "LightGBM_Regressor_max_depth": tune.randint(-1, 31),
        "LightGBM_Regressor_num_leaves": tune.qlograndint(3, 2047, 1),
        "LightGBM_Regressor_min_data_in_leaf": tune.qrandint(1, 20, 1),
        "LightGBM_Regressor_learning_rate": tune.loguniform(1e-7, 1),
        "LightGBM_Regressor_tree_learner": tune.choice(LIGHTGBM_TREE_LEARNER),
        "LightGBM_Regressor_num_iterations": tune.qlograndint(50, 500, 1),
    },
    {
        "model_22": "XGBoost_Regressor",
        "XGBoost_Regressor_eta": tune.uniform(0, 1),
        "XGBoost_Regressor_gamma": tune.loguniform(1e-4, 1e3),
        "XGBoost_Regressor_max_depth": tune.randint(1, 12),
        "XGBoost_Regressor_min_child_weight": tune.loguniform(1e-4, 1e3),
        "XGBoost_Regressor_max_delta_step": tune.loguniform(1e-3, 1e1),
        "XGBoost_Regressor_reg_lambda": tune.uniform(0, 1),
        "XGBoost_Regressor_reg_alpha": tune.uniform(0, 1),
    },
    {
        "model_23": "GAM_Regressor",
        "GAM_Regressor_type": tune.choice(
            ["linear", "gamma", "poisson", "inverse_gaussian"]
        ),
        "GAM_Regressor_tol": tune.loguniform(1e-4, 1),
    },
]
|
PanyiDong/AutoML | My_AutoML/_model/_sklearn.py | <filename>My_AutoML/_model/_sklearn.py<gh_stars>0
"""
File: _sklearn.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_model/_sklearn.py
File Created: Monday, 18th April 2022 12:14:53 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 8:23:59 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import scipy
import sklearn
import sklearn.linear_model
import sklearn.neighbors
import sklearn.tree
import sklearn.svm
import sklearn.naive_bayes
import sklearn.discriminant_analysis
import sklearn.ensemble
import sklearn.gaussian_process
import sklearn.neural_network
import sklearn.calibration
# need to enable hist gradient boosting features first
# no need for sklearn version >= 1.0.0
sklearn_1_0_0 = sklearn.__version__ < "1.0.0"
if sklearn_1_0_0:
from sklearn.experimental import enable_hist_gradient_boosting
from My_AutoML._constant import MAX_ITER
from My_AutoML._utils._base import is_none
from My_AutoML._utils._data import softmax
####################################################################################################################
# models from sklearn
# wrap for some-degree of flexibility (initialization, _fitted, etc.)
####################################################################################################################
# classifiers
class AdaboostClassifier(sklearn.ensemble.AdaBoostClassifier):

    """AdaBoost classifier wrapper around sklearn.

    Boosts a shallow ``DecisionTreeClassifier`` of depth ``max_depth`` and
    records fit state in ``_fitted``.
    """

    def __init__(
        self,
        n_estimators=50,
        learning_rate=0.1,
        algorithm="SAMME.R",
        max_depth=1,
    ):
        # normalize hyperparameters to the types sklearn expects
        self.n_estimators = int(n_estimators)
        self.learning_rate = float(learning_rate)
        self.algorithm = algorithm
        self.max_depth = int(max_depth)

        from sklearn.tree import DecisionTreeClassifier

        stump = DecisionTreeClassifier(max_depth=self.max_depth)
        super().__init__(
            base_estimator=stump,
            n_estimators=self.n_estimators,
            learning_rate=self.learning_rate,
            algorithm=self.algorithm,
        )

        self._fitted = False  # flipped to True by fit()

    def fit(self, X, y):
        """Fit the boosted ensemble and mark the wrapper as fitted."""
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return super().predict_proba(X)
class BernoulliNB(sklearn.naive_bayes.BernoulliNB):

    """Bernoulli naive Bayes wrapper with explicit type coercion.

    Thin shim over ``sklearn.naive_bayes.BernoulliNB`` that normalizes the
    smoothing parameter to ``float`` and records fit state in ``_fitted``.
    """

    def __init__(self, alpha=1, fit_prior=True):
        # coerce smoothing strength; fit_prior is passed through unchanged
        self.alpha = float(alpha)
        self.fit_prior = fit_prior
        super().__init__(alpha=self.alpha, fit_prior=self.fit_prior)
        self._fitted = False  # flipped to True by fit()

    def fit(self, X, y):
        """Fit the model and mark the wrapper as fitted."""
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return super().predict_proba(X)
class DecisionTreeClassifier(sklearn.tree.DecisionTreeClassifier):

    """Decision tree classifier wrapper.

    ``max_depth`` is derived at fit time as
    ``max(int(max_depth_factor * n_features), 1)``, so the sklearn
    superclass is initialized inside ``fit`` once the data width is known.
    String "None" placeholders (from the search space) are mapped to
    ``None``.
    """

    def __init__(
        self,
        criterion="gini",
        max_depth_factor=0.5,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features=1.0,
        max_leaf_nodes="None",
        min_impurity_decrease=0.0,
    ):
        self.criterion = criterion
        self.max_depth_factor = max_depth_factor
        self.min_samples_split = int(min_samples_split)
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.max_features = float(max_features)
        # FIX: coerce to int (consistent with RandomForestClassifier);
        # sklearn rejects a non-integral max_leaf_nodes
        self.max_leaf_nodes = None if is_none(max_leaf_nodes) else int(max_leaf_nodes)
        self.min_impurity_decrease = float(min_impurity_decrease)

        self._fitted = False  # whether the model is fitted

    def fit(self, X, y):
        """Initialize the underlying tree from X's width, then fit."""
        super().__init__(
            criterion=self.criterion,
            # depth scales with the number of features, but is at least 1
            max_depth=None
            if is_none(self.max_depth_factor)
            else max(int(self.max_depth_factor * X.shape[1]), 1),
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            max_features=self.max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            min_impurity_decrease=self.min_impurity_decrease,
        )
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return super().predict_proba(X)
class ExtraTreesClassifier:

    """Extra-trees classifier trained with a growing estimator budget.

    The forest is (re)fitted in rounds, roughly doubling the number of
    trees each round until the global ``MAX_ITER`` cap is reached. String
    "None" placeholders (from the search space) are mapped to ``None``.
    """

    def __init__(
        self,
        criterion="gini",
        max_depth="None",
        max_leaf_nodes="None",
        min_samples_leaf=1,
        min_samples_split=2,
        max_features=0.5,
        bootstrap=False,
        min_weight_fraction_leaf=0.0,
        min_impurity_decrease=0.0,
    ):
        self.criterion = criterion
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.min_samples_split = int(min_samples_split)
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.max_features = float(max_features)
        self.bootstrap = bootstrap
        # FIX: coerce to int (consistent with RandomForestClassifier);
        # sklearn rejects a non-integral max_leaf_nodes
        self.max_leaf_nodes = None if is_none(max_leaf_nodes) else int(max_leaf_nodes)
        self.min_impurity_decrease = float(min_impurity_decrease)

        self.estimator = None  # the fitted estimator
        self.max_iter = self._get_max_iter()  # limit the number of iterations
        self._fitted = False  # whether the model is fitted

    @staticmethod
    def _get_max_iter():  # define global max_iter
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2):
        """Run one training round, growing n_estimators by n_iter."""
        if self.estimator is None:
            from sklearn.ensemble import ExtraTreesClassifier

            self.estimator = ExtraTreesClassifier(
                n_estimators=n_iter,
                criterion=self.criterion,
                max_depth=self.max_depth,
                max_leaf_nodes=self.max_leaf_nodes,
                min_samples_leaf=self.min_samples_leaf,
                min_samples_split=self.min_samples_split,
                # n_features ** max_features, following auto-sklearn
                max_features=max(1, int(X.shape[1] ** self.max_features)),
                bootstrap=self.bootstrap,
                min_weight_fraction_leaf=self.min_weight_fraction_leaf,
                min_impurity_decrease=self.min_impurity_decrease,
            )
        else:
            self.estimator.n_estimators += n_iter
            self.estimator.n_estimators = min(
                self.estimator.n_estimators, self.max_iter
            )

        self.estimator.fit(X, y)

        # BUGFIX: was ">= len(...)", which is always true right after fit
        # (len(estimators_) == n_estimators), so training stopped after the
        # first round with only 2 trees; use ">" like RandomForestClassifier
        if (
            self.estimator.n_estimators >= self.max_iter
            or self.estimator.n_estimators > len(self.estimator.estimators_)
        ):
            self._fitted = True

        return self

    def fit(self, X, y):
        """Fit with exponentially growing per-round tree budgets."""
        self._fit_iteration(X, y, n_iter=2)

        # accelerate iteration process
        iteration = 2
        while not self._fitted:
            n_iter = int(2**iteration / 2)
            self._fit_iteration(X, y, n_iter=n_iter)
            iteration += 1

        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return self.estimator.predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return self.estimator.predict_proba(X)
class GaussianNB(sklearn.naive_bayes.GaussianNB):

    """Gaussian naive Bayes wrapper that tracks whether fit() has run."""

    def __init__(self):
        super().__init__()
        self._fitted = False  # flipped to True by fit()

    def fit(self, X, y):
        """Fit the model and mark the wrapper as fitted."""
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return super().predict_proba(X)
class HistGradientBoostingClassifier:

    """Histogram gradient boosting classifier with a growing max_iter budget.

    ``early_stop`` uses the auto-sklearn convention and is mapped onto
    sklearn's parameters at first fit:
      * "off"   - no early stopping
      * "train" - early stopping scored on the training data
      * "valid" - early stopping on a held-out validation fraction
    """

    def __init__(
        self,
        loss="auto",
        learning_rate=0.1,
        min_samples_leaf=20,
        max_depth="None",
        max_leaf_nodes=31,
        max_bins=255,
        l2_regularization=1e-10,
        early_stop="off",
        tol=1e-7,
        scoring="loss",
        n_iter_no_change=10,
        validation_fraction=0.1,
    ):
        self.loss = loss
        self.learning_rate = float(learning_rate)
        self.min_samples_leaf = int(min_samples_leaf)
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.max_leaf_nodes = int(max_leaf_nodes)
        self.max_bins = int(max_bins)
        self.l2_regularization = float(l2_regularization)
        self.early_stop = early_stop
        self.tol = float(tol)
        self.scoring = scoring
        self.n_iter_no_change = int(n_iter_no_change)
        self.validation_fraction = float(validation_fraction)

        self.estimator = None  # the fitted estimator
        self.max_iter = self._get_max_iter()  # limit the number of iterations
        self._fitted = False  # whether the model is fitted

    @staticmethod
    def _get_max_iter():  # define global max_iter
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2, sample_weight=None):
        """Run one training round, growing the boosting budget by n_iter."""
        if self.estimator is None:
            from sklearn.ensemble import HistGradientBoostingClassifier

            # map from autosklearn parameter space to sklearn parameter space
            if self.early_stop == "off":
                self.n_iter_no_change = 0
                self.validation_fraction_ = None
                self.early_stopping_ = False
            elif self.early_stop == "train":
                self.n_iter_no_change = int(self.n_iter_no_change)
                # None -> score early stopping on the training data
                self.validation_fraction_ = None
                self.early_stopping_ = True
            elif self.early_stop == "valid":
                self.n_iter_no_change = int(self.n_iter_no_change)
                self.validation_fraction = float(self.validation_fraction)
                self.early_stopping_ = True
                # the validation split must contain at least one sample per
                # class; fall back to an absolute sample count if not
                n_classes = len(np.unique(y))
                if self.validation_fraction * X.shape[0] < n_classes:
                    self.validation_fraction_ = n_classes
                else:
                    self.validation_fraction_ = self.validation_fraction
            else:
                raise ValueError("early_stop should be either off, train or valid")

            self.estimator = HistGradientBoostingClassifier(
                loss=self.loss,
                learning_rate=self.learning_rate,
                min_samples_leaf=self.min_samples_leaf,
                max_depth=self.max_depth,
                max_leaf_nodes=self.max_leaf_nodes,
                max_bins=self.max_bins,
                l2_regularization=self.l2_regularization,
                early_stopping=self.early_stopping_,
                tol=self.tol,
                scoring=self.scoring,
                n_iter_no_change=self.n_iter_no_change,
                # BUGFIX: pass the mapped value; previously the raw
                # validation_fraction was passed, so the "off"/"train"/"valid"
                # mapping above never took effect ("train" wrongly held out a
                # validation split)
                validation_fraction=self.validation_fraction_,
                max_iter=n_iter,
            )
        else:
            self.estimator.max_iter += n_iter  # add n_iter to each step
            self.estimator.max_iter = min(
                self.estimator.max_iter, self.max_iter
            )  # limit the number of iterations

        self.estimator.fit(X, y, sample_weight=sample_weight)

        # check whether fully fitted or need to add more iterations
        if (
            self.estimator.max_iter >= self.max_iter
            or self.estimator.max_iter > self.estimator.n_iter_
        ):
            self._fitted = True

        return self

    def fit(self, X, y):
        """Fit with exponentially growing per-round iteration budgets."""
        self._fit_iteration(X, y, n_iter=2)

        # accelerate iteration process
        iteration = 2
        while not self._fitted:
            n_iter = int(2**iteration / 2)
            self._fit_iteration(X, y, n_iter=n_iter)
            iteration += 1

        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return self.estimator.predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return self.estimator.predict_proba(X)
class KNearestNeighborsClassifier(sklearn.neighbors.KNeighborsClassifier):

    """k-nearest-neighbors classifier wrapper with explicit type coercion."""

    def __init__(self, n_neighbors=5, weights="uniform", p=2):
        # normalize hyperparameters to the types sklearn expects
        self.n_neighbors = int(n_neighbors)
        self.weights = weights
        self.p = int(p)

        super().__init__(
            n_neighbors=self.n_neighbors,
            weights=self.weights,
            p=self.p,
        )
        self._fitted = False  # flipped to True by fit()

    def fit(self, X, y):
        """Fit the model and mark the wrapper as fitted."""
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return super().predict_proba(X)
class LDA(sklearn.discriminant_analysis.LinearDiscriminantAnalysis):

    """Linear discriminant analysis wrapper.

    ``shrinkage`` selects the regularization mode:
      * None / "None" - no shrinkage, "svd" solver
      * "auto"        - Ledoit-Wolf shrinkage, "lsqr" solver
      * "manual"      - use ``shrinkage_factor``, "lsqr" solver
    """

    def __init__(
        self,
        shrinkage="auto",
        shrinkage_factor=0.5,
        tol=1e-4,
    ):
        self.shrinkage = shrinkage
        # BUGFIX: keep the user's mode selection separately; the sklearn
        # superclass __init__ (called in fit) overwrites self.shrinkage with
        # the resolved value, which made a second fit() raise ValueError
        self._shrinkage_choice = shrinkage
        self.shrinkage_factor = float(shrinkage_factor)
        self.tol = float(tol)

        self._fitted = False  # whether the model is fitted

    def fit(self, X, y):
        """Resolve the shrinkage mode, initialize the superclass, and fit."""
        if self._shrinkage_choice is None or self._shrinkage_choice == "None":
            shrinkage = None
            solver = "svd"
        elif self._shrinkage_choice == "auto":
            shrinkage = "auto"
            solver = "lsqr"
        elif self._shrinkage_choice == "manual":
            shrinkage = float(self.shrinkage_factor)
            solver = "lsqr"
        else:
            raise ValueError(
                "Not a valid shrinkage parameter, should be None, auto or manual"
            )

        super().__init__(
            shrinkage=shrinkage,
            solver=solver,
            tol=self.tol,
        )
        super().fit(X, y)

        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return super().predict_proba(X)
class LibLinear_SVC:

    """Linear SVC (liblinear) wrapped in CalibratedClassifierCV.

    ``LinearSVC`` has no ``predict_proba``, so the model is wrapped in
    ``CalibratedClassifierCV`` to expose probability estimates.
    """

    def __init__(
        self,
        penalty="l2",
        loss="squared_hinge",
        dual=False,
        tol=1e-4,
        C=1.0,
        multi_class="ovr",
        fit_intercept=True,
        intercept_scaling=1,
    ):
        # normalize hyperparameters to the types sklearn expects
        self.penalty = penalty
        self.loss = loss
        self.dual = dual
        self.tol = float(tol)
        self.C = float(C)
        self.multi_class = multi_class
        self.fit_intercept = fit_intercept
        self.intercept_scaling = float(intercept_scaling)

        from sklearn.calibration import CalibratedClassifierCV
        from sklearn.svm import LinearSVC

        svc = LinearSVC(
            penalty=self.penalty,
            loss=self.loss,
            dual=self.dual,
            tol=self.tol,
            C=self.C,
            multi_class=self.multi_class,
            fit_intercept=self.fit_intercept,
            intercept_scaling=self.intercept_scaling,
        )
        # calibration wrapper makes predict_proba available
        self.estimator = CalibratedClassifierCV(svc)

        self._fitted = False  # flipped to True by fit()

    def fit(self, X, y):
        """Fit the calibrated linear SVC."""
        self.estimator.fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return self.estimator.predict(X)

    def predict_proba(self, X):
        """Return calibrated class probabilities for X."""
        return self.estimator.predict_proba(X)
class LibSVM_SVC:

    """Kernel SVC (libsvm) wrapped in CalibratedClassifierCV.

    ``SVC`` probability estimates are obtained via calibration rather than
    ``probability=True``. "None" placeholders for ``degree``/``gamma``/
    ``coef0`` fall back to libsvm defaults.
    """

    def __init__(
        self,
        C=1.0,
        kernel="rbf",
        degree=3,
        gamma=0.1,
        coef0=0,
        tol=1e-3,
        shrinking=True,
        max_iter=-1,
    ):
        self.C = float(C)
        self.kernel = kernel
        self.degree = 3 if is_none(degree) else int(degree)
        self.gamma = 0.0 if is_none(gamma) else float(gamma)
        self.coef0 = 0.0 if is_none(coef0) else float(coef0)
        self.shrinking = shrinking
        self.tol = float(tol)
        # BUGFIX: coerce to int, not float; sklearn's SVC expects an
        # integral max_iter (-1 for no limit), and strict parameter
        # validation rejects -1.0
        self.max_iter = int(max_iter)

        from sklearn.svm import SVC
        from sklearn.calibration import CalibratedClassifierCV

        base_estimator = SVC(
            C=self.C,
            kernel=self.kernel,
            degree=self.degree,
            gamma=self.gamma,
            coef0=self.coef0,
            shrinking=self.shrinking,
            tol=self.tol,
            max_iter=self.max_iter,
        )
        # wrap the base estimator to make predict_proba available
        self.estimator = CalibratedClassifierCV(base_estimator)

        self._fitted = False  # whether the model is fitted

    def fit(self, X, y):
        """Fit the calibrated kernel SVC."""
        self.estimator.fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return self.estimator.predict(X)

    def predict_proba(self, X):
        """Return calibrated class probabilities for X."""
        return self.estimator.predict_proba(X)
class MLPClassifier:

    """Multi-layer perceptron classifier with a growing max_iter budget.

    ``early_stopping`` uses the auto-sklearn convention and is mapped onto
    sklearn's parameters at first fit:
      * "train" - stop on training loss (sklearn early_stopping=False)
      * "valid" - stop on a held-out validation fraction
    """

    def __init__(
        self,
        hidden_layer_depth=1,
        num_nodes_per_layer=32,
        activation="relu",
        alpha=1e-4,
        learning_rate_init=1e-3,
        early_stopping="valid",
        n_iter_no_change=32,
        validation_fraction=0.1,
        tol=1e-4,
        solver="adam",
        batch_size="auto",
        shuffle=True,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-8,
    ):
        self.hidden_layer_depth = int(hidden_layer_depth)
        self.num_nodes_per_layer = int(num_nodes_per_layer)
        # identical width for every hidden layer
        self.hidden_layer_sizes = tuple(
            self.num_nodes_per_layer for _ in range(self.hidden_layer_depth)
        )
        self.activation = str(activation)
        self.alpha = float(alpha)
        self.learning_rate_init = float(learning_rate_init)
        self.early_stopping = str(early_stopping)
        self.n_iter_no_change = int(n_iter_no_change)
        self.validation_fraction = float(validation_fraction)
        self.tol = float(tol)
        self.solver = solver
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.beta_1 = float(beta_1)
        self.beta_2 = float(beta_2)
        self.epsilon = float(epsilon)

        self.estimator = None  # the fitted estimator
        self.max_iter = self._get_max_iter()  # limit the number of iterations
        self._fitted = False  # whether the model is fitted

    @staticmethod
    def _get_max_iter():  # define global max_iter
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2):
        """Run one training round, growing the iteration budget by n_iter."""
        if self.estimator is None:
            from sklearn.neural_network import MLPClassifier

            # map from autosklearn parameter space to sklearn parameter space
            if self.early_stopping == "train":
                self.validation_fraction = 0.0
                self.tol = float(self.tol)
                self.n_iter_no_change = int(self.n_iter_no_change)
                self.early_stopping = False
            elif self.early_stopping == "valid":
                self.validation_fraction = float(self.validation_fraction)
                self.tol = float(self.tol)
                self.n_iter_no_change = int(self.n_iter_no_change)
                self.early_stopping = True
            else:
                raise ValueError(
                    "Early stopping only supports 'train' and 'valid'. Got {}".format(
                        self.early_stopping
                    )
                )

            self.estimator = MLPClassifier(
                hidden_layer_sizes=self.hidden_layer_sizes,
                activation=self.activation,
                alpha=self.alpha,
                learning_rate_init=self.learning_rate_init,
                early_stopping=self.early_stopping,
                n_iter_no_change=self.n_iter_no_change,
                validation_fraction=self.validation_fraction,
                tol=self.tol,
                solver=self.solver,
                batch_size=self.batch_size,
                shuffle=self.shuffle,
                beta_1=self.beta_1,
                beta_2=self.beta_2,
                epsilon=self.epsilon,
                max_iter=n_iter,
            )
        else:
            # BUGFIX: grow the budget by n_iter (capped at the global limit);
            # the previous "max_iter - n_iter_" ignored n_iter and could
            # reach zero or a negative value, which sklearn rejects
            self.estimator.max_iter = min(
                self.estimator.max_iter + n_iter, self.max_iter
            )

        self.estimator.fit(X, y)

        # fully fitted once the budget is exhausted or sklearn reports no
        # further improvement (note: _no_improvement_count is private API)
        if (
            self.estimator.n_iter_ >= self.estimator.max_iter
            or self.estimator._no_improvement_count > self.n_iter_no_change
        ):
            self._fitted = True

        return self

    def fit(self, X, y):
        """Fit with exponentially growing per-round iteration budgets."""
        self._fit_iteration(X, y, n_iter=2)

        # accelerate iteration process
        iteration = 2
        while not self._fitted:
            n_iter = int(2**iteration / 2)
            self._fit_iteration(X, y, n_iter=n_iter)
            iteration += 1

        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return self.estimator.predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return self.estimator.predict_proba(X)
class MultinomialNB(sklearn.naive_bayes.MultinomialNB):

    """Multinomial naive Bayes wrapper.

    Multinomial NB requires non-negative feature values; any negatives are
    clamped to zero before fitting.
    """

    def __init__(self, alpha=1, fit_prior=True):
        self.alpha = float(alpha)
        self.fit_prior = fit_prior
        super().__init__(
            alpha=self.alpha,
            fit_prior=self.fit_prior,
        )
        self._fitted = False  # whether the model is fitted

    def fit(self, X, y):
        """Clamp negative features to zero (on a copy) and fit."""
        # BUGFIX: previously the clamping mutated the caller's X in place;
        # copy first so training never alters the caller's data
        if scipy.sparse.issparse(X):
            if (X.data < 0).any():
                X = X.copy()
                X.data[X.data < 0] = 0.0
        else:
            if np.any(np.asarray(X) < 0):
                X = X.copy()
                X[X < 0] = 0.0
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return super().predict_proba(X)
class PassiveAggressive:

    """Iteratively budgeted PassiveAggressiveClassifier wrapper.

    Training runs in rounds with an exponentially growing iteration budget
    so fitting stops once the estimator converges or the global
    ``MAX_ITER`` cap is reached.
    """

    def __init__(
        self,
        C=1.0,
        fit_intercept=True,
        average=False,
        tol=1e-4,
        loss="hinge",
    ):
        self.C = float(C)
        self.fit_intercept = fit_intercept
        self.average = average
        self.tol = float(tol)
        self.loss = loss

        self.estimator = None  # underlying sklearn estimator, built lazily
        self.max_iter = self._get_max_iter()  # global iteration cap
        self._fitted = False  # flipped to True once training is done

    @staticmethod
    def _get_max_iter():
        """Return the global iteration cap shared by all wrappers."""
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2):
        """Run one training round, growing the iteration budget by n_iter."""
        if self.estimator is None:
            from sklearn.linear_model import PassiveAggressiveClassifier

            self.estimator = PassiveAggressiveClassifier(
                C=self.C,
                fit_intercept=self.fit_intercept,
                average=self.average,
                tol=self.tol,
                loss=self.loss,
                max_iter=n_iter,
            )
        else:
            # enlarge the budget, but never beyond the global cap
            self.estimator.max_iter = min(
                self.estimator.max_iter + n_iter, self.max_iter
            )

        self.estimator.fit(X, y)

        # fitted once the cap is reached or the estimator stopped before
        # exhausting its budget
        budget_spent = self.estimator.max_iter >= self.max_iter
        stopped_early = self.estimator.max_iter > self.estimator.n_iter_
        if budget_spent or stopped_early:
            self._fitted = True

        return self

    def fit(self, X, y):
        """Fit with exponentially growing per-round iteration budgets."""
        self._fit_iteration(X, y, n_iter=2)

        round_ = 2
        while not self._fitted:
            # budget grows as 2, 4, 8, ...
            self._fit_iteration(X, y, n_iter=int(2**round_ / 2))
            round_ += 1

        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return self.estimator.predict(X)

    def predict_proba(self, X):
        """Return class probabilities via softmax over decision margins."""
        return softmax(self.estimator.decision_function(X))
class QDA(sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis):

    """Quadratic discriminant analysis wrapper with explicit type coercion."""

    def __init__(self, reg_param=0.0):
        # coerce regularization strength to float
        self.reg_param = float(reg_param)
        super().__init__(reg_param=self.reg_param)
        self._fitted = False  # flipped to True by fit()

    def fit(self, X, y):
        """Fit the model and mark the wrapper as fitted."""
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        """Return predicted class labels for X."""
        return super().predict(X)

    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
        return super().predict_proba(X)
class RandomForestClassifier:

    """Random-forest classifier grown incrementally up to a global tree budget."""

    def __init__(
        self,
        criterion="gini",
        max_features=0.5,
        max_depth="None",
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        bootstrap=True,
        max_leaf_nodes="None",
        min_impurity_decrease=0.0,
    ):
        self.criterion = criterion
        self.max_features = max_features
        # "None"-like strings map to sklearn's None defaults
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.min_samples_split = int(min_samples_split)
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.bootstrap = bootstrap
        self.max_leaf_nodes = None if is_none(max_leaf_nodes) else int(max_leaf_nodes)
        self.min_impurity_decrease = float(min_impurity_decrease)

        self.estimator = None  # lazily built sklearn forest
        self.max_iter = self._get_max_iter()  # global cap on n_estimators
        self._fitted = False

    @staticmethod
    def _get_max_iter():
        # shared global budget for iterative fitting
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2):
        """Grow the forest by n_iter trees (capped) and refit."""
        if self.estimator is None:
            from sklearn.ensemble import RandomForestClassifier

            self.estimator = RandomForestClassifier(
                n_estimators=n_iter,
                criterion=self.criterion,
                max_features=self.max_features,
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                min_samples_leaf=self.min_samples_leaf,
                min_weight_fraction_leaf=self.min_weight_fraction_leaf,
                bootstrap=self.bootstrap,
                max_leaf_nodes=self.max_leaf_nodes,
                min_impurity_decrease=self.min_impurity_decrease,
            )
        else:
            self.estimator.n_estimators = min(
                self.estimator.n_estimators + n_iter, self.max_iter
            )

        self.estimator.fit(X, y)

        # finished when the budget is reached, or the requested size exceeds
        # the number of trees sklearn actually built
        if (
            self.estimator.n_estimators >= self.max_iter
            or self.estimator.n_estimators > len(self.estimator.estimators_)
        ):
            self._fitted = True
        return self

    def fit(self, X, y):
        """Fit with doubling tree grants: 2, 2, 4, 8, ..."""
        step = 2
        self._fit_iteration(X, y, n_iter=step)
        while not self._fitted:
            self._fit_iteration(X, y, n_iter=step)
            step *= 2
        return self

    def predict(self, X):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        return self.estimator.predict_proba(X)
class SGDClassifier:

    """SGD linear classifier trained in geometrically growing iteration chunks."""

    def __init__(
        self,
        loss="log",
        penalty="l2",
        alpha=0.0001,
        fit_intercept=True,
        tol=1e-4,
        learning_rate="invscaling",
        l1_ratio=0.15,
        epsilon=1e-4,
        eta0=0.01,
        power_t=0.5,
        average=False,
    ):
        self.loss = loss
        self.penalty = penalty
        self.alpha = float(alpha)
        self.fit_intercept = fit_intercept
        self.tol = float(tol)
        self.learning_rate = learning_rate
        # "None"-like values fall back to sklearn's defaults
        self.l1_ratio = 0.15 if is_none(l1_ratio) else float(l1_ratio)
        self.epsilon = 0.1 if is_none(epsilon) else float(epsilon)
        self.eta0 = float(eta0)
        self.power_t = 0.5 if is_none(power_t) else float(power_t)
        self.average = average

        self.estimator = None  # lazily created sklearn estimator
        self.max_iter = self._get_max_iter()  # global iteration budget
        self._fitted = False

    @staticmethod
    def _get_max_iter():
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2):
        """Run one training round, extending the iteration allowance by n_iter."""
        if self.estimator is None:
            from sklearn.linear_model import SGDClassifier

            self.estimator = SGDClassifier(
                loss=self.loss,
                penalty=self.penalty,
                alpha=self.alpha,
                fit_intercept=self.fit_intercept,
                tol=self.tol,
                learning_rate=self.learning_rate,
                l1_ratio=self.l1_ratio,
                epsilon=self.epsilon,
                eta0=self.eta0,
                power_t=self.power_t,
                average=self.average,
                max_iter=n_iter,
            )
        else:
            self.estimator.max_iter = min(
                self.estimator.max_iter + n_iter, self.max_iter
            )

        self.estimator.fit(X, y)

        # finished when the budget is spent or sklearn converged early
        if (
            self.estimator.max_iter >= self.max_iter
            or self.estimator.max_iter > self.estimator.n_iter_
        ):
            self._fitted = True
        return self

    def fit(self, X, y):
        """Fit with doubling iteration grants: 2, 2, 4, 8, ..."""
        step = 2
        self._fit_iteration(X, y, n_iter=step)
        while not self._fitted:
            self._fit_iteration(X, y, n_iter=step)
            step *= 2
        return self

    def predict(self, X):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        return self.estimator.predict_proba(X)
class LogisticRegression(sklearn.linear_model.LogisticRegression):

    """Logistic-regression wrapper that coerces hyperparameters to native types."""

    def __init__(self, penalty="l2", tol=1e-4, C=1.0):
        self.penalty = penalty
        self.tol = float(tol)
        self.C = float(C)
        super().__init__(penalty=self.penalty, tol=self.tol, C=self.C)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        return super().predict_proba(X)
class ComplementNB(sklearn.naive_bayes.ComplementNB):

    """Complement naive Bayes wrapper with hyperparameter coercion."""

    def __init__(self, alpha=1.0, fit_prior=True, norm=False):
        self.alpha = float(alpha)
        self.fit_prior = fit_prior
        self.norm = norm
        super().__init__(alpha=self.alpha, fit_prior=self.fit_prior, norm=self.norm)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        return super().predict_proba(X)
# combined with autosklearn version, thus deprecated here
# class HistGradientBoostingClassifier(sklearn.ensemble.HistGradientBoostingClassifier):
# def __init__(
# self,
# loss="auto",
# learning_rate=0.1,
# max_leaf_nodes=31,
# max_depth=None,
# min_samples_leaf=20,
# l2_regularization=0,
# tol=1e-7,
# ):
# self.loss = loss
# self.learning_rate = learning_rate
# self.max_leaf_nodes = max_leaf_nodes
# self.max_depth = max_depth
# self.min_samples_leaf = min_samples_leaf
# self.l2_regularization = l2_regularization
# self.tol = tol
# super().__init__(
# loss=self.loss,
# learning_rate=self.learning_rate,
# max_leaf_nodes=self.max_leaf_nodes,
# max_depth=self.max_depth,
# min_samples_leaf=self.min_samples_leaf,
# l2_regularization=self.l2_regularization,
# tol=self.tol,
# )
# self._fitted = False
# def fit(self, X, y):
# super().fit(X, y)
# self._fitted = True
# return self
# def predict(self, X):
# return super().predict(X)
class GradientBoostingClassifier(sklearn.ensemble.GradientBoostingClassifier):

    """Gradient-boosting classifier wrapper with hyperparameter coercion."""

    def __init__(
        self,
        loss="deviance",
        learning_rate=0.1,
        n_estimators=100,
        subsample=1.0,
        criterion="friedman_mse",
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_depth=3,
        min_impurity_decrease=0.0,
        max_features="auto",
        max_leaf_nodes=31,
        validation_fraction=0.1,
        n_iter_no_change=10,
        tol=1e-7,
    ):
        self.loss = loss
        self.learning_rate = float(learning_rate)
        self.n_estimators = int(n_estimators)
        self.subsample = float(subsample)
        self.criterion = criterion
        self.min_samples_split = int(min_samples_split)
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.min_impurity_decrease = float(min_impurity_decrease)
        self.max_features = max_features
        self.max_leaf_nodes = int(max_leaf_nodes)
        self.validation_fraction = float(validation_fraction)
        self.n_iter_no_change = int(n_iter_no_change)
        self.tol = float(tol)

        params = dict(
            loss=self.loss,
            learning_rate=self.learning_rate,
            n_estimators=self.n_estimators,
            subsample=self.subsample,
            criterion=self.criterion,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            max_depth=self.max_depth,
            min_impurity_decrease=self.min_impurity_decrease,
            max_features=self.max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            validation_fraction=self.validation_fraction,
            n_iter_no_change=self.n_iter_no_change,
            tol=self.tol,
        )
        super().__init__(**params)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        return super().predict_proba(X)
####################################################################################################################
# regressors
class AdaboostRegressor(sklearn.ensemble.AdaBoostRegressor):

    """AdaBoost regressor boosted over depth-limited decision trees."""

    def __init__(self, n_estimators=50, learning_rate=0.1, loss="linear", max_depth=1):
        self.n_estimators = int(n_estimators)
        self.learning_rate = float(learning_rate)
        self.loss = loss
        self.max_depth = int(max_depth)

        from sklearn.tree import DecisionTreeRegressor

        # weak learner: a shallow tree of the configured depth
        super().__init__(
            base_estimator=DecisionTreeRegressor(max_depth=self.max_depth),
            n_estimators=self.n_estimators,
            learning_rate=self.learning_rate,
            loss=self.loss,
        )
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class ARDRegression(sklearn.linear_model.ARDRegression):

    """ARD (automatic relevance determination) regression with coerced params."""

    def __init__(
        self,
        n_iter=300,
        tol=1e-3,
        alpha_1=1e-6,
        alpha_2=1e-6,
        lambda_1=1e-6,
        lambda_2=1e-6,
        threshold_lambda=1e4,
        fit_intercept=True,
    ):
        self.n_iter = int(n_iter)
        self.tol = float(tol)
        self.alpha_1 = float(alpha_1)
        self.alpha_2 = float(alpha_2)
        self.lambda_1 = float(lambda_1)
        self.lambda_2 = float(lambda_2)
        self.threshold_lambda = float(threshold_lambda)
        self.fit_intercept = fit_intercept

        params = dict(
            n_iter=self.n_iter,
            tol=self.tol,
            alpha_1=self.alpha_1,
            alpha_2=self.alpha_2,
            lambda_1=self.lambda_1,
            lambda_2=self.lambda_2,
            threshold_lambda=self.threshold_lambda,
            fit_intercept=self.fit_intercept,
        )
        super().__init__(**params)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class DecisionTreeRegressor(sklearn.tree.DecisionTreeRegressor):

    """Decision-tree regressor wrapper with hyperparameter coercion.

    NOTE(review): ``max_depth_factor`` (default 0.5) is passed directly as
    sklearn's ``max_depth``; in autosklearn this factor is scaled by the
    number of features at fit time — confirm the direct pass-through is
    intended.
    """

    def __init__(
        self,
        criterion="mse",
        max_features=1.0,
        max_depth_factor=0.5,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_leaf_nodes="None",
        min_impurity_decrease=0.0,
    ):
        self.criterion = criterion
        self.max_depth_factor = max_depth_factor
        self.min_samples_split = int(min_samples_split)
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.max_features = float(max_features)
        self.max_leaf_nodes = None if is_none(max_leaf_nodes) else int(max_leaf_nodes)
        self.min_impurity_decrease = float(min_impurity_decrease)

        super().__init__(
            criterion=self.criterion,
            max_depth=self.max_depth_factor,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            max_features=self.max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            min_impurity_decrease=self.min_impurity_decrease,
        )
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class ExtraTreesRegressor:

    """Extra-trees regressor grown incrementally up to a global tree budget.

    Fix: the completion check previously compared ``n_estimators >=
    len(estimators_)``, which is always true immediately after ``fit`` (both
    are equal), so the ensemble stopped growing after the very first round
    with only two trees.  It now uses a strict ``>`` like the sibling
    RandomForest wrappers, so growth continues until the global budget is
    reached or sklearn builds fewer trees than requested.
    """

    def __init__(
        self,
        criterion="mse",
        max_depth="None",
        max_leaf_nodes="None",
        min_samples_leaf=1,
        min_samples_split=2,
        max_features=1,
        bootstrap=False,
        min_weight_fraction_leaf=0.0,
        min_impurity_decrease=0.0,
    ):
        self.criterion = criterion
        # "None"-like strings map to sklearn's None defaults
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.max_leaf_nodes = None if is_none(max_leaf_nodes) else int(max_leaf_nodes)
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_samples_split = int(min_samples_split)
        self.max_features = float(max_features)
        self.bootstrap = bootstrap
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.min_impurity_decrease = float(min_impurity_decrease)

        self.estimator = None  # the fitted estimator
        self.max_iter = self._get_max_iter()  # global cap on n_estimators
        self._fitted = False  # whether the model is fitted

    @staticmethod
    def _get_max_iter():  # define global max_iter
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2):
        """Grow the ensemble by n_iter trees (capped at the budget) and refit."""
        if self.estimator is None:
            from sklearn.ensemble import ExtraTreesRegressor

            self.estimator = ExtraTreesRegressor(
                n_estimators=n_iter,
                criterion=self.criterion,
                max_depth=self.max_depth,
                max_leaf_nodes=self.max_leaf_nodes,
                min_samples_leaf=self.min_samples_leaf,
                min_samples_split=self.min_samples_split,
                max_features=self.max_features,
                bootstrap=self.bootstrap,
                min_weight_fraction_leaf=self.min_weight_fraction_leaf,
                min_impurity_decrease=self.min_impurity_decrease,
            )
        else:
            self.estimator.n_estimators += n_iter
            self.estimator.n_estimators = min(
                self.estimator.n_estimators, self.max_iter
            )

        self.estimator.fit(X, y)

        # done when the budget is reached, or the requested size exceeds the
        # number of trees sklearn actually built (strict '>' — see class doc)
        if (
            self.estimator.n_estimators >= self.max_iter
            or self.estimator.n_estimators > len(self.estimator.estimators_)
        ):
            self._fitted = True
        return self

    def fit(self, X, y):
        self._fit_iteration(X, y, n_iter=2)
        # accelerate the iteration process: grants of 2, 2, 4, 8, ...
        iteration = 2
        while not self._fitted:
            n_iter = int(2**iteration / 2)
            self._fit_iteration(X, y, n_iter=n_iter)
            iteration += 1
        return self

    def predict(self, X):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class GaussianProcess(sklearn.gaussian_process.GaussianProcessRegressor):

    """GP regressor with a per-feature RBF kernel.

    The parent class is initialised inside ``fit`` rather than ``__init__``
    because the kernel's length-scale bounds depend on the number of features
    in the training data.
    """

    def __init__(self, alpha=1e-8, thetaL=1e-6, thetaU=100000.0):
        self.alpha = float(alpha)
        self.thetaL = float(thetaL)
        self.thetaU = float(thetaU)
        self._fitted = False

    def fit(self, X, y):
        dim = X.shape[1]
        rbf = sklearn.gaussian_process.kernels.RBF(
            length_scale=[1.0] * dim,
            length_scale_bounds=[(self.thetaL, self.thetaU)] * dim,
        )
        super().__init__(
            kernel=rbf,
            n_restarts_optimizer=10,
            optimizer="fmin_l_bfgs_b",
            alpha=self.alpha,
            copy_X_train=True,
            normalize_y=True,
        )
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class HistGradientBoostingRegressor:

    """Histogram gradient boosting regressor trained in growing iteration chunks.

    Fix: the autosklearn-style ``early_stop`` mapping computes
    ``self.validation_fraction_`` (None for "off"/"train"; a float or a
    class-count floor for "valid"), but the constructor previously passed the
    raw ``self.validation_fraction``, silently discarding the mapping.  The
    mapped value is now forwarded.

    NOTE(review): the "valid" branch sizes the floor via ``np.unique(y)``
    (class counting) even though this is a regressor — looks copied from the
    classifier counterpart; confirm.  ``sample_weight`` accepted by
    ``_fit_iteration`` is never supplied by ``fit``.
    """

    def __init__(
        self,
        loss="least_squares",
        learning_rate=0.1,
        min_samples_leaf=20,
        max_depth="None",
        max_leaf_nodes=31,
        max_bins=255,
        l2_regularization=1e-10,
        early_stop="off",
        tol=1e-7,
        scoring="loss",
        n_iter_no_change=10,
        validation_fraction=0.1,
    ):
        self.loss = loss
        self.learning_rate = float(learning_rate)
        self.min_samples_leaf = int(min_samples_leaf)
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.max_leaf_nodes = int(max_leaf_nodes)
        self.max_bins = int(max_bins)
        self.l2_regularization = float(l2_regularization)
        self.early_stop = early_stop
        self.tol = float(tol)
        self.scoring = scoring
        self.n_iter_no_change = int(n_iter_no_change)
        self.validation_fraction = float(validation_fraction)

        self.estimator = None  # the fitted estimator
        self.max_iter = self._get_max_iter()  # limit the number of iterations
        self._fitted = False  # whether the model is fitted

    @staticmethod
    def _get_max_iter():  # define global max_iter
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2, sample_weight=None):
        """Run one training round, extending the iteration allowance by n_iter."""
        if self.estimator is None:
            from sklearn.ensemble import HistGradientBoostingRegressor

            # map from autosklearn parameter space to sklearn parameter space
            if self.early_stop == "off":
                self.n_iter_no_change = 0
                self.validation_fraction_ = None
                self.early_stopping_ = False
            elif self.early_stop == "train":
                # stop on training loss; no held-out validation split
                self.n_iter_no_change = int(self.n_iter_no_change)
                self.validation_fraction_ = None
                self.early_stopping_ = True
            elif self.early_stop == "valid":
                self.n_iter_no_change = int(self.n_iter_no_change)
                self.validation_fraction = float(self.validation_fraction)
                self.early_stopping_ = True
                # ensure the validation split holds at least one sample per
                # distinct target value (see NOTE in class docstring)
                n_classes = len(np.unique(y))
                if self.validation_fraction * X.shape[0] < n_classes:
                    self.validation_fraction_ = n_classes
                else:
                    self.validation_fraction_ = self.validation_fraction
            else:
                raise ValueError("early_stop should be either off, train or valid")

            self.estimator = HistGradientBoostingRegressor(
                loss=self.loss,
                learning_rate=self.learning_rate,
                min_samples_leaf=self.min_samples_leaf,
                max_depth=self.max_depth,
                max_leaf_nodes=self.max_leaf_nodes,
                max_bins=self.max_bins,
                l2_regularization=self.l2_regularization,
                early_stopping=self.early_stopping_,
                tol=self.tol,
                scoring=self.scoring,
                n_iter_no_change=self.n_iter_no_change,
                # pass the mapped value, not the raw hyperparameter (bug fix)
                validation_fraction=self.validation_fraction_,
                max_iter=n_iter,
            )
        else:
            self.estimator.max_iter += n_iter  # add n_iter to each step
            self.estimator.max_iter = min(
                self.estimator.max_iter, self.max_iter
            )  # limit the number of iterations

        self.estimator.fit(X, y, sample_weight=sample_weight)

        # check whether fully fitted or need to add more iterations
        if (
            self.estimator.max_iter >= self.max_iter
            or self.estimator.max_iter > self.estimator.n_iter_
        ):
            self._fitted = True
        return self

    def fit(self, X, y):
        self._fit_iteration(X, y, n_iter=2)
        # accelerate iteration process: grants of 2, 2, 4, 8, ...
        iteration = 2
        while not self._fitted:
            n_iter = int(2**iteration / 2)
            self._fit_iteration(X, y, n_iter=n_iter)
            iteration += 1
        return self

    def predict(self, X):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class KNearestNeighborsRegressor(sklearn.neighbors.KNeighborsRegressor):

    """k-nearest-neighbours regressor wrapper with hyperparameter coercion."""

    def __init__(self, n_neighbors=1, weights="uniform", p=2):
        self.n_neighbors = int(n_neighbors)
        self.weights = weights
        self.p = int(p)
        super().__init__(n_neighbors=self.n_neighbors, weights=self.weights, p=self.p)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class LibLinear_SVR(sklearn.svm.LinearSVR):

    """Linear SVR (liblinear) wrapper with hyperparameter coercion."""

    def __init__(
        self,
        epsilon=0.1,
        loss="squared_epsilon_insensitive",
        dual=False,
        tol=1e-4,
        C=1.0,
        fit_intercept=True,
        intercept_scaling=1,
    ):
        self.epsilon = float(epsilon)
        self.loss = loss
        self.dual = dual
        self.tol = float(tol)
        self.C = float(C)
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling

        params = dict(
            epsilon=self.epsilon,
            loss=self.loss,
            dual=self.dual,
            tol=self.tol,
            C=self.C,
            fit_intercept=self.fit_intercept,
            intercept_scaling=self.intercept_scaling,
        )
        super().__init__(**params)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class LibSVM_SVR(sklearn.svm.SVR):

    """Kernel SVR (libsvm) wrapper with hyperparameter coercion.

    Fix: ``epsilon`` was stored on the instance but never forwarded to
    ``sklearn.svm.SVR.__init__``, so the epsilon-tube hyperparameter was
    silently ignored and sklearn's default (0.1) was always used.  It is now
    passed through.
    """

    def __init__(
        self,
        kernel="rbf",
        C=1.0,
        epsilon=0.1,
        degree=3,
        gamma=0.1,
        coef0=0,
        tol=1e-3,
        shrinking=True,
        max_iter=-1,
    ):
        self.C = float(C)
        self.kernel = kernel
        self.epsilon = float(epsilon)
        self.degree = int(degree)
        self.gamma = float(gamma)
        self.coef0 = float(coef0)
        self.shrinking = shrinking
        self.tol = float(tol)
        self.max_iter = max_iter

        super().__init__(
            C=self.C,
            kernel=self.kernel,
            epsilon=self.epsilon,  # bug fix: previously omitted
            degree=self.degree,
            gamma=self.gamma,
            coef0=self.coef0,
            shrinking=self.shrinking,
            tol=self.tol,
            max_iter=self.max_iter,
        )
        self._fitted = False  # whether the model is fitted

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class MLPRegressor:
    """Multi-layer perceptron regressor trained in growing iteration chunks.

    Maps an autosklearn-style parameter space (``early_stopping`` given as the
    strings "train"/"valid") onto ``sklearn.neural_network.MLPRegressor`` and
    trains until sklearn stops improving or the global budget is reached.
    """

    def __init__(
        self,
        hidden_layer_depth=1,
        num_nodes_per_layer=32,
        activation="tanh",
        alpha=1e-4,
        learning_rate_init=1e-3,
        early_stopping="valid",
        n_iter_no_change=32,
        validation_fraction=0.1,
        tol=1e-4,
        solver="adam",
        batch_size="auto",
        shuffle=True,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-8,
    ):
        self.hidden_layer_depth = int(hidden_layer_depth)
        self.num_nodes_per_layer = int(num_nodes_per_layer)
        # uniform width: depth copies of the same layer size
        self.hidden_layer_sizes = tuple(
            self.num_nodes_per_layer for _ in range(self.hidden_layer_depth)
        )
        self.activation = str(activation)
        self.alpha = float(alpha)
        self.learning_rate_init = float(learning_rate_init)
        # string "train"/"valid" here; converted to a bool on first _fit_iteration
        self.early_stopping = str(early_stopping)
        self.n_iter_no_change = int(n_iter_no_change)
        self.validation_fraction = float(validation_fraction)
        self.tol = float(tol)
        self.solver = solver
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.beta_1 = float(beta_1)
        self.beta_2 = float(beta_2)
        self.epsilon = float(epsilon)

        self.estimator = None  # the fitted estimator
        self.max_iter = self._get_max_iter()  # limit the number of iterations
        self._fitted = False  # whether the model is fitted

    @staticmethod
    def _get_max_iter():  # define global max_iter
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2):
        """Run one training round; on the first call build and configure the estimator."""
        if self.estimator is None:
            from sklearn.neural_network import MLPRegressor

            # map from autosklearn parameter space to sklearn parameter space;
            # note self.early_stopping is mutated from str to bool here
            if self.early_stopping == "train":
                self.validation_fraction = 0.0
                self.tol = float(self.tol)
                self.n_iter_no_change = int(self.n_iter_no_change)
                self.early_stopping = False
            elif self.early_stopping == "valid":
                self.validation_fraction = float(self.validation_fraction)
                self.tol = float(self.tol)
                self.n_iter_no_change = int(self.n_iter_no_change)
                self.early_stopping = True
            else:
                raise ValueError(
                    "Early stopping only supports 'train' and 'valid'. Got {}".format(
                        self.early_stopping
                    )
                )

            self.estimator = MLPRegressor(
                hidden_layer_sizes=self.hidden_layer_sizes,
                activation=self.activation,
                alpha=self.alpha,
                learning_rate_init=self.learning_rate_init,
                early_stopping=self.early_stopping,
                n_iter_no_change=self.n_iter_no_change,
                validation_fraction=self.validation_fraction,
                tol=self.tol,
                solver=self.solver,
                batch_size=self.batch_size,
                shuffle=self.shuffle,
                beta_1=self.beta_1,
                beta_2=self.beta_2,
                epsilon=self.epsilon,
                max_iter=n_iter,
            )
        else:
            # MLPClassifier can record previous training
            # NOTE(review): this sets max_iter to (previous allowance minus
            # iterations already run), capped at the global budget, and the
            # n_iter argument is ignored on this path — so the allowance
            # shrinks each round; confirm this matches the intended
            # warm-continuation behaviour.
            self.estimator.max_iter = min(
                self.estimator.max_iter - self.estimator.n_iter_, self.max_iter
            )  # limit the number of iterations

        self.estimator.fit(X, y)

        # check whether fully fitted or need to add more iterations:
        # stop when the allowance was used up, or sklearn's internal
        # no-improvement counter exceeded the patience
        if (
            self.estimator.n_iter_ >= self.estimator.max_iter
            or self.estimator._no_improvement_count > self.n_iter_no_change
        ):
            self._fitted = True
        return self

    def fit(self, X, y):
        """Fit with doubling iteration grants (2, 2, 4, 8, ...) until done."""
        self._fit_iteration(X, y, n_iter=2)
        # accelerate iteration process
        iteration = 2
        while not self._fitted:
            n_iter = int(2**iteration / 2)
            self._fit_iteration(X, y, n_iter=n_iter)
            iteration += 1
        return self

    def predict(self, X):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class RandomForestRegressor:

    """Random-forest regressor grown incrementally up to a global tree budget."""

    def __init__(
        self,
        criterion="mse",
        max_features=0.5,
        max_depth="None",
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        bootstrap=True,
        max_leaf_nodes="None",
        min_impurity_decrease=0.0,
    ):
        self.criterion = criterion
        self.max_features = max_features
        # "None"-like strings map to sklearn's None defaults
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.min_samples_split = int(min_samples_split)
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.bootstrap = bootstrap
        self.max_leaf_nodes = None if is_none(max_leaf_nodes) else int(max_leaf_nodes)
        self.min_impurity_decrease = float(min_impurity_decrease)

        self.estimator = None  # lazily built sklearn forest
        self.max_iter = self._get_max_iter()  # global cap on n_estimators
        self._fitted = False

    @staticmethod
    def _get_max_iter():
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2):
        """Grow the forest by n_iter trees (capped) and refit."""
        if self.estimator is None:
            from sklearn.ensemble import RandomForestRegressor

            self.estimator = RandomForestRegressor(
                n_estimators=n_iter,
                criterion=self.criterion,
                max_features=self.max_features,
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                min_samples_leaf=self.min_samples_leaf,
                min_weight_fraction_leaf=self.min_weight_fraction_leaf,
                bootstrap=self.bootstrap,
                max_leaf_nodes=self.max_leaf_nodes,
                min_impurity_decrease=self.min_impurity_decrease,
            )
        else:
            self.estimator.n_estimators = min(
                self.estimator.n_estimators + n_iter, self.max_iter
            )

        self.estimator.fit(X, y)

        # finished when the budget is reached, or the requested size exceeds
        # the number of trees sklearn actually built
        if (
            self.estimator.n_estimators >= self.max_iter
            or self.estimator.n_estimators > len(self.estimator.estimators_)
        ):
            self._fitted = True
        return self

    def fit(self, X, y):
        """Fit with doubling tree grants: 2, 2, 4, 8, ..."""
        step = 2
        self._fit_iteration(X, y, n_iter=step)
        while not self._fitted:
            self._fit_iteration(X, y, n_iter=step)
            step *= 2
        return self

    def predict(self, X):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class SGDRegressor:

    """SGD linear regressor trained in geometrically growing iteration chunks."""

    def __init__(
        self,
        loss="squared_loss",
        penalty="l2",
        alpha=0.0001,
        fit_intercept=True,
        tol=1e-4,
        learning_rate="invscaling",
        l1_ratio=0.15,
        epsilon=1e-4,
        eta0=0.01,
        power_t=0.5,
        average=False,
    ):
        self.loss = loss
        self.penalty = penalty
        self.alpha = float(alpha)
        self.fit_intercept = fit_intercept
        self.tol = float(tol)
        self.learning_rate = learning_rate
        # "None"-like values fall back to sklearn's defaults
        self.l1_ratio = 0.15 if is_none(l1_ratio) else float(l1_ratio)
        self.epsilon = 0.1 if is_none(epsilon) else float(epsilon)
        self.eta0 = float(eta0)
        self.power_t = 0.5 if is_none(power_t) else float(power_t)
        self.average = average

        self.estimator = None  # lazily created sklearn estimator
        self.max_iter = self._get_max_iter()  # global iteration budget
        self._fitted = False

    @staticmethod
    def _get_max_iter():
        return MAX_ITER

    def _fit_iteration(self, X, y, n_iter=2):
        """Run one training round, extending the iteration allowance by n_iter."""
        if self.estimator is None:
            from sklearn.linear_model import SGDRegressor

            self.estimator = SGDRegressor(
                loss=self.loss,
                penalty=self.penalty,
                alpha=self.alpha,
                fit_intercept=self.fit_intercept,
                tol=self.tol,
                learning_rate=self.learning_rate,
                l1_ratio=self.l1_ratio,
                epsilon=self.epsilon,
                eta0=self.eta0,
                power_t=self.power_t,
                average=self.average,
                max_iter=n_iter,
            )
        else:
            self.estimator.max_iter = min(
                self.estimator.max_iter + n_iter, self.max_iter
            )

        self.estimator.fit(X, y)

        # finished when the budget is spent or sklearn converged early
        if (
            self.estimator.max_iter >= self.max_iter
            or self.estimator.max_iter > self.estimator.n_iter_
        ):
            self._fitted = True
        return self

    def fit(self, X, y):
        """Fit with doubling iteration grants: 2, 2, 4, 8, ..."""
        step = 2
        self._fit_iteration(X, y, n_iter=step)
        while not self._fitted:
            self._fit_iteration(X, y, n_iter=step)
            step *= 2
        return self

    def predict(self, X):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class LinearRegression(sklearn.linear_model.LinearRegression):

    """Ordinary least-squares wrapper that tracks fitted state."""

    def __init__(self):
        super().__init__()
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class Lasso(sklearn.linear_model.Lasso):

    """L1-regularised linear regression wrapper with a fitted flag."""

    def __init__(self, alpha=1.0, tol=1e-4):
        self.alpha = alpha
        self.tol = tol
        super().__init__(alpha=self.alpha, tol=self.tol)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class Ridge(sklearn.linear_model.Ridge):

    """L2-regularised linear regression wrapper with a fitted flag."""

    def __init__(self, alpha=1.0, tol=1e-3, solver="auto"):
        self.alpha = alpha
        self.tol = tol
        self.solver = solver
        super().__init__(alpha=self.alpha, tol=self.tol, solver=self.solver)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class ElasticNet(sklearn.linear_model.ElasticNet):

    """Elastic-net (mixed L1/L2) linear regression wrapper with a fitted flag."""

    def __init__(self, alpha=1.0, l1_ratio=0.5, tol=1e-4, selection="cyclic"):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.tol = tol
        self.selection = selection
        super().__init__(
            alpha=self.alpha,
            l1_ratio=self.l1_ratio,
            tol=self.tol,
            selection=self.selection,
        )
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
class BayesianRidge(sklearn.linear_model.BayesianRidge):

    """Bayesian ridge regression wrapper with hyperparameter coercion."""

    def __init__(
        self,
        tol=1e-3,
        alpha_1=1e-6,
        alpha_2=1e-6,
        lambda_1=1e-6,
        lambda_2=1e-6,
    ):
        self.tol = float(tol)
        self.alpha_1 = float(alpha_1)
        self.alpha_2 = float(alpha_2)
        self.lambda_1 = float(lambda_1)
        self.lambda_2 = float(lambda_2)

        params = dict(
            tol=self.tol,
            alpha_1=self.alpha_1,
            alpha_2=self.alpha_2,
            lambda_1=self.lambda_1,
            lambda_2=self.lambda_2,
        )
        super().__init__(**params)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
# combined with autosklearn version, thus deprecated here
# class HistGradientBoostingRegressor(sklearn.ensemble.HistGradientBoostingRegressor):
# def __init__(
# self,
# loss="least_squares",
# learning_rate=0.1,
# max_leaf_nodes=31,
# max_depth=None,
# min_samples_leaf=20,
# l2_regularization=0,
# tol=1e-7,
# ):
# self.loss = loss
# self.learning_rate = learning_rate
# self.max_leaf_nodes = max_leaf_nodes
# self.max_depth = max_depth
# self.min_samples_leaf = min_samples_leaf
# self.l2_regularization = l2_regularization
# self.tol = tol
# super().__init__(
# loss=self.loss,
# learning_rate=self.learning_rate,
# max_leaf_nodes=self.max_leaf_nodes,
# max_depth=self.max_depth,
# min_samples_leaf=self.min_samples_leaf,
# l2_regularization=self.l2_regularization,
# tol=self.tol,
# )
# self._fitted = False
# def fit(self, X, y):
# super().fit(X, y)
# self._fitted = True
# return self
# def predict(self, X):
# return super().predict(X)
class GradientBoostingRegressor(sklearn.ensemble.GradientBoostingRegressor):

    """Gradient-boosting regressor wrapper with hyperparameter coercion.

    The default ``loss`` string is chosen from the installed sklearn version
    ("ls" pre-1.0, "squared_error" after).
    """

    def __init__(
        self,
        loss="ls" if sklearn_1_0_0 else "squared_error",  # for default arguments
        learning_rate=0.1,
        n_estimators=100,
        subsample=1.0,
        criterion="friedman_mse",
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_depth=3,
        min_impurity_decrease=0.0,
        max_features="auto",
        max_leaf_nodes=31,
        validation_fraction=0.1,
        n_iter_no_change=10,
        tol=1e-7,
    ):
        self.loss = loss
        self.learning_rate = float(learning_rate)
        self.n_estimators = int(n_estimators)
        self.subsample = float(subsample)
        self.criterion = criterion
        self.min_samples_split = int(min_samples_split)
        self.min_samples_leaf = int(min_samples_leaf)
        self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)
        self.max_depth = None if is_none(max_depth) else int(max_depth)
        self.min_impurity_decrease = float(min_impurity_decrease)
        self.max_features = max_features
        self.max_leaf_nodes = int(max_leaf_nodes)
        self.validation_fraction = float(validation_fraction)
        self.n_iter_no_change = int(n_iter_no_change)
        self.tol = float(tol)

        params = dict(
            loss=self.loss,
            learning_rate=self.learning_rate,
            n_estimators=self.n_estimators,
            subsample=self.subsample,
            criterion=self.criterion,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            max_depth=self.max_depth,
            min_impurity_decrease=self.min_impurity_decrease,
            max_features=self.max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            validation_fraction=self.validation_fraction,
            n_iter_no_change=self.n_iter_no_change,
            tol=self.tol,
        )
        super().__init__(**params)
        self._fitted = False  # flipped to True after fit()

    def fit(self, X, y):
        super().fit(X, y)
        self._fitted = True
        return self

    def predict(self, X):
        return super().predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
|
PanyiDong/AutoML | My_AutoML/_hyperparameters/_hyperopt/_classifier_hyperparameter.py | """
File: _classifier_hyperparameter.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/_hyperopt/_classifier_hyperparameter.py
File Created: Tuesday, 5th April 2022 11:05:31 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:22:42 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from hyperopt import hp
from hyperopt.pyll import scope
# classifier hyperparameters
# classifier hyperparameters
# Each dict describes one model's hyperopt search space. Convention: every
# hyperopt label is "<Model>_<param>" so labels stay unique across the space.
classifier_hyperparameter = [
    # extract from autosklearn
    {
        "model": "AdaboostClassifier",
        "n_estimators": scope.int(
            hp.quniform("AdaboostClassifier_n_estimators", 10, 500, 1)
        ),
        "learning_rate": hp.uniform("AdaboostClassifier_learning_rate", 0.01, 2),
        "algorithm": hp.choice("AdaboostClassifier_algorithm", ["SAMME", "SAMME.R"]),
        # for base_estimator of Decision Tree
        "max_depth": scope.int(hp.quniform("AdaboostClassifier_max_depth", 1, 10, 1)),
    },
    {
        "model": "BernoulliNB",
        "alpha": hp.loguniform("BernoulliNB_alpha", np.log(1e-2), np.log(100)),
        "fit_prior": hp.choice("BernoulliNB_fit_prior", [True, False]),
    },
    {
        "model": "DecisionTree",
        "criterion": hp.choice("DecisionTree_criterion", ["gini", "entropy"]),
        "max_features": hp.choice("DecisionTree_max_features", [1.0]),
        "max_depth_factor": hp.uniform("DecisionTree_max_depth_factor", 0.0, 2.0),
        "min_samples_split": scope.int(
            hp.quniform("DecisionTree_min_samples_split", 2, 20, 1)
        ),
        "min_samples_leaf": scope.int(
            hp.quniform("DecisionTree_min_samples_leaf", 1, 20, 1)
        ),
        "min_weight_fraction_leaf": hp.choice(
            "DecisionTree_min_weight_fraction_leaf", [0.0]
        ),
        # NOTE(review): the string "None" is used here (and in several entries
        # below), while RandomForest uses the None object — confirm downstream
        # handling treats both forms identically.
        "max_leaf_nodes": hp.choice("DecisionTree_max_leaf_nodes", ["None"]),
        "min_impurity_decrease": hp.choice("DecisionTree_min_impurity_decrease", [0.0]),
    },
    {
        "model": "ExtraTreesClassifier",
        "criterion": hp.choice("ExtraTreesClassifier_criterion", ["gini", "entropy"]),
        "min_samples_leaf": scope.int(
            hp.quniform("ExtraTreesClassifier_min_samples_leaf", 1, 20, 1)
        ),
        "min_samples_split": scope.int(
            hp.quniform("ExtraTreesClassifier_min_samples_split", 2, 20, 1)
        ),
        "max_features": hp.uniform("ExtraTreesClassifier_max_features", 0.0, 1.0),
        "bootstrap": hp.choice("ExtraTreesClassifier_bootstrap", [True, False]),
        "max_leaf_nodes": hp.choice("ExtraTreesClassifier_max_leaf_nodes", ["None"]),
        "max_depth": hp.choice("ExtraTreesClassifier_max_depth", ["None"]),
        "min_weight_fraction_leaf": hp.choice(
            "ExtraTreesClassifier_min_weight_fraction_leaf", [0.0]
        ),
        "min_impurity_decrease": hp.choice(
            "ExtraTreesClassifier_min_impurity_decrease", [0.0]
        ),
    },
    {"model": "GaussianNB"},
    {
        "model": "GradientBoostingClassifier",
        "loss": hp.choice("GradientBoostingClassifier_loss", ["auto"]),
        "learning_rate": hp.loguniform(
            "GradientBoostingClassifier_learning_rate", np.log(0.01), np.log(1)
        ),
        "min_samples_leaf": scope.int(
            hp.loguniform(
                "GradientBoostingClassifier_min_samples_leaf", np.log(1), np.log(200)
            )
        ),
        "max_depth": hp.choice("GradientBoostingClassifier_max_depth", ["None"]),
        "max_leaf_nodes": scope.int(
            hp.loguniform(
                "GradientBoostingClassifier_max_leaf_nodes", np.log(3), np.log(2047)
            )
        ),
        "max_bins": hp.choice("GradientBoostingClassifier_max_bins", [255]),
        "l2_regularization": hp.loguniform(
            "GradientBoostingClassifier_l2_regularization", np.log(1e-10), np.log(1)
        ),
        "early_stop": hp.choice(
            "GradientBoostingClassifier_early_stop", ["off", "train", "valid"]
        ),
        "tol": hp.choice("GradientBoostingClassifier_tol", [1e-7]),
        "scoring": hp.choice("GradientBoostingClassifier_scoring", ["loss"]),
        "validation_fraction": hp.uniform(
            "GradientBoostingClassifier_validation_fraction", 0.01, 0.4
        ),
    },
    {
        "model": "KNearestNeighborsClassifier",
        "n_neighbors": scope.int(
            hp.quniform("KNearestNeighborsClassifier_n_neighbors", 1, 100, 1)
        ),
        "weights": hp.choice(
            "KNearestNeighborsClassifier_weights", ["uniform", "distance"]
        ),
        "p": hp.choice("KNearestNeighborsClassifier_p", [1, 2]),
    },
    {
        "model": "LDA",
        "shrinkage": hp.choice("LDA_shrinkage", [None, "auto", "manual"]),
        # fixed label: was plain "LDA", breaking the "<Model>_<param>"
        # labeling convention used everywhere else in this space
        "shrinkage_factor": hp.uniform("LDA_shrinkage_factor", 0.0, 1.0),
        "tol": hp.loguniform("LDA_tol", np.log(1e-5), np.log(1e-1)),
    },
    {
        "model": "LibLinear_SVC",
        # forbid penalty = 'l1' and loss = 'hinge'
        # forbid penalty = 'l2', loss = 'hinge' and dual = False
        # forbid penalty = 'l1' and dual = False
        "penalty": hp.choice("LibLinear_SVC_penalty", ["l2"]),
        "loss": hp.choice("LibLinear_SVC_loss", ["squared_hinge"]),
        "dual": hp.choice("LibLinear_SVC_dual", [False]),
        "tol": hp.loguniform("LibLinear_SVC_tol", np.log(1e-5), np.log(1e-1)),
        "C": hp.loguniform("LibLinear_SVC_C", np.log(0.03125), np.log(32768)),
        "multi_class": hp.choice("LibLinear_SVC_multi_class", ["ovr"]),
        "fit_intercept": hp.choice("LibLinear_SVC_fit_intercept", [True]),
        "intercept_scaling": hp.choice("LibLinear_SVC_intercept_scaling", [1]),
    },
    {
        "model": "LibSVM_SVC",
        # degree only selected when kernel = 'poly'
        # coef0 only selected when kernel = ['poly', 'sigmoid']
        "C": hp.loguniform("LibSVM_SVC_C", np.log(0.03125), np.log(32768)),
        "kernel": hp.choice("LibSVM_SVC_kernel", ["poly", "rbf", "sigmoid"]),
        "gamma": hp.loguniform("LibSVM_SVC_gamma", np.log(3.0517578125e-05), np.log(8)),
        "shrinking": hp.choice("LibSVM_SVC_shrinking", [True, False]),
        "tol": hp.loguniform("LibSVM_SVC_tol", np.log(1e-5), np.log(1e-1)),
        "max_iter": hp.choice("LibSVM_SVC_max_iter", [-1]),
        "degree": scope.int(hp.quniform("LibSVM_SVC_degree", 2, 5, 1)),
        "coef0": hp.uniform("LibSVM_SVC_coef0", -1, 1),
    },
    {
        "model": "MLPClassifier",
        "hidden_layer_depth": scope.int(
            hp.quniform("MLPClassifier_hidden_layer_depth", 1, 3, 1)
        ),
        "num_nodes_per_layer": scope.int(
            hp.loguniform("MLPClassifier_num_nodes_per_layer", np.log(16), np.log(264))
        ),
        "activation": hp.choice("MLPClassifier_activation", ["tanh", "relu"]),
        "alpha": hp.loguniform("MLPClassifier_alpha", np.log(1e-7), np.log(1e-1)),
        "learning_rate_init": hp.loguniform(
            "MLPClassifier_learning_rate_init", np.log(1e-4), np.log(0.5)
        ),
        "early_stopping": hp.choice("MLPClassifier_early_stopping", ["train", "valid"]),
        #'solver' : hp.choice('MLPClassifier_solver', ['lbfgs', 'sgd', 'adam']),
        # autosklearn must include _no_improvement_count, where only supported by 'sgd' and 'adam'
        "solver": hp.choice("MLPClassifier_solver", ["adam"]),
        "batch_size": hp.choice("MLPClassifier_batch_size", ["auto"]),
        "n_iter_no_change": hp.choice("MLPClassifier_n_iter_no_change", [32]),
        "tol": hp.choice("MLPClassifier_tol", [1e-4]),
        "shuffle": hp.choice("MLPClassifier_shuffle", [True]),
        "beta_1": hp.choice("MLPClassifier_beta_1", [0.9]),
        "beta_2": hp.choice("MLPClassifier_beta_2", [0.999]),
        "epsilon": hp.choice("MLPClassifier_epsilon", [1e-8]),
        "validation_fraction": hp.choice("MLPClassifier_validation_fraction", [0.1]),
    },
    {
        "model": "MultinomialNB",
        "alpha": hp.loguniform("MultinomialNB_alpha", np.log(1e-2), np.log(100)),
        "fit_prior": hp.choice("MultinomialNB_fit_prior", [True, False]),
    },
    {
        "model": "PassiveAggressive",
        "C": hp.loguniform("PassiveAggressive_C", np.log(1e-5), np.log(10)),
        "fit_intercept": hp.choice("PassiveAggressive_fit_intercept", [True]),
        "tol": hp.loguniform("PassiveAggressive_tol", np.log(1e-5), np.log(1e-1)),
        "loss": hp.choice("PassiveAggressive_loss", ["hinge", "squared_hinge"]),
        "average": hp.choice("PassiveAggressive_average", [True, False]),
    },
    {"model": "QDA", "reg_param": hp.uniform("QDA_reg_param", 0.0, 1.0)},
    {
        "model": "RandomForest",
        "criterion": hp.choice("RandomForest_criterion", ["gini", "entropy"]),
        "max_features": hp.uniform("RandomForest_max_features", 0.0, 1.0),
        "max_depth": hp.choice("RandomForest_max_depth", [None]),
        "min_samples_split": scope.int(
            hp.quniform("RandomForest_min_samples_split", 2, 20, 1)
        ),
        "min_samples_leaf": scope.int(
            hp.quniform("RandomForest_min_samples_leaf", 1, 20, 1)
        ),
        "min_weight_fraction_leaf": hp.choice(
            "RandomForest_min_weight_fraction_leaf", [0.0]
        ),
        "bootstrap": hp.choice("RandomForest_bootstrap", [True, False]),
        "max_leaf_nodes": hp.choice("RandomForest_max_leaf_nodes", [None]),
        "min_impurity_decrease": hp.choice("RandomForest_min_impurity_decrease", [0.0]),
    },
    {
        "model": "SGD",
        # l1_ratio only selected for penalty = 'elasticnet'
        # epsilon only selected for loss = 'modified_huber'
        # power_t only selected for learning_rate = 'invscaling'
        # eta0 only selected for learning_rate in ['constant', 'invscaling']
        "loss": hp.choice(
            "SGD_loss",
            ["hinge", "log", "modified_huber", "squared_hinge", "perceptron"],
        ),
        "penalty": hp.choice("SGD_penalty", ["l1", "l2", "elasticnet"]),
        "alpha": hp.loguniform("SGD_alpha", np.log(1e-7), np.log(1e-1)),
        "fit_intercept": hp.choice("SGD_fit_intercept", [True]),
        "tol": hp.loguniform("SGD_tol", np.log(1e-5), np.log(1e-1)),
        "learning_rate": hp.choice(
            "SGD_learning_rate", ["constant", "optimal", "invscaling"]
        ),
        "l1_ratio": hp.loguniform("SGD_l1_ratio", np.log(1e-9), np.log(1)),
        "epsilon": hp.loguniform("SGD_epsilon", np.log(1e-5), np.log(1e-1)),
        "eta0": hp.loguniform("SGD_eta0", np.log(1e-7), np.log(1e-1)),
        "power_t": hp.uniform("SGD_power_t", 1e-5, 1),
        "average": hp.choice("SGD_average", [True, False]),
    },
    # self-defined models
    {
        "model": "MLP_Classifier",
        "hidden_layer": scope.int(hp.quniform("MLP_Classifier_hidden_layer", 1, 5, 1)),
        "hidden_size": scope.int(hp.quniform("MLP_Classifier_hidden_size", 1, 20, 1)),
        "activation": hp.choice(
            "MLP_Classifier_activation", ["ReLU", "Tanh", "Sigmoid"]
        ),
        "learning_rate": hp.uniform("MLP_Classifier_learning_rate", 1e-5, 1),
        "optimizer": hp.choice("MLP_Classifier_optimizer", ["Adam", "SGD"]),
        "criteria": hp.choice("MLP_Classifier_criteria", ["CrossEntropy"]),
        "batch_size": hp.choice("MLP_Classifier_batch_size", [16, 32, 64]),
        "num_epochs": scope.int(hp.quniform("MLP_Classifier_num_epochs", 5, 50, 1)),
    },
]
|
PanyiDong/AutoML | My_AutoML/_feature_selection/_base.py | """
File: _base.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_feature_selection/_base.py
File Created: Tuesday, 5th April 2022 11:33:04 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Sunday, 17th April 2022 5:18:18 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
import numbers
import numpy as np
import pandas as pd
import scipy
import scipy.linalg
from sklearn.utils.extmath import stable_cumsum, svd_flip
from My_AutoML._utils import class_means, class_cov, empirical_covariance
import warnings
class PCA_FeatureSelection:

    """
    Principal Component Analysis.

    Use Singular Value Decomposition (SVD) to project data to a lower dimensional
    space, and thus achieve feature selection.

    Methods used:
    Full SVD: LAPACK, scipy.linalg.svd
    Truncated SVD: ARPACK, scipy.sparse.linalg.svds
    Randomized truncated SVD: sklearn.utils.extmath.randomized_svd

    Parameters
    ----------
    n_components: remaining features after selection, default = None
        None keeps min(n, p) components (min(n, p) - 1 for 'truncated').
        With the 'full' solver, a float in (0, 1) selects the smallest number
        of components explaining at least that fraction of total variance.

    solver: the method to perform SVD, default = 'auto'
        all choices ('auto', 'full', 'truncated', 'randomized')

    tol: Tolerance for singular values computed for truncated SVD

    n_iter: Number of iterations for randomized solver, default = 'auto'

    seed: random seed, default = 1
    """

    def __init__(
        self, n_components=None, solver="auto", tol=0.0, n_iter="auto", seed=1
    ):
        self.n_components = n_components
        self.solver = solver
        self.tol = tol
        self.n_iter = n_iter
        self.seed = seed

        self._fitted = False  # set True after a successful fit

    def fit(self, X, y=None):
        """Compute the SVD factors of (centered) X; y is ignored."""
        n, p = X.shape

        # Deal with default n_components = None: keep as many as possible
        if self.n_components is None:
            if self.solver != "truncated":
                n_components = min(n, p)
            else:
                # ARPACK requires k strictly smaller than min(n, p)
                n_components = min(n, p) - 1
        else:
            n_components = self.n_components

        if n_components <= 0:
            raise ValueError("Selection components must be larger than 0!")

        # Resolve the solver: 'auto' picks 'full' for small problems and
        # 'randomized' when only a small fraction of components is requested
        if self.solver == "auto":
            if max(n, p) < 500:
                self.fit_solver = "full"
            elif n_components >= 1 and n_components < 0.8 * min(n, p):
                self.fit_solver = "randomized"
            else:
                self.fit_solver = "full"
        else:
            self.fit_solver = self.solver

        if self.fit_solver == "full":
            self.U_, self.S_, self.V_ = self._fit_full(X, n_components)
        elif self.fit_solver in ["truncated", "randomized"]:
            self.U_, self.S_, self.V_ = self._fit_truncated(
                X, n_components, self.fit_solver
            )
        else:
            raise ValueError("Not recognizing solver = {}!".format(self.fit_solver))

        self._fitted = True

        return self

    def transform(self, X):
        """Return the fitted data projected onto the selected components.

        NOTE(review): this uses the U and S factors stored at fit time, so it
        reproduces the projection of the *training* data; X is only consulted
        for its column labels — confirm intended semantics for new data.
        """
        _features = list(X.columns)

        U = self.U_[:, : self.n_components]

        # X_new = X * V = U * S * Vt * V = U * S
        X_new = U * self.S_[: self.n_components]

        # return dataframe format
        # X_new = pd.DataFrame(U, columns = _features[:self.n_components])

        return X_new

    def _fit_full(self, X, n_components):
        """Exact SVD of centered X via LAPACK (scipy.linalg.svd)."""
        n, p = X.shape

        # Validate n_components: an integer in [0, min(n, p)], or a float in
        # (0, 1) meaning "explain at least this fraction of the variance".
        # (The original rejected all non-integers, which made the float
        # branch below unreachable.)
        if n_components < 0 or n_components > min(n, p):
            raise ValueError(
                "n_components must between 0 and {0:d}, but get {1}".format(
                    min(n, p), n_components
                )
            )
        elif n_components >= 1 and not isinstance(n_components, numbers.Integral):
            raise ValueError(
                "Expect integer n_components, but get {:.6f}".format(n_components)
            )

        # center the data on a copy so the caller's X is not mutated
        self._x_mean = np.mean(X, axis=0)
        X = X - self._x_mean

        # solve for svd
        from scipy.linalg import svd

        U, S, V = svd(X, full_matrices=False)

        # Deterministic sign convention: for each column of U, make the entry
        # of largest magnitude positive, flipping the matching row of V so
        # U @ diag(S) @ V is unchanged.
        # Fix: original used np.argmax(np.max(U), axis=0), which passes a
        # scalar to argmax and fails instead of finding per-column indices.
        max_abs_col = np.argmax(np.abs(U), axis=0)
        signs = np.sign(U[max_abs_col, range(U.shape[1])])
        U *= signs
        V *= signs.reshape(-1, 1)

        _var = (S**2) / (n - 1)
        total_var = _var.sum()
        _var_ratio = _var / total_var

        if 0 < n_components < 1.0:
            # number of components for which the cumulated explained
            # variance percentage is superior to the desired threshold
            # side='right' ensures that number of features selected
            # their variance is always greater than n_components float
            # passed.
            ratio_cumsum = stable_cumsum(_var_ratio)
            n_components = np.searchsorted(ratio_cumsum, n_components, side="right") + 1

        # Compute noise covariance using Probabilistic PCA model
        # The sigma2 maximum likelihood (cf. eq. 12.46)
        if n_components < min(n, p):
            self._noise_variance_ = _var[n_components:].mean()
        else:
            self._noise_variance_ = 0.0

        self.n_samples, self.n_features = n, p
        self.components_ = V[:n_components]
        self.n_components = n_components
        self._var = _var[:n_components]
        self._var_ratio = _var_ratio[:n_components]
        self.singular_values = S[:n_components]

        return U, S, V

    def _fit_truncated(self, X, n_components, solver):
        """Truncated SVD via ARPACK ('truncated') or randomized projections."""
        n, p = X.shape

        # center the data on a copy so the caller's X is not mutated
        self._x_mean = np.mean(X, axis=0)
        X = X - self._x_mean

        if solver == "truncated":

            from scipy.sparse.linalg import svds

            np.random.seed(self.seed)
            # deterministic ARPACK starting vector derived from the seed
            v0 = np.random.uniform(-1, 1, size=min(X.shape))
            U, S, V = svds(X.values, k=n_components, tol=self.tol, v0=v0)
            # svds returns singular values in ascending order; reverse them
            S = S[::-1]

            U, V = svd_flip(U[:, ::-1], V[::-1])
        elif solver == "randomized":

            from sklearn.utils.extmath import randomized_svd

            U, S, V = randomized_svd(
                np.array(X),
                n_components=n_components,
                n_iter=self.n_iter,
                flip_sign=True,
                random_state=self.seed,
            )

        self.n_samples, self.n_features = n, p
        self.components_ = V
        self.n_components = n_components

        # Get variance explained by singular values
        self._var = (S**2) / (n - 1)
        total_var = np.var(X, ddof=1, axis=0)
        self._var_ratio = self._var / total_var.sum()
        self.singular_values = S.copy()  # Store the singular values.

        if self.n_components < min(n, p):
            self._noise_variance_ = total_var.sum() - self._var.sum()
            self._noise_variance_ /= min(n, p) - n_components
        else:
            self._noise_variance_ = 0.0

        return U, S, V
# class LDASelection:
# def __init__(
# self,
# priors=None,
# n_components=None,
# ):
# self.priors = priors
# self.n_components = n_components
# self._fitted = False
# def _eigen(self, X, y):
# self.means_ = class_means(X, y)
# self.covariance_ = class_cov(X, y, self.priors_)
# Sw = self.covariance_ # within scatter
# St = empirical_covariance(X) # total scatter
# Sb = St - Sw # between scatter
# evals, evecs = scipy.linalg.eigh(Sb, Sw)
# self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][
# : self._max_components
# ]
# evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# self.scalings_ = evecs
# self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
# self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(
# self.priors_
# )
# def fit(self, X, y):
# self.classes_ = np.unique(y)
# n, p = X.shape
# if len(self.classes_) == n:
# raise ValueError("Classes must be smaller than number of samples!")
# if self.priors is None: # estimate priors from sample
# _y_uni = np.unique(y) # non-negative ints
# self.priors_ = []
# for _value in _y_uni:
# if isinstance(y, pd.DataFrame):
# self.priors_.append(y.loc[y.values == _value].count()[0] / len(y))
# elif isinstance(y, pd.Series):
# self.priors_.append(y.loc[y.values == _value].count() / len(y))
# self.priors_ = np.asarray(self.priors_)
# else:
# self.priors_ = np.asarray(self.priors)
# if (self.priors_ < 0).any():
# raise ValueError("priors must be non-negative")
# if not np.isclose(self.priors_.sum(), 1.0):
# warnings.warn("The priors do not sum to 1. Renormalizing", UserWarning)
# self.priors_ = self.priors_ / self.priors_.sum()
# max_components = min(
# len(self.classes_) - 1, X.shape[1]
# ) # maximum number of components
# if self.n_components is None:
# self._max_components = max_components
# else:
# if self.n_components > max_components:
# raise ValueError(
# "n_components cannot be larger than min(n_features, n_classes - 1)."
# )
# self._max_components = self.n_components
# self._fitted = True
# return self
# def transform(self, X):
# X_new = np.dot(X, self.scalings_)
# return X_new[:, : self._max_components]
class RBFSampler:

    """
    Implement of Weighted Sums of Random Kitchen Sinks

    Approximates an RBF kernel feature map with random Fourier features:
    cos(X @ W + b) * sqrt(2 / n_components).

    Parameters
    ----------
    gamma: use to determine standard variance of random weight table, default = 1
    Parameter of RBF kernel: exp(-gamma * x^2)

    n_components: number of samples per original feature, default = 100
        capped at the number of input features (with a warning)

    seed: random generation seed, default = None
        None draws a seed from the current time
    """

    def __init__(self, gamma=1.0, n_components=100, seed=1):
        self.gamma = gamma
        self.n_components = n_components
        self.seed = seed

        self._fitted = False  # set True after fit

    def fit(self, X, y=None):
        """Draw the random weights and offsets used by transform; y is ignored."""
        if isinstance(X, list):
            n_features = len(X[0])
        else:
            n_features = X.shape[1]

        if self.n_components > n_features:
            warnings.warn(
                "N_components {} is larger than n_features {}, will set to n_features.".format(
                    self.n_components, n_features
                )
            )
            self.n_components = n_features

        # Fix: the original assigned the return value of np.random.seed()
        # (always None) back to self.seed, and never seeded the generator
        # with a user-supplied seed, so fits were not reproducible.
        if self.seed is None:
            self.seed = int(time.time())
        elif not isinstance(self.seed, int):
            raise ValueError("Seed must be integer, receive {}".format(self.seed))
        np.random.seed(self.seed)

        # random Fourier weights W ~ N(0, 2*gamma) and phases b ~ U[0, 2*pi)
        self._random_weights = np.random.normal(
            0, np.sqrt(2 * self.gamma), size=(n_features, self.n_components)
        )
        self._random_offset = np.random.uniform(
            0, 2 * np.pi, size=(1, self.n_components)
        )

        self._fitted = True

        return self

    def transform(self, X):
        """Project X onto the random features; returns an (n_samples, n_components) array."""
        projection = np.dot(X, self._random_weights)
        projection += self._random_offset
        np.cos(projection, projection)  # in-place cosine
        projection *= np.sqrt(2.0 / self.n_components)

        # return dataframe
        # projection = pd.DataFrame(projection, columns = list(X.columns)[:self.n_components])

        return projection
|
PanyiDong/AutoML | tests/test_hpo/test_hpo.py | """
File: test_hpo.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_hpo/test_hpo.py
File Created: Sunday, 10th April 2022 12:00:04 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Wednesday, 11th May 2022 9:45:00 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
from ray import tune
import My_AutoML
from My_AutoML import load_data
# use command line interaction to run the model
# apparently, same class object called in one test case will not be able
# to run the model correctly after the first time
# detect whether optimal setting exists as method of determining whether
# the model is fitted correctly
# def test_stroke():
# os.system(
# "python main.py --data_folder Appendix --train_data healthcare-dataset-stroke-data --response stroke"
# )
# assert (
# os.path.exists("tmp/healthcare-dataset-stroke-data_model/init.txt") == True
# ), "Classification for Stroke data failed to initiated."
# # assert (
# # mol_heart._fitted == True
# # ), "Classification for Heart data failed to fit."
# assert (
# os.path.exists("tmp/healthcare-dataset-stroke-data_model/optimal_setting.txt")
# == True
# ), "Classification for Stroke data failed to find optimal setting."
def test_objective_1():
    """Smoke test: TabularObjective on the heart data, accuracy objective, with validation."""
    from My_AutoML._hpo._utils import TabularObjective
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._imputation import SimpleImputer
    from My_AutoML._base import no_processing
    from My_AutoML._scaling import Standardize
    from My_AutoML._model import LogisticRegression

    # load the bundled example dataset (this also exercises load_data)
    data = load_data().load("example/example_data", "heart")["heart"]
    response = ["HeartDisease"]
    features = [col for col in data.columns if col not in response]

    # one fixed pipeline configuration for a single objective evaluation
    params = {
        "encoder": {"encoder_1": "DataEncoding"},
        "imputer": {"imputer_1": "SimpleImputer", "SimpleImputer_method": "mean"},
        "balancing": {"balancing_1": "no_processing"},
        "scaling": {"scaling_2": "Standardize"},
        "feature_selection": {"feature_selection_1": "no_processing"},
        "model": {
            "model_17": "LogisticRegression",
            "LogisticRegression_penalty": "l2",
            "LogisticRegression_tol": 1e-4,
            "LogisticRegression_C": 1,
        },
    }

    clf = TabularObjective(params)
    clf.setup(
        params,
        _X=data[features],
        _y=data[response],
        encoder={"DataEncoding": DataEncoding},
        imputer={"SimpleImputer": SimpleImputer},
        balancing={"no_processing": no_processing},
        scaling={"Standardize": Standardize},
        feature_selection={"no_processing": no_processing},
        models={"LogisticRegression": LogisticRegression},
        model_name="obj_1",
        task_mode="classification",
        objective="accuracy",
        validation=True,
        valid_size=0.15,
        full_status=False,
        reset_index=True,
        _iter=1,
        seed=1,
    )
    result = clf.step()
    clf.reset_config(params)

    assert isinstance(result, dict), "Objective function should return a dict."
    assert "loss" in result, "Objective function should return loss."
    assert "fitted_model" in result, "Objective function should return fitted model."
    assert "training_status" in result, "Objective function should return training status."
def test_objective_2():
    """Smoke test: TabularObjective on the heart data, auc objective, no validation split."""
    from My_AutoML._hpo._utils import TabularObjective
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._imputation import SimpleImputer
    from My_AutoML._base import no_processing
    from My_AutoML._scaling import Standardize
    from My_AutoML._model import LogisticRegression

    # load the bundled example dataset (this also exercises load_data)
    data = load_data().load("example/example_data", "heart")["heart"]
    response = ["HeartDisease"]
    features = [col for col in data.columns if col not in response]

    # one fixed pipeline configuration for a single objective evaluation
    params = {
        "encoder": {"encoder_1": "DataEncoding"},
        "imputer": {"imputer_1": "SimpleImputer", "SimpleImputer_method": "mean"},
        "balancing": {"balancing_1": "no_processing"},
        "scaling": {"scaling_2": "Standardize"},
        "feature_selection": {"feature_selection_1": "no_processing"},
        "model": {
            "model_17": "LogisticRegression",
            "LogisticRegression_penalty": "l2",
            "LogisticRegression_tol": 1e-4,
            "LogisticRegression_C": 1,
        },
    }

    clf = TabularObjective(params)
    clf.setup(
        params,
        _X=data[features],
        _y=data[response],
        encoder={"DataEncoding": DataEncoding},
        imputer={"SimpleImputer": SimpleImputer},
        balancing={"no_processing": no_processing},
        scaling={"Standardize": Standardize},
        feature_selection={"no_processing": no_processing},
        models={"LogisticRegression": LogisticRegression},
        model_name="obj_2",
        task_mode="classification",
        objective="auc",
        validation=False,
        valid_size=0.15,
        full_status=False,
        reset_index=True,
        _iter=1,
        seed=1,
    )
    result = clf.step()
    clf.reset_config(params)

    assert isinstance(result, dict), "Objective function should return a dict."
    assert "loss" in result, "Objective function should return loss."
    assert "fitted_model" in result, "Objective function should return fitted model."
    assert "training_status" in result, "Objective function should return training status."
def test_objective_3():
    """Smoke test: TabularObjective on the insurance data, regression with MAE objective."""
    from My_AutoML._hpo._utils import TabularObjective
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._imputation import SimpleImputer
    from My_AutoML._base import no_processing
    from My_AutoML._scaling import Standardize
    from My_AutoML._model import LinearRegression

    # load the bundled example dataset (this also exercises load_data)
    data = load_data().load("example/example_data", "insurance")["insurance"]
    response = ["expenses"]
    features = [col for col in data.columns if col not in response]

    # one fixed pipeline configuration for a single objective evaluation
    params = {
        "encoder": {"encoder_1": "DataEncoding"},
        "imputer": {"imputer_1": "SimpleImputer", "SimpleImputer_method": "mean"},
        "balancing": {"balancing_1": "no_processing"},
        "scaling": {"scaling_2": "Standardize"},
        "feature_selection": {"feature_selection_1": "no_processing"},
        "model": {
            "model_13": "LinearRegression",
        },
    }

    clf = TabularObjective(params)
    clf.setup(
        params,
        _X=data[features],
        _y=data[response],
        encoder={"DataEncoding": DataEncoding},
        imputer={"SimpleImputer": SimpleImputer},
        balancing={"no_processing": no_processing},
        scaling={"Standardize": Standardize},
        feature_selection={"no_processing": no_processing},
        models={"LinearRegression": LinearRegression},
        model_name="obj_3",
        task_mode="regression",
        objective="MAE",
        validation=True,
        valid_size=0.15,
        full_status=False,
        reset_index=True,
        _iter=1,
        seed=1,
    )
    result = clf.step()
    clf.reset_config(params)

    assert isinstance(result, dict), "Objective function should return a dict."
    assert "loss" in result, "Objective function should return loss."
    assert "fitted_model" in result, "Objective function should return fitted model."
    assert "training_status" in result, "Objective function should return training status."
def test_objective_4():
    """Smoke test: TabularObjective on the insurance data, R2 objective, full status output."""
    from My_AutoML._hpo._utils import TabularObjective
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._imputation import SimpleImputer
    from My_AutoML._base import no_processing
    from My_AutoML._scaling import Standardize
    from My_AutoML._model import LinearRegression

    # load the bundled example dataset (this also exercises load_data)
    data = load_data().load("example/example_data", "insurance")["insurance"]
    response = ["expenses"]
    features = [col for col in data.columns if col not in response]

    # one fixed pipeline configuration for a single objective evaluation
    params = {
        "encoder": {"encoder_1": "DataEncoding"},
        "imputer": {"imputer_1": "SimpleImputer", "SimpleImputer_method": "mean"},
        "balancing": {"balancing_1": "no_processing"},
        "scaling": {"scaling_2": "Standardize"},
        "feature_selection": {"feature_selection_1": "no_processing"},
        "model": {
            "model_13": "LinearRegression",
        },
    }

    clf = TabularObjective(params)
    clf.setup(
        params,
        _X=data[features],
        _y=data[response],
        encoder={"DataEncoding": DataEncoding},
        imputer={"SimpleImputer": SimpleImputer},
        balancing={"no_processing": no_processing},
        scaling={"Standardize": Standardize},
        feature_selection={"no_processing": no_processing},
        models={"LinearRegression": LinearRegression},
        model_name="obj_4",
        task_mode="regression",
        objective="R2",
        validation=True,
        valid_size=0.15,
        full_status=True,
        reset_index=True,
        _iter=1,
        seed=1,
    )
    result = clf.step()
    clf.reset_config(params)

    assert isinstance(result, dict), "Objective function should return a dict."
    assert "loss" in result, "Objective function should return loss."
    assert "fitted_model" in result, "Objective function should return fitted model."
    assert "training_status" in result, "Objective function should return training status."
def test_objective_5():
    """Smoke test: TabularObjective on the heart data, precision objective, with validation."""
    from My_AutoML._hpo._utils import TabularObjective
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._imputation import SimpleImputer
    from My_AutoML._base import no_processing
    from My_AutoML._scaling import Standardize
    from My_AutoML._model import LogisticRegression

    # load the bundled example dataset (this also exercises load_data)
    data = load_data().load("example/example_data", "heart")["heart"]
    response = ["HeartDisease"]
    features = [col for col in data.columns if col not in response]

    # one fixed pipeline configuration for a single objective evaluation
    params = {
        "encoder": {"encoder_1": "DataEncoding"},
        "imputer": {"imputer_1": "SimpleImputer", "SimpleImputer_method": "mean"},
        "balancing": {"balancing_1": "no_processing"},
        "scaling": {"scaling_2": "Standardize"},
        "feature_selection": {"feature_selection_1": "no_processing"},
        "model": {
            "model_17": "LogisticRegression",
            "LogisticRegression_penalty": "l2",
            "LogisticRegression_tol": 1e-4,
            "LogisticRegression_C": 1,
        },
    }

    clf = TabularObjective(params)
    clf.setup(
        params,
        _X=data[features],
        _y=data[response],
        encoder={"DataEncoding": DataEncoding},
        imputer={"SimpleImputer": SimpleImputer},
        balancing={"no_processing": no_processing},
        scaling={"Standardize": Standardize},
        feature_selection={"no_processing": no_processing},
        models={"LogisticRegression": LogisticRegression},
        model_name="obj_5",
        task_mode="classification",
        objective="precision",
        validation=True,
        valid_size=0.15,
        full_status=False,
        reset_index=True,
        _iter=1,
        seed=1,
    )
    result = clf.step()
    clf.reset_config(params)

    assert isinstance(result, dict), "Objective function should return a dict."
    assert "loss" in result, "Objective function should return loss."
    assert "fitted_model" in result, "Objective function should return fitted model."
    assert "training_status" in result, "Objective function should return training status."
def test_objective_6():
    """Run one TabularObjective step for classification (hinge, no validation)."""
    from My_AutoML._hpo._utils import TabularObjective
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._imputation import SimpleImputer
    from My_AutoML._base import no_processing
    from My_AutoML._scaling import Standardize
    from My_AutoML._model import LogisticRegression

    # test load_data here
    data = load_data().load("example/example_data", "heart")
    data = data["heart"]

    features = list(data.columns)
    features.remove("HeartDisease")
    response = ["HeartDisease"]

    # one candidate method per pipeline stage
    encoder = {"DataEncoding": DataEncoding}
    imputer = {"SimpleImputer": SimpleImputer}
    balancing = {"no_processing": no_processing}
    scaling = {"Standardize": Standardize}
    feature_selection = {"no_processing": no_processing}
    models = {"LogisticRegression": LogisticRegression}

    params = {
        "encoder": {
            "encoder_1": "DataEncoding",
        },
        "imputer": {
            "imputer_1": "SimpleImputer",
            "SimpleImputer_method": "mean",
        },
        "balancing": {"balancing_1": "no_processing"},
        "scaling": {"scaling_2": "Standardize"},
        "feature_selection": {"feature_selection_1": "no_processing"},
        "model": {
            "model_17": "LogisticRegression",
            "LogisticRegression_penalty": "l2",
            "LogisticRegression_tol": 1e-4,
            "LogisticRegression_C": 1,
        },
    }

    clf = TabularObjective(params)
    clf.setup(
        params,
        _X=data[features],
        _y=data[response],
        encoder=encoder,
        imputer=imputer,
        balancing=balancing,
        scaling=scaling,
        feature_selection=feature_selection,
        models=models,
        model_name="obj_6",
        task_mode="classification",
        objective="hinge",
        validation=False,
        valid_size=0.15,
        full_status=False,
        reset_index=True,
        _iter=1,
        seed=1,
    )
    result = clf.step()
    clf.reset_config(params)

    # idiomatic membership tests: `key in result` instead of `key in result.keys()`
    assert isinstance(result, dict), "Objective function should return a dict."
    assert "loss" in result, "Objective function should return loss."
    assert "fitted_model" in result, "Objective function should return fitted model."
    assert "training_status" in result, "Objective function should return training status."
def test_objective_7():
    """Run one TabularObjective step for classification (f1, no validation)."""
    from My_AutoML._hpo._utils import TabularObjective
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._imputation import SimpleImputer
    from My_AutoML._base import no_processing
    from My_AutoML._scaling import Standardize
    from My_AutoML._model import LogisticRegression

    # test load_data here
    data = load_data().load("example/example_data", "heart")
    data = data["heart"]

    features = list(data.columns)
    features.remove("HeartDisease")
    response = ["HeartDisease"]

    # one candidate method per pipeline stage
    encoder = {"DataEncoding": DataEncoding}
    imputer = {"SimpleImputer": SimpleImputer}
    balancing = {"no_processing": no_processing}
    scaling = {"Standardize": Standardize}
    feature_selection = {"no_processing": no_processing}
    models = {"LogisticRegression": LogisticRegression}

    params = {
        "encoder": {
            "encoder_1": "DataEncoding",
        },
        "imputer": {
            "imputer_1": "SimpleImputer",
            "SimpleImputer_method": "mean",
        },
        "balancing": {"balancing_1": "no_processing"},
        "scaling": {"scaling_2": "Standardize"},
        "feature_selection": {"feature_selection_1": "no_processing"},
        "model": {
            "model_17": "LogisticRegression",
            "LogisticRegression_penalty": "l2",
            "LogisticRegression_tol": 1e-4,
            "LogisticRegression_C": 1,
        },
    }

    clf = TabularObjective(params)
    clf.setup(
        params,
        _X=data[features],
        _y=data[response],
        encoder=encoder,
        imputer=imputer,
        balancing=balancing,
        scaling=scaling,
        feature_selection=feature_selection,
        models=models,
        model_name="obj_7",
        task_mode="classification",
        objective="f1",
        validation=False,
        valid_size=0.15,
        full_status=False,
        reset_index=True,
        _iter=1,
        seed=1,
    )
    result = clf.step()
    clf.reset_config(params)

    # idiomatic membership tests: `key in result` instead of `key in result.keys()`
    assert isinstance(result, dict), "Objective function should return a dict."
    assert "loss" in result, "Objective function should return loss."
    assert "fitted_model" in result, "Objective function should return fitted model."
    assert "training_status" in result, "Objective function should return training status."
def test_objective_8():
    """Run one TabularObjective step for regression (MSE, no validation)."""
    from My_AutoML._hpo._utils import TabularObjective
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._imputation import SimpleImputer
    from My_AutoML._base import no_processing
    from My_AutoML._scaling import Standardize
    from My_AutoML._model import LinearRegression

    # test load_data here
    data = load_data().load("example/example_data", "insurance")
    data = data["insurance"]

    features = list(data.columns)
    features.remove("expenses")
    response = ["expenses"]

    # one candidate method per pipeline stage
    encoder = {"DataEncoding": DataEncoding}
    imputer = {"SimpleImputer": SimpleImputer}
    balancing = {"no_processing": no_processing}
    scaling = {"Standardize": Standardize}
    feature_selection = {"no_processing": no_processing}
    models = {"LinearRegression": LinearRegression}

    params = {
        "encoder": {
            "encoder_1": "DataEncoding",
        },
        "imputer": {
            "imputer_1": "SimpleImputer",
            "SimpleImputer_method": "mean",
        },
        "balancing": {"balancing_1": "no_processing"},
        "scaling": {"scaling_2": "Standardize"},
        "feature_selection": {"feature_selection_1": "no_processing"},
        "model": {
            "model_13": "LinearRegression",
        },
    }

    clf = TabularObjective(params)
    clf.setup(
        params,
        _X=data[features],
        _y=data[response],
        encoder=encoder,
        imputer=imputer,
        balancing=balancing,
        scaling=scaling,
        feature_selection=feature_selection,
        models=models,
        model_name="obj_8",
        task_mode="regression",
        objective="MSE",
        validation=False,
        valid_size=0.15,
        full_status=False,
        reset_index=True,
        _iter=1,
        seed=1,
    )
    result = clf.step()
    clf.reset_config(params)

    # idiomatic membership tests: `key in result` instead of `key in result.keys()`
    assert isinstance(result, dict), "Objective function should return a dict."
    assert "loss" in result, "Objective function should return loss."
    assert "fitted_model" in result, "Objective function should return fitted model."
    assert "training_status" in result, "Objective function should return training status."
def test_objective_9():
    """Run one TabularObjective step for regression (MAX, validation + full status)."""
    from My_AutoML._hpo._utils import TabularObjective
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._imputation import SimpleImputer
    from My_AutoML._base import no_processing
    from My_AutoML._scaling import Standardize
    from My_AutoML._model import LinearRegression

    # test load_data here
    data = load_data().load("example/example_data", "insurance")
    data = data["insurance"]

    features = list(data.columns)
    features.remove("expenses")
    response = ["expenses"]

    # one candidate method per pipeline stage
    encoder = {"DataEncoding": DataEncoding}
    imputer = {"SimpleImputer": SimpleImputer}
    balancing = {"no_processing": no_processing}
    scaling = {"Standardize": Standardize}
    feature_selection = {"no_processing": no_processing}
    models = {"LinearRegression": LinearRegression}

    params = {
        "encoder": {
            "encoder_1": "DataEncoding",
        },
        "imputer": {
            "imputer_1": "SimpleImputer",
            "SimpleImputer_method": "mean",
        },
        "balancing": {"balancing_1": "no_processing"},
        "scaling": {"scaling_2": "Standardize"},
        "feature_selection": {"feature_selection_1": "no_processing"},
        "model": {
            "model_13": "LinearRegression",
        },
    }

    clf = TabularObjective(params)
    clf.setup(
        params,
        _X=data[features],
        _y=data[response],
        encoder=encoder,
        imputer=imputer,
        balancing=balancing,
        scaling=scaling,
        feature_selection=feature_selection,
        models=models,
        model_name="obj_9",
        task_mode="regression",
        objective="MAX",
        validation=True,
        valid_size=0.15,
        full_status=True,
        reset_index=True,
        _iter=1,
        seed=1,
    )
    result = clf.step()
    clf.reset_config(params)

    # idiomatic membership tests: `key in result` instead of `key in result.keys()`
    assert isinstance(result, dict), "Objective function should return a dict."
    assert "loss" in result, "Objective function should return loss."
    assert "fitted_model" in result, "Objective function should return fitted model."
    assert "training_status" in result, "Objective function should return training status."
def test_heart():
    """End-to-end classification on the Heart data via AutoTabular (GridSearch)."""
    # test load_data here
    data = load_data().load("example/example_data", "heart")
    data = data["heart"]

    features = list(data.columns)
    features.remove("HeartDisease")
    response = ["HeartDisease"]

    mol = My_AutoML.AutoTabular(
        model_name="heart",
        search_algo="GridSearch",
        timeout=60,
    )
    mol.fit(data[features], data[response])
    # exercise prediction; the return value itself is not asserted
    mol.predict(data[features])

    # `assert expr` instead of `assert expr == True`
    assert os.path.exists(
        "tmp/heart/init.txt"
    ), "Classification for Heart data failed to initiated."
    assert mol._fitted, "Classification for Heart data failed to fit."
    assert os.path.exists(
        "tmp/heart/optimal_setting.txt"
    ), "Classification for Heart data failed to find optimal setting."
def test_insurance():
    """End-to-end regression on the Insurance data via AutoTabular (MAE objective)."""
    # test load_data here
    data = load_data().load("example/example_data", "insurance")
    data = data["insurance"]

    features = list(data.columns)
    features.remove("expenses")
    response = ["expenses"]

    mol = My_AutoML.AutoTabular(
        model_name="insurance",
        objective="MAE",
        timeout=60,
    )
    mol.fit(data[features], data[response])
    # exercise prediction; the return value itself is not asserted
    mol.predict(data[features])

    # `assert expr` instead of `assert expr == True`
    assert os.path.exists(
        "tmp/insurance/init.txt"
    ), "Regression for Insurance data failed to initiated."
    assert mol._fitted, "Regression for Insurance data failed to fit."
    assert os.path.exists(
        "tmp/insurance/optimal_setting.txt"
    ), "Regression for Insurance data failed to find optimal setting."
def test_insurance_R2():
    """Regression on the Insurance data through AutoTabularBase with the R2 objective."""
    from My_AutoML._hpo._base import AutoTabularBase

    # test load_data here
    data = load_data().load("example/example_data", "insurance")
    data = data["insurance"]

    features = list(data.columns)
    features.remove("expenses")
    response = ["expenses"]

    mol = AutoTabularBase(
        model_name="insurance_R2",
        task_mode="regression",
        objective="R2",
        max_evals=8,
        timeout=60,
    )
    mol.fit(data[features], data[response])

    # `assert expr` instead of `assert expr == True`
    assert os.path.exists(
        "tmp/insurance_R2/init.txt"
    ), "Regression for Insurance data failed to initiated."
    assert mol._fitted, "Regression for Insurance data failed to fit."
    assert os.path.exists(
        "tmp/insurance_R2/optimal_setting.txt"
    ), "Regression for Insurance data failed to find optimal setting."
def test_stroke_import_version():
    """Classification on the Stroke data via AutoTabular with the auc objective."""
    # test load_data here
    data = load_data().load("Appendix", "healthcare-dataset-stroke-data")
    data = data["healthcare-dataset-stroke-data"]

    features = list(data.columns)
    features.remove("stroke")
    response = ["stroke"]

    mol = My_AutoML.AutoTabular(
        model_name="stroke",
        objective="auc",
        timeout=60,
    )
    mol.fit(data[features], data[response])

    # `assert expr` instead of `assert expr == True`
    assert os.path.exists(
        "tmp/stroke/init.txt"
    ), "Classification for Stroke data (import_version) failed to initiated."
    assert mol._fitted, "Classification for Stroke data (import_version) failed to fit."
    assert os.path.exists(
        "tmp/stroke/optimal_setting.txt"
    ), "Classification for Stroke data (import_version) failed to find optimal setting."
def test_stroke_loading():
    """AutoTabular on Stroke data reusing a previously saved model directory."""
    # test load_data here
    data = load_data().load("Appendix", "healthcare-dataset-stroke-data")
    data = data["healthcare-dataset-stroke-data"]

    features = list(data.columns)
    features.remove("stroke")
    response = ["stroke"]

    mol = My_AutoML.AutoTabular(
        model_name="stroke",
        timeout=60,
    )
    mol.fit(data[features], data[response])

    # `assert expr` instead of `assert expr == True`
    assert mol._fitted, "AutoTabular with loading failed to fit."
def test_stroke_with_limit():
    """AutoTabular on Stroke data with a restricted search space and no validation."""
    # test load_data here
    data = load_data().load("Appendix", "healthcare-dataset-stroke-data")
    data = data["healthcare-dataset-stroke-data"]

    features = list(data.columns)
    features.remove("stroke")
    response = ["stroke"]

    mol = My_AutoML.AutoTabular(
        model_name="no_valid",
        encoder=["DataEncoding"],
        imputer=["SimpleImputer"],
        balancing=["no_processing"],
        scaling=["no_processing"],
        feature_selection=["no_processing"],
        models=["DecisionTree"],
        validation=False,
        search_algo="GridSearch",
        objective="precision",
        timeout=60,
    )
    mol.fit(data[features], data[response])

    # `assert expr` instead of `assert expr == True`
    assert mol._fitted, "AutoTabular with limited space failed to fit."
|
PanyiDong/AutoML | My_AutoML/_feature_selection/_imported.py | <gh_stars>1-10
"""
File: _imported.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_feature_selection/_imported.py
File Created: Tuesday, 5th April 2022 11:38:17 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:21:05 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import warnings
import numpy as np
from functools import partial
######################################################################################################################
# Modified Feature Selection from autosklearn
class Densifier:
    """
    from autosklearn.pipeline.components.feature_preprocessing.densifier import Densifier

    Converts a scipy sparse matrix into a dense numpy array; dense input
    passes through unchanged.

    Parameters
    ----------
    seed: random seed, default = 1
    """

    def __init__(self, seed=1):
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y=None):
        # stateless transformer: nothing to learn
        return self

    def transform(self, X):
        from scipy import sparse

        # densify only when the input is actually sparse
        return X.todense().getA() if sparse.issparse(X) else X
class ExtraTreesPreprocessorClassification:
    """
    from autosklearn.pipeline.components.feature_preprocessing.extra_trees_preproc_for_classification import ExtraTreesPreprocessorClassification
    using sklearn.ensemble.ExtraTreesClassifier

    Fits an ExtraTrees classifier and keeps the features whose importance is
    above the mean importance (SelectFromModel with threshold="mean").

    Parameters
    ----------
    n_estimators: Number of trees in forest, default = 100
    criterion: Function to measure the quality of a split, default = 'gini'
        supported ("gini", "entropy")
    min_samples_leaf: Minimum number of samples required to be at a leaf node, default = 1
    min_samples_split: Minimum number of samples required to split a node, default = 2
    max_features: Number of features to consider, default = 'auto'
        supported ("auto", "sqrt", "log2") or a numeric exponent e, in which
        case int(n_features ** e) features are considered
    bootstrap: Whether bootstrap samples, default = False
    max_leaf_nodes: Maximum number of leaf nodes accepted, default = None
    max_depth: Maximum depth of the tree, default = None
    min_weight_fraction_leaf: Minimum weighted fraction of the sum total of weights, default = 0.0
    min_impurity_decrease: Threshold to split if this split induces a decrease of the impurity, default = 0.0
    oob_score: Whether to use out-of-bag samples, default = False
    n_jobs: Parallel jobs to run, default = 1
    verbose: Controls the verbosity, default = 0
    class_weight: Weights associated with classes, default = None
        supported ("balanced", "balanced_subsample"), dict or list of dicts
    seed: random seed, default = 1
    """

    def __init__(
        self,
        n_estimators=100,
        criterion="gini",
        min_samples_leaf=1,
        min_samples_split=2,
        max_features="auto",
        bootstrap=False,
        max_leaf_nodes=None,
        max_depth=None,
        min_weight_fraction_leaf=0.0,
        min_impurity_decrease=0.0,
        oob_score=False,
        n_jobs=1,
        verbose=0,
        class_weight=None,
        seed=1,
    ):
        self.n_estimators = n_estimators
        self.estimator_increment = 10
        self.criterion = criterion
        self.min_samples_leaf = min_samples_leaf
        self.min_samples_split = min_samples_split
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.max_leaf_nodes = max_leaf_nodes
        self.max_depth = max_depth
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.min_impurity_decrease = min_impurity_decrease
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.class_weight = class_weight
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y, sample_weight=None):
        """Fit the ExtraTrees selector; returns self."""
        import sklearn.ensemble
        import sklearn.feature_selection

        # hyperparameters may arrive as strings from the search space;
        # coerce to the types sklearn expects
        self.n_estimators = int(self.n_estimators)
        self.max_leaf_nodes = (
            None if self.max_leaf_nodes is None else int(self.max_leaf_nodes)
        )
        self.max_depth = None if self.max_depth is None else int(self.max_depth)
        self.bootstrap = True if self.bootstrap is True else False
        self.n_jobs = int(self.n_jobs)
        self.min_impurity_decrease = float(self.min_impurity_decrease)
        self.min_samples_leaf = int(self.min_samples_leaf)
        self.min_samples_split = int(self.min_samples_split)
        self.verbose = int(self.verbose)

        # BUG FIX: the documented string values ("auto", "sqrt", "log2")
        # previously crashed on float("auto").  Forward strings to sklearn
        # ("auto" is mapped to "sqrt", its classifier equivalent, since it
        # was removed in recent scikit-learn); a numeric value keeps the
        # original exponent semantics int(n_features ** max_features).
        if isinstance(self.max_features, str) and self.max_features in (
            "auto",
            "sqrt",
            "log2",
        ):
            max_features = "sqrt" if self.max_features == "auto" else self.max_features
        else:
            max_features = int(X.shape[1] ** float(self.max_features))

        estimator = sklearn.ensemble.ExtraTreesClassifier(
            n_estimators=self.n_estimators,
            criterion=self.criterion,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            bootstrap=self.bootstrap,
            max_features=max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            min_impurity_decrease=self.min_impurity_decrease,
            oob_score=self.oob_score,
            n_jobs=self.n_jobs,
            verbose=self.verbose,
            random_state=self.seed,
            class_weight=self.class_weight,
        )
        estimator.fit(X, y, sample_weight=sample_weight)
        # keep only features with importance above the mean importance
        self.preprocessor = sklearn.feature_selection.SelectFromModel(
            estimator=estimator, threshold="mean", prefit=True
        )
        return self

    def transform(self, X):
        """Reduce X to the selected features; fit() must be called first."""
        if self.preprocessor is None:
            raise NotImplementedError
        return self.preprocessor.transform(X)
class ExtraTreesPreprocessorRegression:
    """
    from autosklearn.pipeline.components.feature_preprocessing.extra_trees_preproc_for_regression import ExtraTreesPreprocessorRegression
    using sklearn.ensemble.ExtraTreesRegressor

    Fits an ExtraTrees regressor and keeps the features whose importance is
    above the mean importance (SelectFromModel with threshold="mean").

    Parameters
    ----------
    n_estimators: Number of trees in forest, default = 100
    criterion: Function to measure the quality of a split, default = 'squared_error'
        supported ("squared_error", "mse", "friedman_mse", "absolute_error", "mae")
    min_samples_leaf: Minimum number of samples required to be at a leaf node, default = 1
    min_samples_split: Minimum number of samples required to split a node, default = 2
    max_features: Number of features to consider, default = 'auto'
        supported ("auto", "sqrt", "log2") or a numeric fraction f, in which
        case int(f * (log(n_features) + 1)) features (at most half) are used
    bootstrap: Whether bootstrap samples, default = False
    max_leaf_nodes: Maximum number of leaf nodes accepted, default = None
    max_depth: Maximum depth of the tree, default = None
    min_weight_fraction_leaf: Minimum weighted fraction of the sum total of weights, default = 0.0
    oob_score: Whether to use out-of-bag samples, default = False
    n_jobs: Parallel jobs to run, default = 1
    verbose: Controls the verbosity, default = 0
    seed: random seed, default = 1
    """

    def __init__(
        self,
        n_estimators=100,
        criterion="squared_error",
        min_samples_leaf=1,
        min_samples_split=2,
        max_features="auto",
        bootstrap=False,
        max_leaf_nodes=None,
        max_depth=None,
        min_weight_fraction_leaf=0.0,
        oob_score=False,
        n_jobs=1,
        verbose=0,
        seed=1,
    ):
        self.n_estimators = n_estimators
        self.estimator_increment = 10
        # BUG FIX: the accepted set previously excluded the default
        # "squared_error" (and "absolute_error"), so constructing with the
        # defaults always raised ValueError
        if criterion not in (
            "squared_error",
            "mse",
            "friedman_mse",
            "absolute_error",
            "mae",
        ):
            raise ValueError(
                "'criterion' is not in ('squared_error', 'mse', 'friedman_mse', "
                "'absolute_error', 'mae'): %s" % criterion
            )
        self.criterion = criterion
        self.min_samples_leaf = min_samples_leaf
        self.min_samples_split = min_samples_split
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.max_leaf_nodes = max_leaf_nodes
        self.max_depth = max_depth
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y):
        """Fit the ExtraTrees selector; returns self."""
        import sklearn.ensemble
        import sklearn.feature_selection

        # hyperparameters may arrive as strings from the search space;
        # coerce to the types sklearn expects
        self.n_estimators = int(self.n_estimators)
        self.min_samples_leaf = int(self.min_samples_leaf)
        self.min_samples_split = int(self.min_samples_split)
        self.bootstrap = True if self.bootstrap is True else False
        self.n_jobs = int(self.n_jobs)
        self.verbose = int(self.verbose)
        self.max_leaf_nodes = (
            None if self.max_leaf_nodes is None else int(self.max_leaf_nodes)
        )
        # BUG FIX: max_depth was previously coerced from max_leaf_nodes
        # (copy-paste error)
        self.max_depth = None if self.max_depth is None else int(self.max_depth)
        self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)

        num_features = X.shape[1]
        # BUG FIX: the documented string values ("auto", "sqrt", "log2")
        # previously crashed on float("auto"); forward strings to sklearn
        # ("auto" maps to 1.0, the regressor equivalent of all features)
        if isinstance(self.max_features, str) and self.max_features in (
            "auto",
            "sqrt",
            "log2",
        ):
            max_features = 1.0 if self.max_features == "auto" else self.max_features
        else:
            self.max_features = float(self.max_features)
            max_features = int(float(self.max_features) * (np.log(num_features) + 1))
            # Use at most half of the features
            max_features = max(1, min(int(X.shape[1] / 2), max_features))

        estimator = sklearn.ensemble.ExtraTreesRegressor(
            n_estimators=self.n_estimators,
            criterion=self.criterion,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            bootstrap=self.bootstrap,
            max_features=max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            oob_score=self.oob_score,
            n_jobs=self.n_jobs,
            verbose=self.verbose,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            random_state=self.seed,
        )
        estimator.fit(X, y)
        # keep only features with importance above the mean importance
        self.preprocessor = sklearn.feature_selection.SelectFromModel(
            estimator=estimator, threshold="mean", prefit=True
        )
        return self

    def transform(self, X):
        """Reduce X to the selected features; fit() must be called first."""
        if self.preprocessor is None:
            raise NotImplementedError
        return self.preprocessor.transform(X)
class FastICA:
    """
    from autosklearn.pipeline.components.feature_preprocessing.fast_ica import FastICA

    Parameters
    ----------
    algorithm: Apply parallel or deflational algorithm, default = 'parallel'
        supported ('parallel', 'deflation')
    whiten: If false, no whitening is performed, default = True
    fun: Functional form of the G function used, default = 'logcosh'
        supported ('logcosh', 'exp', 'cube') or callable
    n_components: Number of components to retain, default = None
    seed: random seed, default = 1
    """

    def __init__(
        self,
        algorithm="parallel",
        whiten=True,
        fun="logcosh",
        n_components=None,
        seed=1,
    ):
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.n_components = n_components
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y=None):
        import sklearn.decomposition

        # retain all components when None; otherwise force an int count
        if self.n_components is not None:
            self.n_components = int(self.n_components)

        self.preprocessor = sklearn.decomposition.FastICA(
            n_components=self.n_components,
            algorithm=self.algorithm,
            fun=self.fun,
            whiten=self.whiten,
            random_state=self.seed,
        )
        # Make the RuntimeWarning an Exception!
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "error", message="array must not contain infs or NaNs"
            )
            try:
                self.preprocessor.fit(X)
            except ValueError as e:
                if "array must not contain infs or NaNs" in e.args[0]:
                    raise ValueError(
                        "Bug in scikit-learn: "
                        "https://github.com/scikit-learn/scikit-learn/pull/2738"
                    )
        return self

    def transform(self, X):
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)
class FeatureAgglomeration:
    """
    from autosklearn.pipeline.components.feature_preprocessing.feature_agglomeration import FeatureAgglomeration

    Parameters
    ----------
    n_clusters: Number of clusters, default = 2
    affinity: Metric used to compute the linkage, default = 'euclidean'
        supported ("euclidean", "l1", "l2", "manhattan", "cosine", or 'precomputed')
    linkage: Linkage criterion, default = 'ward'
        supported ("ward", "complete", "average", "single")
    pooling_func: Combines the values of agglomerated features into a single value, default = np.mean
        may also be given by name: "mean", "median" or "max"
    seed: random seed, default = 1
    """

    def __init__(
        self,
        n_clusters=2,
        affinity="euclidean",
        linkage="ward",
        pooling_func=np.mean,
        seed=1,
    ):
        self.n_clusters = n_clusters
        self.affinity = affinity
        self.linkage = linkage
        self.pooling_func = pooling_func
        self.seed = seed
        # lookup table used to resolve string-valued pooling_func in fit()
        self.pooling_func_mapping = dict(mean=np.mean, median=np.median, max=np.max)
        self.preprocessor = None

    def fit(self, X, y=None):
        import sklearn.cluster

        self.n_clusters = int(self.n_clusters)
        # never request more clusters than there are features
        n_clusters = min(self.n_clusters, X.shape[1])
        # resolve "mean"/"median"/"max" names to the numpy callables
        if not callable(self.pooling_func):
            self.pooling_func = self.pooling_func_mapping[self.pooling_func]

        self.preprocessor = sklearn.cluster.FeatureAgglomeration(
            n_clusters=n_clusters,
            affinity=self.affinity,
            linkage=self.linkage,
            pooling_func=self.pooling_func,
        )
        self.preprocessor.fit(X)
        return self

    def transform(self, X):
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)
class KernelPCA:
    """
    from autosklearn.pipeline.components.feature_preprocessing.kernel_pca import KernelPCA
    using sklearn.decomposition.KernelPCA

    Parameters
    ----------
    n_components: number of features to retain, default = None
    kernel: kernel used, default = 'linear'
        supported (linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed')
    degree: Degree for poly kernels, default = 3
    gamma: Kernel coefficient, default = 0.25
    coef0: Independent term in poly and sigmoid kernels, default = 0.0
    seed: random seed, default = 1
    """

    def __init__(
        self,
        n_components=None,
        kernel="linear",
        degree=3,
        gamma=0.25,
        coef0=0.0,
        seed=1,
    ):
        self.n_components = n_components
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.seed = seed
        # set by fit(); transform() raises until then
        self.preprocessor = None

    def fit(self, X, y=None):
        """Fit KernelPCA on X; raises ValueError if every component is removed."""
        import sklearn.decomposition

        # hyperparameters may arrive as strings from the search space;
        # coerce to the numeric types sklearn expects
        self.n_components = (
            None if self.n_components is None else int(self.n_components)
        )
        self.degree = int(self.degree)
        self.gamma = float(self.gamma)
        self.coef0 = float(self.coef0)
        self.preprocessor = sklearn.decomposition.KernelPCA(
            n_components=self.n_components,
            kernel=self.kernel,
            degree=self.degree,
            gamma=self.gamma,
            coef0=self.coef0,
            remove_zero_eig=True,
            random_state=self.seed,
        )
        # promote any warning raised during fitting to an exception
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            self.preprocessor.fit(X)
        # with remove_zero_eig=True every component may have been dropped
        # NOTE(review): alphas_ / lambdas_ are deprecated aliases of
        # eigenvectors_ / eigenvalues_ in newer scikit-learn — confirm the
        # pinned sklearn version still exposes them
        if len(self.preprocessor.alphas_ / self.preprocessor.lambdas_) == 0:
            raise ValueError("All features removed.")
        return self

    def transform(self, X):
        """Project X onto the fitted components; errors if nothing survives."""
        if self.preprocessor is None:
            raise NotImplementedError()
        # warnings during the projection are promoted to exceptions as well
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            _X = self.preprocessor.transform(X)

            if _X.shape[1] == 0:
                raise ValueError("KernelPCA removed all features!")

            return _X
class RandomKitchenSinks:
    """
    from autosklearn.pipeline.components.feature_preprocessing.kitchen_sinks import RandomKitchenSinks
    using sklearn.kernel_approximation.RBFSampler

    Parameters
    ----------
    gamma: use to determine standard variance of random weight table, default = 1
        Parameter of RBF kernel: exp(-gamma * x^2).
    n_components: number of samples per original feature, default = 100
    seed: random seed, default = 1
    """

    def __init__(self, gamma=1.0, n_components=100, seed=1):
        self.gamma = gamma
        self.n_components = n_components
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y=None):
        """Fit the RBF sampler on X; returns self."""
        import sklearn.kernel_approximation

        self.n_components = int(self.n_components)
        self.gamma = float(self.gamma)
        # BUG FIX: pass arguments by keyword — RBFSampler's parameters are
        # keyword-only in recent scikit-learn, so the positional call
        # (gamma, n_components, seed) raises a TypeError there
        self.preprocessor = sklearn.kernel_approximation.RBFSampler(
            gamma=self.gamma,
            n_components=self.n_components,
            random_state=self.seed,
        )
        self.preprocessor.fit(X)
        return self

    def transform(self, X):
        """Project X with the fitted RBF sampler; fit() must be called first."""
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)
class LibLinear_Preprocessor:
    """
    from autosklearn.pipeline.components.feature_preprocessing.liblinear_svc_preprocessor import LibLinear_Preprocessor
    using import sklearn.svm, from sklearn.feature_selection import SelectFromModel

    Fits a linear SVC and keeps the features whose coefficients exceed the
    mean importance.

    Parameters
    ----------
    penalty: Norm used in the penalization, default = 'l2'
        supported ('l1', 'l2')
    loss: Loss function, default = 'squared_hinge'
        supported ('hinge', 'squared_hinge')
    dual: Whether to solve the dual or primal, default = True
    tol: Stopping criteria, default = 1e-4
    C: Regularization parameter, default = 1.0
    multi_class: Multi-class strategy, default = 'ovr'
        supported ('ovr', 'crammer_singer')
    fit_intercept: Whether to calculate the intercept, default = True
    intercept_scaling: Intercept scaling rate, default = 1
    class_weight: Class weight, default = None
        supported dict or 'balanced'
    seed: random seed, default = 1
    """

    def __init__(
        self,
        penalty="l2",
        loss="squared_hinge",
        dual=True,
        tol=1e-4,
        C=1.0,
        multi_class="ovr",
        fit_intercept=True,
        intercept_scaling=1,
        class_weight=None,
        seed=1,
    ):
        self.penalty = penalty
        self.loss = loss
        self.dual = dual
        self.tol = tol
        self.C = C
        self.multi_class = multi_class
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y, sample_weight=None):
        import sklearn.svm
        from sklearn.feature_selection import SelectFromModel

        # hyperparameters may come in as strings; force numeric types
        self.C = float(self.C)
        self.tol = float(self.tol)
        self.intercept_scaling = float(self.intercept_scaling)

        svc = sklearn.svm.LinearSVC(
            penalty=self.penalty,
            loss=self.loss,
            dual=self.dual,
            tol=self.tol,
            C=self.C,
            class_weight=self.class_weight,
            fit_intercept=self.fit_intercept,
            intercept_scaling=self.intercept_scaling,
            multi_class=self.multi_class,
            random_state=self.seed,
        )
        svc.fit(X, y)

        # select features with above-mean coefficient importance
        self.preprocessor = SelectFromModel(
            estimator=svc, threshold="mean", prefit=True
        )
        return self

    def transform(self, X):
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)
class Nystroem:
    """
    from autosklearn.pipeline.components.feature_preprocessing.nystroem_sampler import Nystroem
    using sklearn.kernel_approximation.Nystroem

    Parameters
    ----------
    kernel: Kernel map to be approximated, default = 'rbf'
        supported: ('additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf', 'laplacian',
        'sigmoid', 'cosine' )
    n_components: Number of features to retain, default = 100
    gamma: Gamma parameter, default = 1.0
    degree: Degree of the polynomial kernel, default = 3
    coef0: Zero coefficient for polynomial and sigmoid kernels, default = 1
    seed: random seed, default = 1
    """

    def __init__(
        self, kernel="rbf", n_components=100, gamma=1.0, degree=3, coef0=1, seed=1
    ):
        self.kernel = kernel
        # BUG FIX: these assignments previously ended with trailing commas,
        # storing one-element tuples; fit()'s int()/float() coercions then
        # raised TypeError.  Store plain scalars.
        self.n_components = n_components
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y=None):
        """Fit the Nystroem kernel approximation on X; returns self."""
        import sklearn.kernel_approximation

        # hyperparameters may arrive as strings; coerce to numeric types
        self.n_components = int(self.n_components)
        self.gamma = float(self.gamma)
        self.degree = int(self.degree)
        self.coef0 = float(self.coef0)

        # the chi2 kernel is only defined for non-negative input
        if self.kernel == "chi2":
            X[X < 0] = 0.0

        self.preprocessor = sklearn.kernel_approximation.Nystroem(
            kernel=self.kernel,
            n_components=self.n_components,
            gamma=self.gamma,
            degree=self.degree,
            coef0=self.coef0,
            random_state=self.seed,
        )
        self.preprocessor.fit(X)
        return self

    def transform(self, X):
        """Apply the fitted kernel approximation; fit() must be called first."""
        if self.preprocessor is None:
            raise NotImplementedError()
        # the chi2 kernel is only defined for non-negative input
        if self.kernel == "chi2":
            X[X < 0] = 0.0
        return self.preprocessor.transform(X)
class PCA:
    """
    from autosklearn.pipeline.components.feature_preprocessing.pca import PCA
    using sklearn.decomposition.PCA

    Parameters
    ----------
    n_components: numer of features to retain, default = None
        all features will be retained
    whiten: default = False
        if True, the `components_` vectors will be modified to ensure uncorrelated outputs
    seed: random seed, default = 1
    """

    def __init__(self, n_components=None, whiten=False, seed=1):
        self.n_components = n_components
        self.whiten = whiten
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y=None):
        import sklearn.decomposition

        # keep all components when None, otherwise coerce to an int count
        if self.n_components is not None:
            self.n_components = int(self.n_components)

        self.preprocessor = sklearn.decomposition.PCA(
            n_components=self.n_components, whiten=self.whiten, copy=True
        )
        self.preprocessor.fit(X)
        return self

    def transform(self, X):
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)
class PolynomialFeatures:
    """
    Generate polynomial and interaction features.

    from autosklearn.pipeline.components.feature_preprocessing.polynomial import PolynomialFeatures
    using sklearn.preprocessing.PolynomialFeatures

    Parameters
    ----------
    degree: degree of polynomial features, default = 2
    interaction_only: whether to include only interaction terms, default = False
    include_bias: whether to include a bias term, default = True
    seed: random seed, default = 1
    """

    def __init__(self, degree=2, interaction_only=False, include_bias=True, seed=1):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y):
        import sklearn.preprocessing

        self.degree = int(self.degree)

        self.preprocessor = sklearn.preprocessing.PolynomialFeatures(
            degree=self.degree,
            interaction_only=self.interaction_only,
            include_bias=self.include_bias,
        )
        self.preprocessor.fit(X, y)
        # BUG FIX: the original fit() did not return self, unlike every
        # sibling preprocessor in this file; return it for chaining.
        return self

    def transform(self, X):
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)
class RandomTreesEmbedding:
    """
    Transform features using a forest of totally random trees.

    from autosklearn.pipeline.components.feature_preprocessing.random_trees_embedding import RandomTreesEmbedding
    using sklearn.ensemble.RandomTreesEmbedding

    Parameters
    ----------
    n_estimators: number of trees in the forest to train, default = 100
    max_depth: maximum depth of the tree, default = 5
    min_samples_split: minimum number of samples required to split a node, default = 2
    min_samples_leaf: minimum number of samples required to be at a leaf node, default = 1
    min_weight_fraction_leaf: minimum weighted fraction of the sum total of weights, default = 0.
    max_leaf_nodes: maximum number of leaf nodes, default = None (unlimited)
    bootstrap: whether to bootstrap samples, default = False
        NOTE(review): this flag is not forwarded to the sklearn estimator;
        it is kept only for interface compatibility.
    sparse_output: whether to return a sparse matrix, default = False (dense)
    n_jobs: number of jobs run in parallel, default = 1
    seed: random seed, default = 1
    """

    def __init__(
        self,
        n_estimators=100,
        max_depth=5,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_leaf_nodes=None,
        bootstrap=False,
        sparse_output=False,
        n_jobs=1,
        seed=1,
    ):
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_leaf_nodes = max_leaf_nodes
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.bootstrap = bootstrap
        self.sparse_output = sparse_output
        self.n_jobs = n_jobs
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y=None):
        import sklearn.ensemble

        self.n_estimators = int(self.n_estimators)
        self.max_depth = int(self.max_depth)
        self.min_samples_split = int(self.min_samples_split)
        self.min_samples_leaf = int(self.min_samples_leaf)
        # BUG FIX: the default max_leaf_nodes is None, and the original
        # unconditional int(None) raised TypeError; keep None as-is.
        self.max_leaf_nodes = (
            None if self.max_leaf_nodes is None else int(self.max_leaf_nodes)
        )
        self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)
        # BUG FIX: the original `True if self.bootstrap is None else False`
        # inverted the flag (bootstrap=True became False); coerce to bool.
        self.bootstrap = bool(self.bootstrap)

        self.preprocessor = sklearn.ensemble.RandomTreesEmbedding(
            n_estimators=self.n_estimators,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            max_leaf_nodes=self.max_leaf_nodes,
            sparse_output=self.sparse_output,
            n_jobs=self.n_jobs,
            random_state=self.seed,
        )
        self.preprocessor.fit(X)
        return self

    def transform(self, X):
        # `is None` identity check instead of the original `== None`
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)
"""
from autosklearn.pipeline.components.feature_preprocessing.select_percentile import SelectPercentileBase
using sklearn.feature_selection.SelectPercentile
"""
class SelectPercentileClassification:
    """
    Keep the top percentile of features scored by a classification metric.

    from autosklearn.pipeline.components.feature_preprocessing.select_percentile_classification import SelectPercentileClassification
    using sklearn.feature_selection.SelectPercentile

    Parameters
    ----------
    percentile: percent of features to keep, default = 10
    score_func: default = 'chi2'
        supported: ('chi2', 'f_classif', 'mutual_info_classif')
    seed: random seed, default = 1
    """

    def __init__(self, percentile=10, score_func="chi2", seed=1):
        self.percentile = int(float(percentile))
        self.seed = seed
        self.preprocessor = None

        import sklearn.feature_selection

        fs = sklearn.feature_selection
        if score_func == "chi2":
            self.score_func = fs.chi2
        elif score_func == "f_classif":
            self.score_func = fs.f_classif
        elif score_func == "mutual_info_classif":
            # bind the seed so repeated fits are reproducible
            self.score_func = partial(
                fs.mutual_info_classif, random_state=self.seed
            )
        else:
            raise ValueError(
                'Not recognizing score_func, supported ("chi2", "f_classif", "mutual_info_classif", \
get {})'.format(
                    score_func
                )
            )

    def fit(self, X, y):
        import sklearn.feature_selection

        # chi2 is undefined for negative feature values; clamp in place
        if self.score_func == sklearn.feature_selection.chi2:
            X[X < 0] = 0

        self.preprocessor = sklearn.feature_selection.SelectPercentile(
            score_func=self.score_func, percentile=self.percentile
        )
        self.preprocessor.fit(X, y)
        return self

    def transform(self, X):
        import sklearn.feature_selection

        if self.preprocessor is None:
            raise NotImplementedError()
        if self.score_func == sklearn.feature_selection.chi2:
            X[X < 0] = 0
        selected = self.preprocessor.transform(X)
        if selected.shape[1] == 0:
            raise ValueError("All features removed.")
        return selected
class SelectPercentileRegression:
    """
    Keep the top percentile of features scored by a regression metric.

    from autosklearn.pipeline.components.feature_preprocessing.select_percentile_regression import SelectPercentileRegression
    using sklearn.feature_selection.SelectPercentile

    Parameters
    ----------
    percentile: percent of features to keep, default = 10
    score_func: default = 'f_regression'
        supported: ('f_regression', 'mutual_info_regression')
    seed: random seed, default = 1
    """

    def __init__(self, percentile=10, score_func="f_regression", seed=1):
        self.percentile = int(float(percentile))
        self.seed = seed

        import sklearn.feature_selection

        if score_func == "f_regression":
            self.score_func = sklearn.feature_selection.f_regression
        elif score_func == "mutual_info_regression":
            # bind the seed so repeated fits are reproducible
            self.score_func = partial(
                sklearn.feature_selection.mutual_info_regression, random_state=self.seed
            )
            # NOTE: a dead `self.mode = "percentile"` assignment copied from
            # the SelectRates classes was removed here; SelectPercentile has
            # no `mode` parameter and the attribute was never read.
        else:
            raise ValueError(
                'Not recognizing score_func, only support ("f_regression", "mutual_info_regression"), \
get {}'.format(
                    score_func
                )
            )

        self.preprocessor = None

    def fit(self, X, y):
        import sklearn.feature_selection

        self.preprocessor = sklearn.feature_selection.SelectPercentile(
            score_func=self.score_func, percentile=self.percentile
        )
        self.preprocessor.fit(X, y)
        return self

    def transform(self, X):
        if self.preprocessor is None:
            raise NotImplementedError()
        _X = self.preprocessor.transform(X)
        if _X.shape[1] == 0:
            # unlike the classification variant this only warns; callers
            # may still receive an empty feature matrix
            warnings.warn("All features removed.")
        return _X
class SelectClassificationRates:
    """
    Univariate feature selection with a configurable selection mode (classification).

    from autosklearn.pipeline.components.feature_preprocessing.select_rates_classification import SelectClassificationRates
    using sklearn.feature_selection.GenericUnivariateSelect

    Parameters
    ----------
    alpha: parameter of corresponding mode, default = 1e-5
    mode: feature selection mode, default = 'fpr'
        supported mode ('percentile', 'k_best', 'fpr', 'fdr', 'fwe')
    score_func: default = 'chi2'
        supported mode ('chi2', 'f_classif', 'mutual_info_classif')
    seed: random seed, default = 1
    """

    def __init__(self, alpha=1e-5, mode="fpr", score_func="chi2", seed=1):
        self.alpha = alpha
        self.mode = mode
        self.seed = seed
        self.preprocessor = None

        import sklearn.feature_selection

        fs = sklearn.feature_selection
        if score_func == "chi2":
            self.score_func = fs.chi2
        elif score_func == "f_classif":
            self.score_func = fs.f_classif
        elif score_func == "mutual_info_classif":
            self.score_func = partial(
                fs.mutual_info_classif, random_state=self.seed
            )
            # mode is forced to 'percentile' for mutual information
            # (behavior inherited from the original) — TODO confirm intent
            self.mode = "percentile"
        else:
            raise ValueError(
                'Not recognizing score_func, supported ("chi2", "f_classif", "mutual_info_classif", \
get {})'.format(
                    score_func
                )
            )

    def fit(self, X, y):
        import sklearn.feature_selection

        self.alpha = float(self.alpha)

        # chi2 is undefined for negative feature values; clamp in place
        if self.score_func == sklearn.feature_selection.chi2:
            X[X < 0] = 0

        self.preprocessor = sklearn.feature_selection.GenericUnivariateSelect(
            score_func=self.score_func, param=self.alpha, mode=self.mode
        )
        self.preprocessor.fit(X, y)
        return self

    def transform(self, X):
        import sklearn.feature_selection

        if self.score_func == sklearn.feature_selection.chi2:
            X[X < 0] = 0
        if self.preprocessor is None:
            raise NotImplementedError()
        selected = self.preprocessor.transform(X)
        if selected.shape[1] == 0:
            warnings.warn("All features removed.")
        return selected
class SelectRegressionRates:
    """
    Univariate feature selection with a configurable selection mode (regression).

    from autosklearn.pipeline.components.feature_preprocessing.select_rates_regression import SelectRegressionRates
    using sklearn.feature_selection.GenericUnivariateSelect

    Parameters
    ----------
    alpha: parameter of corresponding mode, default = 1e-5
    mode: feature selection mode, default = 'percentile'
        supported mode ('percentile', 'k_best', 'fpr', 'fdr', 'fwe')
    score_func: default = 'f_regression'
        supported mode ('f_regression', 'mutual_info_regression')
    seed: random seed, default = 1
    """

    def __init__(
        self, alpha=1e-5, mode="percentile", score_func="f_regression", seed=1
    ):
        self.alpha = alpha
        self.mode = mode
        self.seed = seed
        self.preprocessor = None

        import sklearn.feature_selection

        fs = sklearn.feature_selection
        if score_func == "f_regression":
            self.score_func = fs.f_regression
        elif score_func == "mutual_info_regression":
            self.score_func = partial(
                fs.mutual_info_regression, random_state=self.seed
            )
            # mode is forced to 'percentile' for mutual information
            # (behavior inherited from the original) — TODO confirm intent
            self.mode = "percentile"
        else:
            raise ValueError(
                'Not recognizing score_func, only support ("f_regression", "mutual_info_regression"), \
get {}'.format(
                    score_func
                )
            )

    def fit(self, X, y):
        import sklearn.feature_selection

        self.preprocessor = sklearn.feature_selection.GenericUnivariateSelect(
            score_func=self.score_func, param=float(self.alpha), mode=self.mode
        )
        self.preprocessor.fit(X, y)
        return self

    def transform(self, X):
        if self.preprocessor is None:
            raise NotImplementedError()
        selected = self.preprocessor.transform(X)
        if selected.shape[1] == 0:
            warnings.warn("All features removed.")
        return selected
class TruncatedSVD:
    """
    Dimensionality reduction via truncated singular value decomposition.

    from autosklearn.pipeline.components.feature_preprocessing.truncatedSVD import TruncatedSVD
    Truncated SVD using sklearn.decomposition.TruncatedSVD

    Parameters
    ----------
    n_components: number of features to retain, default = None
        with None it is set to p - 1, and it is capped at p - 1 for any input
        (TruncatedSVD requires n_components < n_features)
    seed: random seed, default = 1
    """

    def __init__(self, n_components=None, seed=1):
        self.n_components = n_components
        self.seed = seed
        self.preprocessor = None

    def fit(self, X, y):
        # identity check instead of the original `== None` comparison
        if self.n_components is None:
            self.n_components = X.shape[1] - 1
        else:
            self.n_components = int(self.n_components)
        # cap n_components: the solver needs strictly fewer components
        # than input features
        n_components = min(self.n_components, X.shape[1] - 1)

        # local import shadows this wrapper class only inside fit()
        from sklearn.decomposition import TruncatedSVD

        self.preprocessor = TruncatedSVD(
            n_components, algorithm="randomized", random_state=self.seed
        )
        self.preprocessor.fit(X, y)
        return self

    def transform(self, X):
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)
######################################################################################################################
|
PanyiDong/AutoML | My_AutoML/_hyperparameters/__init__.py | <reponame>PanyiDong/AutoML
"""
File: __init__.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/__init__.py
File Created: Tuesday, 5th April 2022 11:01:43 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:24:36 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from ._ray._encoder_hyperparameter import encoder_hyperparameter
from ._ray._imputer_hyperparameter import imputer_hyperparameter
from ._ray._scaling_hyperparameter import scaling_hyperparameter
from ._ray._balancing_hyperparameter import balancing_hyperparameter
from ._ray._feature_selection_hyperparameter import (
feature_selection_hyperparameter,
)
from ._ray._classifier_hyperparameter import classifier_hyperparameter
from ._ray._regressor_hyperparameter import regressor_hyperparameter
"""
Notice for designing hyperparameters space:
1. tune.qrandint (ray) allows inclusive lower/upper bound, when interact
with scope.int(hp.quniform) (hyperopt), which is exclusive upper bound,
be careful to use at least two step sizes in the hyperparameter space.
2. When designing hyperparameters space, make sure all hyperparameters
are wrapped in tune methods (even it's not a choice, like a method name),
unless, ray.tune can ignore those hyperparameters and causes further error.
However, since we need to determine whether to contain the hyperparameters
space for the method in search space, which is determined by string comparison,
the method names are stored as string and after comparison in code, it will
be converted in to a tune.choice (with only one choice).
3. As discussed in issue 1, https://github.com/PanyiDong/My_AutoML/issues/1
HyperOpt search algorithm option will convert the ray.tune space into a hyperopt
space, which can be problematic when the same hyperparameter names are used in those
methods. So, the default hyperparameter space is designed as: the method names
contain a order indication (suffix "_1", "_2", ...) and hyperparameter names
contain a method name prefix ("KNNClassifier_", "MLPClassifier_", ...). When
reading in, those suffixes/prefixes are removed in a processing step and becomes
method readable strings.
"""
"""
Classifiers/Hyperparameters from autosklearn:
1. AdaBoost: n_estimators, learning_rate, algorithm, max_depth
2. Bernoulli naive Bayes: alpha, fit_prior
3. Decision Tree: criterion, max_features, max_depth_factor, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf, max_leaf_nodes, min_impurity_decrease
4. Extra Trees: criterion, min_samples_leaf, min_samples_split, max_features,
bootstrap, max_leaf_nodes, max_depth, min_weight_fraction_leaf, min_impurity_decrease
5. Gaussian naive Bayes
6. Gradient boosting: loss, learning_rate, min_samples_leaf, max_depth,
max_leaf_nodes, max_bins, l2_regularization, early_stop, tol, scoring
7. KNN: n_neighbors, weights, p
8. LDA: shrinkage, tol
9. Linear SVC (LibLinear): penalty, loss, dual, tol, C, multi_class,
fit_intercept, intercept_scaling
10. kernel SVC (LibSVM): C, kernel, gamma, shrinking, tol, max_iter
11. MLP (Multilayer Perceptron): hidden_layer_depth, num_nodes_per_layer, activation, alpha,
learning_rate_init, early_stopping, solver, batch_size, n_iter_no_change, tol,
shuffle, beta_1, beta_2, epsilon
12. Multinomial naive Bayes: alpha, fit_prior
13. Passive aggressive: C, fit_intercept, tol, loss, average
14. QDA: reg_param
15. Random forest: criterion, max_features, max_depth, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf, bootstrap, max_leaf_nodes
16. SGD (Stochastic Gradient Descent): loss, penalty, alpha, fit_intercept, tol,
learning_rate
"""
"""
Regressors/Hyperparameters from sklearn:
1. AdaBoost: n_estimators, learning_rate, loss, max_depth
2. Ard regression: n_iter, tol, alpha_1, alpha_2, lambda_1, lambda_2,
threshold_lambda, fit_intercept
3. Decision tree: criterion, max_features, max_depth_factor,
min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
max_leaf_nodes, min_impurity_decrease
4. extra trees: criterion, min_samples_leaf, min_samples_split,
max_features, bootstrap, max_leaf_nodes, max_depth,
min_weight_fraction_leaf, min_impurity_decrease
5. Gaussian Process: alpha, thetaL, thetaU
6. Gradient boosting: loss, learning_rate, min_samples_leaf, max_depth,
max_leaf_nodes, max_bins, l2_regularization, early_stop, tol, scoring
7. KNN: n_neighbors, weights, p
8. Linear SVR (LibLinear): loss, epsilon, dual, tol, C, fit_intercept,
intercept_scaling
9. Kernel SVR (LibSVM): kernel, C, epsilon, tol, shrinking
10. Random forest: criterion, max_features, max_depth, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf, bootstrap,
max_leaf_nodes, min_impurity_decrease
11. SGD (Stochastic Gradient Descent): loss, penalty, alpha, fit_intercept, tol,
learning_rate
12. MLP (Multilayer Perceptron): hidden_layer_depth, num_nodes_per_layer,
activation, alpha, learning_rate_init, early_stopping, solver,
batch_size, n_iter_no_change, tol, shuffle, beta_1, beta_2, epsilon
"""
|
PanyiDong/AutoML | My_AutoML/_model/_gam.py | """
File: _gam.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_model/_gam.py
File Created: Friday, 15th April 2022 8:18:07 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 7:18:39 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class GAM_Classifier:
    """
    Generalized additive model classifier (pygam backend).

    Parameters
    ----------
    type: model family of the GAM, default = "logistic"
        currently only "logistic" (pygam.LogisticGAM) is supported
    tol: convergence tolerance passed to the pygam model, default = 1e-4
    """

    def __init__(
        self,
        type="logistic",
        tol=1e-4,
    ):
        self.type = type
        self.tol = tol
        self._fitted = False  # set True after a successful fit

    def fit(self, X, y):
        if self.type == "logistic":
            from pygam import LogisticGAM

            self.model = LogisticGAM(tol=self.tol)
        else:
            # BUG FIX: the original silently fell through and crashed on
            # the undefined self.model below; fail fast with a clear error
            raise ValueError(
                'Only type = "logistic" is supported, get {}'.format(self.type)
            )

        self.model.fit(X, y)
        self._fitted = True

        return self

    def predict(self, X):
        return self.model.predict(X)

    def predict_proba(self, X):
        return self.model.predict_proba(X)
class GAM_Regressor:
    """
    Generalized additive model regressor (pygam backend).

    Parameters
    ----------
    type: model family of the GAM, default = "linear"
        supported: ("linear", "gamma", "poisson", "inverse_gaussian")
    tol: convergence tolerance passed to the pygam model, default = 1e-4
    """

    def __init__(
        self,
        type="linear",
        tol=1e-4,
    ):
        self.type = type
        self.tol = tol
        self._fitted = False  # set True after a successful fit

    def fit(self, X, y):
        if self.type == "linear":
            from pygam import LinearGAM

            self.model = LinearGAM(tol=self.tol)
        elif self.type == "gamma":
            from pygam import GammaGAM

            self.model = GammaGAM(tol=self.tol)
        elif self.type == "poisson":
            from pygam import PoissonGAM

            self.model = PoissonGAM(tol=self.tol)
        elif self.type == "inverse_gaussian":
            from pygam import InvGaussGAM

            self.model = InvGaussGAM(tol=self.tol)
        else:
            # BUG FIX: the original silently fell through and crashed on
            # the undefined self.model below; fail fast with a clear error
            raise ValueError(
                'Only support type in ("linear", "gamma", "poisson", "inverse_gaussian"), get {}'.format(
                    self.type
                )
            )

        self.model.fit(X, y)
        self._fitted = True

        return self

    def predict(self, X):
        return self.model.predict(X)

    def predict_proba(self, X):
        raise NotImplementedError("predict_proba is not implemented for regression.")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.