repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
PanyiDong/AutoML | archive/_discard.py | <filename>archive/_discard.py
"""
File: _discard.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Relative Path: /archive/_discard.py
File Created: Friday, 25th February 2022 6:13:42 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Wednesday, 6th April 2022 12:31:20 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import shutil
import glob
import numpy as np
import pandas as pd
import json
import warnings
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error
import autosklearn
from autosklearn.pipeline.components.feature_preprocessing.no_preprocessing import (
NoPreprocessing,
)
from autosklearn.pipeline.components.feature_preprocessing.densifier import Densifier
from autosklearn.pipeline.components.feature_preprocessing.extra_trees_preproc_for_classification import (
ExtraTreesPreprocessorClassification,
)
from autosklearn.pipeline.components.feature_preprocessing.extra_trees_preproc_for_regression import (
ExtraTreesPreprocessorRegression,
)
from autosklearn.pipeline.components.feature_preprocessing.fast_ica import FastICA
from autosklearn.pipeline.components.feature_preprocessing.feature_agglomeration import (
FeatureAgglomeration,
)
from autosklearn.pipeline.components.feature_preprocessing.kernel_pca import KernelPCA
from autosklearn.pipeline.components.feature_preprocessing.kitchen_sinks import (
RandomKitchenSinks,
)
from autosklearn.pipeline.components.feature_preprocessing.liblinear_svc_preprocessor import (
LibLinear_Preprocessor,
)
from autosklearn.pipeline.components.feature_preprocessing.nystroem_sampler import (
Nystroem,
)
from autosklearn.pipeline.components.feature_preprocessing.pca import PCA
from autosklearn.pipeline.components.feature_preprocessing.polynomial import (
PolynomialFeatures,
)
from autosklearn.pipeline.components.feature_preprocessing.random_trees_embedding import (
RandomTreesEmbedding,
)
from autosklearn.pipeline.components.feature_preprocessing.select_percentile import (
SelectPercentileBase,
)
from autosklearn.pipeline.components.feature_preprocessing.select_percentile_classification import (
SelectPercentileClassification,
)
from autosklearn.pipeline.components.feature_preprocessing.select_percentile_regression import (
SelectPercentileRegression,
)
from autosklearn.pipeline.components.feature_preprocessing.select_rates_classification import (
SelectClassificationRates,
)
from autosklearn.pipeline.components.feature_preprocessing.select_rates_regression import (
SelectRegressionRates,
)
from autosklearn.pipeline.components.feature_preprocessing.truncatedSVD import (
TruncatedSVD,
)
from sklearn.ensemble import (
AdaBoostClassifier,
ExtraTreesClassifier,
GradientBoostingClassifier,
RandomForestClassifier,
)
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import (
LinearDiscriminantAnalysis,
QuadraticDiscriminantAnalysis,
)
from sklearn.svm import LinearSVC, SVC
from sklearn.linear_model import PassiveAggressiveClassifier, SGDClassifier
from sklearn.neural_network import MLPClassifier
# R environment
import rpy2
import rpy2.robjects as ro
from rpy2.robjects import Formula, pandas2ri
from rpy2.robjects.conversion import localconverter
from rpy2.robjects.packages import importr
from sklearn.tree import ExtraTreeClassifier
from My_AutoML._encoding import DataEncoding
# Registry of auto-sklearn feature preprocessing components, keyed by the
# component name auto-sklearn expects in its ``include`` argument
# (see feature_selection._data_test, which passes
# include={"feature_preprocessor": [name]}). database_test validates
# requested method names against these keys.
_feature_mol: dict = {
    "no_preprocessing": NoPreprocessing,
    "densifier": Densifier,
    "extra_trees_preproc_for_classification": ExtraTreesPreprocessorClassification,
    "extra_trees_preproc_for_regression": ExtraTreesPreprocessorRegression,
    "fast_ica": FastICA,
    "feature_agglomeration": FeatureAgglomeration,
    "kernel_pca": KernelPCA,
    "kitchen_sinks": RandomKitchenSinks,
    "liblinear_svc_preprocessor": LibLinear_Preprocessor,
    "nystroem_sampler": Nystroem,
    "pca": PCA,
    "polynomial": PolynomialFeatures,
    "random_trees_embedding": RandomTreesEmbedding,
    "select_percentile": SelectPercentileBase,
    "select_percentile_classification": SelectPercentileClassification,
    "select_percentile_regression": SelectPercentileRegression,
    "select_rates_classification": SelectClassificationRates,
    "select_rates_regression": SelectRegressionRates,
    "truncatedSVD": TruncatedSVD,
}
class feature_selection:

    """
    Benchmark feature selection methods by model performance.

    For every dataset in a database, fit an auto-sklearn estimator either
    once without restricting preprocessors (``skip = True``) or once per
    feature selection method (``skip = False``), recording train/test
    metrics under a temporary folder.

    Not yet ready.

    Parameters
    ----------
    test_perc: fraction of samples held out for testing, default = 0.15

    task_type: "classification" or "regression", default = "classification"

    seed: random seed for the split and for auto-sklearn, default = 1

    _base_info: path of the json file describing the datasets

    feature_mol: feature selection methods to test, default = "all"
        "all" expands to every key of ``_feature_mol``

    temp_loc: root folder for temporary results, default = "tmp/"

    time_left_for_this_task: total seconds to find/tune models, default = 120

    per_run_time_limit: seconds per model fit, default = 5

    ensemble_size: auto-sklearn ensemble size, default = 50

    skip: if True, fit once without per-method comparison, default = False
    """

    def __init__(
        self,
        test_perc=0.15,
        task_type="classification",
        seed=1,
        _base_info="My_AutoML/_database_info.json",
        feature_mol="all",
        temp_loc="tmp/",
        time_left_for_this_task=120,
        per_run_time_limit=5,
        ensemble_size=50,
        skip=False,
    ):
        self.test_perc = test_perc
        self.task_type = task_type
        self.seed = seed
        self._base_info = _base_info
        self.feature_mol = feature_mol
        # guarantee a trailing slash so later path concatenation is safe
        self.temp_loc = temp_loc if temp_loc.endswith("/") else temp_loc + "/"
        # total time in seconds to find and tune the models
        self.time_left_for_this_task = time_left_for_this_task
        # time in seconds to fit machine learning models per call
        self.per_run_time_limit = per_run_time_limit
        self.ensemble_size = ensemble_size
        self.skip = skip

    def database_test(self, database):
        """Run the benchmark on every dataset of ``database`` (name -> dataframe)."""
        # expand "all" to every registered feature selection method
        if self.feature_mol == "all":
            self.feature_mol = [*_feature_mol]
        # reject unknown method names early
        for _mol in self.feature_mol:
            if _mol not in _feature_mol:
                raise ValueError(
                    "{0} not available! All possible models are: {1}.".format(
                        _mol, [*_feature_mol]
                    )
                )
        # get the database information (from json file)
        with open(self._base_info) as info_file:
            _base_info = json.load(info_file)
        # loop through the database, testing feature selection performance
        for _name in [*database]:
            if self.skip:
                self._skip_data_test(database[_name], _name, _base_info)
            else:
                self._data_test(database[_name], _name, _base_info)

    def _get_response(self, data_name, base_info):
        # look up the response column(s) registered for this dataset/task
        data_info = next(item for item in base_info if item["filename"] == data_name)
        return next(
            item
            for item in data_info["property"]
            if item["task_type"] == self.task_type
        )["response"]

    @staticmethod
    def _make_empty_folder(folder):
        # (re)create an empty folder, discarding any previous results
        if os.path.isdir(folder):
            shutil.rmtree(folder)
        os.makedirs(folder)

    @staticmethod
    def _split_features_response(data, response):
        # feature list = all columns minus the response column(s);
        # for multi-response data only the first response is evaluated
        features = list(data.columns)
        if isinstance(response, list):
            for _response in response:
                features.remove(_response)
            response = response[0]
        else:
            features.remove(response)
        return features, response

    def _write_data_info(self, path, data_name, features, response, shape_line):
        # record the basic dataset description
        with open(path, "a") as data_info_file:
            data_info_file.write("Data name: {}\n".format(data_name))
            data_info_file.write("Task type: {}\n".format(self.task_type))
            data_info_file.write(shape_line)
            data_info_file.write("Features: {}\n".format(features))
            data_info_file.write("Response: {}\n".format(response))

    def _train_test_split(self, data, features, response):
        # preprocess string columns, then split; the fixed seed keeps the
        # split reproducible across feature selection methods
        preprocessor = DataEncoding(dummy_coding=False, transform=False)
        new_data = preprocessor.fit(data[features])
        return train_test_split(
            new_data,
            data[[response]],
            test_size=self.test_perc,
            random_state=self.seed,
        )

    def _build_automl(self, tmp_folder, include=None):
        # construct the auto-sklearn estimator matching the task type
        settings = dict(
            seed=self.seed,
            time_left_for_this_task=self.time_left_for_this_task,
            per_run_time_limit=self.per_run_time_limit,
            ensemble_size=self.ensemble_size,
            tmp_folder=tmp_folder,
            delete_tmp_folder_after_terminate=False,
        )
        if include is not None:
            settings["include"] = include
        if self.task_type == "classification":
            return autosklearn.classification.AutoSklearnClassifier(**settings)
        elif self.task_type == "regression":
            return autosklearn.regression.AutoSklearnRegressor(**settings)
        raise ValueError("Unknown task_type: {}.".format(self.task_type))

    def _write_results(self, path, y_train, fitting, y_test, predictions):
        # record train/test metrics; accuracy is only defined for
        # classification targets (it raises on continuous predictions)
        with open(path, "a") as result_file:
            if self.task_type == "classification":
                result_file.write(
                    "Train accuracy: {}\n".format(accuracy_score(y_train, fitting))
                )
            result_file.write(
                "Train MAE: {}\n".format(mean_absolute_error(y_train, fitting))
            )
            result_file.write(
                "Train MSE: {}\n".format(mean_squared_error(y_train, fitting))
            )
            if self.task_type == "classification":
                result_file.write(
                    "Test accuracy: {}\n".format(accuracy_score(y_test, predictions))
                )
            result_file.write(
                "Test MAE: {}\n".format(mean_absolute_error(y_test, predictions))
            )
            result_file.write(
                "Test MSE: {}\n".format(mean_squared_error(y_test, predictions))
            )

    def _skip_data_test(self, data, data_name, base_info):
        """Fit one unrestricted auto-sklearn model on ``data`` and record metrics."""
        tmp_folder = "{0}{1}_temp".format(self.temp_loc, data_name)
        response = self._get_response(data_name, base_info)
        # create empty temp folder to store the performance results
        self._make_empty_folder(tmp_folder)
        if response == "None":
            warnings.warn(
                "{0} not available for {1}.".format(data_name, self.task_type)
            )
            return
        features, response = self._split_features_response(data, response)
        # write basic information to data_info file
        self._write_data_info(
            tmp_folder + "/data_info.txt",
            data_name,
            features,
            response,
            "Number of samples: {},\nNumber of features: {}\n".format(
                data[features].shape[0], data[features].shape[1]
            ),
        )
        X_train, X_test, y_train, y_test = self._train_test_split(
            data, features, response
        )
        automl = self._build_automl(tmp_folder + "/Auto-sklearn temp")
        automl.fit(X_train, y_train, dataset_name=data_name)
        fitting = automl.predict(X_train)
        predictions = automl.predict(X_test)
        self._write_results(
            "{}/result.txt".format(tmp_folder), y_train, fitting, y_test, predictions
        )

    def _data_test(self, data, data_name, base_info):
        """Fit one auto-sklearn model per feature selection method and record metrics."""
        tmp_folder = "{0}{1}_temp/feature_selection".format(self.temp_loc, data_name)
        response = self._get_response(data_name, base_info)
        # create empty temp folder to store feature_selection performance
        self._make_empty_folder(tmp_folder)
        if response == "None":
            warnings.warn(
                "{0} not available for {1}.".format(data_name, self.task_type)
            )
            return
        features, response = self._split_features_response(data, response)
        # write basic information to data_info file
        self._write_data_info(
            tmp_folder + "/data_info.txt",
            data_name,
            features,
            response,
            "Number of samples: {},\nNumber of features: {}\n".format(
                data[features].shape[0], data[features].shape[1]
            ),
        )
        # per-method results live in a dedicated sub-folder
        new_info_folder = tmp_folder + "/selected_data_info"
        self._make_empty_folder(new_info_folder)
        for _mol in self.feature_mol:
            X_train, X_test, y_train, y_test = self._train_test_split(
                data, features, response
            )
            self._write_data_info(
                "{}/{}_info.txt".format(new_info_folder, _mol),
                data_name,
                features,
                response,
                "Number of train samples: {},\nNumber of test samples: {},\nNumber of features: {}\n".format(
                    X_train.shape[0], X_test.shape[0], X_train.shape[1]
                ),
            )
            # restrict auto-sklearn to the single feature preprocessor under test
            automl = self._build_automl(
                "{}/{}".format(new_info_folder, _mol),
                include={"feature_preprocessor": [_mol]},
            )
            automl.fit(X_train, y_train, dataset_name=data_name)
            fitting = automl.predict(X_train)
            predictions = automl.predict(X_test)
            self._write_results(
                "{}/{}_result.txt".format(new_info_folder, _mol),
                y_train,
                fitting,
                y_test,
                predictions,
            )
|
PanyiDong/AutoML | My_AutoML/_utils/_data.py | """
File: _data.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_utils/_data.py
File Created: Wednesday, 6th April 2022 12:01:26 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 10:08:05 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import ast
import json
import numpy as np
import pandas as pd
from ._base import random_index
# string list to list
def str2list(item):
    """Parse a string representation of a Python literal (e.g. "[1, 2]") into
    the corresponding object; return ``item`` unchanged if it cannot be parsed.
    """
    try:
        return ast.literal_eval(item)
    # narrow exceptions instead of a bare except: literal_eval raises
    # ValueError/SyntaxError on malformed input and TypeError on non-strings
    except (ValueError, SyntaxError, TypeError):
        return item
# string dict to dict
def str2dict(item):
    """Parse a JSON string into the corresponding object; return ``item``
    unchanged if it is not valid JSON.
    """
    try:
        return json.loads(item)
    # narrow exceptions instead of a bare except: json.loads raises
    # JSONDecodeError (a ValueError) on bad JSON and TypeError on non-strings
    except (ValueError, TypeError):
        return item
# Train test split using test set percentage
def train_test_split(X, y, test_perc=0.15, seed=1):
    """
    Split ``X``/``y`` into train and test parts using a shuffled index.

    return order: X_train, X_test, y_train, y_test

    Parameters
    ----------
    X, y: pandas objects indexed positionally (``.iloc``)
    test_perc: fraction of samples assigned to the test split, default = 0.15
    seed: seed passed to random_index for a reproducible shuffle, default = 1
    """
    n = len(X)
    index_list = random_index(n, seed=seed)
    valid_index = index_list[: int(test_perc * n)]
    # sorted() makes the train ordering deterministic; the original
    # list(set - set) relied on unspecified set iteration order
    train_index = sorted(set(range(n)) - set(valid_index))
    return (
        X.iloc[train_index],
        X.iloc[valid_index],
        y.iloc[train_index],
        y.iloc[valid_index],
    )
# transform between numpy array and pandas dataframe
# to deal with some problems where dataframe will be converted to array using sklearn objects
# transform between numpy array and pandas dataframe
# to deal with some problems where dataframe will be converted to array using sklearn objects
class as_dataframe:
    """Round-trip converter between a pandas DataFrame and its numpy values,
    remembering the column names so the DataFrame can be rebuilt later."""

    def __init__(self):
        self.design_matrix = None  # values of the converted dataframe
        self.columns = None  # column heads of the converted dataframe

    def to_array(self, X):
        """Store the values/columns of DataFrame ``X`` and return the values array.

        Raises TypeError if ``X`` is not a pandas DataFrame.
        """
        if not isinstance(X, pd.DataFrame):
            raise TypeError("Input should be dataframe!")
        self.design_matrix = X.values
        self.columns = list(X.columns)
        return self.design_matrix

    def to_df(self, X=None, columns=None):
        """Rebuild a DataFrame from array ``X`` (default: the stored matrix)
        and ``columns`` (default: the stored column names).

        Raises TypeError for a non-array ``X`` and ValueError when the number
        of columns does not match the array width.
        """
        # identity checks replace the original truthiness / elementwise
        # ``== None`` comparisons, which needed a try/except workaround
        if X is None:
            X = self.design_matrix  # reuse the values recorded by to_array
        elif not isinstance(X, np.ndarray):
            raise TypeError("Input should be numpy array!")
        if columns is None:
            columns = self.columns
        if len(columns) != X.shape[1]:
            raise ValueError(
                "Columns of array {} does not match length of columns {}!".format(
                    X.shape[1], len(columns)
                )
            )
        return pd.DataFrame(X, columns=columns)
# formatting the type of features in a dataframe
# to ensure the consistency of the features,
# avoid class type (encoded as int) becomes continuous type
# older version
# class formatting:
# def __init__(self, allow_str=False):
# self.allow_str = allow_str
# self.category_table = None
# def fit(self, X):
# # get dtype of the features
# self.dtype_table = X.dtypes.values
# if not self.allow_str: # if not allow str, transform string types to int
# for i in range(len(self.dtype_table)):
# if self.dtype_table[i] == object:
# self.dtype_table[i] = np.int64
# return self
# def transform(self, X):
# for i in range(len(self.dtype_table)):
# X.iloc[:, i] = X.iloc[:, i].astype(self.dtype_table[i])
# return X
# new version of formatting
class formatting:
    """
    Format numerical/categorical columns

    ``fit`` records each column's dtype and factorizes non-numerical columns
    to integer codes; ``refit`` restores the recorded dtypes/categories.

    Parameters
    ----------
    columns: columns to format; empty list means every column of X at fit time
    numerics: dtype names treated as numerical columns
    nas: different types of missing values
    allow_string: whether allow string to store in dataframe, default = False
    inplace: whether to replace dataset in fit step, default = True
    Example
    -------
    >> a = pd.DataFrame({
    >> 'column_1': [1, 2, 3, np.nan],
    >> 'column_2': ['John', np.nan, 'Amy', 'John'],
    >> 'column_3': [np.nan, '3/12/2000', '3/13/2000', np.nan]
    >> })
    >> formatter = formatting(columns = ['column_1', 'column_2'], inplace = True)
    >> formatter.fit(a)
    >> print(a)
    column_1  column_2   column_3
    0       1.0       0.0        NaN
    1       2.0       NaN  3/12/2000
    2       3.0       1.0  3/13/2000
    3       NaN       0.0        NaN
    >> a.loc[2, 'column_2'] = 2.6
    >> formatter.refit(a)
    >> print(a)
    column_1 column_2   column_3
    0       1.0      Amy        NaN
    1       2.0      NaN  3/12/2000
    2       3.0      Amy  3/13/2000
    3       NaN     John        NaN
    """
    # NOTE(review): the list defaults below are shared mutable defaults;
    # they are never mutated here, but None/tuple defaults would be safer.
    def __init__(
        self,
        columns=[],
        numerics=["int16", "int32", "int64", "float16", "float32", "float64"],
        nas=[np.nan, None, "nan", "NaN", "NA", "novalue", "None", "none"],
        allow_string=False,
        inplace=True,
    ):
        self.columns = columns
        self.numerics = numerics
        self.nas = nas
        self.allow_string = allow_string  # stored; not referenced in this class
        self.inplace = inplace
        self.type_table = {}  # store type of every column
        self.unique_table = {}  # store unique values of categorical columns
    # factorize data without changing values in nas
    # pd.factorize will automatically convert missing values
    def factorize(self, data):
        """Map the categories of Series ``data`` to integer codes 0..k-1,
        leaving missing markers untouched, and record the categories."""
        # get all unique values, including missing types
        raw_unique = pd.unique(data)
        # remove missing types
        # since nan != nan, convert it to string for comparison
        unique_values = [item for item in raw_unique if str(item) not in self.nas]
        # add unique values to unique_table
        self.unique_table[data.name] = unique_values
        # create categorical-numerical table
        unique_map = {}
        for idx, item in enumerate(unique_values):
            unique_map[item] = idx
        # mapping categorical to numerical
        data = data.replace(unique_map)
        return data
    # make sure the category seen in observed data
    # NOTE(review): the parameter shadows the builtin ``list``; also,
    # np.argmin returns the *index* of the nearest entry, which only equals
    # the code itself because refit passes np.arange(...) — confirm before
    # reusing with any other list
    def unify_cate(self, x, list):
        if not x in list and str(x) not in self.nas:
            x = np.argmin(np.abs([item - x for item in list]))
        return x
    def fit(self, X):
        """Record column dtypes and factorize non-numerical columns
        (modifying ``X`` in place when ``inplace`` is True)."""
        # make sure input is a dataframe
        if not isinstance(X, pd.DataFrame):
            try:
                X = pd.DataFrame(X)
            except:
                raise TypeError("Expect a dataframe, get {}.".format(type(X)))
        # if not specified, get all columns
        self.columns = list(X.columns) if not self.columns else self.columns
        for _column in self.columns:
            self.type_table[_column] = X[_column].dtype
            # convert categorical to numerics
            # (pandas dtypes compare equal to their string names in numerics)
            if X[_column].dtype not in self.numerics:
                if self.inplace:
                    X[_column] = self.factorize(X[_column])
                else:
                    # still record the unique table, but leave X untouched
                    self.factorize(X[_column])
    def refit(self, X):
        """Restore the dtypes/categories recorded by ``fit`` on ``X``;
        returns the restored frame when ``inplace`` is False."""
        for _column in self.columns:
            # if numerical, refit the dtype
            if self.type_table[_column] in self.numerics:
                X[_column] = X[_column].astype(self.type_table[_column])
            else:
                # if column originally belongs to categorical,
                # but converted to numerical, convert back
                if X[_column].dtype in self.numerics:
                    # get all possible unique values in unique_table
                    unique_num = np.arange(len(self.unique_table[_column]))
                    # make sure all values have seen in unique_table
                    X[_column] = X[_column].apply(
                        lambda x: self.unify_cate(x, unique_num)
                    )
                    # get unique category mapping, from numerical-> categorical
                    unique_map = dict(zip(unique_num, self.unique_table[_column]))
                    X[_column] = X[_column].map(
                        unique_map
                    )  # convert numerical-> categorical
                # refit dtype, for double checking
                X[_column] = X[_column].astype(self.type_table[_column])
        if not self.inplace:
            return X
def unify_nan(dataset, columns=None, nas=("novalue", "None", "none"), replace=False):
    """
    Unify different representations of missing values to np.nan.

    Parameters
    ----------
    dataset: input dataframe (columns are overwritten in place when
        ``replace`` is True)
    columns: columns to unify; default = None
        by default, every column containing any of ``nas`` is included
    nas: values treated as missing, default = ("novalue", "None", "none")
    replace: whether to replace the missing columns, default = False
        if False, a new column with the "_useNA" suffix is created instead

    Example
    -------
    >> data = np.arange(15).reshape(5, 3)
    >> data = pd.DataFrame(data, columns = ['column_1', 'column_2', 'column_3'])
    >> data.loc[:, 'column_1'] = 'novalue'
    >> data.loc[3, 'column_2'] = 'None'
    >> data = unify_nan(data)
    >> data
      column_1 column_2  column_3  column_1_useNA  column_2_useNA
    0  novalue        1         2             NaN             1.0
    1  novalue        4         5             NaN             4.0
    2  novalue        7         8             NaN             7.0
    3  novalue     None        11             NaN             NaN
    4  novalue       13        14             NaN            13.0
    """
    # immutable/None defaults replace the original mutable list defaults.
    # if not specified, every column containing nas values is included
    if not columns:
        columns = [
            column
            for column in list(dataset.columns)
            if dataset[column].isin(nas).any()  # check any values in nas
        ]
    # a single column given as a string is promoted to a list
    if isinstance(columns, str):
        columns = [columns]
    # map every missing marker to np.nan
    nas_dict = {_nas: np.nan for _nas in nas}
    # unify the nan values
    for _column in columns:
        if replace:  # if replace, overwrite the column with the unified one
            dataset[_column] = dataset[_column].replace(nas_dict)
        else:
            dataset[_column + "_useNA"] = dataset[_column].replace(nas_dict)
    return dataset
def remove_index_columns(
    data, index=None, columns=None, axis=1, threshold=1, reset_index=True, save=False
):
    """
    Delete columns/indexes whose missing percentage reaches ``threshold``,
    since such columns/indexes provide limited/no information.

    Parameters
    ----------
    data: input data
    index: index range to consider, default = None (all indexes)
    columns: column range to consider, default = None (all columns)
    axis: on which axis to remove, default = 1
        axis = 1, remove columns; axis = 0, remove indexes
    threshold: criteria of missing percentage, default = 1
        acceptable types: numeric in [0, 1], or a list of numerics matched
        elementwise with ``columns``/``index``
    reset_index: whether to reset index after dropping, default = True
    save: whether to store the removed columns/indexes to a csv file,
        default = False

    Returns
    -------
    ``data`` with the offending columns/indexes dropped (dropped in place).
    """
    # None defaults replace the original mutable [] defaults
    index = [] if index is None else index
    columns = [] if columns is None else columns
    remove_index = []  # indexes scheduled for removal
    remove_column = []  # columns scheduled for removal
    # make sure it's dataframe
    if not isinstance(data, pd.DataFrame):
        try:
            data = pd.DataFrame(data)
        except Exception:
            raise TypeError("Expect a dataframe, get {}.".format(type(data)))
    n, p = data.shape  # number of observations/features in the dataset
    if axis == 1:  # remove columns
        if not columns and index:  # in case users confuse index for columns
            columns = index
        else:
            columns = list(data.columns) if not columns else columns
    elif axis == 0:  # remove index
        if not index and columns:  # in case users confuse columns for index
            index = columns
        else:
            index = list(data.index) if not index else index
    if isinstance(threshold, list):
        # threshold given as a list: one criterion per column/index
        if axis == 1:  # remove columns
            if len(columns) != len(threshold):
                raise ValueError(
                    "Columns and threshold should be same size, get {} and {}.".format(
                        len(columns), len(threshold)
                    )
                )
            for _column, _threshold in zip(columns, threshold):
                # only delete column when missing percentage reaches threshold
                if data[_column].isnull().values.sum() / n >= _threshold:
                    remove_column.append(_column)
        elif axis == 0:  # remove index
            if len(index) != len(threshold):
                raise ValueError(
                    "Indexes and threshold should be same size, get {} and {}.".format(
                        len(index), len(threshold)
                    )
                )
            for _index, _threshold in zip(index, threshold):
                if data.loc[_index, :].isnull().values.sum() / p >= _threshold:
                    remove_index.append(_index)
    else:
        if axis == 1:  # remove columns
            for _column in columns:
                if data[_column].isnull().values.sum() / n >= threshold:
                    remove_column.append(_column)
        elif axis == 0:  # remove indexes
            for _index in index:
                if data.loc[_index, :].isnull().values.sum() / p >= threshold:
                    remove_index.append(_index)
    # save the removed columns/indexes to another file
    if save:
        if axis == 1:
            data[remove_column].to_csv(
                "Removing_data(Limited_Information).csv", index=False
            )
        elif axis == 0:
            # data[remove_index] would select *columns* by those labels;
            # .loc selects the rows scheduled for removal
            data.loc[remove_index].to_csv(
                "Removing_data(Limited_Information).csv", index=False
            )
    if axis == 1:  # remove columns
        data.drop(remove_column, axis=1, inplace=True)
    elif axis == 0:  # remove index
        data.drop(remove_index, axis=0, inplace=True)
    if reset_index:  # whether to reset index
        data.reset_index(drop=True, inplace=True)
    return data
# get missing matrix
def get_missing_matrix(
    data, nas=("nan", "NaN", "NaT", "NA", "novalue", "None", "none"), missing=1
):
    """
    Get missing matrix for datasets

    Parameters
    ----------
    data: data containing missing values,
        acceptable type: pandas.DataFrame, numpy.ndarray
    nas: string forms of missing values, default as above
        (values are compared as strings, since np.nan != np.nan)
    missing: label written for missing cells, default = 1
        missing = 1 marks missing cells 1 / observed 0; missing = 0 inverts

    Example
    -------
    >> a = pd.DataFrame({
    >> 'column_1' : [1, 2, 3, np.nan, 5, 'NA'],
    >> 'column_2' : [7, 'novalue', 'none', 10, 11, None],
    >> 'column_3' : [np.nan, '3/12/2000', '3/13/2000', np.nan, '3/12/2000', '3/13/2000']
    >> })
    >> a['column_3'] = pd.to_datetime(a['column_3'])
    >> print(get_missing_matrix(a))
    [[0 0 1]
     [0 1 0]
     [0 1 0]
     [1 0 1]
     [0 0 0]
     [1 1 0]]
    """
    # immutable tuple default replaces the original mutable list default.
    # if data is dataframe, work on its raw values
    if isinstance(data, pd.DataFrame):
        data = data.values
    # if not numpy.array, raise Error
    if not isinstance(data, np.ndarray):
        raise TypeError("Expect a array, get {}.".format(type(data)))
    # since np.nan != np.nan, convert data to string for comparison
    missing_matrix = np.isin(data.astype(str), nas)
    # single expression converts True/False to missing/(1 - missing):
    # missing = 1 -> |1 - 1 - m| = m ; missing = 0 -> |1 - 0 - m| = 1 - m
    missing_matrix = np.abs(1 - missing - missing_matrix.astype(int))
    return missing_matrix
# determine whether the data contains imbalanced data
# if value, returns the column header and majority class from the unbalanced dataset
def is_imbalance(data, threshold, value=False):
    """
    Determine whether ``data`` contains an imbalanced feature: a column in
    which one class's share strictly exceeds ``threshold``.

    When ``value`` is True, returns ``(column, majority_class)`` for the
    first such feature (or ``(None, None)``); otherwise returns True/False.
    """
    for col in data.columns:
        levels = data[col].unique()
        # NOTE(review): a single-class column stops the whole scan and
        # reports "not imbalanced" — presumably intentional, but confirm
        if len(levels) == 1:
            return (None, None) if value else False
        total = len(data[col])
        for level in levels:
            share = (data[col] == level).sum() / total
            if share > threshold:
                return (col, level) if value else True
    # no feature crossed the threshold
    return (None, None) if value else False
# return the distance between sample and the table sample points
# notice: the distance betwen sample and sample itself will be included, be aware to deal with it
# supported norm ['l1', 'l2']
def LinkTable(sample, table, norm="l2"):
    """
    Return, for every row of ``sample``, the distances to every row of
    ``table`` as a list of lists.

    notice: if a sample row also appears in ``table``, its zero self-distance
    is included — callers must deal with it.

    norm: "l1" (sum of absolute differences) or "l2" (sum of squared
    differences, i.e. squared Euclidean — no square root is taken).
    """
    if sample.shape[1] != table.shape[1]:
        raise ValueError("Not same size of columns!")
    n_features = table.shape[1]
    _linktable = []
    for sample_point in sample.values:
        # take a fresh copy for every sample point: the original
        # implementation copied once outside the loop and mutated it in
        # place, so every point after the first measured distances against
        # already-transformed values (bug)
        diff = table.copy(deep=True)
        for i in range(n_features):
            if norm == "l2":
                diff.iloc[:, i] = (diff.iloc[:, i] - sample_point[i]) ** 2
            if norm == "l1":
                diff.iloc[:, i] = np.abs(diff.iloc[:, i] - sample_point[i])
        _linktable.append(diff.sum(axis=1).values.tolist())
    return _linktable
class ExtremeClass:
    """
    remove the features where only one unique class exists, since no
    information is provided by the feature

    Parameters
    ----------
    extreme_threshold: default = 1
        drop a feature when one class reaches this fraction of the column
    """

    def __init__(self, extreme_threshold=1):
        self.extreme_threshold = extreme_threshold

    def cut(self, X):
        """Return a copy of ``X`` without the extreme-class features;
        ``X`` itself is left unchanged."""
        _X = X.copy(deep=True)
        features = list(_X.columns)
        for _column in features:
            unique_values = sorted(_X[_column].dropna().unique())
            for _value in unique_values:
                # share is computed against all rows of X (NaNs included
                # in the denominator), matching the original behavior
                if (
                    len(X.loc[X[_column] == _value, _column]) / len(X)
                    >= self.extreme_threshold
                ):
                    # pandas has no DataFrame.remove; the original
                    # ``_X.remove(labels=..., inplace=True)`` raised
                    # AttributeError — drop is the correct call
                    _X.drop(columns=_column, inplace=True)
                    break
        return _X
# convert a (n_samples, n_classes) array to a (n_samples, 1) array
# assign the class with the largest probability to the sample
# common use of this function is to convert the prediction of the class
# from neural network to actual predictions
def assign_classes(list):
    """Collapse an (n_samples, n_classes) score array to class labels by
    picking, for each sample, the index with the largest value — e.g. to
    turn neural-network class probabilities into predictions."""
    # the parameter shadows the builtin ``list``; kept for interface
    # compatibility with existing callers
    winners = [np.argmax(scores) for scores in list]
    return np.array(winners)
# softmax function that can handle 1d data
def softmax(df):
    """
    Softmax that accepts both 1-d and 2-d input.

    A 1-d input is treated as logits of the positive class of a binary
    problem: a sigmoid yields P(positive), clipped to exactly 0/1 at the
    extremes, and an (n, 2) array of (P(negative), P(positive)) is returned.
    A 2-d input gets a numerically stabilised row-wise softmax
    (max subtracted before exponentiation).
    """
    if df.ndim == 1:
        pos = 1 / (1 + np.exp(-df))
        pos[pos > 0.999999] = 1
        pos[pos < 0.0000001] = 0
        return np.transpose(np.array((1 - pos, pos)))

    shifted = np.exp(df - np.max(df, axis=1).reshape((-1, 1)))
    return shifted / np.sum(shifted, axis=1).reshape((-1, 1))
|
PanyiDong/AutoML | My_AutoML/_hyperparameters/_ray/_classifier_hyperparameter.py | """
File: _classifier_hyperparameter.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/_ray/_classifier_hyperparameter.py
File Created: Friday, 8th April 2022 9:04:05 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 11:41:53 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# NOTE:
# As sklearn enters version 1.0, some of the losses have changed its name,
# hyperparameters will change accordingly
import sklearn


def _version_tuple(version):
    """Parse the leading numeric components of a version string into a tuple.

    Non-numeric suffixes (e.g. "1.0.0rc1" -> (1, 0, 0)) are ignored so the
    comparison is done numerically instead of lexicographically on strings,
    which mis-orders version numbers in general.
    """
    parts = []
    for piece in version.split(".")[:3]:
        digits = ""
        for ch in piece:
            if ch.isdigit():
                digits += ch
            else:
                break
        parts.append(int(digits) if digits else 0)
    return tuple(parts)


# True when the installed sklearn is 1.0.0 or older (old hyperparameter names)
sklearn_1_0_0 = _version_tuple(sklearn.__version__) <= (1, 0, 0)

from ray import tune

from My_AutoML._constant import LIGHTGBM_BOOSTING, LIGHTGBM_TREE_LEARNER
# classifier hyperparameters
# Search space: one dict per candidate model.  The "model_<i>" key carries the
# model name (the running index keeps the keys unique across the list) and
# every other entry maps "<ModelName>_<hyperparameter>" to a ray.tune sampling
# primitive; tune.choice([<single value>]) pins a hyperparameter to a constant.
classifier_hyperparameter = [
    # classification models from autosklearn
    {
        "model_1": "AdaboostClassifier",
        "AdaboostClassifier_n_estimators": tune.qlograndint(10, 500, 1),
        "AdaboostClassifier_learning_rate": tune.uniform(0.01, 2),
        "AdaboostClassifier_algorithm": tune.choice(["SAMME", "SAMME.R"]),
        # for base_estimator of Decision Tree
        "AdaboostClassifier_max_depth": tune.qrandint(1, 10, 1),
    },
    {
        "model_2": "BernoulliNB",
        "BernoulliNB_alpha": tune.loguniform(1e-2, 100),
        "BernoulliNB_fit_prior": tune.choice([True, False]),
    },
    {
        "model_3": "DecisionTree",
        "DecisionTree_criterion": tune.choice(["gini", "entropy"]),
        "DecisionTree_max_features": tune.choice([1.0]),
        "DecisionTree_max_depth_factor": tune.uniform(0.0, 2.0),
        "DecisionTree_min_samples_split": tune.qrandint(2, 20, 1),
        "DecisionTree_min_samples_leaf": tune.qrandint(1, 20, 1),
        "DecisionTree_min_weight_fraction_leaf": tune.choice([0.0]),
        "DecisionTree_max_leaf_nodes": tune.choice(["None"]),
        "DecisionTree_min_impurity_decrease": tune.choice([0.0]),
    },
    {
        "model_4": "ExtraTreesClassifier",
        "ExtraTreesClassifier_criterion": tune.choice(["gini", "entropy"]),
        "ExtraTreesClassifier_min_samples_leaf": tune.qrandint(1, 20, 1),
        "ExtraTreesClassifier_min_samples_split": tune.qrandint(2, 20, 1),
        "ExtraTreesClassifier_max_features": tune.uniform(0.0, 1.0),
        "ExtraTreesClassifier_bootstrap": tune.choice([True, False]),
        "ExtraTreesClassifier_max_leaf_nodes": tune.choice(["None"]),
        "ExtraTreesClassifier_max_depth": tune.choice(["None"]),
        "ExtraTreesClassifier_min_weight_fraction_leaf": tune.choice([0.0]),
        "ExtraTreesClassifier_min_impurity_decrease": tune.choice([0.0]),
    },
    # GaussianNB has no tunable hyperparameters
    {"model_5": "GaussianNB"},
    {
        "model_6": "HistGradientBoostingClassifier",
        "HistGradientBoostingClassifier_loss": tune.choice(["auto"]),
        "HistGradientBoostingClassifier_learning_rate": tune.loguniform(0.01, 1),
        "HistGradientBoostingClassifier_min_samples_leaf": tune.qlograndint(1, 200, 1),
        "HistGradientBoostingClassifier_max_depth": tune.choice(["None"]),
        "HistGradientBoostingClassifier_max_leaf_nodes": tune.qlograndint(3, 2047, 1),
        "HistGradientBoostingClassifier_max_bins": tune.choice([255]),
        "HistGradientBoostingClassifier_l2_regularization": tune.loguniform(1e-10, 1),
        "HistGradientBoostingClassifier_early_stop": tune.choice(
            ["off", "train", "valid"]
        ),
        "HistGradientBoostingClassifier_tol": tune.choice([1e-7]),
        "HistGradientBoostingClassifier_scoring": tune.choice(["loss"]),
        "HistGradientBoostingClassifier_n_iter_no_change": tune.qrandint(1, 20, 1),
        "HistGradientBoostingClassifier_validation_fraction": tune.uniform(0.01, 0.4),
    },
    {
        "model_7": "KNearestNeighborsClassifier",
        "KNearestNeighborsClassifier_n_neighbors": tune.qrandint(1, 100, 1),
        "KNearestNeighborsClassifier_weights": tune.choice(["uniform", "distance"]),
        "KNearestNeighborsClassifier_p": tune.choice([1, 2]),
    },
    {
        "model_8": "LDA",
        # shrinkage_factor is only relevant when shrinkage == "manual"
        "LDA_shrinkage": tune.choice([None, "auto", "manual"]),
        "LDA_shrinkage_factor": tune.uniform(0.0, 1.0),
        "LDA_tol": tune.loguniform(1e-5, 1e-1),
    },
    {
        "model_9": "LibLinear_SVC",
        # forbid penalty = 'l1' and loss = 'hinge'
        # forbid penalty = 'l2', loss = 'hinge' and dual = False
        # forbid penalty = 'l1' and dual = False
        "LibLinear_SVC_penalty": tune.choice(["l2"]),
        "LibLinear_SVC_loss": tune.choice(["squared_hinge"]),
        "LibLinear_SVC_dual": tune.choice([False]),
        "LibLinear_SVC_tol": tune.loguniform(1e-5, 1e-1),
        "LibLinear_SVC_C": tune.loguniform(0.03125, 32768),
        "LibLinear_SVC_multi_class": tune.choice(["ovr"]),
        "LibLinear_SVC_fit_intercept": tune.choice([True]),
        "LibLinear_SVC_intercept_scaling": tune.choice([1]),
    },
    {
        "model_10": "LibSVM_SVC",
        # degree only selected when kernel = 'poly'
        # coef0 only selected when kernel = ['poly', 'sigmoid']
        "LibSVM_SVC_C": tune.loguniform(0.03125, 32768),
        "LibSVM_SVC_kernel": tune.choice(["poly", "rbf", "sigmoid"]),
        "LibSVM_SVC_gamma": tune.loguniform(3.0517578125e-05, 8),
        "LibSVM_SVC_shrinking": tune.choice([True, False]),
        "LibSVM_SVC_tol": tune.loguniform(1e-5, 1e-1),
        "LibSVM_SVC_max_iter": tune.choice([-1]),
        "LibSVM_SVC_degree": tune.qrandint(2, 5, 1),
        "LibSVM_SVC_coef0": tune.uniform(-1, 1),
    },
    {
        "model_11": "MLPClassifier",
        "MLPClassifier_hidden_layer_depth": tune.qrandint(1, 3, 1),
        "MLPClassifier_num_nodes_per_layer": tune.qlograndint(16, 264, 1),
        "MLPClassifier_activation": tune.choice(["tanh", "relu"]),
        "MLPClassifier_alpha": tune.loguniform(1e-7, 1e-1),
        "MLPClassifier_learning_rate_init": tune.loguniform(1e-4, 0.5),
        "MLPClassifier_early_stopping": tune.choice(["train", "valid"]),
        #'solver' : tune.choice('MLPClassifier_solver', ['lbfgs', 'sgd', 'adam']),
        # autosklearn must include _no_improvement_count, where only supported by 'sgd' and 'adam'
        "MLPClassifier_solver": tune.choice(["adam"]),
        "MLPClassifier_batch_size": tune.choice(["auto"]),
        "MLPClassifier_n_iter_no_change": tune.choice([32]),
        "MLPClassifier_tol": tune.choice([1e-4]),
        "MLPClassifier_shuffle": tune.choice([True]),
        "MLPClassifier_beta_1": tune.choice([0.9]),
        "MLPClassifier_beta_2": tune.choice([0.999]),
        "MLPClassifier_epsilon": tune.choice([1e-8]),
        "MLPClassifier_validation_fraction": tune.choice([0.1]),
    },
    {
        "model_12": "MultinomialNB",
        "MultinomialNB_alpha": tune.loguniform(1e-2, 100),
        "MultinomialNB_fit_prior": tune.choice([True, False]),
    },
    {
        "model_13": "PassiveAggressive",
        "PassiveAggressive_C": tune.loguniform(1e-5, 10),
        "PassiveAggressive_fit_intercept": tune.choice([True]),
        "PassiveAggressive_tol": tune.loguniform(1e-5, 1e-1),
        "PassiveAggressive_loss": tune.choice(["hinge", "squared_hinge"]),
        "PassiveAggressive_average": tune.choice([True, False]),
    },
    {"model_14": "QDA", "QDA_reg_param": tune.uniform(0.0, 1.0)},
    {
        "model_15": "RandomForest",
        "RandomForest_criterion": tune.choice(["gini", "entropy"]),
        "RandomForest_max_features": tune.uniform(0.0, 1.0),
        "RandomForest_max_depth": tune.choice([None]),
        "RandomForest_min_samples_split": tune.qrandint(2, 20, 1),
        "RandomForest_min_samples_leaf": tune.qrandint(1, 20, 1),
        "RandomForest_min_weight_fraction_leaf": tune.choice([0.0]),
        "RandomForest_bootstrap": tune.choice([True, False]),
        "RandomForest_max_leaf_nodes": tune.choice([None]),
        "RandomForest_min_impurity_decrease": tune.choice([0.0]),
    },
    {
        "model_16": "SGD",
        # l1_ratio only selected for penalty = 'elasticnet'
        # epsilon only selected for loss = 'modified_huber'
        # power_t only selected for learning_rate = 'invscaling'
        # eta0 only selected for learning_rate in ['constant', 'invscaling']
        "SGD_loss": tune.choice(
            ["hinge", "log", "modified_huber", "squared_hinge", "perceptron"],
        ),
        "SGD_penalty": tune.choice(["l1", "l2", "elasticnet"]),
        "SGD_alpha": tune.loguniform(1e-7, 1e-1),
        "SGD_fit_intercept": tune.choice([True]),
        "SGD_tol": tune.loguniform(1e-5, 1e-1),
        "SGD_learning_rate": tune.choice(["constant", "optimal", "invscaling"]),
        "SGD_l1_ratio": tune.loguniform(1e-9, 1),
        "SGD_epsilon": tune.loguniform(1e-5, 1e-1),
        "SGD_eta0": tune.loguniform(1e-7, 1e-1),
        "SGD_power_t": tune.uniform(1e-5, 1),
        "SGD_average": tune.choice([True, False]),
    },
    # classification models from sklearn
    {
        "model_17": "LogisticRegression",
        "LogisticRegression_penalty": tune.choice(["l2", "none"]),
        "LogisticRegression_tol": tune.loguniform(1e-5, 1e-1),
        "LogisticRegression_C": tune.loguniform(1e-5, 10),
    },
    {
        "model_18": "ComplementNB",
        "ComplementNB_alpha": tune.uniform(0, 1),
        "ComplementNB_fit_prior": tune.choice([True, False]),
        "ComplementNB_norm": tune.choice([True, False]),
    },
    # {
    #     "model_19": "HistGradientBoostingClassifier",
    #     "HistGradientBoostingClassifier_loss": tune.choice(["auto"]),
    #     "HistGradientBoostingClassifier_learning_rate": tune.uniform(1e-7, 1),
    #     "HistGradientBoostingClassifier_max_leaf_nodes": tune.choice([None]),
    #     "HistGradientBoostingClassifier_max_depth": tune.choice([None]),
    #     "HistGradientBoostingClassifier_min_samples_leaf": tune.qrandint(1, 20, 1),
    #     "HistGradientBoostingClassifier_l2_regularization": tune.uniform(0, 1),
    #     "HistGradientBoostingClassifier_tol": tune.loguniform(1e-5, 1e-1),
    # },
    {
        "model_19": "GradientBoostingClassifier",
        "GradientBoostingClassifier_loss": tune.choice(["deviance", "exponential"]),
        "GradientBoostingClassifier_learning_rate": tune.loguniform(0.01, 1),
        "GradientBoostingClassifier_n_estimators": tune.qlograndint(10, 500, 1),
        "GradientBoostingClassifier_subsample": tune.uniform(0.1, 1),
        # sklearn 1.0 renamed the criteria "mse"/"mae"; pick the spelling the
        # installed sklearn understands (see sklearn_1_0_0 above)
        "GradientBoostingClassifier_criterion": tune.choice(["mse", "mae"])
        if sklearn_1_0_0
        else tune.choice(["friedman_mse", "squared_error"]),
        "GradientBoostingClassifier_min_samples_split": tune.qrandint(2, 20, 1),
        "GradientBoostingClassifier_min_samples_leaf": tune.qlograndint(1, 200, 1),
        "GradientBoostingClassifier_min_weight_fraction_leaf": tune.uniform(0.0, 0.5),
        "GradientBoostingClassifier_max_depth": tune.randint(1, 31),
        "GradientBoostingClassifier_min_impurity_decrease": tune.uniform(0.0, 1.0),
        # NOTE(review): this mixes a tune.uniform sampler into the category
        # list of tune.choice -- whether ray.tune resolves the nested domain
        # here (vs. returning it verbatim as the chosen value) should be
        # confirmed against the installed ray version
        "GradientBoostingClassifier_max_features": tune.choice(
            ["sqrt", "log2", "auto", tune.uniform(0.0, 1.0)]
        ),
        "GradientBoostingClassifier_max_leaf_nodes": tune.qlograndint(3, 2047, 1),
        "GradientBoostingClassifier_validation_fraction": tune.uniform(0.01, 0.4),
        "GradientBoostingClassifier_n_iter_no_change": tune.qrandint(1, 20, 1),
        "GradientBoostingClassifier_tol": tune.choice([1e-7]),
    },
    # self-defined models
    {
        "model_20": "MLP_Classifier",
        "MLP_Classifier_hidden_layer": tune.qrandint(1, 5, 1),
        "MLP_Classifier_hidden_size": tune.qrandint(1, 10, 1),
        "MLP_Classifier_activation": tune.choice(["Tanh", "Sigmoid"]),
        "MLP_Classifier_learning_rate": tune.uniform(1e-5, 1),
        "MLP_Classifier_optimizer": tune.choice(["Adam", "SGD"]),
        "MLP_Classifier_criteria": tune.choice(
            ["CrossEntropy", "NegativeLogLikelihood"]
        ),
        "MLP_Classifier_batch_size": tune.choice([16, 32, 64]),
        "MLP_Classifier_num_epochs": tune.qrandint(5, 30, 1),
    },
    {
        "model_21": "RNN_Classifier",
        "RNN_Classifier_hidden_size": tune.choice([16, 32, 64, 128, 256]),
        "RNN_Classifier_n_layers": tune.qrandint(1, 5, 1),
        "RNN_Classifier_RNN_unit": tune.choice(["RNN", "LSTM", "GRU"]),
        "RNN_Classifier_activation": tune.choice(["ReLU", "Tanh", "Sigmoid"]),
        "RNN_Classifier_dropout": tune.loguniform(1e-7, 0.8),
        "RNN_Classifier_learning_rate": tune.loguniform(1e-7, 1),
        "RNN_Classifier_optimizer": tune.choice(["Adam", "SGD"]),
        "RNN_Classifier_criteria": tune.choice(
            ["CrossEntropy", "NegativeLogLikelihood"]
        ),
        "RNN_Classifier_batch_size": tune.choice([16, 32, 64]),
        "RNN_Classifier_num_epochs": tune.qrandint(5, 30, 1),
    },
    {
        "model_22": "LightGBM_Classifier",
        # the objective depends on the response (binary vs multiclass) and is
        # therefore filled in later by the HPO driver
        "LightGBM_Classifier_objective": tune.choice(
            ["Need to specify in HPO by response"]
        ),
        "LightGBM_Classifier_boosting": tune.choice(LIGHTGBM_BOOSTING),
        "LightGBM_Classifier_n_estimators": tune.qlograndint(50, 500, 1),
        # max_depth == -1 for no limit
        "LightGBM_Classifier_max_depth": tune.randint(-1, 31),
        "LightGBM_Classifier_num_leaves": tune.qlograndint(3, 2047, 1),
        "LightGBM_Classifier_min_data_in_leaf": tune.qrandint(1, 20, 1),
        "LightGBM_Classifier_learning_rate": tune.loguniform(1e-4, 1),
        "LightGBM_Classifier_tree_learner": tune.choice(LIGHTGBM_TREE_LEARNER),
        "LightGBM_Classifier_num_iterations": tune.qlograndint(50, 500, 1),
    },
    {
        "model_23": "XGBoost_Classifier",
        "XGBoost_Classifier_eta": tune.uniform(0, 1),
        "XGBoost_Classifier_gamma": tune.loguniform(1e-4, 1e3),
        "XGBoost_Classifier_max_depth": tune.randint(1, 12),
        "XGBoost_Classifier_min_child_weight": tune.loguniform(1e-4, 1e3),
        "XGBoost_Classifier_max_delta_step": tune.loguniform(1e-3, 1e1),
        "XGBoost_Classifier_reg_lambda": tune.uniform(0, 1),
        "XGBoost_Classifier_reg_alpha": tune.uniform(0, 1),
    },
    {
        "model_24": "GAM_Classifier",
        "GAM_Classifier_type": tune.choice(["logistic"]),
        "GAM_Classifier_tol": tune.loguniform(1e-4, 1),
    },
]
|
PanyiDong/AutoML | tests/test_encoder/test_encoder.py | """
File: test_encoder.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_encoder/test_encoder.py
File Created: Saturday, 9th April 2022 10:09:13 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 16th April 2022 7:50:31 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def test_encoder_1():
    """DataEncoding smoke test: dummy coding enabled, no numeric transform."""
    from My_AutoML._encoding import DataEncoding
    from My_AutoML import load_data

    database = load_data().load("Appendix", "Employee")

    encoder = DataEncoding(dummy_coding=True, transform=False)
    encoder.fit(database["Employee"])
    encoder.refit(database["Employee"])

    # check whether the method is fitted (avoid the `== True` anti-pattern)
    assert encoder._fitted, "The encoder is not correctly fitted."
def test_encoder_2():
    """DataEncoding smoke test: no dummy coding, standardize transform."""
    from My_AutoML._encoding import DataEncoding
    from My_AutoML import load_data

    database = load_data().load("Appendix", "Employee")

    encoder = DataEncoding(dummy_coding=False, transform="standardize")
    encoder.fit(database["Employee"])
    encoder.refit(database["Employee"])

    # check whether the method is fitted (avoid the `== True` anti-pattern)
    assert encoder._fitted, "The encoder is not correctly fitted."
def test_encoder_3():
    """DataEncoding smoke test: no dummy coding, centering transform."""
    from My_AutoML._encoding import DataEncoding
    from My_AutoML import load_data

    database = load_data().load("Appendix", "Employee")

    encoder = DataEncoding(dummy_coding=False, transform="center")
    encoder.fit(database["Employee"])
    encoder.refit(database["Employee"])

    # check whether the method is fitted (avoid the `== True` anti-pattern)
    assert encoder._fitted, "The encoder is not correctly fitted."
def test_encoder_4():
    """DataEncoding smoke test: no dummy coding, log transform."""
    from My_AutoML._encoding import DataEncoding
    from My_AutoML import load_data

    database = load_data().load("Appendix", "Employee")

    encoder = DataEncoding(dummy_coding=False, transform="log")
    encoder.fit(database["Employee"])
    encoder.refit(database["Employee"])

    # check whether the method is fitted (avoid the `== True` anti-pattern)
    assert encoder._fitted, "The encoder is not correctly fitted."
|
PanyiDong/AutoML | My_AutoML/_model/_legacy.py | """
File: _legacy.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_model/_legacy.py
File Created: Friday, 8th April 2022 9:18:08 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:26:08 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import autosklearn.pipeline.components.classification
import autosklearn.pipeline.components.regression
####################################################################################################
# classifiers
# Mapping from model name to the autosklearn classification component class;
# the keys match the model names used in the hyperparameter search spaces.
classifiers = {
    # classification models from autosklearn
    "AdaboostClassifier": autosklearn.pipeline.components.classification.adaboost.AdaboostClassifier,
    "BernoulliNB": autosklearn.pipeline.components.classification.bernoulli_nb.BernoulliNB,
    "DecisionTree": autosklearn.pipeline.components.classification.decision_tree.DecisionTree,
    "ExtraTreesClassifier": autosklearn.pipeline.components.classification.extra_trees.ExtraTreesClassifier,
    "GaussianNB": autosklearn.pipeline.components.classification.gaussian_nb.GaussianNB,
    "GradientBoostingClassifier": autosklearn.pipeline.components.classification.gradient_boosting.GradientBoostingClassifier,
    "KNearestNeighborsClassifier": autosklearn.pipeline.components.classification.k_nearest_neighbors.KNearestNeighborsClassifier,
    "LDA": autosklearn.pipeline.components.classification.lda.LDA,
    "LibLinear_SVC": autosklearn.pipeline.components.classification.liblinear_svc.LibLinear_SVC,
    "LibSVM_SVC": autosklearn.pipeline.components.classification.libsvm_svc.LibSVM_SVC,
    "MLPClassifier": autosklearn.pipeline.components.classification.mlp.MLPClassifier,
    "MultinomialNB": autosklearn.pipeline.components.classification.multinomial_nb.MultinomialNB,
    "PassiveAggressive": autosklearn.pipeline.components.classification.passive_aggressive.PassiveAggressive,
    "QDA": autosklearn.pipeline.components.classification.qda.QDA,
    "RandomForest": autosklearn.pipeline.components.classification.random_forest.RandomForest,
    "SGD": autosklearn.pipeline.components.classification.sgd.SGD,
}
# regressors
# Mapping from model name to the autosklearn regression component class.
regressors = {
    # regression models from autosklearn
    "AdaboostRegressor": autosklearn.pipeline.components.regression.adaboost.AdaboostRegressor,
    "ARDRegression": autosklearn.pipeline.components.regression.ard_regression.ARDRegression,
    "DecisionTree": autosklearn.pipeline.components.regression.decision_tree.DecisionTree,
    "ExtraTreesRegressor": autosklearn.pipeline.components.regression.extra_trees.ExtraTreesRegressor,
    "GaussianProcess": autosklearn.pipeline.components.regression.gaussian_process.GaussianProcess,
    "GradientBoosting": autosklearn.pipeline.components.regression.gradient_boosting.GradientBoosting,
    "KNearestNeighborsRegressor": autosklearn.pipeline.components.regression.k_nearest_neighbors.KNearestNeighborsRegressor,
    "LibLinear_SVR": autosklearn.pipeline.components.regression.liblinear_svr.LibLinear_SVR,
    "LibSVM_SVR": autosklearn.pipeline.components.regression.libsvm_svr.LibSVM_SVR,
    "MLPRegressor": autosklearn.pipeline.components.regression.mlp.MLPRegressor,
    "RandomForest": autosklearn.pipeline.components.regression.random_forest.RandomForest,
    "SGD": autosklearn.pipeline.components.regression.sgd.SGD,
}
"""
LibSVM_SVR, MLP and SGD have problems of requiring inverse_transform
of StandardScaler while having 1D array
https://github.com/automl/auto-sklearn/issues/1297
problem solved
"""
|
PanyiDong/AutoML | setup.py | <filename>setup.py
"""
File: setup.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.0.2
Relative Path: /setup.py
File Created: Friday, 4th March 2022 11:33:55 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 3:32:31 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
from setuptools import setup, find_packages
# fail fast on interpreters older than the minimum this package supports
# (kept in sync with python_requires=">=3.7")
if sys.version_info < (3, 7):
    sys.exit("Python >= 3.7 is required for the pipeline!")
def setup_package():
    """Register the My_AutoML package with setuptools (metadata + dependencies)."""
    setup(
        name="My_AutoML",
        version="0.2.1",
        author="<NAME>",
        url="https://github.com/PanyiDong/My_AutoML",
        author_email="<EMAIL>",
        description="Automated Machine Learning/AutoML pipeline.",
        license="MIT",
        # ship only the library itself; tests/docs/examples stay out of wheels
        packages=find_packages(
            exclude=[
                "tests",
                "example",
                "archive",
                "Appendix",
                "docs",
                ".github",
                "build",
                "dist",
            ]
        ),
        package_dir={"My_AutoML": "My_AutoML"},
        include_package_data=True,
        package_data={"My_AutoML": ["Appendix/*", "example/*"]},
        platforms=["Linux", "Windows", "MacOS"],
        python_requires=">=3.7",
        install_requires=[
            "numpy",
            "pandas",
            "scipy",
            "matplotlib",
            "ray",
            # "ray[tune]",
            # "ray[rllib]",
            "redis;platform_system=='Windows'",
            "tqdm==4.62.3",
            "mlflow==1.21.0",
            "tensorboardX",
            "hyperopt==0.2.5",
            "auto-sklearn==0.14.6;platform_system=='Linux'",
            "scikit-learn==0.24.2;platform_system=='Linux'",
            "scikit-learn>1.0.0;platform_system=='Windows'",
            # BUG FIX: the PEP 508 marker platform_system reports "Darwin" on
            # macOS (it is platform.system()); the previous 'MacOS' value never
            # matched, so macOS installs silently skipped this requirement
            "scikit-learn>1.0.0;platform_system=='Darwin'",
        ],
        extras_require={
            "lightweight": [],
            "normal": [
                "rpy2;platform_system=='Linux'",
                "lightgbm",
                "xgboost",
                "pygam",
            ],
            "nn": [
                "rpy2;platform_system=='Linux'",
                "lightgbm",
                "xgboost",
                "pygam",
                "torch",
                # "transformers",
                # "datasets",
            ],
        },
    )
# allow direct invocation: python setup.py <command>
if __name__ == "__main__":
    setup_package()
|
PanyiDong/AutoML | tests/test_balancing/test_balancing.py | """
File: test_balancing.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_balancing/test_balancing.py
File Created: Saturday, 9th April 2022 11:03:41 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 16th April 2022 12:11:15 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
import numpy as np
import pandas as pd
from My_AutoML._balancing import balancings
# Reproducible synthetic fixture: 100 samples with 10 numeric features and a
# 90/10 imbalanced binary response, so every balancing method has work to do.
# A seeded Generator replaces the previously unseeded np.random call, which
# made the fixture (and any value-sensitive failure) non-reproducible.
_rng = np.random.default_rng(42)

data_X = pd.DataFrame(
    _rng.normal(0, 10, (100, 10)),
    columns=["col_" + str(i) for i in range(10)],
)
data_y = pd.DataFrame(
    [1 for _ in range(90)] + [0 for _ in range(10)], columns=["col_y"]
)
class TestScaling(unittest.TestCase):
    """Smoke-test every balancing method registered in ``balancings``."""

    def test_Scaling(self):
        self.method_dict = balancings
        self.method_names = list(self.method_dict.keys())
        self.method_objects = list(self.method_dict.values())

        for method_name, method_object in self.method_dict.items():
            # the passthrough entry has nothing to fit
            if method_name == "no_processing":
                continue

            mol = method_object(imbalance_threshold=0.8)
            mol.fit_transform(data_X, data_y)

            # check whether the method is fitted
            self.assertEqual(
                mol._fitted,
                True,
                "The method {} is not correctly fitted.".format(method_name),
            )
|
PanyiDong/AutoML | My_AutoML/_hyperparameters/_hyperopt/_balancing_hyperparameter.py | """
File: _balancing_hyperparameter.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/_hyperopt/_balancing_hyperparameter.py
File Created: Tuesday, 5th April 2022 11:04:29 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:22:35 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from hyperopt import hp
from hyperopt.pyll import scope
# balancing
# if the imbalance threshold small, TomekLink will take too long
# Search space: one dict per balancing method.  "balancing" names the method;
# the remaining keys map hyperparameters to hyperopt samplers.  Every hp label
# string is prefixed with the method name because hyperopt requires globally
# unique labels across the whole space.  "k" is cast to int via scope.int
# since hp.quniform yields floats.
balancing_hyperparameter = [
    {"balancing": "no_processing"},
    {
        "balancing": "SimpleRandomOverSampling",
        "imbalance_threshold": hp.uniform(
            "SimpleRandomOverSampling_imbalance_threshold", 0.8, 1
        ),
    },
    {
        "balancing": "SimpleRandomUnderSampling",
        "imbalance_threshold": hp.uniform(
            "SimpleRandomUnderSampling_imbalance_threshold", 0.8, 1
        ),
    },
    {
        "balancing": "TomekLink",
        "imbalance_threshold": hp.uniform("TomekLink_imbalance_threshold", 0.8, 1),
    },
    {
        "balancing": "EditedNearestNeighbor",
        "imbalance_threshold": hp.uniform(
            "EditedNearestNeighbor_imbalance_threshold", 0.8, 1
        ),
        "k": scope.int(hp.quniform("EditedNearestNeighbor_k", 1, 7, 1)),
    },
    {
        "balancing": "CondensedNearestNeighbor",
        "imbalance_threshold": hp.uniform(
            "CondensedNearestNeighbor_imbalance_threshold", 0.8, 1
        ),
    },
    {
        "balancing": "OneSidedSelection",
        "imbalance_threshold": hp.uniform(
            "OneSidedSelection_imbalance_threshold", 0.8, 1
        ),
    },
    {
        "balancing": "CNN_TomekLink",
        "imbalance_threshold": hp.uniform("CNN_TomekLink_imbalance_threshold", 0.8, 1),
    },
    {
        "balancing": "Smote",
        "imbalance_threshold": hp.uniform("Smote_imbalance_threshold", 0.8, 1),
        "k": scope.int(hp.quniform("Smote_k", 1, 10, 1)),
    },
    {
        "balancing": "Smote_TomekLink",
        "imbalance_threshold": hp.uniform(
            "Smote_TomekLink_imbalance_threshold", 0.8, 1
        ),
        "k": scope.int(hp.quniform("Smote_TomekLink_k", 1, 10, 1)),
    },
    {
        "balancing": "Smote_ENN",
        "imbalance_threshold": hp.uniform("Smote_ENN_imbalance_threshold", 0.8, 1),
        "k": scope.int(hp.quniform("Smote_ENN_k", 1, 10, 1)),
    },
]
|
PanyiDong/AutoML | My_AutoML/_model/_FNN.py | <reponame>PanyiDong/AutoML
"""
File: _FNN.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_model/_FNN.py
File Created: Tuesday, 5th April 2022 11:46:17 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 7:17:37 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import warnings
import numpy as np
import pandas as pd
from My_AutoML._utils._data import assign_classes
# check if pytorch exists
# if exists, import pytorch
import importlib
pytorch_spec = importlib.util.find_spec("torch")
if pytorch_spec is not None:
import torch
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset, DataLoader
####################################################################################################
# Feed Forward Neural Network models
####################################################################################################
# Multi-Layer Perceptron Model
# 1. MLP_Model, forward phase
# 2. MLP_Base, base training/evaluation phase
# 3. MLP_Classifier, MLP specified for classification tasks
# 4. MLP_Regressor, MLP specified for regression tasks
class MLP_Model(nn.Module):
    """
    Flexible Multi-Layer Perceptron model.

    Parameters
    ----------
    input_size: input shape, for tabular data, input_size equals number of features

    hidden_layer: number of hidden layers

    hidden_size: number of neurons in each hidden layer

    output_size: output shape, for classification, output_size equals number of classes;
    for regression, output_size equals 1

    softmax: if True, add softmax function (for classification), default is False

    activation: activation functions, default: "ReLU"
    support activation ["ReLU", "Tanh", "Sigmoid"]
    """

    def __init__(
        self,
        input_size,
        hidden_layer,
        hidden_size,
        output_size,
        softmax=False,
        activation="ReLU",
    ):
        super().__init__()

        self.input_size = input_size
        self.hidden_layer = hidden_layer
        self.hidden_size = hidden_size
        self.softmax = softmax
        self.output_size = output_size

        # map activation name to the corresponding torch module
        if activation == "ReLU":
            self.activation = nn.ReLU()
        elif activation == "Tanh":
            self.activation = nn.Tanh()
        elif activation == "Sigmoid":
            self.activation = nn.Sigmoid()
        else:
            # BUG FIX: an unknown activation previously fell through silently
            # and crashed later with an AttributeError; fail fast instead
            raise ValueError("Not recognized activation: {}.".format(activation))

        layers = []  # layers assembled into a sequential model below

        # first layer: input layer -> first hidden layer, followed by activation
        layers.append(nn.Linear(self.input_size, self.hidden_size))
        layers.append(self.activation)

        # middle layers: hidden layer -> hidden layer, each followed by activation
        for _ in range(self.hidden_layer):
            layers.append(nn.Linear(self.hidden_size, self.hidden_size))
            layers.append(self.activation)

        # last layer: last hidden layer -> output layer, no activation
        layers.append(nn.Linear(self.hidden_size, self.output_size))

        # if softmax is True, append a softmax layer (classification use)
        # BUG FIX: nn.Softmax() without dim relies on deprecated implicit
        # dimension inference; dim=-1 normalizes over the class dimension
        if self.softmax:
            layers.append(nn.Softmax(dim=-1))

        self.forward_model = nn.Sequential(*layers)

    def forward(self, X):
        """Forward pass through the assembled sequential stack."""
        return self.forward_model(X)
# Multi-Layer Perceptron base model fit/predict (training/evaluation)
class MLP_Base:
    """
    Multi-Layer Perceptron base model: training (fit) and evaluation (predict).

    Parameters
    ----------
    input_size: input shape, for tabular data, input_size equals number of features

    hidden_layer: number of hidden layers

    hidden_size: number of neurons in each hidden layer

    output_size: output shape, for classification, output_size equals number of classes;
    for regression, output_size equals 1

    softmax: if True, add softmax function (for classification), default is False

    activation: activation functions, default: "ReLU"
    support activation ["ReLU", "Tanh", "Sigmoid"]

    learning_rate: learning rate, default: None
    None selects a per-optimizer default (0.001 for Adam, 0.1 for SGD)

    optimizer: optimizer, default: "Adam"
    support optimizer ["Adam", "SGD"]

    criteria: criteria, default: "MSE"
    support criteria ["MSE", "CrossEntropy", "MAE", "NegativeLogLikelihood"]

    batch_size: batch size, default: 32

    num_epochs: number of epochs, default: 20

    is_cuda: whether to use cuda, default: True

    seed: random seed for torch, default: 1
    """

    def __init__(
        self,
        input_size,
        hidden_layer,
        hidden_size,
        output_size,
        softmax=False,
        activation="ReLU",
        learning_rate=None,
        optimizer="Adam",
        criteria="MSE",
        batch_size=32,
        num_epochs=20,
        is_cuda=True,
        seed=1,
    ):
        self.input_size = input_size
        self.hidden_layer = hidden_layer
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.softmax = softmax
        self.activation = activation
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.criteria = criteria
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.is_cuda = is_cuda
        self.seed = seed

    def fit(self, X, y):
        """Train the MLP on tensors X, y; returns self."""
        # set seed for reproducibility
        torch.manual_seed(self.seed)

        # use cuda if GPU is detected and is_cuda is True
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() and self.is_cuda else "cpu"
        )
        # warn (instead of failing) when cuda was requested but is unavailable
        if self.is_cuda and str(self.device) == "cpu":
            warnings.warn("No GPU detected, use CPU for training.")

        # build the underlying sequential model on the chosen device
        self.model = MLP_Model(
            input_size=self.input_size,
            hidden_layer=self.hidden_layer,
            hidden_size=self.hidden_size,
            output_size=self.output_size,
            softmax=self.softmax,
            activation=self.activation,
        ).to(self.device)

        # specify optimizer; defaults follow common practice per optimizer
        if self.optimizer == "Adam":
            lr = 0.001 if self.learning_rate is None else self.learning_rate
            optimizer = optim.Adam(self.model.parameters(), lr=lr)
        elif self.optimizer == "SGD":
            lr = 0.1 if self.learning_rate is None else self.learning_rate
            optimizer = optim.SGD(self.model.parameters(), lr=lr)
        else:
            # BUG FIX: an unknown optimizer previously left `optimizer`
            # unbound and crashed later with UnboundLocalError; raise here,
            # consistent with the criteria check below
            raise ValueError("Not recognized optimizer: {}.".format(self.optimizer))

        # specify loss function
        if self.criteria == "MSE":
            criteria = nn.MSELoss()
        elif self.criteria == "MAE":
            criteria = nn.L1Loss()
        elif self.criteria == "CrossEntropy":
            criteria = nn.CrossEntropyLoss()
        elif self.criteria == "NegativeLogLikelihood":
            criteria = nn.NLLLoss()
        else:
            raise ValueError("Not recognized criteria: {}.".format(self.criteria))

        # wrap tensors for mini-batch iteration; drop_last discards a
        # possibly tiny final batch
        train_tensor = TensorDataset(X, y)
        train_loader = DataLoader(
            train_tensor, batch_size=self.batch_size, shuffle=True, drop_last=True
        )

        # standard training loop
        for epoch in range(self.num_epochs):
            for batch_idx, (data, target) in enumerate(train_loader):
                # load batch to device
                data, target = data.to(self.device), target.to(self.device)

                optimizer.zero_grad()
                output = self.model(data)  # forward
                loss = criteria(output, target)  # calculate loss
                loss.backward()  # backpropagation
                optimizer.step()  # update parameters

        return self

    def predict(self, X):
        """Run a forward pass on X and return predictions as a numpy array."""
        # accept both DataFrame and array-like inputs
        if isinstance(X, pd.DataFrame):
            test_tensor = TensorDataset(torch.as_tensor(X.values, dtype=torch.float32))
        else:
            test_tensor = TensorDataset(torch.as_tensor(X, dtype=torch.float32))
        # a single batch containing the whole test set
        test_loader = DataLoader(test_tensor, batch_size=len(test_tensor))

        # inference under no_grad to skip autograd bookkeeping
        for batch_idx, [data] in enumerate(test_loader):
            with torch.no_grad():
                results = self.model(data.to(self.device))

        return results.cpu().numpy()  # make prediction to numpy array
# Multi-Layer Perceptron classifier
class MLP_Classifier(MLP_Base):
    """
    Multi-Layer Perceptron classification model.

    Parameters
    ----------
    hidden_layer: number of hidden layers

    hidden_size: number of neurons in each hidden layer

    softmax: if True, add softmax function (for classification), default is True

    activation: activation functions, default: "ReLU"
    support activation ["ReLU", "Tanh", "Sigmoid"]

    learning_rate: learning rate, default: None

    optimizer: optimizer, default: "Adam"
    support optimizer ["Adam", "SGD"]

    criteria: criteria, default: "CrossEntropy"
    support criteria ["CrossEntropy", "NegativeLogLikelihood"]

    batch_size: batch size, default: 32

    num_epochs: number of epochs, default: 20

    is_cuda: whether to use cuda, default: True

    seed: random seed, default: 1
    """

    def __init__(
        self,
        hidden_layer,
        hidden_size,
        softmax=True,
        activation="ReLU",
        learning_rate=None,
        optimizer="Adam",
        criteria="CrossEntropy",
        batch_size=32,
        num_epochs=20,
        is_cuda=True,
        seed=1,
    ):
        self.hidden_layer = hidden_layer
        self.hidden_size = hidden_size
        self.softmax = softmax
        self.activation = activation
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.criteria = criteria
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.is_cuda = is_cuda
        self.seed = seed

    def fit(self, X, y):
        """Infer input/output sizes from the data, then train via MLP_Base."""
        self.input_size = X.shape[1]  # number of features as input size
        # NOTE(review): pd.unique expects a 1-D array; if y arrives as a
        # one-column DataFrame this may need y.squeeze() -- confirm upstream
        self.output_size = len(pd.unique(y))  # unique classes as output size

        # make sure losses are classification type
        if self.criteria not in ["CrossEntropy", "NegativeLogLikelihood"]:
            # BUG FIX: the message previously claimed only CrossEntropy is
            # accepted although NegativeLogLikelihood passes the check above
            raise ValueError("Loss must be CrossEntropy or NegativeLogLikelihood!")

        # base __init__ is deferred to fit because input/output sizes are
        # data-dependent and only known here
        super().__init__(
            input_size=self.input_size,
            hidden_layer=self.hidden_layer,
            hidden_size=self.hidden_size,
            output_size=self.output_size,
            softmax=self.softmax,
            activation=self.activation,
            learning_rate=self.learning_rate,
            optimizer=self.optimizer,
            criteria=self.criteria,
            batch_size=self.batch_size,
            num_epochs=self.num_epochs,
            is_cuda=self.is_cuda,
            seed=self.seed,
        )

        # convert to tensors (labels as long for CrossEntropy/NLL losses)
        X = torch.as_tensor(
            X.values if isinstance(X, pd.DataFrame) else X, dtype=torch.float
        )
        y = torch.as_tensor(
            y.values if isinstance(y, pd.DataFrame) else y, dtype=torch.long
        )

        return super().fit(X, y)

    def predict(self, X):
        """Return hard class labels (argmax over class scores)."""
        # need to wrap predict function to convert output format
        return assign_classes(super().predict(X))

    def predict_proba(self, X):
        """Return the raw per-class outputs instead of the argmax label."""
        # no need to use argmax to select the one class
        # but to return full probability
        return super().predict(X)
# Multi-Layer Perceptron regressor
class MLP_Regressor(MLP_Base):
    """
    Multi-Layer Perceptron regression model.

    Parameters
    ----------
    hidden_layer: number of hidden layers

    hidden_size: number of neurons in each hidden layer

    softmax: if True, add softmax function (for classification), default is False

    activation: activation functions, default: "ReLU"
    support activation ["ReLU", "Tanh", "Sigmoid"]

    learning_rate: learning rate, default: None

    optimizer: optimizer, default: "Adam"
    support optimizer ["Adam", "SGD"]

    criteria: criteria, default: "MSE"
    support criteria ["MSE", "MAE"]

    batch_size: batch size, default: 32

    num_epochs: number of epochs, default: 20

    is_cuda: whether to use cuda, default: True

    seed: random seed, default: 1
    """

    def __init__(
        self,
        hidden_layer,
        hidden_size,
        softmax=False,
        activation="ReLU",
        learning_rate=None,
        optimizer="Adam",
        criteria="MSE",
        batch_size=32,
        num_epochs=20,
        is_cuda=True,
        seed=1,
    ):
        self.hidden_layer = hidden_layer
        self.hidden_size = hidden_size
        self.softmax = softmax
        self.activation = activation
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.criteria = criteria
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.is_cuda = is_cuda
        self.seed = seed

    def fit(self, X, y):
        """Infer input size from the data, then train via MLP_Base."""
        self.input_size = X.shape[1]  # number of features as input size
        self.output_size = 1  # output size is 1 (regression purpose)

        # make sure losses are regression type
        if self.criteria not in ["MSE", "MAE"]:
            raise ValueError("Loss must be MSE or MAE!")

        # base __init__ is deferred to fit because input size is
        # data-dependent and only known here
        super().__init__(
            input_size=self.input_size,
            hidden_layer=self.hidden_layer,
            hidden_size=self.hidden_size,
            softmax=self.softmax,
            output_size=self.output_size,
            activation=self.activation,
            learning_rate=self.learning_rate,
            optimizer=self.optimizer,
            criteria=self.criteria,
            batch_size=self.batch_size,
            num_epochs=self.num_epochs,
            is_cuda=self.is_cuda,
            seed=self.seed,
        )

        # convert to tensors (targets stay float for MSE/MAE)
        X = torch.as_tensor(
            X.values if isinstance(X, pd.DataFrame) else X, dtype=torch.float
        )
        y = torch.as_tensor(
            y.values if isinstance(y, pd.DataFrame) else y, dtype=torch.float
        )

        return super().fit(X, y)

    def predict(self, X):
        """Return regression predictions as a numpy array."""
        return super().predict(X)

    def predict_proba(self, X):
        # BUG FIX: the exception was previously *returned*, not raised, so
        # callers silently received an exception object instead of an error
        raise NotImplementedError("predict_proba is not implemented for regression.")
|
PanyiDong/AutoML | main.py | <reponame>PanyiDong/AutoML<gh_stars>0
"""
File: main.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /main.py
File Created: Friday, 25th February 2022 6:13:42 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 11:32:45 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
from sqlite3 import DatabaseError
import sklearn
import My_AutoML
from My_AutoML import load_data, train_test_split, type_of_task
from My_AutoML import AutoTabular, AutoTabularClassifier, AutoTabularRegressor
# map the CLI task_type value to the corresponding AutoML pipeline class
model_dict = dict(
    auto=AutoTabular,
    classification=AutoTabularClassifier,
    regression=AutoTabularRegressor,
)
# read arguments
def _str2bool(value):
    """Parse a command-line boolean flag value.

    BUG FIX: argparse's ``type=bool`` is broken for this purpose because
    ``bool("False")`` is True (any non-empty string is truthy). Map common
    textual forms explicitly instead; unrecognized text parses as False.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ("true", "t", "yes", "y", "1")


parser = argparse.ArgumentParser(description="set arguments for AutoML job")
# data-related arguments
parser.add_argument("--data_folder", default="", type=str, help="read-in data folder")
parser.add_argument(
    "--train_data", type=str, help="train data name (no file extension needed)"
)
parser.add_argument(
    "--test_data",
    default="",
    type=str,
    help="test data name (if not provided, will train_test_split on train data)",
)
parser.add_argument("--response", type=str, help="response column name")
parser.add_argument(
    "--test_eval", default="auto", type=str, help="evaluation metric on test data"
)
# model-related arguments
parser.add_argument(
    "--task_type",
    default="auto",
    type=str,
    help="task_type: auto, regression, classification",
)
parser.add_argument(
    "--n_estimators",
    default=5,
    type=int,
    help="number of pipelines used to build ensemble",
)
parser.add_argument(
    "--timeout", default=360, type=int, help="total time allowed for the task"
)
parser.add_argument(
    "--max_evals",
    default=64,
    type=int,
    help="number of evaluation processes for the task",
)
parser.add_argument(
    "--temp_directory",
    default="tmp",
    type=str,
    help="path to store temporary model/evaluation information",
)
parser.add_argument(
    "--delete_temp_after_terminate",
    default=False,
    type=_str2bool,
    help="whether to delete temporary information after training",
)
parser.add_argument(
    "--save", default=True, type=_str2bool, help="whether to save optimal training model"
)
parser.add_argument(
    "--ignore_warning",
    default=True,
    type=_str2bool,
    help="whether to ignore all warnings",
)
# NOTE: the original used ``type=str or list``, which evaluates to plain
# ``str`` anyway; nargs="+" already collects multiple values into a list
parser.add_argument(
    "--encoder",
    default="auto",
    nargs="+",
    type=str,
    help="encoders for the tasks, auto or a list of encoders",
)
parser.add_argument(
    "--imputer",
    default="auto",
    nargs="+",
    type=str,
    help="imputers for the tasks, auto or a list of imputers",
)
parser.add_argument(
    "--balancing",
    default="auto",
    nargs="+",
    type=str,
    help="balancings for the tasks, auto or a list of balancings",
)
parser.add_argument(
    "--scaling",
    default="auto",
    nargs="+",
    type=str,
    help="scalings for the tasks, auto or a list of scalings",
)
parser.add_argument(
    "--feature_selection",
    default="auto",
    nargs="+",
    type=str,
    help="feature_selections for the tasks, auto or a list of feature_selections",
)
parser.add_argument(
    "--models",
    default="auto",
    nargs="+",
    type=str,
    help="models for the tasks, auto or a list of models",
)
parser.add_argument(
    "--validation",
    default=True,
    type=_str2bool,
    help="whether to split a validation set",
)
parser.add_argument(
    "--valid_size", default=0.15, type=float, help="validation set percentage to split"
)
parser.add_argument(
    "--objective",
    default=None,
    type=str,
    help="evaluation metrics for tasks performance",
)
parser.add_argument(
    "--search_algo",
    default="HyperOpt",
    type=str,
    help="model selection/hyperparameter optimization search algorithm",
)
# NOTE(review): ``type=dict`` cannot parse a command-line string and will
# crash if this flag is supplied; only the default {} works -- consider a
# JSON/literal parser here
parser.add_argument(
    "--search_algo_settings",
    default={},
    type=dict,
    help="additional settings for the search algorithm",
)
parser.add_argument(
    "--search_scheduler",
    default="FIFOScheduler",
    type=str,
    help="search scheduler",
)
parser.add_argument(
    "--progress_reporter",
    default="CLIReporter",
    type=str,
    help="progress reporting manager",
)
parser.add_argument(
    "--full_status",
    default=False,
    type=_str2bool,
    help="whether to print full status of the job",
)
parser.add_argument("--seed", default=1, type=int, help="random seed")
args = parser.parse_args()
# convert parsed arguments to job-level constants
# model selection
MODEL = model_dict[args.task_type]

# search budget and optimization configuration
N_ESTIMATORS = args.n_estimators
TIMEOUT = args.timeout
MAX_EVALS = args.max_evals
SEARCH_ALGO = args.search_algo
SEARCH_ALGO_SETTINGS = args.search_algo_settings
SEARCH_SCHEDULER = args.search_scheduler
PROGRESS_REPORTER = args.progress_reporter

# bookkeeping and persistence
TEMP_DIRECTORY = args.temp_directory
DELETE_TEMP_AFTER_TERMINATE = args.delete_temp_after_terminate
SAVE = args.save
MODEL_NAME = f"{args.train_data}_model"
IGNORE_WARNING = args.ignore_warning
FULL_STATUS = args.full_status

# pipeline component search spaces
ENCODER = args.encoder
IMPUTER = args.imputer
BALANCING = args.balancing
SCALING = args.scaling
FEATURE_SELECTION = args.feature_selection
MODELS = args.models

# validation split and objective
VALIDATION = args.validation
VALID_SIZE = args.valid_size
OBJECTIVE = args.objective

# reproducibility
SEED = args.seed
if __name__ == "__main__":

    print("Preprocessing:")

    train = args.train_data
    test = args.test_data
    response = args.response

    print("Train/Test identification.")

    # if test_data provided, use train/test data separately
    if test != "":
        database = load_data().load(args.data_folder, [train, test])
        # check whether train/test set share same columns
        if set(database[train].columns) != set(database[test].columns):
            raise DatabaseError("Train/Test datasets have different columns!")
        features = list(database[train].columns)
        features.remove(response)
        train_X, train_y = database[train][features], database[train][[response]]
        test_X, test_y = database[test][features], database[test][[response]]
    # if no test_data provided, read only train data
    # and use train_test_split to get train/test sets
    else:
        database = load_data().load(args.data_folder, [train])
        features = list(database[train].columns)
        features.remove(response)
        train_X, test_X, train_y, test_y = train_test_split(
            database[train][features], database[train][[response]]
        )

    print("Training:")

    # construct the model by parameters
    model = MODEL(
        n_estimators=N_ESTIMATORS,
        timeout=TIMEOUT,
        max_evals=MAX_EVALS,
        temp_directory=TEMP_DIRECTORY,
        delete_temp_after_terminate=DELETE_TEMP_AFTER_TERMINATE,
        save=SAVE,
        model_name=MODEL_NAME,
        ignore_warning=IGNORE_WARNING,
        encoder=ENCODER,
        imputer=IMPUTER,
        balancing=BALANCING,
        scaling=SCALING,
        feature_selection=FEATURE_SELECTION,
        models=MODELS,
        validation=VALIDATION,
        valid_size=VALID_SIZE,
        objective=OBJECTIVE,
        search_algo=SEARCH_ALGO,
        search_algo_settings=SEARCH_ALGO_SETTINGS,
        search_scheduler=SEARCH_SCHEDULER,
        progress_reporter=PROGRESS_REPORTER,
        full_status=FULL_STATUS,
        seed=SEED,
    )

    # training process
    model.fit(train_X, train_y)

    print("Evaluation:")

    # test and evaluation
    y_pred = model.predict(test_X)

    # select from evaluation metrics
    eval_metrics = {
        "accuracy": sklearn.metrics.accuracy_score,
        "precision": sklearn.metrics.precision_score,
        "auc": sklearn.metrics.roc_auc_score,
        "hinge": sklearn.metrics.hinge_loss,
        "f1": sklearn.metrics.f1_score,
        "MSE": sklearn.metrics.mean_squared_error,
        "MAE": sklearn.metrics.mean_absolute_error,
        "MSLE": sklearn.metrics.mean_squared_log_error,
        "R2": sklearn.metrics.r2_score,
        "MAX": sklearn.metrics.max_error,
    }

    # "auto" picks a sensible default metric from the inferred task type
    if args.test_eval == "auto":
        _type = type_of_task(train_y)
        if _type in ["binary", "multiclass"]:
            args.test_eval = "accuracy"
        elif _type in ["integer", "continuous"]:
            args.test_eval = "MSE"

    # BUG FIX: sklearn metrics take (y_true, y_pred); the arguments were
    # previously reversed, which silently skews asymmetric metrics (e.g. R2)
    print(
        "The {} of test data is: {:.4f}".format(
            args.test_eval, eval_metrics[args.test_eval](test_y, y_pred)
        )
    )
|
PanyiDong/AutoML | tests/test_scaling/test_scaling.py | <reponame>PanyiDong/AutoML<filename>tests/test_scaling/test_scaling.py<gh_stars>1-10
"""
File: test_scaling.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_scaling/test_scaling.py
File Created: Saturday, 9th April 2022 1:56:15 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Sunday, 17th April 2022 5:23:45 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
import numpy as np
import pandas as pd
from My_AutoML._scaling import scalings
# toy 10-row feature frame and alternating binary response shared by all tests
_rows = [
    (3, 9), (8, 7), (6, 2), (7, 1), (9, 6),
    (9, 8), (8, 8), (8, 9), (7, 3), (5, 6),
]
data_X = pd.DataFrame(_rows, columns=["col_1", "col_2"])
data_y = pd.DataFrame({"col_3": [1, 0] * 5})
class TestScaling(unittest.TestCase):
    def test_Scaling(self):
        # run every registered scaling method through fit_transform and,
        # where supported, inverse_transform, checking _fitted bookkeeping
        self.method_dict = scalings
        self.method_names = list(self.method_dict.keys())
        self.method_objects = list(self.method_dict.values())

        for method_name, method_object in self.method_dict.items():
            scaler = method_object()
            transformed = scaler.fit_transform(data_X, data_y)

            # fit_transform must flag the instance as fitted
            self.assertEqual(
                scaler._fitted,
                True,
                "The fit_transform method {} is not correctly fitted.".format(
                    method_name
                ),
            )

            # Winsorization has no inverse; all other methods must clear
            # the fitted flag after inverse_transform
            if method_name != "Winsorization":
                scaler.inverse_transform(transformed)
                self.assertEqual(
                    scaler._fitted,
                    False,
                    "The inverse method {} is not correctly fitted.".format(
                        method_name
                    ),
                )
# NOTE(review): function name has a typo ("NoScaing"); kept unchanged so the
# public pytest id does not move
def test_NoScaing():
    from My_AutoML._scaling._scaling import NoScaling

    scaler = NoScaling()
    scaler.fit(data_X, data_y)
    transformed = scaler.transform(data_X)

    # fitting must set the flag
    assert scaler._fitted, "The NoScaling method is not correctly fitted."

    # inverse_transform must reset it
    restored = scaler.inverse_transform(transformed)
    assert scaler._fitted == False, "The NoScaling method is not correctly inversed."
# test deprecated/legacy methods
def test_feature_manipulation():
    from My_AutoML._scaling import Feature_Manipulation

    # 5 x 3 frame of consecutive integers
    frame = pd.DataFrame(
        np.arange(15).reshape(5, 3), columns=["column_" + str(i + 1) for i in range(3)]
    )

    manipulated = Feature_Manipulation(
        frame,
        columns=["column_1", "column_2", "column_3"],
        manipulation=["* 100", "ln", "+ 1"],
        rename_columns={"column_2": "log_column_2"},
    )

    # build the expected frame by applying each manipulation manually
    expected = frame.copy(deep=True)
    expected["column_1_* 100"] = frame["column_1"] * 100
    expected["log_column_2"] = np.log(frame["column_2"])
    expected["column_3_+ 1"] = frame["column_3"] + 1

    assert (
        (manipulated == expected).all().all()
    ), "The feature manipulation is not correctly done."
def test_feature_truncation():
    from My_AutoML._scaling import Feature_Truncation

    frame = pd.DataFrame(
        np.random.randint(0, 100, size=(100, 10)),
        columns=["column_" + str(i) for i in range(10)],
    )

    # one random quantile in [0.8, 1.0) per column
    truncator = Feature_Truncation(
        quantile=[0.2 * np.random.random() + 0.8 for _ in range(10)],
    )

    # fit + transform must cap every column at its quantile
    truncator.fit(frame)
    capped = truncator.transform(frame)
    assert (
        (capped <= truncator.quantile_list).all().all()
    ), "The feature truncation is not correctly done."

    # fit_transform must behave the same as fit followed by transform
    capped_again = truncator.fit_transform(frame)
    assert (
        (capped_again <= truncator.quantile_list).all().all()
    ), "The feature truncation is not correctly done."
|
PanyiDong/AutoML | My_AutoML/_encoding/_encoding.py | """
File: _encoding.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_encoding/_encoding.py
File Created: Friday, 25th February 2022 6:13:42 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 15th April 2022 7:37:38 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
from sklearn import preprocessing
from My_AutoML._utils._base import is_date
from My_AutoML._utils._data import formatting
class DataEncoding(formatting):
    """
    Data preprocessing
    1. convert string type features to numerical categorical/dummy variables
    2. transform non-categorical features
    3. refit for test data (in cases train/test data is already divided),
    using category table recorded while convert train data, only deal with non nan values

    Parameters
    ----------
    df: data

    dummy_coding: whether to use dummy variables, default = False
    if True, convert string categories to numerical categories(0, 1, 2, ...)

    transform: how to transform numerical features, default = False
    'standardize', 'center', 'log' are available
    """

    def __init__(self, dummy_coding=False, transform=False):
        self.dummy_coding = dummy_coding
        self.transform = transform

        self._fitted = False  # record whether the method is fitted

    def fit(self, _df):
        # Encode a training frame in place (on a deep copy, so the caller's
        # frame is never mutated) and record per-column state for refit().
        df = _df.copy(deep=True)
        features = list(df.columns)

        # per-column category table recorded here and reused by refit()
        self.category = pd.DataFrame()
        # per-column scale maps, filled only when transform is
        # "standardize" (mean + sigma) or "center" (mean only)
        self.mean_scaler = {}
        self.sigma_scaler = {}

        for column in features:
            # Date-like object columns with more than 31 distinct values are
            # converted to numeric timestamps instead of categories.
            # NOTE(review): the 31 threshold presumably separates real dates
            # from day-of-month style categoricals -- confirm intent.
            if (
                df[column].dtype == object
                and is_date(df[[column]])
                and len(df[column].dropna().unique()) > 31
            ):
                df[column] = pd.to_numeric(pd.to_datetime(df[column]))
            elif (df[column].dtype == object) or (str(df[column].dtype) == "category"):
                # dummy coding for string categorical features
                if str(df[column].dtype) == "category":
                    df[column] = df[column].astype(str)
                if self.dummy_coding == True:
                    # one 0/1 indicator column per observed category value
                    unique_value = np.sort(df[column].dropna().unique())
                    # record observed categories (column-wise concat keeps
                    # one column per encoded feature in self.category)
                    if self.category.empty:
                        self.category = pd.DataFrame({column: unique_value})
                    else:
                        self.category = pd.concat(
                            [self.category, pd.DataFrame({column: unique_value})],
                            axis=1,
                        )
                    for elem in unique_value:
                        df[column + "_" + str(elem)] = (df[column] == elem).astype(int)
                else:
                    # ordinal coding: map sorted category values to 0, 1, 2, ...
                    unique_value = np.sort(df[column].dropna().unique())
                    if self.category.empty:
                        self.category = pd.DataFrame({column: unique_value})
                    else:
                        self.category = pd.concat(
                            [self.category, pd.DataFrame({column: unique_value})],
                            axis=1,
                        )
                    for i in range(len(unique_value)):
                        df.loc[df[column] == unique_value[i], column] = i
                    # cast only the non-missing entries; NaNs stay untouched
                    df.loc[~df[column].isnull(), column] = df.loc[
                        ~df[column].isnull(), column
                    ].astype(int)
            else:
                # numeric column: cast non-missing entries to float first
                df.loc[~df[column].isnull(), column] = df.loc[
                    ~df[column].isnull(), column
                ].astype(float)
                # standardize numerical features
                if self.transform == "standardize":
                    standard_scaler = preprocessing.StandardScaler().fit(
                        df[[column]].values
                    )
                    # save scale map for scale back
                    self.mean_scaler.update({column: standard_scaler.mean_[0]})
                    self.sigma_scaler.update({column: standard_scaler.scale_[0]})
                    df[column] = standard_scaler.transform(df[[column]].values)
                elif self.transform == "center":
                    standard_scaler = preprocessing.StandardScaler().fit(
                        df[[column]].values
                    )
                    # save scale map for scale back
                    self.mean_scaler.update({column: standard_scaler.mean_[0]})
                    df.loc[~df[column].isnull(), column] = (
                        df.loc[~df[column].isnull(), column] - standard_scaler.mean_[0]
                    )
                elif self.transform == "log":
                    # log only the non-missing entries
                    # NOTE(review): non-positive values will produce -inf/NaN
                    # here -- presumably inputs are positive; confirm upstream
                    df.loc[~df[column].isnull(), column] = np.log(
                        df.loc[~df[column].isnull(), column]
                    )

        # remove categorical variables
        # (dummy indicators replace the original string columns)
        if self.dummy_coding == True:
            df.drop(columns=list(self.category.columns), inplace=True)

        self._fitted = True

        return df

    def refit(self, _df):
        # Re-apply the encoding to a new (test) frame, using the category
        # table recorded during fit(); works on a deep copy.
        df = _df.copy(deep=True)

        # nothing was recorded during fit -> nothing to map
        if self.category.empty:
            return df

        categorical_features = list(self.category.columns)
        for column in list(df.columns):
            # same date-vs-category heuristic as in fit()
            if (
                df[column].dtype == object
                and is_date(df[[column]])
                and len(df[column].dropna().unique()) > 31
            ):
                df[column] = pd.to_numeric(pd.to_datetime(df[column]))
            elif df[column].dtype == object or str(df[column].dtype) == "category":
                if str(df[column].dtype) == "category":
                    df[column] = df[column].astype(str)
                if (
                    column in categorical_features
                ):  # map categorical testdata based on category
                    unique_values = self.category.loc[
                        self.category[column].notnull(), column
                    ].values  # Select only non nan values
                    if self.dummy_coding == True:
                        for value in unique_values:
                            df[str(column) + "_" + str(value)] = (
                                df[column] == value
                            ).astype(int)
                        # Notice: categorical values not appear in traindata will be dropped,
                        # it's not a problem seems it can not be trained if it's not even in train data
                        # if False in (np.sort(unique_values) == np.sort(df[column].unique())) :
                        #     raise ValueError('Testdata has unkown categories!')
                    else:
                        # update, put notin in front of refit, so after refit, there will be no mistake
                        # categories unseen in training become missing
                        # NOTE: np.NaN alias is removed in NumPy 2.0 -- prefer np.nan
                        df.loc[~df[column].isin(unique_values), column] = np.NaN
                        for i in range(len(unique_values)):
                            df.loc[df[column] == unique_values[i], column] = i
                        df.loc[~df[column].isnull(), column] = df.loc[
                            ~df[column].isnull(), column
                        ].astype(int)
            else:
                df.loc[~df[column].isnull(), column] = df.loc[
                    ~df[column].isnull(), column
                ].astype(float)
                # standardize numerical features
                # NOTE(review): this re-fits the scaler on the new data and
                # overwrites the train-time mean/sigma maps, instead of
                # reusing self.mean_scaler/self.sigma_scaler recorded in
                # fit() -- looks like train/test leakage; confirm intended.
                if self.transform == "standardize":
                    standard_scaler = preprocessing.StandardScaler().fit(
                        df[[column]].values
                    )
                    # save scale map for scale back
                    self.mean_scaler.update({column: standard_scaler.mean_[0]})
                    self.sigma_scaler.update({column: standard_scaler.scale_[0]})
                    df[column] = standard_scaler.transform(df[[column]].values)
                elif self.transform == "center":
                    standard_scaler = preprocessing.StandardScaler().fit(
                        df[[column]].values
                    )
                    # save scale map for scale back
                    self.mean_scaler.update({column: standard_scaler.mean_[0]})
                    df.loc[~df[column].isnull(), column] = (
                        df.loc[~df[column].isnull(), column] - standard_scaler.mean_[0]
                    )
                elif self.transform == "log":
                    df.loc[~df[column].isnull(), column] = np.log(
                        df.loc[~df[column].isnull(), column]
                    )

        # remove categorical variables
        if self.dummy_coding == True:
            df.drop(columns=list(self.category.columns), inplace=True)

        return df
class CategoryShift:
    """
    Shift every (numerically-encoded) category up by 3.

    Parameters
    ----------
    seed: random seed, default = 1
    (kept for interface compatibility; no randomness is used here)
    """

    def __init__(self, seed=1):
        self.seed = seed

        self._fitted = False  # whether the model has been fitted

    def fit(self, X):
        """Validate column dtypes and mark the instance as fitted.

        Raises ValueError on object/categorical columns: the +3 shift is
        only meaningful for numerically-encoded categories.
        """
        for _column in list(X.columns):
            if X[_column].dtype == object:
                raise ValueError("Cannot handle object type!")
            elif str(X[_column].dtype) == "category":
                raise ValueError("Cannot handle categorical type!")

        self._fitted = True

        # BUG FIX: previously returned None; returning self makes
        # fit(...).transform(...) chainable (sklearn convention) and stays
        # backward-compatible for callers that ignore the return value
        return self

    def transform(self, X):
        """Return a shifted deep copy; the caller's frame is untouched."""
        _X = X.copy(deep=True)
        _X += 3
        return _X
|
PanyiDong/AutoML | My_AutoML/_hyperparameters/_ray/_imputer_hyperparameter.py | """
File: _imputer_hyperparameter.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/_ray/_imputer_hyperparameter.py
File Created: Wednesday, 6th April 2022 10:06:01 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:24:14 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from ray import tune
# imputer
# Search space for the data-imputation step: a list of candidate imputers,
# one dict per method. Each dict carries one "imputer_<k>": "<MethodName>"
# entry, plus "<MethodName>_<param>": <ray.tune sampler> entries for that
# method's tunable hyperparameters (entries with no extra keys apparently
# expose no tunable parameters).
imputer_hyperparameter = [
    {
        "imputer_1": "SimpleImputer",
        # statistic used to fill missing values
        "SimpleImputer_method": tune.choice(
            ["mean", "zero", "median", "most frequent"]
        ),
    },
    {"imputer_2": "DummyImputer"},
    {"imputer_3": "JointImputer"},
    {
        "imputer_4": "ExpectationMaximization",
        # maximum EM iterations and convergence threshold
        "ExpectationMaximization_iterations": tune.qrandint(10, 100, 1),
        "ExpectationMaximization_threshold": tune.uniform(1e-5, 1),
    },
    {
        "imputer_5": "KNNImputer",
        "KNNImputer_n_neighbors": tune.qrandint(1, 15, 1),
        "KNNImputer_fold": tune.qrandint(5, 15, 1),
    },
    {"imputer_6": "MissForestImputer"},
    {"imputer_7": "MICE", "MICE_cycle": tune.qrandint(5, 20, 1)},
    {"imputer_8": "GAIN"},
    # experimental imputers, currently disabled
    # {"imputer_9": "AAI_kNN"},
    # {"imputer_10": "KMI"},
    # {"imputer_11": "CMI"},
    # {"imputer_12": "k_Prototype_NN"},
]
|
PanyiDong/AutoML | My_AutoML/_hyperparameters/_hyperopt/_feature_selection_hyperparameter.py | <gh_stars>1-10
"""
File: _feature_selection_hyperparameter.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/_hyperopt/_feature_selection_hyperparameter.py
File Created: Tuesday, 5th April 2022 11:04:57 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:22:54 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from hyperopt import hp
from hyperopt.pyll import scope
# feature_selection
# Search space for the feature-selection / preprocessing step: one dict per
# candidate method. hyperopt requires every hp.* label to be globally unique
# across the whole search space.
feature_selection_hyperparameter = [
    {"feature_selection": "no_processing"},
    {"feature_selection": "LDASelection"},
    {"feature_selection": "PCA_FeatureSelection"},
    {"feature_selection": "RBFSampler"},
    {"feature_selection": "FeatureFilter"},
    {"feature_selection": "ASFFS"},
    {"feature_selection": "GeneticAlgorithm"},
    {
        "feature_selection": "extra_trees_preproc_for_classification",
        "n_estimators": hp.choice(
            "extra_trees_preproc_for_classification_n_estimators", [100]
        ),
        "criterion": hp.choice(
            "extra_trees_preproc_for_classification_criterion", ["gini", "entropy"]
        ),
        "min_samples_leaf": scope.int(
            hp.quniform(
                "extra_trees_preproc_for_classification_min_samples_leaf", 1, 20, 1
            )
        ),
        "min_samples_split": scope.int(
            hp.quniform(
                "extra_trees_preproc_for_classification_min_samples_split", 2, 20, 1
            )
        ),
        "max_features": hp.uniform(
            "extra_trees_preproc_for_classification_max_features", 0.1, 1.0
        ),
        "bootstrap": hp.choice(
            "extra_trees_preproc_for_classification_bootstrap", [True, False]
        ),
        "max_leaf_nodes": hp.choice(
            "extra_trees_preproc_for_classification_max_leaf_nodes", [None]
        ),
        "max_depth": hp.choice(
            "extra_trees_preproc_for_classification_max_depth", [None]
        ),
        "min_weight_fraction_leaf": hp.choice(
            "extra_trees_preproc_for_classification_min_weight_fraction_leaf", [0.0]
        ),
        "min_impurity_decrease": hp.choice(
            "extra_trees_preproc_for_classification_min_impurity_decrease", [0.0]
        ),
    },
    {
        "feature_selection": "extra_trees_preproc_for_regression",
        "n_estimators": hp.choice(
            "extra_trees_preproc_for_regression_n_estimators", [100]
        ),
        "criterion": hp.choice(
            "extra_trees_preproc_for_regression_criterion",
            ["mse", "friedman_mse", "mae"],
        ),
        "min_samples_leaf": scope.int(
            hp.quniform("extra_trees_preproc_for_regression_min_samples_leaf", 1, 20, 1)
        ),
        "min_samples_split": scope.int(
            hp.quniform(
                "extra_trees_preproc_for_regression_min_samples_split", 2, 20, 1
            )
        ),
        "max_features": hp.uniform(
            "extra_trees_preproc_for_regression_max_features", 0.1, 1.0
        ),
        "bootstrap": hp.choice(
            "extra_trees_preproc_for_regression_bootstrap", [True, False]
        ),
        "max_leaf_nodes": hp.choice(
            "extra_trees_preproc_for_regression_max_leaf_nodes", [None]
        ),
        "max_depth": hp.choice("extra_trees_preproc_for_regression_max_depth", [None]),
        "min_weight_fraction_leaf": hp.choice(
            "extra_trees_preproc_for_regression_min_weight_fraction_leaf", [0.0]
        ),
    },
    {
        "feature_selection": "fast_ica",
        # n_components only selected when whiten = True
        "algorithm": hp.choice("fast_ica_algorithm", ["parallel", "deflation"]),
        "whiten": hp.choice("fast_ica_whiten", [True, False]),
        "fun": hp.choice("fast_ica_fun", ["logcosh", "exp", "cube"]),
        "n_components": scope.int(hp.quniform("fast_ica_n_components", 10, 2000, 1)),
    },
    {
        "feature_selection": "feature_agglomeration",
        # forbid linkage = 'ward' while affinity in ['manhattan', 'cosine']
        "n_clusters": scope.int(
            hp.quniform("feature_agglomeration_n_clusters", 2, 400, 1)
        ),
        "affinity": hp.choice(
            "feature_agglomeration_affinity", ["euclidean", "manhattan", "cosine"]
        ),
        "linkage": hp.choice(
            "feature_agglomeration_linkage", ["ward", "complete", "average"]
        ),
        "pooling_func": hp.choice(
            "feature_agglomeration_pooling_func", ["mean", "median", "max"]
        ),
    },
    {
        "feature_selection": "kernel_pca",
        # degree only selected when kernel = 'poly'
        # coef0 only selected when kernel in ['poly', 'sigmoid']
        # gamma only selected when kernel in ['poly', 'rbf']
        "n_components": scope.int(hp.quniform("kernel_pca_n_components", 10, 2000, 1)),
        "kernel": hp.choice("kernel_pca_kernel", ["poly", "rbf", "sigmoid", "cosine"]),
        "gamma": hp.loguniform("kernel_pca_gamma", np.log(3.0517578125e-05), np.log(8)),
        "degree": scope.int(hp.quniform("kernel_pca_degree", 2, 5, 1)),
        "coef0": hp.uniform("kernel_pca_coef0", -1, 1),
    },
    {
        "feature_selection": "kitchen_sinks",
        "gamma": hp.loguniform(
            "kitchen_sinks_gamma", np.log(3.0517578125e-05), np.log(8)
        ),
        "n_components": scope.int(
            hp.quniform("kitchen_sinks_n_components", 50, 10000, 1)
        ),
    },
    {
        "feature_selection": "liblinear_svc_preprocessor",
        # forbid penalty = 'l1' while loss = 'hinge'
        "penalty": hp.choice("liblinear_svc_preprocessor_penalty", ["l1"]),
        "loss": hp.choice("liblinear_svc_preprocessor_loss", ["squared_hinge"]),
        "dual": hp.choice("liblinear_svc_preprocessor_dual", [False]),
        "tol": hp.loguniform(
            "liblinear_svc_preprocessor_tol", np.log(1e-5), np.log(1e-1)
        ),
        "C": hp.loguniform(
            "liblinear_svc_preprocessor_C", np.log(0.03125), np.log(32768)
        ),
        "multi_class": hp.choice("liblinear_svc_preprocessor_multi_class", ["ovr"]),
        "fit_intercept": hp.choice("liblinear_svc_preprocessor_fit_intercept", [True]),
        "intercept_scaling": hp.choice(
            "liblinear_svc_preprocessor_intercept_scaling", [1]
        ),
    },
    {
        "feature_selection": "nystroem_sampler",
        # degree only selected when kernel = 'poly'
        # coef0 only selected when kernel in ['poly', 'sigmoid']
        # gamma only selected when kernel in ['poly', 'rbf', 'sigmoid']
        "kernel": hp.choice(
            "nystroem_sampler_kernel", ["poly", "rbf", "sigmoid", "cosine"]
        ),
        "n_components": scope.int(
            hp.quniform("nystroem_sampler_n_components", 50, 10000, 1)
        ),
        "gamma": hp.loguniform(
            "nystroem_sampler_gamma", np.log(3.0517578125e-05), np.log(8)
        ),
        "degree": scope.int(hp.quniform("nystroem_sampler_degree", 2, 5, 1)),
        "coef0": hp.uniform("nystroem_sampler_coef0", -1, 1),
    },
    {
        "feature_selection": "pca",
        "keep_variance": hp.uniform("pca_keep_variance", 0.5, 0.9999),
        "whiten": hp.choice("pca_whiten", [True, False]),
    },
    {
        "feature_selection": "polynomial",
        "degree": scope.int(hp.quniform("polynomial_degree", 2, 3, 1)),
        "interaction_only": hp.choice("polynomial_interaction_only", [True, False]),
        "include_bias": hp.choice("polynomial_include_bias", [True, False]),
    },
    {
        "feature_selection": "random_trees_embedding",
        "n_estimators": scope.int(
            hp.quniform("random_trees_embedding_n_estimators", 10, 100, 1)
        ),
        "max_depth": scope.int(
            hp.quniform("random_trees_embedding_max_depth", 2, 10, 1)
        ),
        "min_samples_split": scope.int(
            hp.quniform("random_trees_embedding_min_samples_split", 2, 20, 1)
        ),
        "min_samples_leaf": scope.int(
            hp.quniform("random_trees_embedding_min_samples_leaf", 1, 20, 1)
        ),
        "min_weight_fraction_leaf": hp.choice(
            "random_trees_embedding_min_weight_fraction_leaf", [1.0]
        ),
        "max_leaf_nodes": hp.choice("random_trees_embedding_max_leaf_nodes", [None]),
        "bootstrap": hp.choice("random_trees_embedding_bootstrap", [True, False]),
    },
    {
        "feature_selection": "select_percentile_classification",
        "percentile": scope.int(
            hp.quniform("select_percentile_classification_percentile", 1, 99, 1)
        ),
        "score_func": hp.choice(
            "select_percentile_classification_score_func",
            ["chi2", "f_classif", "mutual_info"],
        ),
    },
    {
        "feature_selection": "select_percentile_regression",
        "percentile": scope.int(
            hp.quniform("select_percentile_regression_percentile", 1, 99, 1)
        ),
        "score_func": hp.choice(
            "select_percentile_regression_score_func", ["f_regression", "mutual_info"]
        ),
    },
    {
        "feature_selection": "select_rates_classification",
        "alpha": hp.uniform("select_rates_classification_alpha", 0.01, 0.5),
        "score_func": hp.choice(
            "select_rates_classification_score_func",
            ["chi2", "f_classif", "mutual_info_classif"],
        ),
        "mode": hp.choice("select_rates_classification_mode", ["fpr", "fdr", "fwe"]),
    },
    {
        "feature_selection": "select_rates_regression",
        "alpha": hp.uniform("select_rates_regression_alpha", 0.01, 0.5),
        # BUG FIX: these two labels previously reused the
        # "select_rates_classification_*" names, which duplicates labels already
        # used above — hyperopt raises DuplicateLabel when both dicts appear in
        # one hp.choice search space
        "score_func": hp.choice(
            "select_rates_regression_score_func", ["f_regression"]
        ),
        "mode": hp.choice("select_rates_regression_mode", ["fpr", "fdr", "fwe"]),
    },
    {
        "feature_selection": "truncatedSVD",
        "target_dim": scope.int(hp.quniform("truncatedSVD_target_dim", 10, 256, 1)),
    },
]
|
PanyiDong/AutoML | tests/test_hyperparameters/test_hyperparameters.py | """
File: test_hyperparameters.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_hyperparameters/test_hyperparameters.py
File Created: Thursday, 14th April 2022 12:25:53 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Thursday, 14th April 2022 12:31:22 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def test_encoder_hyperparameters():
    """Smoke test: the encoder search space imports and is a list."""
    from My_AutoML._hyperparameters import (
        encoder_hyperparameter,
    )

    # BUG FIX: the assert message is shown on failure, so it must describe the
    # failure condition rather than claim success
    assert isinstance(
        encoder_hyperparameter, list
    ), "encoder_hyperparameter should be a list."
def test_imputer_hyperparameters():
    """Smoke test: the imputer search space imports and is a list."""
    from My_AutoML._hyperparameters import (
        imputer_hyperparameter,
    )

    # BUG FIX: assert message now describes the failure, not the success
    assert isinstance(
        imputer_hyperparameter, list
    ), "imputer_hyperparameter should be a list."
def test_balancing_hyperparameters():
    """Smoke test: the balancing search space imports and is a list."""
    from My_AutoML._hyperparameters import (
        balancing_hyperparameter,
    )

    # BUG FIX: assert message now describes the failure, not the success
    assert isinstance(
        balancing_hyperparameter, list
    ), "balancing_hyperparameter should be a list."
def test_scaling_hyperparameters():
    """Smoke test: the scaling search space imports and is a list."""
    from My_AutoML._hyperparameters import (
        scaling_hyperparameter,
    )

    # BUG FIX: assert message now describes the failure, not the success
    assert isinstance(
        scaling_hyperparameter, list
    ), "scaling_hyperparameter should be a list."
def test_feature_selection_hyperparameters():
    """Smoke test: the feature-selection search space imports and is a list."""
    from My_AutoML._hyperparameters import (
        feature_selection_hyperparameter,
    )

    # BUG FIX: assert message now describes the failure, not the success
    assert isinstance(
        feature_selection_hyperparameter, list
    ), "feature_selection_hyperparameter should be a list."
def test_regressor_hyperparameters():
    """Smoke test: the regressor search space imports and is a list."""
    from My_AutoML._hyperparameters import (
        regressor_hyperparameter,
    )

    # BUG FIX: assert message now describes the failure, not the success
    assert isinstance(
        regressor_hyperparameter, list
    ), "regressor_hyperparameter should be a list."
def test_classifier_hyperparameters():
    """Smoke test: the classifier search space imports and is a list."""
    from My_AutoML._hyperparameters import (
        classifier_hyperparameter,
    )

    # BUG FIX: assert message now describes the failure, not the success
    assert isinstance(
        classifier_hyperparameter, list
    ), "classifier_hyperparameter should be a list."
PanyiDong/AutoML | My_AutoML/_scaling/_scaling.py | <gh_stars>1-10
"""
File: _scaling.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_scaling/_scaling.py
File Created: Friday, 25th February 2022 6:13:42 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 15th April 2022 11:58:53 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from logging import warning
from random import random
from re import L
import warnings
import numpy as np
import pandas as pd
import scipy
import scipy.stats
from My_AutoML._encoding import DataEncoding
class NoScaling:
    """Pass-through scaler: leaves the data untouched.

    Exposes the same fit/transform/inverse_transform interface as the other
    scalers in this module so it can act as the no-op choice in a pipeline.
    """

    def __init__(self):
        self._fitted = False  # record whether the model has been fitted

    def fit(self, X, y=None):
        """Mark the scaler as fitted; nothing is learned from X."""
        self._fitted = True
        return self

    def transform(self, X):
        """Return the input unchanged."""
        return X

    def inverse_transform(self, X):
        """Return the input unchanged and reset the fitted flag."""
        self._fitted = False
        return X
class Standardize:
    """
    Standardize the dataset by column (each feature), using _x = (x - mean) / std

    NaN entries are ignored when computing the statistics; std is the sample
    standard deviation (ddof = 1).

    Parameters
    ----------
    with_mean: whether to standardize with mean, default = True

    with_std: whether to standardize with standard variance, default = True

    deep_copy: whether to deep copy the input before transforming, default = True
    """

    def __init__(self, with_mean=True, with_std=True, deep_copy=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.deep_copy = deep_copy

        self._fitted = False  # record whether the model has been fitted

    def fit(self, X, y=None):
        """Compute per-column mean and sample std (NaN-aware)."""
        _X = X.copy(deep=self.deep_copy)
        n, p = _X.shape

        if self.with_mean:
            self._mean = [0 for _ in range(p)]
        if self.with_std:
            self._std = [0 for _ in range(p)]

        for i in range(p):
            _data = _X.iloc[:, i].values
            # only count non-missing entries
            n_notnan = n - np.isnan(_data).sum()
            _x_sum = np.nansum(_data)
            _x_2_sum = np.nansum(_data**2)
            if self.with_mean:
                self._mean[i] = _x_sum / n_notnan
            if self.with_std:
                # sample variance: (sum(x^2) - n * mean^2) / (n - 1)
                self._std[i] = np.sqrt(
                    (_x_2_sum - n_notnan * ((_x_sum / n_notnan) ** 2)) / (n_notnan - 1)
                )

        self._fitted = True

        return self

    def transform(self, X):
        """Apply (x - mean) / std columnwise."""
        _X = X.copy(deep=self.deep_copy)
        if self.with_mean:
            _X -= self._mean
        if self.with_std:
            _X /= self._std
        return _X

    def fit_transform(self, X, y=None):
        """Fit the statistics on X, then return the transformed copy."""
        _X = X.copy(deep=self.deep_copy)
        self.fit(_X, y)
        self._fitted = True
        _X = self.transform(_X)
        return _X

    def inverse_transform(self, X):
        """Undo the scaling in place: x = x_scaled * std + mean."""
        # BUG FIX: the std multiplication must happen BEFORE adding the mean;
        # the previous order computed (x + mean) * std instead of x * std + mean,
        # so transform/inverse_transform did not round-trip
        if self.with_std:
            X *= self._std
        if self.with_mean:
            X += self._mean
        self._fitted = False
        return X
class Normalize:
    """
    Normalize features with x / x_

    Parameters
    ----------
    norm: how to select x_, default = 'max'
    supported ['l1', 'l2', 'max']

    deep_copy: whether to deep copy the input before transforming, default = True
    """

    def __init__(
        self,
        norm="max",
        deep_copy=True,
    ):
        self.norm = norm
        self.deep_copy = deep_copy

        self._fitted = False  # record whether the model has been fitted

    def fit(self, X, y=None):
        """Compute the per-column norm used as the divisor."""
        if self.norm not in ["l1", "l2", "max"]:
            raise ValueError("Not recognizing norm method!")

        _X = X.copy(deep=self.deep_copy)
        n, p = _X.shape
        self._scale = [0 for _ in range(p)]
        for i in range(p):
            _data = _X.iloc[:, i].values
            if self.norm == "max":
                self._scale[i] = np.max(np.abs(_data))
            elif self.norm == "l1":
                self._scale[i] = np.abs(_data).sum()
            elif self.norm == "l2":
                # BUG FIX: the l2 norm is sqrt(sum(x^2)); the sqrt was missing,
                # which made this the squared norm instead
                self._scale[i] = np.sqrt((_data**2).sum())

        self._fitted = True

        return self

    def transform(self, X):
        """Divide each column by its fitted norm."""
        _X = X.copy(deep=self.deep_copy)
        _X /= self._scale
        return _X

    def fit_transform(self, X, y=None):
        """Fit the norms on X, then return the normalized copy."""
        _X = X.copy(deep=self.deep_copy)
        self.fit(_X, y)
        self._fitted = True
        _X = self.transform(_X)
        return _X

    def inverse_transform(self, X):
        """Undo the normalization in place: x = x_scaled * norm."""
        X *= self._scale
        self._fitted = False
        return X
class RobustScale:
    """
    Use quantile to scale, x / (q_max - q_min)

    Parameters
    ----------
    with_centering: whether to standardize with median, default = True

    with_scale: whether to standardize with quantile range, default = True

    quantile: (q_min, q_max) in percent, default = (25.0, 75.0)

    unit_variance: whether to rescale so that standard-normal data would have
    unit variance, default = False

    deep_copy: whether to deep copy the input before transforming, default = True
    """

    def __init__(
        self,
        with_centering=True,
        with_scale=True,
        quantile=(25.0, 75.0),
        unit_variance=False,
        deep_copy=True,
    ):
        self.with_centering = with_centering
        self.with_scale = with_scale
        self.quantile = quantile
        self.unit_variance = unit_variance
        self.deep_copy = deep_copy

        self._fitted = False  # record whether the model has been fitted

    def fit(self, X, y=None):
        """Compute per-column median and inter-quantile range (NaN-aware)."""
        q_min, q_max = self.quantile
        if q_min is None:  # in case no input
            q_min = 25.0
        if q_max is None:
            q_max = 75.0
        if not 0 <= q_min <= q_max <= 100.0:
            raise ValueError(
                "Quantile not in range, get {0:.1f} and {1:.1f}!".format(q_min, q_max)
            )

        _X = X.copy(deep=self.deep_copy)
        n, p = _X.shape

        if self.with_centering:
            self._median = [0 for _ in range(p)]
        if self.with_scale:
            self._scale = [0 for _ in range(p)]

        for i in range(p):
            _data = _X.iloc[:, i].values
            if self.with_centering:
                self._median[i] = np.nanmedian(_data)
            # BUG FIX: the quantile/scale computation was unguarded, so
            # with_scale=False raised AttributeError on self._scale
            if self.with_scale:
                q_lo, q_hi = np.nanquantile(_data, (q_min / 100, q_max / 100))
                self._scale[i] = q_hi - q_lo
                if self.unit_variance:
                    # BUG FIX: referenced self.scale (nonexistent) instead of
                    # self._scale
                    self._scale[i] /= scipy.stats.norm.ppf(
                        q_max / 100.0
                    ) - scipy.stats.norm.ppf(q_min / 100.0)

        if self.with_scale:
            # handle 0 in scale: replace near-zero ranges (constant columns)
            # with 1.0 to avoid division by zero
            # BUG FIX: the old code compared the whole Python list to a float
            # (TypeError); compare elementwise instead
            _eps = 10 * np.finfo(np.float64).eps  # avoid extremely small values
            self._scale = [1.0 if value < _eps else value for value in self._scale]

        self._fitted = True

        return self

    def transform(self, X):
        """Apply (x - median) / scale columnwise."""
        _X = X.copy(deep=self.deep_copy)
        if self.with_centering:
            _X -= self._median
        if self.with_scale:
            _X /= self._scale
        return _X

    def fit_transform(self, X, y=None):
        """Fit the statistics on X, then return the transformed copy."""
        _X = X.copy(deep=self.deep_copy)
        self.fit(_X, y)
        self._fitted = True
        _X = self.transform(_X)
        return _X

    def inverse_transform(self, X):
        """Undo the scaling in place: x = x_scaled * scale + median."""
        if self.with_scale:
            X *= self._scale
        if self.with_centering:
            X += self._median
        self._fitted = False
        return X
class MinMaxScale:
    """
    Rescale each feature into `feature_range` using its min/max:
    x' = (x - x_min) / (x_max - x_min), stretched to the target range.

    Parameters
    ----------
    feature_range: (feature_min, feature_max) to scale the feature, default = (0, 1)

    deep_copy: whether to deep copy the input before transforming, default = True
    """

    def __init__(self, feature_range=(0, 1), deep_copy=True):
        self.feature_range = feature_range
        self.deep_copy = deep_copy

        self._fitted = False  # record whether the model has been fitted

    def fit(self, X, y=None):
        """Record the NaN-aware min and max of every column."""
        frame = X.copy(deep=self.deep_copy)
        n_features = frame.shape[1]
        self._min = [np.nanmin(frame.iloc[:, idx].values) for idx in range(n_features)]
        self._max = [np.nanmax(frame.iloc[:, idx].values) for idx in range(n_features)]
        self._fitted = True
        return self

    def transform(self, X):
        """Map each column onto the configured feature range."""
        lower, upper = self.feature_range
        if not lower < upper:
            raise ValueError("Minimum of feature range must be smaller than maximum!")
        scaled = X.copy(deep=self.deep_copy)
        span = np.array(self._max) - np.array(self._min)
        scaled = (scaled - self._min) / span
        scaled = scaled * (upper - lower) + lower
        return scaled

    def fit_transform(self, X, y=None):
        """Fit the column extrema on X, then return the transformed copy."""
        working = X.copy(deep=self.deep_copy)
        self.fit(working, y)
        self._fitted = True
        return self.transform(working)

    def inverse_transform(self, X):
        """Map scaled values back to the original column ranges."""
        lower, upper = self.feature_range
        if not lower < upper:
            raise ValueError("Minimum of feature range must be smaller than maximum!")
        restored = X.copy(deep=True)
        restored = (restored - lower) / (upper - lower)
        restored = restored * (np.array(self._max) - np.array(self._min)) + self._min
        self._fitted = False
        return restored
class Winsorization:
    """
    Limit feature to certain quantile (remove the effect of extreme values)

    A feature is capped at its quantile cutoff only when the mean response of
    observations above the cutoff differs from the mean response below it by
    more than `threshold` (relative difference).

    No inverse transform available for Winsorization

    Parameters
    ----------
    quantile: quantile to be considered as extreme, default = 0.95

    threshold: threshold to decide whether to cap feature, default = 0.1

    deep_copy: whether to deep copy the input, default = True
    """

    def __init__(
        self,
        quantile=0.95,
        threshold=0.1,
        deep_copy=True,
    ):
        self.quantile = quantile
        self.threshold = threshold
        self.deep_copy = deep_copy

        self._fitted = False  # record whether the model has been fitted

    def fit(self, X, y):
        """Decide per feature whether to cap it, based on the response y."""
        # BUG FIX: the second isinstance checked X instead of y, so a missing
        # response was never detected whenever X was a DataFrame
        if not isinstance(y, pd.DataFrame) and not isinstance(y, pd.Series):
            warnings.warn("Method Winsorization requires response, but not getting it.")
        _X = X.copy(deep=self.deep_copy)
        features = list(_X.columns)
        self._quantile_list = []  # per-feature cutoff value
        self._list = []  # per-feature flag: cap this feature or not
        for _column in features:
            quantile = np.nanquantile(_X[_column], self.quantile, axis=0)
            self._quantile_list.append(quantile)
            # mean response above / below the cutoff; .iloc[0] avoids the
            # deprecated positional [0] lookup on a labeled Series
            _above_quantile = y[_X[_column] > quantile].mean().iloc[0]
            _below_quantile = y[_X[_column] <= quantile].mean().iloc[0]
            # deal with the case where above quantile does not exist
            # BUG FIX: an empty slice yields a NaN mean, which the plain
            # truthiness test never caught; check for NaN explicitly as well
            if pd.isnull(_above_quantile) or not _above_quantile:
                _above_quantile = quantile
            if abs(_above_quantile / _below_quantile - 1) > self.threshold:
                self._list.append(True)
            else:
                self._list.append(False)

        self._fitted = True

        return self

    def fit_transform(self, X, y=None):
        """Fit the capping decisions on (X, y), then return the capped copy."""
        _X = X.copy(deep=self.deep_copy)
        self.fit(_X, y)
        self._fitted = True
        _X = self.transform(_X)
        return _X

    def transform(self, X):
        """Cap flagged features at their stored quantile cutoffs."""
        _X = X.copy(deep=self.deep_copy)
        for i, _column in enumerate(list(_X.columns)):
            if self._list[i]:
                _X.loc[
                    _X[_column] > self._quantile_list[i], _column
                ] = self._quantile_list[i]
        return _X
class PowerTransformer:
    """
    Thin wrapper around sklearn.preprocessing.PowerTransformer, which applies
    a featurewise power transform to make data more Gaussian-like.

    [1] "A new family of power transformations to improve normality or
        symmetry." Biometrika, 87(4), pp.954-959, (2000).
    [2] "An Analysis of Transformations", Journal of the Royal Statistical
        Society B, 26, 211-252 (1964).

    Parameters
    ----------
    method: 'yeo-johnson' or 'box-cox', default = 'yeo-johnson'
    'yeo-johnson' [1]_, works with positive and negative values
    'box-cox' [2]_, only works with strictly positive values

    standardize: whether to zero-mean/unit-variance the output, default = True

    deep_copy: whether sklearn should copy the input, default = True
    """

    def __init__(self, method="yeo-johnson", standardize=True, deep_copy=True):
        self.method = method
        self.standardize = standardize
        self.deep_copy = deep_copy

        self._fitted = False  # record whether the model has been fitted

    def fit(self, X, y=None):
        """Fit the underlying sklearn transformer (imported lazily)."""
        from sklearn.preprocessing import PowerTransformer

        self.mol = PowerTransformer(
            method=self.method,
            standardize=self.standardize,
            copy=self.deep_copy,
        )
        self.mol.fit(X, y)
        self._fitted = True
        return self

    def transform(self, X, y=None):
        """Apply the fitted power transform."""
        return self.mol.transform(X)

    def fit_transform(self, X, y=None):
        """Fit on X and return the transformed result."""
        return self.fit(X, y).transform(X)

    def inverse_transform(self, X):
        """Map transformed values back to the original space."""
        self._fitted = False
        return self.mol.inverse_transform(X)
class QuantileTransformer:
    """
    QuantileTransformer, implemented by sklearn: maps each feature through its
    empirical quantile function onto the target distribution.

    Parameters
    ----------
    n_quantiles: Number of quantiles to be computed, default = 1000
    (capped at the number of samples during fit)

    output_distribution: 'normal' or 'uniform', default = 'uniform'

    ignore_implicit_zeros: Only applies to sparse matrices, default = False

    subsample: Maximum number of samples used to estimate the quantiles, default = 100000

    random_state: RandomState instance or None, default = None

    deep_copy: whether to use deep copy, default = True
    """

    def __init__(
        self,
        n_quantiles=1000,
        output_distribution="uniform",
        ignore_implicit_zeros=False,
        subsample=int(1e5),
        random_state=None,
        deep_copy=True,
    ):
        self.n_quantiles = n_quantiles
        self.output_distribution = output_distribution
        self.ignore_implicit_zeros = ignore_implicit_zeros
        self.subsample = subsample
        self.random_state = random_state
        self.deep_copy = deep_copy

        self._fitted = False  # record whether the model has been fitted

    def fit(self, X, y=None):
        """Fit the underlying sklearn transformer (imported lazily)."""
        # limit max number of quantiles to entries number
        self.n_quantiles = min(self.n_quantiles, X.shape[0])

        from sklearn.preprocessing import QuantileTransformer

        self.mol = QuantileTransformer(
            n_quantiles=self.n_quantiles,
            output_distribution=self.output_distribution,
            ignore_implicit_zeros=self.ignore_implicit_zeros,
            subsample=self.subsample,
            random_state=self.random_state,
            copy=self.deep_copy,
        )
        self.mol.fit(X, y)

        self._fitted = True

        return self

    def transform(self, X, y=None):
        """Apply the fitted quantile transform."""
        return self.mol.transform(X)

    def fit_transform(self, X, y=None):
        """Fit on X and return the transformed result."""
        self.fit(X, y)
        self._fitted = True
        return self.transform(X)

    def inverse_transform(self, X):
        """Map transformed values back to the original space."""
        self._fitted = False
        return self.mol.inverse_transform(X)
####################################################################################################
# Special Case
def Feature_Manipulation(
    X,
    columns=[],
    manipulation=[],
    rename_columns={},
    replace=False,
    deep_copy=False,
):
    """
    Apply columnwise transformations, adding (or replacing) columns.

    Available methods: +, -, *, /, //, %, ln, log2, log10, exp
    Binary methods are written as "<op> <number>", e.g. "* 100".

    Parameters
    ----------
    columns: columns need manipulation, default = []

    manipulation: list of manipulation, default = []

    rename_columns: specific changing column names, default = {}

    replace: whether to replace the new columns, default = False

    deep_copy: whether need deep copy the input, default = False

    Example
    -------
    >> data = np.arange(15).reshape(5, 3)
    >> data = pd.DataFrame(data, columns = ['column_1', 'column_2', 'column_3'])
    >> data
       column_1  column_2  column_3
    0         0         1         2
    1         3         4         5
    2         6         7         8
    3         9        10        11
    4        12        13        14
    >> data = Feature_Manipulation(
    >>     data, columns= ['column_1', 'column_2', 'column_3'],
    >>     manipulation = ['* 100', 'ln', '+ 1'],
    >>     rename_columns= {'column_2': 'log_column_2'}
    >> )
    >> data
       column_1  column_2  column_3  column_1_* 100  log_column_2  column_3_+ 1
    0         0         1         2               0      0.000000             3
    1         3         4         5             300      1.386294             6
    2         6         7         8             600      1.945910             9
    3         9        10        11             900      2.302585            12
    4        12        13        14            1200      2.564949            15
    """
    # make sure input is dataframe
    if not isinstance(X, pd.DataFrame):
        try:
            X = pd.DataFrame(X)
        except:
            raise ValueError("Expect a dataframe, get {}.".format(type(X)))

    _X = X.copy(deep=deep_copy)

    # if no columns/manipulation specified, raise warning
    if not columns or not manipulation:
        warnings.warn("No manipulation executed.")
        return _X

    # expect one manipulation for one column
    # if not same size, raise Error
    if len(columns) != len(manipulation):
        raise ValueError(
            "Expect same length of columns and manipulation, get {} and {} respectively.".format(
                len(columns), len(manipulation)
            )
        )

    manipulation = dict(zip(columns, manipulation))

    # unary transforms applied to the whole column
    _unary = {"ln": np.log, "log2": np.log2, "log10": np.log10, "exp": np.exp}
    # binary transforms written as "<op> <number>"
    _binary = {
        "+": lambda series, value: series + value,
        "-": lambda series, value: series - value,
        "*": lambda series, value: series * value,
        "/": lambda series, value: series / value,
        "//": lambda series, value: series // value,
        "%": lambda series, value: series % value,
    }

    for _column in columns:
        # if observed in rename dict, change column names
        new_column_name = (
            rename_columns[_column] if _column in rename_columns.keys() else _column
        )
        # if not replace, and new column names coincide with old column names
        # new column names = old column names + manipulation
        # for distinguish
        if not replace and new_column_name == _column:
            new_column_name += "_" + manipulation[_column]
        # column manipulation
        _action = manipulation[_column]
        if _action in _unary:
            _X[new_column_name] = _unary[_action](_X[_column])
        else:
            # SECURITY FIX: this previously used exec() on the caller-supplied
            # manipulation string (arbitrary code execution); parse the
            # documented "<op> <number>" form explicitly instead
            try:
                _op, _value = _action.split(maxsplit=1)
                _func = _binary[_op]
            except (ValueError, KeyError):
                raise ValueError("Unrecognized manipulation: {}.".format(_action))
            # keep integer operands integral so int columns stay int
            try:
                _operand = int(_value)
            except ValueError:
                _operand = float(_value)
            _X[new_column_name] = _func(_X[_column], _operand)

    return _X
####################################################################################################
# Feature Truncation
class Feature_Truncation:
    """
    Truncate feature to certain quantile (remove the effect of extreme values)

    No inverse transform available

    Parameters
    ----------
    columns: columns to truncate; default = [] (all columns)

    quantile: quantile to be considered as extreme, default = 0.95
    if quantile less than 0.5, left truncation; else, right truncation
    may be a scalar (applied to every column) or a list aligned with `columns`

    deep_copy: whether to deep copy the input, default = False

    Example
    -------
    >> scaling = Feature_Truncation(
    >>     columns = ['column_2', 'column_5', 'column_6', 'column_8', 'column_20'],
    >>     quantile = [0.95, 0.95, 0.9, 0.1, 0.8]
    >> )
    >> data = scaling.fit_transform(data)
    (column_2 right truncated at 95 percentile, column_8 left truncated at 10
    percentile, etc.)
    """

    def __init__(self, columns=[], quantile=0.95, deep_copy=False):
        self.columns = columns
        self.quantile = quantile
        self.deep_copy = deep_copy

    def fit(self, X, y=None):
        """Record the per-column quantile cutoff values."""
        # make sure input is dataframe
        if not isinstance(X, pd.DataFrame):
            try:
                X = pd.DataFrame(X)
            except:
                raise ValueError("Expect a dataframe, get {}.".format(type(X)))

        _X = X.copy(deep=self.deep_copy)
        self.columns = list(_X.columns) if not self.columns else self.columns

        if isinstance(self.quantile, list):
            if len(self.columns) != len(self.quantile):
                raise ValueError(
                    "Expect same length of columns and quantile, get {} and {} respectively.".format(
                        len(self.columns), len(self.quantile)
                    )
                )
            self.quantile = dict(zip(self.columns, self.quantile))
        elif not isinstance(self.quantile, dict):
            # BUG FIX: a scalar quantile (the default 0.95) previously crashed
            # below with "'float' object is not subscriptable"; broadcast it
            # to every selected column instead
            self.quantile = {_column: self.quantile for _column in self.columns}

        self.quantile_list = {}  # per-column cutoff VALUE in data units
        for _column in self.columns:
            self.quantile_list[_column] = np.nanquantile(
                X[_column], self.quantile[_column], axis=0
            )

        return self

    def transform(self, X):
        """Cap each column at its cutoff (right or left, by quantile level)."""
        _X = X.copy(deep=self.deep_copy)
        for _column in self.columns:
            # BUG FIX: the truncation direction must depend on the quantile
            # LEVEL (e.g. 0.95 vs 0.1), not on the cutoff VALUE of the data,
            # which the old code compared against 0.5
            if self.quantile[_column] >= 0.5:
                # right truncation: cap values above the cutoff
                _X.loc[
                    _X[_column] > self.quantile_list[_column], _column
                ] = self.quantile_list[_column]
            else:
                # left truncation: raise values below the cutoff
                _X.loc[
                    _X[_column] < self.quantile_list[_column], _column
                ] = self.quantile_list[_column]
        return _X

    def fit_transform(self, X, y=None):
        """Fit the cutoffs on X, then return the truncated copy."""
        _X = X.copy(deep=self.deep_copy)
        self.fit(X, y)
        _X = self.transform(_X)
        return _X
|
PanyiDong/AutoML | tests/test_feature_selection/test_feature_selection.py | """
File: test_feature_selection.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_feature_selection/test_feature_selection.py
File Created: Friday, 15th April 2022 12:27:07 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Wednesday, 11th May 2022 9:57:52 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
from My_AutoML._feature_selection import feature_selections
from My_AutoML._feature_selection._base import (
PCA_FeatureSelection,
RBFSampler,
)
def test_feature_selection():
    """Smoke-test every registered feature selection method.

    Each method is fitted on the medical premium fixture and must mark
    itself fitted; except for feature-expanding methods ("polynomial"),
    the transformed data must not gain columns.
    """
    # loop through all feature selection methods
    for method_name, method in feature_selections.items():
        # these methods are exercised by their own dedicated tests.
        # BUG FIX: the original used 'pass' here, which fell through and
        # re-ran fit/transform on the selector left over from the previous
        # loop iteration; 'continue' actually skips the method.
        if method_name in ["FeatureFilter", "ASFFS", "GeneticAlgorithm", "RBFSampler"]:
            continue

        data = pd.read_csv("Appendix/Medicalpremium.csv")
        X = data.iloc[:, :-1]
        y = data.iloc[:, -1]

        if method_name == "SFS":
            feature_selection = method(
                estimator="Lasso",
                n_components=5,
                criteria="MSE",
            )
        elif method_name in ["mRMR", "CBFS"]:
            feature_selection = method(n_components=5)
        else:
            feature_selection = method()

        feature_selection.fit(X, y)
        _X = feature_selection.transform(X)

        assert feature_selection._fitted == True, "Fitted should be True"
        if method_name != "polynomial":
            assert (
                _X.shape[1] <= X.shape[1]
            ), "Feature selection method {} failed".format(method_name)

    # test sklearn version if autosklearn is installed
    import importlib

    autosklearn_spec = importlib.util.find_spec("autosklearn")
    if autosklearn_spec is not None:
        from My_AutoML._feature_selection._sklearn import (
            extra_trees_preproc_for_classification,
            extra_trees_preproc_for_regression,
            liblinear_svc_preprocessor,
            polynomial,
            select_percentile_classification,
            select_percentile_regression,
            select_rates_classification,
            select_rates_regression,
            truncatedSVD,
        )

        methods = {
            "extra_trees_preproc_for_classification": extra_trees_preproc_for_classification,
            "extra_trees_preproc_for_regression": extra_trees_preproc_for_regression,
            "liblinear_svc_preprocessor": liblinear_svc_preprocessor,
            "polynomial": polynomial,
            "select_percentile_classification": select_percentile_classification,
            "select_percentile_regression": select_percentile_regression,
            "select_rates_classification": select_rates_classification,
            "select_rates_regression": select_rates_regression,
            "truncatedSVD": truncatedSVD,
        }

        for method_name, method in methods.items():
            data = pd.read_csv("Appendix/Medicalpremium.csv")
            X = data.iloc[:, :-1]
            y = data.iloc[:, -1]

            feature_selection = method()
            feature_selection.fit(X, y)
            _X = feature_selection.transform(X)

            assert feature_selection._fitted == True, "Fitted should be True"
def test_FeatureFilter():
    """FeatureFilter must fit under both criteria and never add columns."""
    from My_AutoML._feature_selection import FeatureFilter

    data = pd.read_csv("Appendix/Medicalpremium.csv")
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    # exercise both supported filtering criteria
    for criteria in ("Pearson", "MI"):
        selector = FeatureFilter(
            n_components=5,
            criteria=criteria,
        )
        selector.fit(X, y)
        reduced = selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
        assert (
            reduced.shape[1] <= X.shape[1]
        ), "Feature selection method FeatureFilter failed"
def test_ASFFS():
    """ASFFS must fit with several model/objective combinations."""
    from My_AutoML._feature_selection import ASFFS

    data = pd.read_csv("Appendix/Medicalpremium.csv")
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    # each config mirrors one of the originally tested constructions
    configs = [
        {"model": "Linear"},
        {"model": "Lasso"},
        {"model": "Ridge", "objective": "MAE"},
    ]
    for config in configs:
        selector = ASFFS(n_components=5, **config)
        selector.fit(X, y)
        reduced = selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
        assert reduced.shape[1] <= X.shape[1], "Feature selection method ASFFS failed"
def test_GA():
    """GeneticAlgorithm must fit under several selection/fitness settings."""
    from My_AutoML._encoding import DataEncoding
    from My_AutoML._feature_selection import GeneticAlgorithm

    data = pd.read_csv("Appendix/heart.csv")

    # encode categorical columns to numerical before selection
    formatter = DataEncoding()
    formatter.fit(data)
    data = formatter.refit(data)

    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    # each config mirrors one of the originally tested constructions
    configs = [
        {
            "feature_selection": "random",
            "fitness_fit": "Linear",
            "n_generations": 50,
            "p_mutation": 0.1,
        },
        {"feature_selection": ["Entropy"], "fitness_fit": "Decision Tree"},
        {"feature_selection": ["t_statistics"], "fitness_fit": "Random Forest"},
        {"feature_selection": "auto", "fitness_fit": "SVM"},
    ]
    for config in configs:
        selector = GeneticAlgorithm(n_components=5, **config)
        selector.fit(X, y)
        reduced = selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
        assert (
            reduced.shape[1] <= X.shape[1]
        ), "Feature selection method GeneticAlgorithm failed"
def test_feature_selection_PCA_FeatureSelection():
    """PCA_FeatureSelection must fit with every supported solver."""
    data = pd.read_csv("Appendix/Medicalpremium.csv")
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    for solver in ("auto", "full", "truncated", "randomized"):
        selector = PCA_FeatureSelection(
            n_components=5,
            solver=solver,
        )
        selector.fit(X, y)
        selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
# def test_feature_selection_LDASelection():
# data = pd.read_csv("Appendix/Medicalpremium.csv")
# X = data.iloc[:, :-1]
# y = data.iloc[:, -1]
# feature_selection = LDASelection(n_components=5)
# feature_selection.fit(X, y)
# assert feature_selection._fitted == True, "Fitted should be True"
def test_feature_selection_RBFSampler():
    """RBFSampler must fit on the fixture and mark itself fitted."""
    data = pd.read_csv("Appendix/Medicalpremium.csv")
    X, y = data.iloc[:, :-1], data.iloc[:, -1]

    sampler = RBFSampler(n_components=5)
    sampler.fit(X, y)
    sampler.transform(X)

    assert sampler._fitted == True, "Fitted should be True"
# test deprecated methods
def test_feature_selection_densifier():
    """densifier must fit via every available backend."""
    import importlib

    autosklearn_spec = importlib.util.find_spec("autosklearn")
    if autosklearn_spec is None:
        # only the sklearn re-implementation is available
        from My_AutoML._feature_selection._sklearn import densifier

        variants = [densifier]
    else:
        # autosklearn wrapper first, then the sklearn re-implementation
        from My_AutoML._feature_selection._autosklearn import densifier as auto_densifier
        from My_AutoML._feature_selection._sklearn import densifier as sk_densifier

        variants = [auto_densifier, sk_densifier]

    for variant in variants:
        data = pd.read_csv("Appendix/Medicalpremium.csv")
        X, y = data.iloc[:, :-1], data.iloc[:, -1]

        selector = variant()
        selector.fit(X, y)
        selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
# def test_feature_selection_fast_ica():
# from My_AutoML._feature_selection._autosklearn import fast_ica
# data = pd.read_csv("Appendix/Medicalpremium.csv")
# X = data.iloc[:, :-1]
# y = data.iloc[:, -1]
# feature_selection = fast_ica()
# feature_selection.fit(X, y)
# _X = feature_selection.transform(X)
# assert feature_selection._fitted == True, "Fitted should be True"
def test_feature_selection_feature_agglomeration():
    """feature_agglomeration must fit via every available backend."""
    import importlib

    autosklearn_spec = importlib.util.find_spec("autosklearn")
    if autosklearn_spec is None:
        # only the sklearn re-implementation is available
        from My_AutoML._feature_selection._sklearn import feature_agglomeration

        variants = [feature_agglomeration]
    else:
        # autosklearn wrapper first, then the sklearn re-implementation
        from My_AutoML._feature_selection._autosklearn import (
            feature_agglomeration as auto_feature_agglomeration,
        )
        from My_AutoML._feature_selection._sklearn import (
            feature_agglomeration as sk_feature_agglomeration,
        )

        variants = [auto_feature_agglomeration, sk_feature_agglomeration]

    for variant in variants:
        data = pd.read_csv("Appendix/Medicalpremium.csv")
        X, y = data.iloc[:, :-1], data.iloc[:, -1]

        selector = variant()
        selector.fit(X, y)
        selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
def test_feature_selection_kernel_pca():
    """kernel_pca must fit via every available backend."""
    import importlib

    autosklearn_spec = importlib.util.find_spec("autosklearn")
    if autosklearn_spec is None:
        # only the sklearn re-implementation is available
        from My_AutoML._feature_selection._sklearn import kernel_pca

        variants = [kernel_pca]
    else:
        # autosklearn wrapper first, then the sklearn re-implementation
        from My_AutoML._feature_selection._autosklearn import kernel_pca as auto_kernel_pca
        from My_AutoML._feature_selection._sklearn import kernel_pca as sk_kernel_pca

        variants = [auto_kernel_pca, sk_kernel_pca]

    for variant in variants:
        data = pd.read_csv("Appendix/Medicalpremium.csv")
        X, y = data.iloc[:, :-1], data.iloc[:, -1]

        selector = variant()
        selector.fit(X, y)
        selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
def test_feature_selection_kitchen_sinks():
    """kitchen_sinks must fit via every available backend."""
    import importlib

    autosklearn_spec = importlib.util.find_spec("autosklearn")
    if autosklearn_spec is None:
        # only the sklearn re-implementation is available
        from My_AutoML._feature_selection._sklearn import kitchen_sinks

        variants = [kitchen_sinks]
    else:
        # autosklearn wrapper first, then the sklearn re-implementation
        from My_AutoML._feature_selection._autosklearn import (
            kitchen_sinks as auto_kitchen_sinks,
        )
        from My_AutoML._feature_selection._sklearn import kitchen_sinks as sk_kitchen_sinks

        variants = [auto_kitchen_sinks, sk_kitchen_sinks]

    for variant in variants:
        data = pd.read_csv("Appendix/Medicalpremium.csv")
        X, y = data.iloc[:, :-1], data.iloc[:, -1]

        selector = variant()
        selector.fit(X, y)
        selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
def test_feature_selection_nystroem_sampler():
    """nystroem_sampler must fit via every available backend."""
    import importlib

    autosklearn_spec = importlib.util.find_spec("autosklearn")
    if autosklearn_spec is None:
        # only the sklearn re-implementation is available
        from My_AutoML._feature_selection._sklearn import nystroem_sampler

        variants = [nystroem_sampler]
    else:
        # autosklearn wrapper first, then the sklearn re-implementation
        from My_AutoML._feature_selection._autosklearn import (
            nystroem_sampler as auto_nystroem_sampler,
        )
        from My_AutoML._feature_selection._sklearn import (
            nystroem_sampler as sk_nystroem_sampler,
        )

        variants = [auto_nystroem_sampler, sk_nystroem_sampler]

    for variant in variants:
        data = pd.read_csv("Appendix/Medicalpremium.csv")
        X, y = data.iloc[:, :-1], data.iloc[:, -1]

        selector = variant()
        selector.fit(X, y)
        selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
def test_feature_selection_pca():
    """pca must fit via every available backend."""
    import importlib

    autosklearn_spec = importlib.util.find_spec("autosklearn")
    if autosklearn_spec is None:
        # only the sklearn re-implementation is available
        from My_AutoML._feature_selection._sklearn import pca

        variants = [pca]
    else:
        # autosklearn wrapper first, then the sklearn re-implementation
        from My_AutoML._feature_selection._autosklearn import pca as auto_pca
        from My_AutoML._feature_selection._sklearn import pca as sk_pca

        variants = [auto_pca, sk_pca]

    for variant in variants:
        data = pd.read_csv("Appendix/Medicalpremium.csv")
        X, y = data.iloc[:, :-1], data.iloc[:, -1]

        selector = variant()
        selector.fit(X, y)
        selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
def test_feature_selection_random_trees_embedding():
    """random_trees_embedding must fit via every available backend."""
    import importlib

    autosklearn_spec = importlib.util.find_spec("autosklearn")
    if autosklearn_spec is None:
        # only the sklearn re-implementation is available
        from My_AutoML._feature_selection._sklearn import random_trees_embedding

        variants = [random_trees_embedding]
    else:
        # autosklearn wrapper first, then the sklearn re-implementation
        from My_AutoML._feature_selection._autosklearn import (
            random_trees_embedding as auto_random_trees_embedding,
        )
        from My_AutoML._feature_selection._sklearn import (
            random_trees_embedding as sk_random_trees_embedding,
        )

        variants = [auto_random_trees_embedding, sk_random_trees_embedding]

    for variant in variants:
        data = pd.read_csv("Appendix/Medicalpremium.csv")
        X, y = data.iloc[:, :-1], data.iloc[:, -1]

        selector = variant()
        selector.fit(X, y)
        selector.transform(X)

        assert selector._fitted == True, "Fitted should be True"
|
PanyiDong/AutoML | My_AutoML/_feature_selection/_legacy.py | """
File: _legacy.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_feature_selection/_legacy.py
File Created: Friday, 8th April 2022 9:20:27 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:21:14 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from ._base import RBFSampler
from ._advance import FeatureFilter, ASFFS, GeneticAlgorithm
from My_AutoML._base import no_processing
import autosklearn
import autosklearn.pipeline.components.feature_preprocessing
# Registry mapping feature-selection method names to their implementing
# classes: in-house implementations first, then autosklearn preprocessors.
# Commented-out entries are currently disabled.
feature_selections = {
    # identity pass-through (no feature selection)
    "no_processing": no_processing,
    # "LDASelection": LDASelection,
    # "PCA_FeatureSelection": PCA_FeatureSelection,
    "RBFSampler": RBFSampler,
    "FeatureFilter": FeatureFilter,
    "ASFFS": ASFFS,
    "GeneticAlgorithm": GeneticAlgorithm,
    # 'densifier' : autosklearn.pipeline.components.feature_preprocessing.densifier.Densifier, # from autosklearn
    "extra_trees_preproc_for_classification": autosklearn.pipeline.components.feature_preprocessing.extra_trees_preproc_for_classification.ExtraTreesPreprocessorClassification,
    "extra_trees_preproc_for_regression": autosklearn.pipeline.components.feature_preprocessing.extra_trees_preproc_for_regression.ExtraTreesPreprocessorRegression,
    # "fast_ica": autosklearn.pipeline.components.feature_preprocessing.fast_ica.FastICA,
    # "feature_agglomeration": autosklearn.pipeline.components.feature_preprocessing.feature_agglomeration.FeatureAgglomeration,
    # "kernel_pca": autosklearn.pipeline.components.feature_preprocessing.kernel_pca.KernelPCA,
    # "kitchen_sinks": autosklearn.pipeline.components.feature_preprocessing.kitchen_sinks.RandomKitchenSinks,
    "liblinear_svc_preprocessor": autosklearn.pipeline.components.feature_preprocessing.liblinear_svc_preprocessor.LibLinear_Preprocessor,
    # "nystroem_sampler": autosklearn.pipeline.components.feature_preprocessing.nystroem_sampler.Nystroem,
    # "pca": autosklearn.pipeline.components.feature_preprocessing.pca.PCA,
    # NOTE: "polynomial" expands features rather than selecting them
    "polynomial": autosklearn.pipeline.components.feature_preprocessing.polynomial.PolynomialFeatures,
    # "random_trees_embedding": autosklearn.pipeline.components.feature_preprocessing.random_trees_embedding.RandomTreesEmbedding,
    # 'select_percentile' : autosklearn.pipeline.components.feature_preprocessing.select_percentile.SelectPercentileBase,
    "select_percentile_classification": autosklearn.pipeline.components.feature_preprocessing.select_percentile_classification.SelectPercentileClassification,
    "select_percentile_regression": autosklearn.pipeline.components.feature_preprocessing.select_percentile_regression.SelectPercentileRegression,
    "select_rates_classification": autosklearn.pipeline.components.feature_preprocessing.select_rates_classification.SelectClassificationRates,
    "select_rates_regression": autosklearn.pipeline.components.feature_preprocessing.select_rates_regression.SelectRegressionRates,
    "truncatedSVD": autosklearn.pipeline.components.feature_preprocessing.truncatedSVD.TruncatedSVD,
}
|
PanyiDong/AutoML | My_AutoML/_utils/_file.py | <gh_stars>1-10
"""
File: _file.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_utils/_file.py
File Created: Wednesday, 6th April 2022 6:25:09 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Sunday, 17th April 2022 1:02:29 am
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import pickle
# save model
def save_model(
    encoder,
    encoder_hyperparameters,
    imputer,
    imputer_hyperparameters,
    balancing,
    balancing_hyperparameters,
    scaling,
    scaling_hyperparameters,
    feature_selection,
    feature_selection_hyperparameters,
    model,
    model_hyperparameters,
    model_name,
):
    """Write the fitted pipeline configuration to ``model_name`` as text.

    Each pipeline stage name and its hyperparameter object are written on
    consecutive lines (str() of each item followed by a newline), in the
    fixed order: encoder, imputer, balancing, scaling, feature selection,
    model.
    """
    # serialize every item the same way: its str() plus a newline
    items = (
        encoder,
        encoder_hyperparameters,
        imputer,
        imputer_hyperparameters,
        balancing,
        balancing_hyperparameters,
        scaling,
        scaling_hyperparameters,
        feature_selection,
        feature_selection_hyperparameters,
        model,
        model_hyperparameters,
    )
    with open(model_name, "w") as f:
        f.write("".join("{}\n".format(item) for item in items))
# save list of methods
def save_methods(file_name, methods):
    """Pickle a list of method objects back-to-back into one file.

    Parameters
    ----------
    file_name: path of the file to save

    methods: list of methods objects to save
    """
    # concatenated pickle streams are equivalent to sequential dump() calls
    with open(file_name, "wb") as sink:
        sink.write(b"".join(pickle.dumps(method) for method in methods))
# load methods
def load_methods(file_name):
    """Load every pickled object stored back-to-back in a file.

    Parameters
    ----------
    file_name: path of the file to load

    Returns
    -------
    list of the unpickled objects, in the order they were written
    """
    loaded = []
    with open(file_name, "rb") as in_f:
        # keep unpickling until the stream is exhausted
        while True:
            try:
                loaded.append(pickle.load(in_f))
            except EOFError:
                return loaded
# find exact folder path
def find_exact_path(path, spec_str):
    """Return the first entry under ``path`` whose full joined path contains
    ``spec_str``; return None when nothing matches.

    NOTE(review): the substring test runs against the *joined* path, so a
    spec_str that happens to occur in ``path`` itself matches the first
    entry — confirm this is intended by callers.
    """
    for candidate in (os.path.join(path, entry) for entry in os.listdir(path)):
        if spec_str in candidate:
            return candidate
    return None
PanyiDong/AutoML | My_AutoML/_hyperparameters/_hyperopt/_imputer_hyperparameter.py | """
File: _imputer_hyperparameter.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_hyperparameters/_hyperopt/_imputer_hyperparameter.py
File Created: Tuesday, 5th April 2022 11:02:55 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:23:00 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from hyperopt import hp
from hyperopt.pyll import scope
# imputer
# Hyperparameter search space for the imputers, in hyperopt format:
# each dict names one imputer and declares its tunable parameters.
# Commented-out entries are currently disabled.
imputer_hyperparameter = [
    {
        "imputer": "SimpleImputer",
        # statistic used to fill missing values
        "method": hp.choice(
            "SimpleImputer_method", ["mean", "zero", "median", "most frequent"]
        ),
    },
    {"imputer": "DummyImputer"},
    {"imputer": "JointImputer"},
    {
        "imputer": "ExpectationMaximization",
        # number of EM iterations to run
        "iterations": hp.quniform("ExpectationMaximization_iterations", 10, 100, 1),
        # convergence threshold for the EM loop
        "threshold": hp.uniform(
            "ExpectationMaximization_iterations_threshold", 1e-5, 1
        ),
    },
    {
        "imputer": "KNNImputer",
        # scope.int casts hyperopt's float samples to int
        "n_neighbors": scope.int(hp.quniform("KNNImputer_n_neighbors", 1, 15, 1)),
        "fold": scope.int(hp.quniform("KNNImputer_fold", 5, 15, 1)),
    },
    {"imputer": "MissForestImputer"},
    # number of chained-equation cycles for MICE
    {"imputer": "MICE", "cycle": hp.quniform("MICE_cycle", 5, 20, 1)},
    {"imputer": "GAIN"},
    # {"imputer": "AAI_kNN"},
    # {"imputer": "KMI"},
    # {"imputer": "CMI"},
    # {"imputer": "k_Prototype_NN"},
]
|
PanyiDong/AutoML | My_AutoML/_balancing/_mixed_sampling.py | <gh_stars>1-10
"""
File: _mixed_sampling.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_balancing/_mixed_sampling.py
File Created: Wednesday, 6th April 2022 12:27:23 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 9th April 2022 11:01:16 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
import warnings
from My_AutoML._utils._data import is_imbalance
from ._over_sampling import Smote
from ._under_sampling import TomekLink, EditedNearestNeighbor
"""
Reference for: Simple Random Over Sampling, Simple Random Under Sampling, Tomek Link, \
Edited Nearest Neighbor, Condensed Nearest Neighbor, One Sided Selection, CNN_TomekLink, \
Smote, Smote_TomekLink, Smote_ENN
<NAME>., <NAME>. and <NAME>., 2004. A study of the behavior of several methods for
balancing machine learning training data. ACM SIGKDD explorations newsletter, 6(1), pp.20-29.
"""
class Smote_TomekLink(Smote, TomekLink):
    """
    Run Smote then run Tomek Link to balance dataset

    Stage 1 over-samples with SMOTE to a relaxed intermediate threshold;
    stage 2 cleans the result with Tomek Link under-sampling at the final
    threshold. The chained ``super()`` calls rely on the MRO
    (Smote -> TomekLink) to dispatch each stage.
    """

    def __init__(
        self,
        imbalance_threshold=0.9,
        norm="l2",
        all=False,
        max_iter=1000,
        seed=1,
        k=5,
        generation="mean",
    ):
        # majority-class proportion above which the data counts as imbalanced
        self.imbalance_threshold = imbalance_threshold
        # distance norm forwarded to both samplers
        self.norm = norm
        # presumably "balance all classes" vs. only the extreme pair
        # -- TODO confirm against the Smote/TomekLink base classes
        self.all = all
        self.max_iter = max_iter
        self.seed = seed
        # neighborhood size forwarded to Smote
        self.k = k
        # synthetic-sample generation strategy forwarded to Smote
        self.generation = generation
        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        # Detect whether a response was supplied: DataFrames expose .empty,
        # while None (or anything without .empty) falls into the except arm.
        try:  # if missing y, will be None value; or will be dataframe, use df.empty for judge
            _empty = y.empty
        except AttributeError:
            # NOTE(review): `y is None` would be safer than `y == None`
            # if y can be an array-like without .empty — confirm callers
            _empty = y == None
        if (
            not _empty
        ):  # if no y input, only convert X; or, combine X and y to consider balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X
        _data = data.copy(deep=True)
        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            # Stage 1: SMOTE over-sampling. Plain super() resolves to Smote
            # (first in the MRO); the midpoint threshold leaves headroom for
            # the Tomek Link cleanup below.
            super().__init__(
                imbalance_threshold=(1.0 + self.imbalance_threshold) / 2,
                norm=self.norm,
                all=self.all,
                max_iter=self.max_iter,
                seed=self.seed,
                k=self.k,
                generation=self.generation,
            )
            _data = super().fit_transform(_data)
            # Stage 2: Tomek Link under-sampling. super(Smote, self) skips
            # Smote in the MRO and dispatches to TomekLink.
            super(Smote, self).__init__(
                imbalance_threshold=self.imbalance_threshold,
                norm=self.norm,
                all=self.all,
                max_iter=self.max_iter,
                seed=self.seed,
            )
            _data = super(Smote, self).fit_transform(_data)
        self._fitted = True
        if not _empty:
            # split the balanced frame back into features and response
            return _data[features], _data[response]
        else:
            return _data
class Smote_ENN(Smote, EditedNearestNeighbor):
    """
    Run Smote then run ENN to balance dataset

    Stage 1 over-samples with SMOTE to a relaxed intermediate threshold;
    stage 2 cleans the result with Edited Nearest Neighbor under-sampling
    at the final threshold. The chained ``super()`` calls rely on the MRO
    (Smote -> EditedNearestNeighbor) to dispatch each stage.
    """

    def __init__(
        self,
        imbalance_threshold=0.9,
        norm="l2",
        all=False,
        max_iter=1000,
        seed=1,
        k=5,
        generation="mean",
    ):
        # majority-class proportion above which the data counts as imbalanced
        self.imbalance_threshold = imbalance_threshold
        # distance norm forwarded to Smote
        self.norm = norm
        # presumably "balance all classes" vs. only the extreme pair
        # -- TODO confirm against the Smote/EditedNearestNeighbor base classes
        self.all = all
        self.max_iter = max_iter
        self.seed = seed
        # neighborhood size forwarded to both samplers
        self.k = k
        # synthetic-sample generation strategy forwarded to Smote
        self.generation = generation
        self._fitted = False  # whether the model has been fitted

    def fit_transform(self, X, y=None):
        # Detect whether a response was supplied: DataFrames expose .empty,
        # while None (or anything without .empty) falls into the except arm.
        try:  # if missing y, will be None value; or will be dataframe, use df.empty for judge
            _empty = y.empty
        except AttributeError:
            # NOTE(review): `y is None` would be safer than `y == None`
            # if y can be an array-like without .empty — confirm callers
            _empty = y == None
        if (
            not _empty
        ):  # if no y input, only convert X; or, combine X and y to consider balancing
            features = list(X.columns)
            response = list(y.columns)
            data = pd.concat([X, y], axis=1)
        else:
            features = list(X.columns)
            response = None
            data = X
        _data = data.copy(deep=True)
        if not is_imbalance(_data, self.imbalance_threshold):
            warnings.warn("The dataset is balanced, no change.")
        else:
            # Stage 1: SMOTE over-sampling. Plain super() resolves to Smote
            # (first in the MRO); the midpoint threshold leaves headroom for
            # the ENN cleanup below.
            super().__init__(
                imbalance_threshold=(1.0 + self.imbalance_threshold) / 2,
                norm=self.norm,
                all=self.all,
                max_iter=self.max_iter,
                seed=self.seed,
                k=self.k,
                generation=self.generation,
            )
            _data = super().fit_transform(_data)
            # Stage 2: ENN under-sampling. super(Smote, self) skips Smote in
            # the MRO and dispatches to EditedNearestNeighbor.
            # NOTE(review): unlike Smote_TomekLink, `norm` is not forwarded
            # here (but `k` is) — confirm against EditedNearestNeighbor's
            # signature whether that is intentional.
            super(Smote, self).__init__(
                imbalance_threshold=self.imbalance_threshold,
                all=self.all,
                max_iter=self.max_iter,
                seed=self.seed,
                k=self.k,
            )
            _data = super(Smote, self).fit_transform(_data)
        self._fitted = True
        if not _empty:
            # split the balanced frame back into features and response
            return _data[features], _data[response]
        else:
            return _data
|
PanyiDong/AutoML | My_AutoML/_constant.py | """
File: _constant.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_constant.py
File Created: Sunday, 10th April 2022 4:50:47 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Wednesday, 27th April 2022 5:53:01 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Registry of method names recognized by the AutoML pipeline, plus a few
# numeric limits and LightGBM option lists. Names here must match the keys
# exposed by the corresponding sub-packages.

# encoders
ENCODERS = ["DataEncoding"]

# imputers (missing-value handling methods)
IMPUTERS = [
    "SimpleImputer",
    "DummyImputer",
    "JointImputer",
    "ExpectationMaximization",
    "KNNImputer",
    "MissForestImputer",
    "MICE",
    "AAI_kNN",
    "KMI",
    "CMI",
    "k_Prototype_NN",
]

# balancings (class-imbalance handling methods)
BALANCINGS = [
    "no_processing",
    "SimpleRandomOverSampling",
    "SimpleRandomUnderSampling",
    "TomekLink",
    "EditedNearestNeighbor",
    "CondensedNearestNeighbor",
    "OneSidedSelection",
    "CNN_TomekLink",
    "Smote",
    "Smote_TomekLink",
    "Smote_ENN",
]

# scalings (feature scaling / transformation methods)
SCALINGS = [
    "no_processing",
    "MinMaxScale",
    "Standardize",
    "Normalize",
    "RobustScale",
    "PowerTransformer",
    "QuantileTransformer",
    "Winsorization",
    "Feature_Manipulation",
    "Feature_Truncation",
]

# feature_selection (dimensionality reduction / selection methods)
FEATURE_SELECTION = [
    "no_processing",
    "LDASelection",
    "PCA_FeatureSelection",
    "RBFSampler",
    "FeatureFilter",
    "ASFFS",
    "GeneticAlgorithm",
    "extra_trees_preproc_for_classification",
    "extra_trees_preproc_for_regression",
    "liblinear_svc_preprocessor",
    "polynomial",
    "select_percentile_classification",
    "select_percentile_regression",
    "select_rates_classification",
    "select_rates_regression",
    "truncatedSVD",
]

# classifiers (supported classification models)
CLASSIFIERS = [
    "AdaboostClassifier",
    "BernoulliNB",
    "DecisionTree",
    "ExtraTreesClassifier",
    "GaussianNB",
    "GradientBoostingClassifier",
    "KNearestNeighborsClassifier",
    "LDA",
    "LibLinear_SVC",
    "LibSVM_SVC",
    "MLPClassifier",
    "MultinomialNB",
    "PassiveAggressive",
    "QDA",
    "RandomForest",
    "SGD",
    "LogisticRegression",
    "ComplementNB",
    "HistGradientBoostingClassifier",
    "LightGBM_Classifier",
    "XGBoost_Classifier",
    "GAM_Classifier",
    "MLP_Classifier",
    "RNN_Classifier",
]

# regressors (supported regression models)
REGRESSORS = [
    "AdaboostRegressor",
    "ARDRegression",
    "DecisionTree",
    "ExtraTreesRegressor",
    "GaussianProcess",
    "GradientBoosting",
    "KNearestNeighborsRegressor",
    "LibLinear_SVR",
    "LibSVM_SVR",
    "MLPRegressor",
    "RandomForest",
    "SGD",
    "LinearRegression",
    "Lasso",
    "RidgeRegression",
    "ElasticNet",
    "BayesianRidge",
    "HistGradientBoostingRegressor",
    "LightGBM_Regressor",
    "XGBoost_Regressor",
    "GAM_Regressor",
    "MLP_Regressor",
    "RNN_Regressor",
]

# maximum unique classes determined as categorical variable
# 31 is capped by days in a month
UNI_CLASS = 31

# maximum iteration allowed for the algorithm
MAX_ITER = 1024

# maximum time budge allowed per run (in seconds)
# set at 3 days
MAX_TIME = 259200

# LightGBM default object (metric/loss)
# binary classification
LIGHTGBM_BINARY_CLASSIFICATION = ["binary", "cross_entropy"]
# multiclass classification
LIGHTGBM_MULTICLASS_CLASSIFICATION = ["multiclass", "multiclassova", "num_class"]
# regression
LIGHTGBM_REGRESSION = [
    "regression",
    "regression_l1",
    "huber",
    "fair",
    "poisson",
    "quantile",
    "mape",
    "gamma",
    "tweedie",
]

# LightGBM boosting methods
LIGHTGBM_BOOSTING = ["gbdt", "dart", "goss"]  # suppress "rf"

# LightGBM tree learner
LIGHTGBM_TREE_LEARNER = ["serial", "feature", "data", "voting"]

# Classification estimators (used internally, e.g. by feature selection)
CLASSIFICATION_ESTIMATORS = [
    "LogisticRegression",
    "ExtraTreeClassifier",
    "RandomForestClassifier",
]

# Classification metrics ("neg_" prefix: negated so lower is better)
CLASSIFICATION_CRITERIA = [
    "neg_accuracy",
    "neg_precision",
    "neg_auc",
    "neg_hinge",
    "neg_f1",
]

# Regression estimators (used internally, e.g. by feature selection)
REGRESSION_ESTIMATORS = [
    "Lasso",
    "Ridge",
    "ExtraTreeRegressor",
    "RandomForestRegressor",
]

# Regression metrics
REGRESSION_CRITERIA = [
    "MSE",
    "MAE",
    "MSLE",
    "neg_R2",
    "MAX",
]
|
PanyiDong/AutoML | My_AutoML/_imputation/__init__.py | """
File: __init__.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_imputation/__init__.py
File Created: Tuesday, 5th April 2022 11:49:07 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Saturday, 16th April 2022 5:57:03 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from ._base import SimpleImputer, DummyImputer, JointImputer
from ._multiple import ExpectationMaximization, KNNImputer, MissForestImputer, MICE

# from ._clustering import AAI_kNN, KMI, CMI, k_Prototype_NN

# Registry mapping imputer name -> imputer class; keys must match the
# names listed in My_AutoML._constant.IMPUTERS.
imputers = {
    "SimpleImputer": SimpleImputer,
    # 'DummyImputer' : DummyImputer,
    "JointImputer": JointImputer,
    "ExpectationMaximization": ExpectationMaximization,
    "KNNImputer": KNNImputer,
    "MissForestImputer": MissForestImputer,
    "MICE": MICE,
    # "AAI_kNN": AAI_kNN, # extremely slow (all below)
    # "KMI": KMI, # not implemented
    # "CMI": CMI,
    # "k_Prototype_NN": k_Prototype_NN,
}

# Markov Chain Monte Carlo (MCMC)

import importlib

# check tensorflow/pytorch installed only import nn-based methods
# when tensorflow/pytorch is installed
tensorflow_spec = importlib.util.find_spec("tensorflow")
torch_spec = importlib.util.find_spec("torch")

if tensorflow_spec is not None or torch_spec is not None:
    # GAIN (Generative Adversarial Imputation Nets) requires a NN backend
    from ._nn import GAIN

    imputers["GAIN"] = GAIN
|
PanyiDong/AutoML | My_AutoML/_utils/_preprocessing.py | <filename>My_AutoML/_utils/_preprocessing.py
"""
File: _preprocessing.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_utils/_preprocessing.py
File Created: Wednesday, 6th April 2022 12:04:44 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Friday, 8th April 2022 10:27:33 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import importlib
pytorch_spec = importlib.util.find_spec("torch")
if pytorch_spec is not None:
import torch
from torch.utils.data import TensorDataset, DataLoader
torchtext_spec = importlib.util.find_spec("torchtext")
if torchtext_spec is not None:
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
transformers_spec = importlib.util.find_spec("transformers")
if transformers_spec is not None:
import transformers
from transformers import AutoTokenizer
# text preprocessing
# build a vocabulary from text using torchtext methods
# fixed length sequence needed
def text_preprocessing_torchtext(
    data,
    batch_size=32,
    shuffle=True,
    drop_first=True,
    return_offset=False,
):
    """
    Tokenize a text dataset with torchtext, build a vocabulary, and wrap
    the (text, label) tensors in a DataLoader.

    Parameters
    ----------
    data : iterable
        either dict items with "text"/"label" keys (datasets package) or
        (text, label) tuples
    batch_size : int, default=32
        DataLoader batch size
    shuffle : bool, default=True
        whether the DataLoader reshuffles every epoch
    drop_first : bool, default=True
        whether to drop the last incomplete batch; forwarded to the
        DataLoader's ``drop_last`` argument (the public parameter name is
        kept for backward compatibility)
    return_offset : bool, default=False
        also return cumulative token offsets per sequence

    Returns
    -------
    (DataLoader, vocab)

    Notes
    -----
    Texts are stacked with ``torch.stack``, so every tokenized sequence
    must have the same length (fixed-length sequences required).
    """
    tokenizer = get_tokenizer("basic_english")

    # yield token lists from either dict items or (text, label) tuples
    def yield_tokens(data_iter):
        for item in data_iter:
            if isinstance(item, dict):
                yield tokenizer(item["text"])
            else:
                text, label = item
                yield tokenizer(text)

    # define vocabulary; out-of-vocabulary tokens map to "<unk>"
    vocab = build_vocab_from_iterator(yield_tokens(data), specials=["<unk>"])
    vocab.set_default_index(vocab["<unk>"])

    # map raw text to a list of token ids
    text_pipeline = lambda x: vocab(tokenizer(x))
    # label_pipeline = lambda x: int(x) - 1

    # collect text, label, and offset (optional) tensors
    text_list, label_list, offsets = [], [], [0]
    for idx, item in enumerate(data):
        if isinstance(item, dict):
            _text, _label = item["text"], item["label"]
        else:
            _text, _label = item

        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        label_list.append(_label)
        if return_offset:
            offsets.append(processed_text.size(0))

    text_list = torch.stack(text_list)
    label_list = torch.tensor(label_list, dtype=torch.int64)
    if return_offset:
        # cumulative start index of each sequence in a flattened batch
        offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)

    if return_offset:
        data_tensor = TensorDataset(text_list, label_list, offsets)
    else:
        data_tensor = TensorDataset(text_list, label_list)

    # BUG FIX: torch.utils.data.DataLoader's keyword is ``drop_last``;
    # passing ``drop_first=`` raised TypeError at runtime
    data_loader = DataLoader(
        data_tensor, batch_size=batch_size, shuffle=shuffle, drop_last=drop_first
    )

    return data_loader, vocab
# text preprocessing using transformers package
def text_preprocessing_transformers(
    data,
    batch_size=32,
    tokenizer_model="bert-base-uncased",
    max_len=512,
    return_attention_mask=False,
    return_token_type_ids=False,
    return_tensors="pt",
):
    """
    Tokenize a Hugging Face ``datasets`` dataset with a pretrained
    transformers tokenizer and wrap the result in a DataLoader.

    Parameters
    ----------
    data : datasets.Dataset
        must expose "text" and "label" columns (assumed -- confirm at caller)
    batch_size : int, default=32
        DataLoader batch size
    tokenizer_model : str, default="bert-base-uncased"
        pretrained tokenizer checkpoint name
    max_len : int, default=512
        every example is padded/truncated to exactly this many tokens
    return_attention_mask, return_token_type_ids : bool, default=False
        whether the tokenizer also emits these fields
    return_tensors : str, default="pt"
        tensor framework for tokenizer output

    Returns
    -------
    DataLoader over (input_ids, label) pairs
    """
    # load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model)

    # tokenize one example; max_length + padding + truncation together
    # pad short texts and cut long texts to exactly max_len tokens
    def mapping_tokenizer(example):
        return tokenizer(
            example["text"],
            max_length=max_len,
            padding="max_length",
            truncation=True,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_tensors=return_tensors,
        )

    # apply mapping tokenization method to data examples
    tokenized_data = data.map(mapping_tokenizer)

    # BUG FIX: Dataset.set_format operates in place and returns None, so
    # the original `selected_data = tokenized_data.set_format(...)` bound
    # None and the subscripts below crashed; also the tokenizer emits
    # "input_ids", not "inputs", so the column selection was wrong
    tokenized_data.set_format(type="torch", columns=["input_ids", "label"])

    # load data to DataLoader
    train_tensor = TensorDataset(
        torch.as_tensor(tokenized_data["input_ids"]),
        torch.as_tensor(tokenized_data["label"]),
    )
    train_loader = DataLoader(
        train_tensor, batch_size=batch_size, shuffle=True, drop_last=True
    )

    return train_loader
|
Stuti82/pandas | pandas/tests/extension/test_boolean.py | <reponame>Stuti82/pandas<filename>pandas/tests/extension/test_boolean.py
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.boolean import BooleanDtype
from pandas.tests.extension import base
def make_data():
    """Return 100 boolean values with NA gaps at positions 8 and 97."""
    leading = [True, False] * 4
    bulk = [True, False] * 44
    trailing = [True, False]
    return leading + [np.nan] + bulk + [np.nan] + trailing
@pytest.fixture
def dtype():
    # the extension dtype under test
    return BooleanDtype()


@pytest.fixture
def data(dtype):
    # 100 values with two missing entries (see make_data)
    return pd.array(make_data(), dtype=dtype)


@pytest.fixture
def data_for_twos(dtype):
    # "twos" have no boolean analogue; ones (True) are the closest stand-in
    return pd.array(np.ones(100), dtype=dtype)


@pytest.fixture
def data_missing(dtype):
    # [NA, valid] pair as required by the base test suite
    return pd.array([np.nan, True], dtype=dtype)


@pytest.fixture
def data_for_sorting(dtype):
    # [B, B, A] with A < B (only 2 unique values exist for booleans)
    return pd.array([True, True, False], dtype=dtype)


@pytest.fixture
def data_missing_for_sorting(dtype):
    # [B, NA, A] with A < B
    return pd.array([True, np.nan, False], dtype=dtype)


@pytest.fixture
def na_cmp():
    # we are pd.NA
    return lambda x, y: x is pd.NA and y is pd.NA


@pytest.fixture
def na_value():
    # the scalar missing value for BooleanDtype
    return pd.NA


@pytest.fixture
def data_for_grouping(dtype):
    # layout [B, B, NA, NA, A, A, B] expected by the grouping base tests
    b = True
    a = False
    na = np.nan
    return pd.array([b, b, na, na, a, a, b], dtype=dtype)
# The following classes inherit the full base extension test suites
# unchanged; the fixtures above supply the boolean-specific data.


class TestDtype(base.BaseDtypeTests):
    pass


class TestInterface(base.BaseInterfaceTests):
    pass


class TestConstructors(base.BaseConstructorsTests):
    pass


class TestGetitem(base.BaseGetitemTests):
    pass


class TestSetitem(base.BaseSetitemTests):
    pass


class TestMissing(base.BaseMissingTests):
    pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
    # ops for which numpy raises "numpy boolean subtract" on bool arrays
    implements = {"__sub__", "__rsub__"}

    def check_opname(self, s, op_name, other, exc=None):
        # overwriting to indicate ops don't raise an error
        super().check_opname(s, op_name, other, exc=None)

    def _check_op(self, obj, op, other, op_name, exc=NotImplementedError):
        # boolean-specific expectations for arithmetic result dtypes
        if exc is None:
            if op_name in self.implements:
                # subtraction on booleans is rejected by numpy
                msg = r"numpy boolean subtract"
                with pytest.raises(TypeError, match=msg):
                    op(obj, other)
                return

            result = op(obj, other)
            expected = self._combine(obj, other, op)

            if op_name in (
                "__floordiv__",
                "__rfloordiv__",
                "__pow__",
                "__rpow__",
                "__mod__",
                "__rmod__",
            ):
                # combine keeps boolean type
                expected = expected.astype("Int8")
            elif op_name in ("__truediv__", "__rtruediv__"):
                # combine with bools does not generate the correct result
                # (numpy behaviour for div is to regard the bools as numeric)
                expected = self._combine(obj.astype(float), other, op)
                expected = expected.astype("Float64")

            if op_name == "__rpow__":
                # for rpow, combine does not propagate NaN
                expected[result.isna()] = np.nan
            self.assert_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(obj, other)

    def _check_divmod_op(self, s, op, other, exc=None):
        # override to not raise an error
        super()._check_divmod_op(s, op, other, None)
class TestComparisonOps(base.BaseComparisonOpsTests):
    def check_opname(self, s, op_name, other, exc=None):
        # overwriting to indicate ops don't raise an error
        super().check_opname(s, op_name, other, exc=None)

    def _compare_other(self, s, data, op_name, other):
        self.check_opname(s, op_name, other)

    @pytest.mark.skip(reason="Tested in tests/arrays/test_boolean.py")
    def test_compare_scalar(self, data, all_compare_operators):
        pass

    @pytest.mark.skip(reason="Tested in tests/arrays/test_boolean.py")
    def test_compare_array(self, data, all_compare_operators):
        pass


class TestReshaping(base.BaseReshapingTests):
    pass
class TestMethods(base.BaseMethodsTests):
    # several base tests assume 3 unique values; booleans only have 2,
    # so those tests are overridden below

    @pytest.mark.parametrize("na_sentinel", [-1, -2])
    def test_factorize(self, data_for_grouping, na_sentinel):
        # override because we only have 2 unique values
        labels, uniques = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)
        expected_labels = np.array(
            [0, 0, na_sentinel, na_sentinel, 1, 1, 0], dtype=np.intp
        )
        expected_uniques = data_for_grouping.take([0, 4])
        tm.assert_numpy_array_equal(labels, expected_labels)
        self.assert_extension_array_equal(uniques, expected_uniques)

    def test_combine_le(self, data_repeated):
        # override because expected needs to be boolean instead of bool dtype
        orig_data1, orig_data2 = data_repeated(2)
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)
        result = s1.combine(s2, lambda x1, x2: x1 <= x2)
        expected = pd.Series(
            [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
            dtype="boolean",
        )
        self.assert_series_equal(result, expected)

        # scalar second operand
        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 <= x2)
        expected = pd.Series([a <= val for a in list(orig_data1)], dtype="boolean")
        self.assert_series_equal(result, expected)

    def test_searchsorted(self, data_for_sorting, as_series):
        # override because we only have 2 unique values
        data_for_sorting = pd.array([True, False], dtype="boolean")
        b, a = data_for_sorting
        arr = type(data_for_sorting)._from_sequence([a, b])

        if as_series:
            arr = pd.Series(arr)
        assert arr.searchsorted(a) == 0
        assert arr.searchsorted(a, side="right") == 1

        assert arr.searchsorted(b) == 1
        assert arr.searchsorted(b, side="right") == 2

        result = arr.searchsorted(arr.take([0, 1]))
        expected = np.array([0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

        # sorter
        sorter = np.array([1, 0])
        assert data_for_sorting.searchsorted(a, sorter=sorter) == 0

    @pytest.mark.skip(reason="uses nullable integer")
    def test_value_counts(self, all_data, dropna):
        return super().test_value_counts(all_data, dropna)

    @pytest.mark.skip(reason="uses nullable integer")
    def test_value_counts_with_normalize(self, data):
        pass

    def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting):
        # override because there are only 2 unique values

        # data_for_sorting -> [B, C, A] with A < B < C -> here True, True, False
        assert data_for_sorting.argmax() == 0
        assert data_for_sorting.argmin() == 2

        # with repeated values -> first occurrence
        data = data_for_sorting.take([2, 0, 0, 1, 1, 2])
        assert data.argmax() == 1
        assert data.argmin() == 0

        # with missing values
        # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
        assert data_missing_for_sorting.argmax() == 0
        assert data_missing_for_sorting.argmin() == 2
class TestCasting(base.BaseCastingTests):
    pass


class TestGroupby(base.BaseGroupbyTests):
    """
    Groupby-specific tests are overridden because boolean only has 2
    unique values, base tests uses 3 groups.
    """

    def test_grouping_grouper(self, data_for_grouping):
        # grouping on an extension column keeps the extension array
        df = pd.DataFrame(
            {"A": ["B", "B", None, None, "A", "A", "B"], "B": data_for_grouping}
        )
        gr1 = df.groupby("A").grouper.groupings[0]
        gr2 = df.groupby("B").grouper.groupings[0]

        tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values)
        tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping)

    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        result = df.groupby("B", as_index=as_index).A.mean()
        _, index = pd.factorize(data_for_grouping, sort=True)

        index = pd.Index(index, name="B")
        expected = pd.Series([3, 1], index=index, name="A")
        if as_index:
            self.assert_series_equal(result, expected)
        else:
            expected = expected.reset_index()
            self.assert_frame_equal(result, expected)

    def test_groupby_agg_extension(self, data_for_grouping):
        # GH#38980 groupby agg on extension type fails for non-numeric types
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})

        expected = df.iloc[[0, 2, 4]]
        expected = expected.set_index("A")

        result = df.groupby("A").agg({"B": "first"})
        self.assert_frame_equal(result, expected)

        result = df.groupby("A").agg("first")
        self.assert_frame_equal(result, expected)

        result = df.groupby("A").first()
        self.assert_frame_equal(result, expected)

    def test_groupby_extension_no_sort(self, data_for_grouping):
        # without sorting, groups appear in first-seen order
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        result = df.groupby("B", sort=False).A.mean()
        _, index = pd.factorize(data_for_grouping, sort=False)

        index = pd.Index(index, name="B")
        expected = pd.Series([1, 3], index=index, name="A")
        self.assert_series_equal(result, expected)

    def test_groupby_extension_transform(self, data_for_grouping):
        # transform over the non-missing rows only
        valid = data_for_grouping[~data_for_grouping.isna()]
        df = pd.DataFrame({"A": [1, 1, 3, 3, 1], "B": valid})

        result = df.groupby("B").A.transform(len)
        expected = pd.Series([3, 3, 2, 2, 3], name="A")

        self.assert_series_equal(result, expected)

    def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
        # smoke test: apply should not raise for any of the grouping shapes
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        df.groupby("B").apply(groupby_apply_op)
        df.groupby("B").A.apply(groupby_apply_op)
        df.groupby("A").apply(groupby_apply_op)
        df.groupby("A").B.apply(groupby_apply_op)

    def test_groupby_apply_identity(self, data_for_grouping):
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        result = df.groupby("A").B.apply(lambda x: x.array)
        expected = pd.Series(
            [
                df.B.iloc[[0, 1, 6]].array,
                df.B.iloc[[2, 3]].array,
                df.B.iloc[[4, 5]].array,
            ],
            index=pd.Index([1, 2, 3], name="A"),
            name="B",
        )
        self.assert_series_equal(result, expected)

    def test_in_numeric_groupby(self, data_for_grouping):
        # boolean columns participate in numeric aggregations only when
        # the dtype reports itself as numeric
        df = pd.DataFrame(
            {
                "A": [1, 1, 2, 2, 3, 3, 1],
                "B": data_for_grouping,
                "C": [1, 1, 1, 1, 1, 1, 1],
            }
        )
        result = df.groupby("A").sum().columns

        if data_for_grouping.dtype._is_numeric:
            expected = pd.Index(["B", "C"])
        else:
            expected = pd.Index(["C"])

        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("min_count", [0, 10])
    def test_groupby_sum_mincount(self, data_for_grouping, min_count):
        # min_count above the group sizes forces all-NA results
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        result = df.groupby("A").sum(min_count=min_count)
        if min_count == 0:
            expected = pd.DataFrame(
                {"B": pd.array([3, 0, 0], dtype="Int64")},
                index=pd.Index([1, 2, 3], name="A"),
            )
            tm.assert_frame_equal(result, expected)
        else:
            expected = pd.DataFrame(
                {"B": pd.array([pd.NA] * 3, dtype="Int64")},
                index=pd.Index([1, 2, 3], name="A"),
            )
            tm.assert_frame_equal(result, expected)
class TestNumericReduce(base.BaseNumericReduceTests):
    def check_reduce(self, s, op_name, skipna):
        # compare against the float64 reduction of the same data
        result = getattr(s, op_name)(skipna=skipna)
        expected = getattr(s.astype("float64"), op_name)(skipna=skipna)
        # override parent function to cast to bool for min/max
        if np.isnan(expected):
            expected = pd.NA
        elif op_name in ("min", "max"):
            expected = bool(expected)
        tm.assert_almost_equal(result, expected)
# Remaining base test suites are inherited unchanged.


class TestBooleanReduce(base.BaseBooleanReduceTests):
    pass


class TestPrinting(base.BasePrintingTests):
    pass


class TestUnaryOps(base.BaseUnaryOpsTests):
    pass


class TestParsing(base.BaseParsingTests):
    pass
|
Stuti82/pandas | pandas/tests/groupby/aggregate/test_aggregate.py | <filename>pandas/tests/groupby/aggregate/test_aggregate.py<gh_stars>1-10
"""
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
import datetime
import functools
from functools import partial
import re
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
concat,
)
import pandas._testing as tm
from pandas.core.base import SpecificationError
from pandas.core.groupby.grouper import Grouping
def test_groupby_agg_no_extra_calls():
    # GH#31760: agg must not invoke the aggregation function on an
    # empty dummy group
    frame = DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]})
    grouped = frame.groupby("key")["value"]

    def dummy_func(group):
        # every call must receive a non-empty group
        assert len(group) != 0
        return group.sum()

    grouped.agg(dummy_func)
def test_agg_regression1(tsframe):
    # agg(np.mean) must match the dedicated .mean() path
    grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
    result = grouped.agg(np.mean)
    expected = grouped.mean()
    tm.assert_frame_equal(result, expected)


def test_agg_must_agg(df):
    # agg with a function that returns a non-scalar must raise
    grouped = df.groupby("A")["C"]

    msg = "Must produce aggregated value"
    with pytest.raises(Exception, match=msg):
        grouped.agg(lambda x: x.describe())
    with pytest.raises(Exception, match=msg):
        grouped.agg(lambda x: x.index[:2])


def test_agg_ser_multi_key(df):
    # Series.groupby with multiple key Series matches frame groupby
    # TODO(wesm): unused
    ser = df.C  # noqa

    f = lambda x: x.sum()
    results = df.C.groupby([df.A, df.B]).aggregate(f)
    expected = df.groupby(["A", "B"]).sum()["C"]
    tm.assert_series_equal(results, expected)
def test_groupby_aggregation_mixed_dtype():
    # GH 6212: grouping keys that mix strings, ints and NaN
    expected = DataFrame(
        {
            "v1": [5, 5, 7, np.nan, 3, 3, 4, 1],
            "v2": [55, 55, 77, np.nan, 33, 33, 44, 11],
        },
        index=MultiIndex.from_tuples(
            [
                (1, 95),
                (1, 99),
                (2, 95),
                (2, 99),
                ("big", "damp"),
                ("blue", "dry"),
                ("red", "red"),
                ("red", "wet"),
            ],
            names=["by1", "by2"],
        ),
    )

    df = DataFrame(
        {
            "v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
            "v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
            "by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
            "by2": [
                "wet",
                "dry",
                99,
                95,
                np.nan,
                "damp",
                95,
                99,
                "red",
                99,
                np.nan,
                np.nan,
            ],
        }
    )

    # rows with NaN in either key are dropped from the result
    g = df.groupby(["by1", "by2"])
    result = g[["v1", "v2"]].mean()
    tm.assert_frame_equal(result, expected)
def test_groupby_aggregation_multi_level_column():
    # GH 29772: grouping by a level of a MultiIndex on the column axis
    lst = [
        [True, True, True, False],
        [True, False, np.nan, False],
        [True, True, np.nan, False],
        [True, True, np.nan, False],
    ]
    df = DataFrame(
        data=lst,
        columns=MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]),
    )

    # sum booleans column-wise within each level-1 group (NaN skipped)
    result = df.groupby(level=1, axis=1).sum()
    expected = DataFrame({0: [2.0, 1, 1, 1], 1: [1, 0, 1, 1]})
    tm.assert_frame_equal(result, expected)
def test_agg_apply_corner(ts, tsframe):
    # nothing to group, all NA
    grouped = ts.groupby(ts * np.nan)
    assert ts.dtype == np.float64

    # groupby float64 values results in Float64Index
    exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64))
    tm.assert_series_equal(grouped.sum(), exp)
    tm.assert_series_equal(grouped.agg(np.sum), exp)
    tm.assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)

    # DataFrame: same all-NA key corner case
    grouped = tsframe.groupby(tsframe["A"] * np.nan)
    exp_df = DataFrame(
        columns=tsframe.columns,
        dtype=float,
        index=Index([], name="A", dtype=np.float64),
    )
    tm.assert_frame_equal(grouped.sum(), exp_df)
    tm.assert_frame_equal(grouped.agg(np.sum), exp_df)
    tm.assert_frame_equal(grouped.apply(np.sum), exp_df)


def test_agg_grouping_is_list_tuple(ts):
    # replacing the grouping vector with a list or a tuple must still work
    df = tm.makeTimeDataFrame()

    grouped = df.groupby(lambda x: x.year)
    grouper = grouped.grouper.groupings[0].grouping_vector
    grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper))

    result = grouped.agg(np.mean)
    expected = grouped.mean()
    tm.assert_frame_equal(result, expected)

    grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper))

    result = grouped.agg(np.mean)
    expected = grouped.mean()
    tm.assert_frame_equal(result, expected)


def test_agg_python_multiindex(mframe):
    # agg on a MultiIndex-keyed groupby matches the dedicated reduction
    grouped = mframe.groupby(["A", "B"])

    result = grouped.agg(np.mean)
    expected = grouped.mean()
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]
)
def test_aggregate_str_func(tsframe, groupbyfunc):
    # string function names must dispatch to the matching reductions
    grouped = tsframe.groupby(groupbyfunc)

    # single series
    result = grouped["A"].agg("std")
    expected = grouped["A"].std()
    tm.assert_series_equal(result, expected)

    # group frame by function name
    result = grouped.aggregate("var")
    expected = grouped.var()
    tm.assert_frame_equal(result, expected)

    # group frame by function dict
    result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"})
    expected = DataFrame(
        {
            "A": grouped["A"].var(),
            "B": grouped["B"].std(),
            "C": grouped["C"].mean(),
            "D": grouped["D"].sem(),
        }
    )
    tm.assert_frame_equal(result, expected)


def test_agg_str_with_kwarg_axis_1_raises(df, reduction_func):
    # string reductions must reject axis=1 with a clear error
    gb = df.groupby(level=0)
    if reduction_func in ("idxmax", "idxmin"):
        error = TypeError
        msg = "reduction operation '.*' not allowed for this dtype"
    else:
        error = ValueError
        msg = f"Operation {reduction_func} does not support axis=1"
    with pytest.raises(error, match=msg):
        gb.agg(reduction_func, axis=1)
def test_aggregate_item_by_item(df):
    # per-column aggregation with a python function
    grouped = df.groupby("A")

    aggfun = lambda ser: ser.size
    result = grouped.agg(aggfun)
    foo = (df.A == "foo").sum()
    bar = (df.A == "bar").sum()
    K = len(result.columns)

    # GH5782
    exp = Series(np.array([foo] * K), index=list("BCD"), name="foo")
    tm.assert_series_equal(result.xs("foo"), exp)

    exp = Series(np.array([bar] * K), index=list("BCD"), name="bar")
    tm.assert_almost_equal(result.xs("bar"), exp)

    def aggfun(ser):
        return ser.size

    # empty frame grouped by an external key still returns a DataFrame
    result = DataFrame().groupby(df.A).agg(aggfun)
    assert isinstance(result, DataFrame)
    assert len(result) == 0


def test_wrap_agg_out(three_group):
    # columns whose aggregation raises TypeError are dropped from the result
    grouped = three_group.groupby(["A", "B"])

    def func(ser):
        if ser.dtype == object:
            raise TypeError
        else:
            return ser.sum()

    result = grouped.aggregate(func)
    exp_grouped = three_group.loc[:, three_group.columns != "C"]
    expected = exp_grouped.groupby(["A", "B"]).aggregate(func)
    tm.assert_frame_equal(result, expected)


def test_agg_multiple_functions_maintain_order(df):
    # GH #610: output columns keep the order of the passed functions
    funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)]
    result = df.groupby("A")["C"].agg(funcs)
    exp_cols = Index(["mean", "max", "min"])

    tm.assert_index_equal(result.columns, exp_cols)
def test_agg_multiple_functions_same_name():
    # GH 30880: two partials of the same function must both appear
    df = DataFrame(
        np.random.randn(1000, 3),
        index=pd.date_range("1/1/2012", freq="S", periods=1000),
        columns=["A", "B", "C"],
    )
    result = df.resample("3T").agg(
        {"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
    )
    expected_index = pd.date_range("1/1/2012", freq="3T", periods=6)
    # both partials surface under the same ("A", "quantile") label
    expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")])
    expected_values = np.array(
        [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]]
    ).T
    expected = DataFrame(
        expected_values, columns=expected_columns, index=expected_index
    )

    tm.assert_frame_equal(result, expected)


def test_agg_multiple_functions_same_name_with_ohlc_present():
    # GH 30880
    # ohlc expands dimensions, so different test to the above is required.
    df = DataFrame(
        np.random.randn(1000, 3),
        index=pd.date_range("1/1/2012", freq="S", periods=1000),
        columns=["A", "B", "C"],
    )
    result = df.resample("3T").agg(
        {"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
    )
    expected_index = pd.date_range("1/1/2012", freq="3T", periods=6)
    expected_columns = MultiIndex.from_tuples(
        [
            ("A", "ohlc", "open"),
            ("A", "ohlc", "high"),
            ("A", "ohlc", "low"),
            ("A", "ohlc", "close"),
            ("A", "quantile", "A"),
            ("A", "quantile", "A"),
        ]
    )
    non_ohlc_expected_values = np.array(
        [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]]
    ).T
    expected_values = np.hstack([df.resample("3T").A.ohlc(), non_ohlc_expected_values])
    expected = DataFrame(
        expected_values, columns=expected_columns, index=expected_index
    )

    # PerformanceWarning is thrown by `assert col in right` in assert_frame_equal
    with tm.assert_produces_warning(PerformanceWarning):
        tm.assert_frame_equal(result, expected)
def test_multiple_functions_tuples_and_non_tuples(df):
    # #1359
    # Plain function names may be mixed with (label, func) tuples in one call.
    funcs = [("foo", "mean"), "std"]
    ex_funcs = [("foo", "mean"), ("std", "std")]
    result = df.groupby("A")["C"].agg(funcs)
    expected = df.groupby("A")["C"].agg(ex_funcs)
    tm.assert_frame_equal(result, expected)
    result = df.groupby("A").agg(funcs)
    expected = df.groupby("A").agg(ex_funcs)
    tm.assert_frame_equal(result, expected)
def test_more_flexible_frame_multi_function(df):
    # Dict of column -> list of funcs matches concat of per-func aggregations.
    grouped = df.groupby("A")
    exmean = grouped.agg({"C": np.mean, "D": np.mean})
    exstd = grouped.agg({"C": np.std, "D": np.std})
    expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)
    expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
    d = {"C": [np.mean, np.std], "D": [np.mean, np.std]}
    result = grouped.aggregate(d)
    tm.assert_frame_equal(result, expected)
    # be careful
    result = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
    expected = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
    tm.assert_frame_equal(result, expected)
    def foo(x):
        return np.mean(x)
    def bar(x):
        return np.std(x, ddof=1)
    # this uses column selection & renaming
    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        d = {"C": np.mean, "D": {"foo": np.mean, "bar": np.std}}
        grouped.aggregate(d)
    # But without renaming, these functions are OK
    d = {"C": [np.mean], "D": [foo, bar]}
    grouped.aggregate(d)
def test_multi_function_flexible_mix(df):
    # GH #1268
    # Nested dict renaming is rejected regardless of how it is mixed in.
    grouped = df.groupby("A")
    # Expected
    d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}}
    # this uses column selection & renaming
    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        grouped.aggregate(d)
    # Test 1
    d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
    # this uses column selection & renaming
    with pytest.raises(SpecificationError, match=msg):
        grouped.aggregate(d)
    # Test 2
    d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
    # this uses column selection & renaming
    with pytest.raises(SpecificationError, match=msg):
        grouped.aggregate(d)
def test_groupby_agg_coercing_bools():
    # issue 14873
    # Boolean results from a UDF must stay boolean, not be coerced to int.
    dat = DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]})
    gp = dat.groupby("a")
    index = Index([1, 2], name="a")
    result = gp["b"].aggregate(lambda x: (x != 0).all())
    expected = Series([False, True], index=index, name="b")
    tm.assert_series_equal(result, expected)
    result = gp["c"].aggregate(lambda x: x.isnull().all())
    expected = Series([True, False], index=index, name="c")
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "op",
    [
        lambda x: x.sum(),
        lambda x: x.cumsum(),
        lambda x: x.transform("sum"),
        lambda x: x.transform("cumsum"),
        lambda x: x.agg("sum"),
        lambda x: x.agg("cumsum"),
    ],
)
def test_bool_agg_dtype(op):
    # GH 7001
    # Bool sum aggregations result in int
    df = DataFrame({"a": [1, 1], "b": [False, True]})
    s = df.set_index("a")["b"]
    result = op(df.groupby("a"))["b"].dtype
    assert is_integer_dtype(result)
    result = op(s.groupby("a")).dtype
    assert is_integer_dtype(result)
@pytest.mark.parametrize(
    "keys, agg_index",
    [
        (["a"], Index([1], name="a")),
        (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
    ],
)
@pytest.mark.parametrize(
    "input_dtype", ["bool", "int32", "int64", "float32", "float64"]
)
@pytest.mark.parametrize(
    "result_dtype", ["bool", "int32", "int64", "float32", "float64"]
)
@pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
def test_callable_result_dtype_frame(
    keys, agg_index, input_dtype, result_dtype, method
):
    # GH 21240
    # The result dtype should follow what the UDF returns, not the input dtype.
    df = DataFrame({"a": [1], "b": [2], "c": [True]})
    df["c"] = df["c"].astype(input_dtype)
    op = getattr(df.groupby(keys)[["c"]], method)
    result = op(lambda x: x.astype(result_dtype).iloc[0])
    expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
    expected = DataFrame({"c": [df["c"].iloc[0]]}, index=expected_index).astype(
        result_dtype
    )
    if method == "apply":
        # apply leaves an integer column-level name on the result
        expected.columns.names = [0]
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "keys, agg_index",
    [
        (["a"], Index([1], name="a")),
        (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
    ],
)
@pytest.mark.parametrize("input", [True, 1, 1.0])
@pytest.mark.parametrize("dtype", [bool, int, float])
@pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
def test_callable_result_dtype_series(keys, agg_index, input, dtype, method):
    # GH 21240
    # Series analogue of test_callable_result_dtype_frame above.
    df = DataFrame({"a": [1], "b": [2], "c": [input]})
    op = getattr(df.groupby(keys)["c"], method)
    result = op(lambda x: x.astype(dtype).iloc[0])
    expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
    expected = Series([df["c"].iloc[0]], index=expected_index, name="c").astype(dtype)
    tm.assert_series_equal(result, expected)
def test_order_aggregate_multiple_funcs():
    # GH 25692
    # Column order of the result matches the order the funcs were passed in.
    df = DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]})
    res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"])
    result = res.columns.levels[1]
    expected = Index(["sum", "max", "mean", "ohlc", "min"])
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.int64, np.uint64])
@pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"])
def test_uint64_type_handling(dtype, how):
    # GH 26310
    # Large uint64 values must not lose precision during aggregation.
    df = DataFrame({"x": 6903052872240755750, "y": [1, 2]})
    expected = df.groupby("y").agg({"x": how})
    df.x = df.x.astype(dtype)
    result = df.groupby("y").agg({"x": how})
    result.x = result.x.astype(np.int64)
    tm.assert_frame_equal(result, expected, check_exact=True)
def test_func_duplicates_raises():
    # GH28426
    # Repeating the same named function in a list is a SpecificationError.
    msg = "Function names"
    df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
    with pytest.raises(SpecificationError, match=msg):
        df.groupby("A").agg(["min", "min"])
@pytest.mark.parametrize(
    "index",
    [
        pd.CategoricalIndex(list("abc")),
        pd.interval_range(0, 3),
        pd.period_range("2020", periods=3, freq="D"),
        MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
    ],
)
def test_agg_index_has_complex_internals(index):
    # GH 31223
    # agg works regardless of how exotic the frame's index type is.
    df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
    result = df.groupby("group").agg({"value": Series.nunique})
    expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group")
    tm.assert_frame_equal(result, expected)
def test_agg_split_block():
    # https://github.com/pandas-dev/pandas/issues/31522
    # NOTE(review): "<KEY>" looks like redacted data -- the expected output
    # implies five keys drawn from {"a", "b"}; confirm against upstream pandas.
    df = DataFrame(
        {
            "key1": ["<KEY>"],
            "key2": ["one", "two", "one", "two", "one"],
            "key3": ["three", "three", "three", "six", "six"],
        }
    )
    result = df.groupby("key1").min()
    expected = DataFrame(
        {"key2": ["one", "one"], "key3": ["six", "six"]},
        index=Index(["a", "b"], name="key1"),
    )
    tm.assert_frame_equal(result, expected)
def test_agg_split_object_part_datetime():
    # https://github.com/pandas-dev/pandas/pull/31616
    # min on an all-object frame containing datetimes must not mangle values.
    df = DataFrame(
        {
            "A": pd.date_range("2000", periods=4),
            "B": ["a", "b", "c", "d"],
            "C": [1, 2, 3, 4],
            "D": ["b", "c", "d", "e"],
            "E": pd.date_range("2000", periods=4),
            "F": [1, 2, 3, 4],
        }
    ).astype(object)
    result = df.groupby([0, 0, 0, 0]).min()
    expected = DataFrame(
        {
            "A": [pd.Timestamp("2000")],
            "B": ["a"],
            "C": [1],
            "D": ["b"],
            "E": [pd.Timestamp("2000")],
            "F": [1],
        }
    )
    tm.assert_frame_equal(result, expected)
class TestNamedAggregationSeries:
    # Named aggregation (``gr.agg(out_name="func")``) on SeriesGroupBy.
    def test_series_named_agg(self):
        df = Series([1, 2, 3, 4])
        gr = df.groupby([0, 0, 1, 1])
        result = gr.agg(a="sum", b="min")
        expected = DataFrame(
            {"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=[0, 1]
        )
        tm.assert_frame_equal(result, expected)
        # keyword order determines output column order
        result = gr.agg(b="min", a="sum")
        expected = expected[["b", "a"]]
        tm.assert_frame_equal(result, expected)
    def test_no_args_raises(self):
        gr = Series([1, 2]).groupby([0, 1])
        with pytest.raises(TypeError, match="Must provide"):
            gr.agg()
        # but we do allow this
        result = gr.agg([])
        expected = DataFrame()
        tm.assert_frame_equal(result, expected)
    def test_series_named_agg_duplicates_no_raises(self):
        # GH28426
        # the same aggfunc may back multiple output names
        gr = Series([1, 2, 3]).groupby([0, 0, 1])
        grouped = gr.agg(a="sum", b="sum")
        expected = DataFrame({"a": [3, 3], "b": [3, 3]})
        tm.assert_frame_equal(expected, grouped)
    def test_mangled(self):
        # distinct lambdas can be assigned to different output columns
        gr = Series([1, 2, 3]).groupby([0, 0, 1])
        result = gr.agg(a=lambda x: 0, b=lambda x: 1)
        expected = DataFrame({"a": [0, 0], "b": [1, 1]})
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "inp",
        [
            pd.NamedAgg(column="anything", aggfunc="min"),
            ("anything", "min"),
            ["anything", "min"],
        ],
    )
    def test_named_agg_nametuple(self, inp):
        # GH34422
        # (column, aggfunc) pairs are invalid for SeriesGroupBy named agg
        s = Series([1, 1, 2, 2, 3, 3, 4, 5])
        msg = f"func is expected but received {type(inp).__name__}"
        with pytest.raises(TypeError, match=msg):
            s.groupby(s.values).agg(a=inp)
class TestNamedAggregationDataFrame:
    # Named aggregation (``df.groupby(...).agg(out=(col, func))``) on frames.
    def test_agg_relabel(self):
        df = DataFrame(
            {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
        )
        result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max"))
        expected = DataFrame(
            {"a_max": [1, 3], "b_max": [6, 8]},
            index=Index(["a", "b"], name="group"),
            columns=["a_max", "b_max"],
        )
        tm.assert_frame_equal(result, expected)
        # order invariance
        p98 = functools.partial(np.percentile, q=98)
        result = df.groupby("group").agg(
            b_min=("B", "min"),
            a_min=("A", min),
            a_mean=("A", np.mean),
            a_max=("A", "max"),
            b_max=("B", "max"),
            a_98=("A", p98),
        )
        expected = DataFrame(
            {
                "b_min": [5, 7],
                "a_min": [0, 2],
                "a_mean": [0.5, 2.5],
                "a_max": [1, 3],
                "b_max": [6, 8],
                "a_98": [0.98, 2.98],
            },
            index=Index(["a", "b"], name="group"),
            columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"],
        )
        tm.assert_frame_equal(result, expected)
    def test_agg_relabel_non_identifier(self):
        # output labels that are not valid identifiers go through **kwargs
        df = DataFrame(
            {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
        )
        result = df.groupby("group").agg(**{"my col": ("A", "max")})
        expected = DataFrame({"my col": [1, 3]}, index=Index(["a", "b"], name="group"))
        tm.assert_frame_equal(result, expected)
    def test_duplicate_no_raises(self):
        # GH 28426, if use same input function on same column,
        # no error should raise
        df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
        grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min"))
        expected = DataFrame({"a": [1, 3], "b": [1, 3]}, index=Index([0, 1], name="A"))
        tm.assert_frame_equal(grouped, expected)
        quant50 = functools.partial(np.percentile, q=50)
        quant70 = functools.partial(np.percentile, q=70)
        quant50.__name__ = "quant50"
        quant70.__name__ = "quant70"
        test = DataFrame({"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]})
        grouped = test.groupby("col1").agg(
            quantile_50=("col2", quant50), quantile_70=("col2", quant70)
        )
        expected = DataFrame(
            {"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]},
            index=Index(["a", "b"], name="col1"),
        )
        tm.assert_frame_equal(grouped, expected)
    def test_agg_relabel_with_level(self):
        # grouping by index level instead of a column
        df = DataFrame(
            {"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]},
            index=MultiIndex.from_product([["A", "B"], ["a", "b"]]),
        )
        result = df.groupby(level=0).agg(
            aa=("A", "max"), bb=("A", "min"), cc=("B", "mean")
        )
        expected = DataFrame(
            {"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"]
        )
        tm.assert_frame_equal(result, expected)
    def test_agg_relabel_other_raises(self):
        df = DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})
        grouped = df.groupby("A")
        match = "Must provide"
        with pytest.raises(TypeError, match=match):
            grouped.agg(foo=1)
        with pytest.raises(TypeError, match=match):
            grouped.agg()
        with pytest.raises(TypeError, match=match):
            grouped.agg(a=("B", "max"), b=(1, 2, 3))
    def test_missing_raises(self):
        df = DataFrame({"A": [0, 1], "B": [1, 2]})
        match = re.escape("Column(s) ['C'] do not exist")
        with pytest.raises(KeyError, match=match):
            df.groupby("A").agg(c=("C", "sum"))
    def test_agg_namedtuple(self):
        # pd.NamedAgg and a plain 2-tuple are interchangeable
        df = DataFrame({"A": [0, 1], "B": [1, 2]})
        result = df.groupby("A").agg(
            b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count")
        )
        expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count"))
        tm.assert_frame_equal(result, expected)
    def test_mangled(self):
        # distinct lambdas on distinct columns do not collide
        df = DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]})
        result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1))
        expected = DataFrame({"b": [0, 0], "c": [1, 1]}, index=Index([0, 1], name="A"))
        tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3",
    [
        (
            (("y", "A"), "max"),
            (("y", "A"), np.min),
            (("y", "B"), "mean"),
            [1, 3],
            [0, 2],
            [5.5, 7.5],
        ),
        (
            (("y", "A"), lambda x: max(x)),
            (("y", "A"), lambda x: 1),
            (("y", "B"), "mean"),
            [1, 3],
            [1, 1],
            [5.5, 7.5],
        ),
        (
            pd.NamedAgg(("y", "A"), "max"),
            pd.NamedAgg(("y", "B"), np.mean),
            pd.NamedAgg(("y", "A"), lambda x: 1),
            [1, 3],
            [5.5, 7.5],
            [1, 1],
        ),
    ],
)
def test_agg_relabel_multiindex_column(
    agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3
):
    # GH 29422, add tests for multiindex column cases
    df = DataFrame(
        {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
    )
    df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
    idx = Index(["a", "b"], name=("x", "group"))
    result = df.groupby(("x", "group")).agg(a_max=(("y", "A"), "max"))
    expected = DataFrame({"a_max": [1, 3]}, index=idx)
    tm.assert_frame_equal(result, expected)
    result = df.groupby(("x", "group")).agg(
        col_1=agg_col1, col_2=agg_col2, col_3=agg_col3
    )
    expected = DataFrame(
        {"col_1": agg_result1, "col_2": agg_result2, "col_3": agg_result3}, index=idx
    )
    tm.assert_frame_equal(result, expected)
def test_agg_relabel_multiindex_raises_not_exist():
    # GH 29422, add test for raises scenario when aggregate column does not exist
    df = DataFrame(
        {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
    )
    df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
    with pytest.raises(KeyError, match="do not exist"):
        df.groupby(("x", "group")).agg(a=(("Y", "a"), "max"))
def test_agg_relabel_multiindex_duplicates():
    # GH29422, add test for raises scenario when getting duplicates
    # GH28426, after this change, duplicates should also work if the relabelling is
    # different
    df = DataFrame(
        {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
    )
    df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
    result = df.groupby(("x", "group")).agg(
        a=(("y", "A"), "min"), b=(("y", "A"), "min")
    )
    idx = Index(["a", "b"], name=("x", "group"))
    expected = DataFrame({"a": [0, 2], "b": [0, 2]}, index=idx)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}])
def test_groupby_aggregate_empty_key(kwargs):
    # GH: 32580
    # an empty list of funcs for a column drops that column from the result
    df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
    result = df.groupby("a").agg(kwargs)
    expected = DataFrame(
        [1, 4],
        index=Index([1, 2], dtype="int64", name="a"),
        columns=MultiIndex.from_tuples([["c", "min"]]),
    )
    tm.assert_frame_equal(result, expected)
def test_groupby_aggregate_empty_key_empty_return():
    # GH: 32580 Check if everything works, when return is empty
    df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
    result = df.groupby("a").agg({"b": []})
    expected = DataFrame(columns=MultiIndex(levels=[["b"], []], codes=[[], []]))
    tm.assert_frame_equal(result, expected)
def test_grouby_agg_loses_results_with_as_index_false_relabel():
    # GH 32240: When the aggregate function relabels column names and
    # as_index=False is specified, the results are dropped.
    df = DataFrame(
        {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}
    )
    grouped = df.groupby("key", as_index=False)
    result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
    expected = DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]})
    tm.assert_frame_equal(result, expected)
def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex():
    # GH 32240: When the aggregate function relabels column names and
    # as_index=False is specified, the results are dropped. Check if
    # multiindex is returned in the right order
    df = DataFrame(
        {
            "key": ["x", "y", "x", "y", "x", "x"],
            "key1": ["a", "b", "c", "b", "a", "c"],
            "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],
        }
    )
    grouped = df.groupby(["key", "key1"], as_index=False)
    result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
    expected = DataFrame(
        {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]}
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]
)
def test_multiindex_custom_func(func):
    # GH 31777
    # UDF aggregation preserves MultiIndex columns.
    data = [[1, 4, 2], [5, 7, 1]]
    df = DataFrame(data, columns=MultiIndex.from_arrays([[1, 1, 2], [3, 4, 3]]))
    result = df.groupby(np.array([0, 1])).agg(func)
    expected_dict = {
        (1, 3): {0: 1.0, 1: 5.0},
        (1, 4): {0: 4.0, 1: 7.0},
        (2, 3): {0: 2.0, 1: 1.0},
    }
    expected = DataFrame(expected_dict)
    tm.assert_frame_equal(result, expected)
def myfunc(s):
    # Module-level so the parametrized test below can reference it by name.
    return np.percentile(s, q=0.90)
@pytest.mark.parametrize("func", [lambda s: np.percentile(s, q=0.90), myfunc])
def test_lambda_named_agg(func):
    # see gh-28467
    # lambdas and named functions behave identically in named aggregation
    animals = DataFrame(
        {
            "kind": ["cat", "dog", "cat", "dog"],
            "height": [9.1, 6.0, 9.5, 34.0],
            "weight": [7.9, 7.5, 9.9, 198.0],
        }
    )
    result = animals.groupby("kind").agg(
        mean_height=("height", "mean"), perc90=("height", func)
    )
    expected = DataFrame(
        [[9.3, 9.1036], [20.0, 6.252]],
        columns=["mean_height", "perc90"],
        index=Index(["cat", "dog"], name="kind"),
    )
    tm.assert_frame_equal(result, expected)
def test_aggregate_mixed_types():
    # GH 16916
    # the grouping column itself holds mixed types (str and int)
    df = DataFrame(
        data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc")
    )
    df["grouping"] = ["group 1", "group 1", 2]
    result = df.groupby("grouping").aggregate(lambda x: x.tolist())
    expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]
    expected = DataFrame(
        expected_data,
        index=Index([2, "group 1"], dtype="object", name="grouping"),
        columns=Index(["X", "Y", "Z"], dtype="object"),
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="Not implemented;see GH 31256")
def test_aggregate_udf_na_extension_type():
    # https://github.com/pandas-dev/pandas/pull/31359
    # This is currently failing to cast back to Int64Dtype.
    # The presence of the NA causes two problems
    # 1. NA is not an instance of Int64Dtype.type (numpy.int64)
    # 2. The presence of an NA forces object type, so the non-NA values is
    #    a Python int rather than a NumPy int64. Python ints aren't
    #    instances of numpy.int64.
    def aggfunc(x):
        if all(x > 2):
            return 1
        else:
            return pd.NA
    df = DataFrame({"A": pd.array([1, 2, 3])})
    result = df.groupby([1, 1, 2]).agg(aggfunc)
    expected = DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_column(func):
    # GH 31471
    # min/max on a Period column keeps the Period dtype.
    groups = [1, 2]
    periods = pd.period_range("2020", periods=2, freq="Y")
    df = DataFrame({"a": groups, "b": periods})
    result = getattr(df.groupby("a")["b"], func)()
    idx = pd.Int64Index([1, 2], name="a")
    expected = Series(periods, index=idx, name="b")
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_frame(func):
    # GH 31471
    # frame-level variant of the Period column test above
    groups = [1, 2]
    periods = pd.period_range("2020", periods=2, freq="Y")
    df = DataFrame({"a": groups, "b": periods})
    result = getattr(df.groupby("a"), func)()
    idx = pd.Int64Index([1, 2], name="a")
    expected = DataFrame({"b": periods}, index=idx)
    tm.assert_frame_equal(result, expected)
class TestLambdaMangling:
    # Duplicate lambdas in one agg call are "mangled" into distinct labels
    # <lambda_0>, <lambda_1>, ... so their results don't collide.
    def test_basic(self):
        df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
        result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})
        expected = DataFrame(
            {("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]},
            index=Index([0, 1], name="A"),
        )
        tm.assert_frame_equal(result, expected)
    def test_mangle_series_groupby(self):
        gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
        result = gr.agg([lambda x: 0, lambda x: 1])
        expected = DataFrame({"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]})
        tm.assert_frame_equal(result, expected)
    @pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.")
    def test_with_kwargs(self):
        f1 = lambda x, y, b=1: x.sum() + y + b
        f2 = lambda x, y, b=2: x.sum() + y * b
        result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
        expected = DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})
        tm.assert_frame_equal(result, expected)
        result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
        expected = DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
        tm.assert_frame_equal(result, expected)
    def test_agg_with_one_lambda(self):
        # GH 25719, write tests for DataFrameGroupby.agg with only one lambda
        df = DataFrame(
            {
                "kind": ["cat", "dog", "cat", "dog"],
                "height": [9.1, 6.0, 9.5, 34.0],
                "weight": [7.9, 7.5, 9.9, 198.0],
            }
        )
        columns = ["height_sqr_min", "height_max", "weight_max"]
        expected = DataFrame(
            {
                "height_sqr_min": [82.81, 36.00],
                "height_max": [9.5, 34.0],
                "weight_max": [9.9, 198.0],
            },
            index=Index(["cat", "dog"], name="kind"),
            columns=columns,
        )
        # check pd.NameAgg case
        result1 = df.groupby(by="kind").agg(
            height_sqr_min=pd.NamedAgg(
                column="height", aggfunc=lambda x: np.min(x ** 2)
            ),
            height_max=pd.NamedAgg(column="height", aggfunc="max"),
            weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
        )
        tm.assert_frame_equal(result1, expected)
        # check agg(key=(col, aggfunc)) case
        result2 = df.groupby(by="kind").agg(
            height_sqr_min=("height", lambda x: np.min(x ** 2)),
            height_max=("height", "max"),
            weight_max=("weight", "max"),
        )
        tm.assert_frame_equal(result2, expected)
    def test_agg_multiple_lambda(self):
        # GH25719, test for DataFrameGroupby.agg with multiple lambdas
        # with mixed aggfunc
        df = DataFrame(
            {
                "kind": ["cat", "dog", "cat", "dog"],
                "height": [9.1, 6.0, 9.5, 34.0],
                "weight": [7.9, 7.5, 9.9, 198.0],
            }
        )
        columns = [
            "height_sqr_min",
            "height_max",
            "weight_max",
            "height_max_2",
            "weight_min",
        ]
        expected = DataFrame(
            {
                "height_sqr_min": [82.81, 36.00],
                "height_max": [9.5, 34.0],
                "weight_max": [9.9, 198.0],
                "height_max_2": [9.5, 34.0],
                "weight_min": [7.9, 7.5],
            },
            index=Index(["cat", "dog"], name="kind"),
            columns=columns,
        )
        # check agg(key=(col, aggfunc)) case
        result1 = df.groupby(by="kind").agg(
            height_sqr_min=("height", lambda x: np.min(x ** 2)),
            height_max=("height", "max"),
            weight_max=("weight", "max"),
            height_max_2=("height", lambda x: np.max(x)),
            weight_min=("weight", lambda x: np.min(x)),
        )
        tm.assert_frame_equal(result1, expected)
        # check pd.NamedAgg case
        result2 = df.groupby(by="kind").agg(
            height_sqr_min=pd.NamedAgg(
                column="height", aggfunc=lambda x: np.min(x ** 2)
            ),
            height_max=pd.NamedAgg(column="height", aggfunc="max"),
            weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
            height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
            weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
        )
        tm.assert_frame_equal(result2, expected)
def test_groupby_get_by_index():
    # GH 33439
    # Series.get with the group's own index labels works inside a UDF.
    df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
    res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])})
    expected = DataFrame({"A": ["S", "W"], "B": [1.0, 2.0]}).set_index("A")
    tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize(
    "grp_col_dict, exp_data",
    [
        ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),
        ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),
        ({"nr": "min"}, {"nr": [1, 5]}),
    ],
)
def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):
    # test single aggregations on ordered categorical cols GH 27800
    # create the result dataframe
    input_df = DataFrame(
        {
            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
            "cat_ord": list("aabbccdd"),
            "cat": list("aaaabbbb"),
        }
    )
    input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
    result_df = input_df.groupby("cat").agg(grp_col_dict)
    # create expected dataframe
    cat_index = pd.CategoricalIndex(
        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
    )
    expected_df = DataFrame(data=exp_data, index=cat_index)
    if "cat_ord" in expected_df:
        # ordered categorical columns should be preserved
        dtype = input_df["cat_ord"].dtype
        expected_df["cat_ord"] = expected_df["cat_ord"].astype(dtype)
    tm.assert_frame_equal(result_df, expected_df)
@pytest.mark.parametrize(
    "grp_col_dict, exp_data",
    [
        ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),
        ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),
        ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),
    ],
)
def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):
    # test combined aggregations on ordered categorical cols GH27800
    # create the result dataframe
    input_df = DataFrame(
        {
            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
            "cat_ord": list("aabbccdd"),
            "cat": list("aaaabbbb"),
        }
    )
    input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
    result_df = input_df.groupby("cat").agg(grp_col_dict)
    # create expected dataframe
    cat_index = pd.CategoricalIndex(
        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
    )
    # unpack the grp_col_dict to create the multi-index tuple
    # this tuple will be used to create the expected dataframe index
    multi_index_list = []
    for k, v in grp_col_dict.items():
        if isinstance(v, list):
            for value in v:
                multi_index_list.append([k, value])
        else:
            multi_index_list.append([k, v])
    multi_index = MultiIndex.from_tuples(tuple(multi_index_list))
    expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index)
    for col in expected_df.columns:
        if isinstance(col, tuple) and "cat_ord" in col:
            # ordered categorical should be preserved
            expected_df[col] = expected_df[col].astype(input_df["cat_ord"].dtype)
    tm.assert_frame_equal(result_df, expected_df)
def test_nonagg_agg():
    # GH 35490 - Single/Multiple agg of non-agg function give same results
    # TODO: agg should raise for functions that don't aggregate
    df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
    g = df.groupby("a")
    result = g.agg(["cumsum"])
    result.columns = result.columns.droplevel(-1)
    expected = g.agg("cumsum")
    tm.assert_frame_equal(result, expected)
def test_agg_no_suffix_index():
    # GH36189
    # duplicate lambdas in DataFrame.agg keep the plain "<lambda>" label
    df = DataFrame([[4, 9]] * 3, columns=["A", "B"])
    result = df.agg(["sum", lambda x: x.sum(), lambda x: x.sum()])
    expected = DataFrame(
        {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"]
    )
    tm.assert_frame_equal(result, expected)
    # test Series case
    result = df["A"].agg(["sum", lambda x: x.sum(), lambda x: x.sum()])
    expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"], name="A")
    tm.assert_series_equal(result, expected)
def test_aggregate_datetime_objects():
    # https://github.com/pandas-dev/pandas/issues/36003
    # ensure we don't raise an error but keep object dtype for out-of-bounds
    # datetimes
    df = DataFrame(
        {
            "A": ["X", "Y"],
            "B": [
                datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
                datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
            ],
        }
    )
    result = df.groupby("A").B.max()
    expected = df.set_index("A")["B"]
    tm.assert_series_equal(result, expected)
def test_aggregate_numeric_object_dtype():
    # https://github.com/pandas-dev/pandas/issues/39329
    # simplified case: multiple object columns where one is all-NaN
    # -> gets split as the all-NaN is inferred as float
    df = DataFrame(
        {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4},
    ).astype(object)
    result = df.groupby("key").min()
    expected = DataFrame(
        {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]}
    ).set_index("key")
    tm.assert_frame_equal(result, expected)
    # same but with numbers
    df = DataFrame(
        {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)},
    ).astype(object)
    result = df.groupby("key").min()
    expected = DataFrame(
        {"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]}
    ).set_index("key")
    tm.assert_frame_equal(result, expected)
def test_groupby_index_object_dtype():
    # GH 40014
    df = DataFrame({"c0": ["x", "x", "x"], "c1": ["x", "x", "y"], "p": [0, 1, 2]})
    df.index = df.index.astype("O")
    grouped = df.groupby(["c0", "c1"])
    res = grouped.p.agg(lambda x: all(x > 0))
    # Check that providing a user-defined function in agg()
    # produces the correct index shape when using an object-typed index.
    expected_index = MultiIndex.from_tuples(
        [("x", "x"), ("x", "y")], names=("c0", "c1")
    )
    expected = Series([False, True], index=expected_index, name="p")
    tm.assert_series_equal(res, expected)
|
nwy140/algosand | ch2.py | # Super power of ordered arrays
n = 101  # demo array size
binarray = range(n)  # NOTE: in Python 3 this is a lazy range object, not a list
# 4 primary operations in an array algo
# Read
# Search
# Insert
# Delete
# This is the most basic code, but will be updated
# with algorithms while we play along.
# Read
def aread(arr, i):
    """O(1) array read: fetch the element stored at position ``i``."""
    element = arr[i]
    return element
# Search
def asearch(arr, s):
    """Linear search for ``s`` in ``arr``.

    Returns the stored value on a hit, or None (explicitly) on a miss.
    TODO: switch to binary search -- ``arr`` is assumed ordered.

    Fixes vs. the original: the miss case previously fell off the end and
    returned None implicitly; the redundant ``aread(arr, i)`` re-index of a
    value we already hold has been removed.
    """
    for value in arr:
        if value == s:
            return value
    return None
# Insert
def ainsert(arr, i):
    """Insert into the ordered array at index ``i`` (not implemented yet).

    Per the original TODO, the plan is to shift elements right to make room.

    Raises:
        NotImplementedError: always, until the shift-right version lands.
    """
    # BUG FIX: the original used ``raise NotImplemented``. NotImplemented is a
    # comparison sentinel, not an exception class, so that line raised a
    # TypeError instead of signalling "not implemented".
    raise NotImplementedError("ainsert is not implemented yet")
# Delete
def adelete(arr, i):
    """Delete the element at index ``i`` (not implemented yet).

    Per the original TODO, the plan is to remove the element and shift the
    tail left to close the gap.

    Raises:
        NotImplementedError: always, until the shift-left version lands.
    """
    # BUG FIXES: the original ``def adelete(arr, i)`` was missing the colon
    # (a SyntaxError), and ``raise NotImplemented`` raised a TypeError because
    # NotImplemented is a sentinel value, not an exception class.
    raise NotImplementedError("adelete is not implemented yet")
|
asa-leholland/SudokuSolver | consoleSudoku.py | # filename: consoleSudokuUI.py
# author: <NAME>
# Import random to randomly select which puzzle is chosen
import random
# import regex to match validity for user-provided input
import re
# import json to read and handle the Sudoku puzzle library (which is stored in a .json format)
import json
# perform a local import to load the main sudoku verification and solving functions
import main as main_sudoku
def welcome():
    """
    Welcomes the user to Console Sudoku.
    """
    # NOTE(review): the leading whitespace inside the triple-quoted string is
    # part of the printed banner; the dump's indentation is ambiguous here.
    print("""
    Welcome to Console Sudoku!""")
def describe_sudoku():
    """Print the general rules of Sudoku for the main-menu "rules" option.

    Returns:
        bool: always True, signalling the caller that the option completed.
    """
    rules_text = 'The goal of Sudoku is to completely fill a 9×9 grid with digits. When filled with valid digits, every column, every row, and every 3×3 subgrid that compose the game board contain each digit from 1 to 9. The Sudoku player is provided a grid which is partially complete (some cells are left blank), and the Sudoku player aims to fill in the grid with valid digits so that the puzzle can be completed. If enough blanks are present when the board is first presented, it is possible for multiple valid solutions to exist for a single board, but a good instance of a Sudoku puzzle has only one single valid solution.'
    print(rules_text)
    return True
def generate_solution(board):
    """
    Provided a Sudoku board, check to see if there is a possible solution for that board, and if there is, generate and display it for the user.
    """
    # Tell the user what is happening before the (possibly slow) solve.
    print('\nChecking for a solution...')
    # Work on a shallow copy so the caller's board is never mutated.
    working_copy = list(board)
    # Guard clause: bail out early when the solver reports no solution.
    if not main_sudoku.solve_sudoku(board_to_solve=working_copy):
        print('\nThere is no valid solution for this puzzle.')
        return
    # The solver filled working_copy in place; show the solved grid.
    print('\nOne valid solution to this Sudoku puzzle is as follows:\n')
    main_sudoku.npdisplay(working_copy)
def user_solve(puzzle_for_user_to_solve, backup):
    """
    Run the interactive loop that lets the user fill in one Sudoku puzzle.

    :param puzzle_for_user_to_solve: 9x9 board the user edits; its row lists
        are shared with the caller so progress persists if the board is
        closed and reopened from the Puzzle Selection Menu
    :param backup: pristine copy of the original puzzle, used for resets
    :return: False if the user quits Console Sudoku entirely, True when the
        user closes the board to return to the Puzzle Selection Menu
    """
    # Intentionally a shallow copy: sharing the row lists with the caller is
    # what keeps the user's progress alive when the board is reopened.
    user_board = list(puzzle_for_user_to_solve)
    main_sudoku.npdisplay(user_board)
    while True:
        # Show the in-game menu before every action (plain string: no placeholders)
        print("""
You are currently solving this puzzle.
Please enter one of the following options:
1. Place a number in an empty cell of this Sudoku board.
2. Submit this Sudoku board and verify if this puzzle has been solved.
3. Reset this Sudoku board. Warning: All progress will be lost!
4. Close this Sudoku board, and return to the Puzzle Selection Menu for this puzzle.
Or enter 'Q' to quit Console Sudoku.
""")
        user_selection = input('Enter your selection: ')
        # Quit the whole game
        if user_selection == 'Q':
            return False
        validated_input = get_valid_int(provided_input=user_selection)
        if validated_input is None:
            print("Error, invalid input. Please enter a number selecting one of the provided options or enter 'Q' to quit Console Sudoku.")
        elif validated_input not in {1, 2, 3, 4}:
            print("Error, invalid selection. Please enter one of the provided options or enter 'Q' to quit Console Sudoku.")
        # Option 1: place a number in an empty cell
        elif validated_input == 1:
            user_input = input("Please enter the Row, Column, Value you would like to place: ")
            # Entry must be exactly "row, column, value" with in-range digits
            if not re.match(r"^[0-8], [0-8], [1-9]$", user_input):
                print("""Invalid entry. To enter 2 in Row 3, Column 4, enter '2, 3, 4'.""")
            else:
                [user_row, user_column, user_value] = [int(digit) for digit in user_input.split(', ')]
                # Cells given by the original puzzle (non-zero) may not be changed
                if user_board[user_row][user_column] != 0:
                    print("""Invalid placement. You can't change cells from the original provided puzzle.""")
                else:
                    user_board[user_row][user_column] = user_value
                    print(f"""Valid placement. You have placed a {user_value} in Row {user_row}, Column {user_column}.""")
            # Redisplay the board whether or not the entry was accepted
            main_sudoku.npdisplay(user_board)
        # Option 2: submit the board for validation
        elif validated_input == 2:
            print("You have submitted the current board. Checking if the current board is a valid solution...")
            # BUGFIX: validate a deep copy; the previous shallow list(user_board)
            # let the validator's temporary cell edits leak into the user's board.
            copied_board = [row[:] for row in user_board]
            submission_result = main_sudoku.validate_user_submission(user_board=copied_board)
            for line in submission_result:
                print(line)
            main_sudoku.npdisplay(user_board)
        # Option 3: reset the board to its original state
        elif validated_input == 3:
            print("You have reset this Sudoku board.")
            # BUGFIX: copy the backup values into the existing (shared) rows.
            # The previous `user_board = list(backup)` aliased the backup's row
            # lists, so later placements silently corrupted the backup itself.
            for row_index in range(9):
                user_board[row_index][:] = backup[row_index]
            main_sudoku.npdisplay(user_board)
        # Option 4: close the board, back to the Puzzle Selection Menu
        elif validated_input == 4:
            print("Returning to the Puzzle Selection Menu for this puzzle.")
            return True
def load_sudoku_board(board_to_load):
    """
    Run the Puzzle Selection Menu for one puzzle: the user may solve it,
    view a generated solution, or go back to the Main Menu.

    :param board_to_load: 9x9 list of lists for the selected puzzle
    :return: False if the user quits Console Sudoku, True to return to the Main Menu
    """
    # Working copy the user edits; rows are shared with user_solve so that
    # progress survives closing and reopening the board.
    copy_of_board_to_load = list(board_to_load)
    # BUGFIX: the reset backup must be a deep copy. The previous
    # list(board_to_load) shared its row lists with the working copy above,
    # which made "reset" a no-op.
    pristine_backup = [row[:] for row in board_to_load]
    main_sudoku.npdisplay(copy_of_board_to_load)
    while True:
        # Menu is a plain string (no placeholders), so no f-prefix needed
        print("""
This is the Puzzle Selection Menu.
Please enter one of the following options:
1. Attempt to solve this puzzle.
2. Generate and view a valid solution to this puzzle.
3. Return to the Main Menu.
Or enter 'Q' to quit Console Sudoku.
""")
        user_selection = input('Enter your selection: ')
        if user_selection == 'Q':
            return False
        validated_input = get_valid_int(provided_input=user_selection)
        if validated_input is None:
            print("Error, invalid input. Please enter a number selecting one of the provided options or enter 'Q' to quit Console Sudoku.")
        elif validated_input not in {1, 2, 3}:
            print("Error, invalid selection. Please enter one of the provided options or enter 'Q' to quit Console Sudoku.")
        # Option 1: let the user solve the puzzle interactively
        elif validated_input == 1:
            # BUGFIX: propagate a quit request from the solving loop; the
            # return value of user_solve was previously ignored, so pressing
            # 'Q' while solving dropped the user back here instead of quitting.
            if not user_solve(puzzle_for_user_to_solve=copy_of_board_to_load, backup=pristine_backup):
                return False
        # Option 2: show a generated solution on a throwaway deep copy
        elif validated_input == 2:
            generate_solution(board=[row[:] for row in board_to_load])
        # Option 3: back to the Main Menu
        elif validated_input == 3:
            print("Returning to the Main Menu of Console Sudoku.")
            return True
        # Removed the dead final `else` branch, which referenced an undefined
        # `valid_selections` dict (leftover from an earlier revision).
def load_random_sudoku():
    """
    Pick a random puzzle from the puzzle library (puzzles.json) and open its
    Puzzle Selection Menu.

    :return: the result of load_sudoku_board (False means the user quit)
    """
    # Open the json file that stores the Sudoku puzzles
    # (plain string literal; the previous f-prefix had no placeholders)
    with open("puzzles.json") as json_file:
        data = json.load(json_file)
    # Select a random puzzle entry
    random_puzzle = random.choice(data)
    # Identify the puzzle name and source
    name = random_puzzle["puzzle_name"]
    source = random_puzzle["source"]
    # Tell the user which puzzle was selected
    selection_prompt = [
        f"You have selected a Sudoku puzzle called '{name}'.",
        f"This source of this puzzle is {source}."
    ]
    for line in selection_prompt:
        print(line)
    # BUGFIX: deep-copy the stored puzzle so user edits can never leak back
    # into the in-memory library (list(...) only copied the outer list).
    copy_of_selection = [row[:] for row in random_puzzle["puzzle"]]
    return load_sudoku_board(board_to_load=copy_of_selection)
# Modified from source: https://pynative.com/python-check-user-input-is-number-or-string
def get_valid_int(provided_input):
    """
    Convert raw user input into a number.

    :param provided_input: the string the user typed
    :return: an int if the text parses as one, else a float if it parses as
        one, else None for anything non-numeric
    """
    # Try the strictest conversion first so "3" comes back as an int,
    # "3.5" as a float, and everything else as None.
    for converter in (int, float):
        try:
            return converter(provided_input)
        except ValueError:
            continue
    return None
def present_options():
    """
    Run the Console Sudoku Main Menu loop.

    Each menu option maps to a handler whose return value decides whether the
    loop continues: describe_sudoku returns True (keep running) and
    load_random_sudoku returns False only when the user chose to quit.
    """
    # Dispatch table: menu number -> handler function
    valid_selections = {}
    # Option 1: read a description about Sudoku
    option_1 = 'Read a short description about the rules of Sudoku.'
    valid_selections[1] = describe_sudoku
    # Option 2: open a random sudoku puzzle from the library
    option_2 = 'Load a random Sudoku puzzle from the Puzzle Library.'
    valid_selections[2] = load_random_sudoku
    # Start running the Console Sudoku Main Menu
    running = True
    while running:
        # Present the user with their options
        print(f"""
This is the Console Sudoku Main Menu.
Please enter one of the following options:
1. {option_1}
2. {option_2}
Or enter 'Q' to quit Console Sudoku.
""")
        # Read the user's selection
        user_selection = input('Enter your selection: ')
        # Allow the user to exit Console Sudoku
        if user_selection == 'Q':
            running = False
        # In all other cases
        else:
            # Validate the user input
            validated_input = get_valid_int(provided_input=user_selection)
            # Non-numeric input: prompt the user accordingly
            if validated_input is None:
                print("Error, invalid input. Please enter a number selecting one of the provided options or enter 'Q' to quit Console Sudoku.")
            # Numeric but not a listed option: let the user know
            elif validated_input not in valid_selections:
                print("Error, invalid selection. Please enter one of the provided options or enter 'Q' to quit Console Sudoku.")
            # Otherwise, run the selected handler; its return value keeps or
            # stops the loop (see docstring)
            else:
                running = valid_selections[validated_input]()
    print('Thank you for using Console Sudoku.')
# When run as a script (not imported), start the console sudoku interface:
# greet the user, then enter the Main Menu loop until they quit.
if __name__ == '__main__':
    welcome()
    present_options()
|
asa-leholland/SudokuSolver | backupConsoleUI.py | # filename: consoleSudokuUI.py
# author: <NAME>
import random
import time
import re
import main as main_sudoku
import json
def welcome():
    """
    Welcomes the user to Console Sudoku.
    """
    # One-time greeting banner (backup variant of the console UI).
    print("""
Welcome to Console Sudoku!""")
def describe_sudoku():
    """Print the rules of Sudoku, pause briefly, and keep the menu loop running."""
    print('The goal of Sudoku is to completely fill a 9×9 grid with digits. When filled with valid digits, every column, every row, and every 3×3 subgrid that compose the game board contain each digit from 1 to 9. The Sudoku player is provided a grid which is partially complete (some cells are left blank), and the Sudoku player aims to fill in the grid with valid digits so that the puzzle can be completed. If enough blanks are present when the board is first presented, it is possible for multiple valid solutions to exist for a single board, but a good instance of a Sudoku puzzle has only one single valid solution.')
    # Pause so the user has a moment to start reading before the menu reappears.
    time.sleep(3)
    # True tells the caller's menu loop to keep running.
    return True
def generate_solution(board):
    """
    Solve the given board if possible and display the solution.

    NOTE(review): list(board) is a shallow copy -- the solver still mutates the
    caller's row lists. Harmless only if callers pass a throwaway copy; verify.
    """
    print('\nChecking for a solution...')
    time.sleep(2)
    board_copy = list(board)
    if main_sudoku.solve_sudoku(board_to_solve=board_copy):
        print('\nOne valid solution to this Sudoku puzzle is as follows:\n')
        main_sudoku.npdisplay(board_copy)
    else:
        print('\nThere is no valid solution for this puzzle.')
    time.sleep(1)
def user_solve(puzzle_for_user_to_solve, backup):
    """
    Interactive solving loop for one puzzle (backup variant with pacing delays).

    :param puzzle_for_user_to_solve: 9x9 board the user edits
    :param backup: copy of the original puzzle, used for resets
    :return: False if the user quits the whole game, True when they close the board
    """
    time.sleep(1)
    # NOTE(review): list() is a shallow copy -- the row lists stay shared with
    # the caller's board, so placements persist outside this function; confirm intended.
    user_board = list(puzzle_for_user_to_solve)
    main_sudoku.npdisplay(user_board)
    running = True
    while running:
        # In-game menu shown before every action
        print(f"""
You are currently solving this puzzle.
Please enter one of the following options:
1. Place a number in an empty cell of this Sudoku board.
2. Submit this Sudoku board and verify if this puzzle has been solved.
3. Reset this Sudoku board. Warning: All progress will be lost!
4. Close this Sudoku board, and return to the Puzzle Selection Menu for this puzzle.
Or enter 'Q' to quit Console Sudoku.
""")
        user_selection = input('Enter your selection: ')
        if user_selection == 'Q':
            return False
        else:
            validated_input = get_valid_int(provided_input=user_selection)
            if validated_input is None:
                print("Error, invalid input. Please enter a number selecting one of the provided options or enter 'Q' to quit Console Sudoku.")
            elif validated_input not in {1, 2, 3, 4}:
                print("Error, invalid selection. Please enter one of the provided options or enter 'Q' to quit Console Sudoku.")
            # Allow the user to set the number in a cell
            elif validated_input == 1:
                user_input = input("Please enter the Row, Column, Value you would like to place: ")
                time.sleep(1)
                # Entry must be exactly "row, column, value" with in-range digits
                is_valid_input = bool(re.match(r"^[0-8], [0-8], [1-9]$", user_input))
                if not is_valid_input:
                    print("""Invalid entry. To enter 2 in Row 3, Column 4, enter '2, 3, 4'.""")
                else:
                    # Parse the user provided input
                    [user_row, user_column, user_value] = [int(digit) for digit in user_input.split(', ')]
                    # Cells from the original puzzle (non-zero) may not be changed
                    if user_board[user_row][user_column] != 0:
                        print("""Invalid placement. You can't change cells from the original provided puzzle.""")
                    else:
                        user_board[user_row][user_column] = user_value
                        print(f"""Valid placement. You have placed a {user_value} in Row {user_row}, Column {user_column}.""")
                main_sudoku.npdisplay(user_board)
            # Allow the user to submit a solution
            elif validated_input == 2:
                print("You have submitted the current board. Checking if the current board is a valid solution...")
                time.sleep(1)
                copied_board = list(user_board)
                submission_result = main_sudoku.validate_user_submission(user_board=copied_board)
                for line in submission_result:
                    time.sleep(0.5)
                    print(line)
                main_sudoku.npdisplay(user_board)
            # Allow the user to reset the board
            elif validated_input == 3:
                print("You have reset this Sudoku board.")
                time.sleep(1)
                # NOTE(review): list(backup) is shallow; if backup shares its rows
                # with the live board, this reset is a no-op -- check the caller.
                user_board = list(backup)
                main_sudoku.npdisplay(user_board)
            # Allow the user to quit, returning them to puzzle selection
            elif validated_input == 4:
                print("Returning to the Puzzle Selection Menu for this puzzle.")
                time.sleep(1)
                return True
def load_sudoku_board(board_to_load):
    """
    Puzzle Selection Menu for one board (backup variant).

    :return: False if the user quits Console Sudoku, True to go back to the Main Menu
    """
    time.sleep(1)
    # NOTE(review): both copies below are shallow and share row lists with each
    # other -- the "backup" passed to user_solve is not independent; verify.
    copy_of_board_to_load = list(board_to_load)
    second_copy = list(board_to_load)
    main_sudoku.npdisplay(copy_of_board_to_load)
    # NOTE(review): this dispatch dict appears to be dead scaffolding -- the
    # elif chain below handles every key in it, so the final `else` never runs.
    valid_selections = {}
    option_1 = '.'
    valid_selections[1] = user_solve
    # # Allow user to confirm the puzzle is valid
    # option_2 = 'Confirm this is a valid sudoku puzzle (confirm there is at least one possible solution).'
    # valid_selections[2] = confirm_solvable
    option_2 = ''
    valid_selections[2] = generate_solution
    option_3 = ''
    valid_selections[3] = None
    running = True
    while running:
        print(f"""
This is the Puzzle Selection Menu.
Please enter one of the following options:
1. Attempt to solve this puzzle.
2. Generate and view a valid solution to this puzzle.
3. Return to the Main Menu.
Or enter 'Q' to quit Console Sudoku.
""")
        user_selection = input('Enter your selection: ')
        if user_selection == 'Q':
            return False
        else:
            validated_input = get_valid_int(provided_input=user_selection)
            if validated_input is None:
                print("Error, invalid input. Please enter a number selecting one of the provided options or enter 'Q' to quit Console Sudoku.")
            elif validated_input not in valid_selections:
                print("Error, invalid selection. Please enter one of the provided options or enter 'Q' to quit Console Sudoku.")
            # Allow user to attempt to solve the puzzle
            elif validated_input == 1:
                user_solve(puzzle_for_user_to_solve=copy_of_board_to_load, backup=second_copy)
            # Allow user to view a solution
            elif validated_input == 2:
                duplicate = list(board_to_load)
                generate_solution(board=duplicate)
            # Allow user to return to the Main Menu
            elif validated_input == 3:
                print("Returning to the Main Menu of Console Sudoku.")
                time.sleep(1)
                return True
            else:
                # Unreachable: 1-3 are handled above and other ints are rejected.
                running = valid_selections[validated_input](copy_of_board_to_load)
def load_random_sudoku():
    """
    Load a random puzzle from puzzles.json and open its Puzzle Selection Menu.

    :return: the result of load_sudoku_board (False means the user quit)
    """
    with open(f"puzzles.json") as json_file:
        data = json.load(json_file)
    random_puzzle = random.choice(data)
    name = random_puzzle["puzzle_name"]
    source = random_puzzle["source"]
    selection_prompt = [
        f"You have selected a Sudoku puzzle called '{name}'.",
        f"This source of this puzzle is {source}."
    ]
    for line in selection_prompt:
        time.sleep(0.5)
        print(line)
    # NOTE(review): shallow copy -- the row lists are still shared with `data`.
    copy_of_selection = list(random_puzzle["puzzle"])
    return load_sudoku_board(board_to_load=copy_of_selection)
# Modified from source: https://pynative.com/python-check-user-input-is-number-or-string
def get_valid_int(provided_input):
    """
    Parse raw user input into a number: int first, then float, else None.

    :param provided_input: the string the user typed
    :return: int, float, or None when the text is not numeric
    """
    for parse in (int, float):
        try:
            return parse(provided_input)
        except ValueError:
            continue
    return None
def present_options():
    """
    Run the Console Sudoku Main Menu loop (backup variant).

    Handlers return True to keep the menu running; load_random_sudoku returns
    False only when the user chose to quit from a deeper menu.
    """
    # Dispatch table: menu number -> handler function
    valid_selections = {}
    # Allow user to read a description about Sudoku
    option_1 = 'Read a short description about Sudoku.'
    valid_selections[1] = describe_sudoku
    # Allow user to open a random sudoku puzzle from library
    option_2 = 'Load a random Sudoku puzzle from the Puzzle Library.'
    valid_selections[2] = load_random_sudoku
    # # Allow user to view all sudoku puzzles in library
    # option_3 = 'Select a specific Sudoku puzzle from the Puzzle Library.'
    # valid_selections[1] = describe_sudoku
    # # Allow user to add a new sudoku puzzle to library
    # option_4 = 'Add a new Sudoku puzzle to Puzzle Library.'
    # valid_selections[1] = describe_sudoku
    # # Allow user to delete existing sudoku puzzle from library
    # option_5 = 'Delete an existing Sudoku puzzle from the Puzzle Library.'
    # valid_selections[1] = describe_sudoku
    # 3. {option_3}
    # 4. {option_4}
    # 5. {option_5}
    running = True
    while running:
        print(f"""
This is the Console Sudoku Main Menu.
Please enter one of the following options:
1. {option_1}
2. {option_2}
Or enter 'Q' to quit Console Sudoku.
""")
        user_selection = input('Enter your selection: ')
        if user_selection == 'Q':
            running = False
        else:
            validated_input = get_valid_int(provided_input=user_selection)
            if validated_input is None:
                print("Error, invalid input. Please enter a number selecting one of the provided options or enter 'Q' to quit Console Sudoku.")
            elif validated_input not in valid_selections:
                print("Error, invalid selection. Please enter one of the provided options or enter 'Q' to quit Console Sudoku.")
            else:
                # Run the handler; its return value keeps or stops the loop
                running = valid_selections[validated_input]()
    print('Thank you for using Console Sudoku.')
# When run as a script (not imported), start the console sudoku interface.
if __name__ == '__main__':
    welcome()
    present_options()
|
asa-leholland/SudokuSolver | main.py | # main.py
# Author: <NAME>
def npdisplay(board):
    """
    Display the provided board to the user through a series of print statements.

    :param board: 9x9 list of lists; a 0 cell is rendered as a blank square
    """
    # Output the column header and establish the column borders
    print()
    column_header = ' Columns '
    print(column_header)
    column_header = ' | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8'
    print(column_header)
    spacer = ' | | | | | | | | | '
    print(spacer)
    row_separator = ' -----|---|---|---|---|---|---|---|---|---'
    print(row_separator)
    # Iterate over each row in the board
    for index, row in enumerate(board):
        # At the middle row, add a 'Rows' label on the left
        if index == 4:
            print(' Rows ', end='')
        # On all other rows, pad with spaces to keep the grid aligned
        else:
            print(' ', end='')
        # Add the row number
        print(f' {str(index)} ', end='|')
        # For each digit placed in the provided board, display it
        for digit in row:
            # A zero means the cell is unsolved: display an empty square instead
            if digit == 0:
                print(' ', end=' |')
            # Otherwise fill the cell with the digit from the provided board
            else:
                print(' ' + str(digit) + ' ', end='|')
        # At the end of each row, add a dashed line to separate the rows
        print('\n' + row_separator)
def is_placement_possible(y, x, n, board):
    """
    Report whether digit *n* may legally be placed at board[y][x].

    A placement is legal when n does not already appear in row y, in column x,
    or in the 3x3 subsquare containing (y, x).

    :param x: column index (0-8, counted from the left)
    :param y: row index (0-8, counted from the top)
    :param n: the candidate digit to test
    :param board: 9x9 list of lists; 0 marks an empty cell
    :return: True if the placement is legal, False otherwise
    """
    # Row check: each row is a plain list, so a membership test suffices.
    if n in board[y]:
        return False
    # Column check: scan the x-th cell of every row.
    if any(board[row_index][x] == n for row_index in range(9)):
        return False
    # Subsquare check: find the top-left corner of the enclosing 3x3 box
    # (e.g. x=8 -> 6, the first column of the third horizontal box).
    box_x = (x // 3) * 3
    box_y = (y // 3) * 3
    for dy in range(3):
        for dx in range(3):
            if board[box_y + dy][box_x + dx] == n:
                return False
    # n was found nowhere in the row, column, or subsquare.
    return True
def find_empty_location(board, coordinates):
    """
    Find the next empty (zero) cell on the Sudoku board, scanning row by row.

    :param board: 9x9 list of lists representing a sudoku board
    :param coordinates: two-element list mutated in place to hold the
        [row, column] of the first empty cell found
    :return: True if an empty cell was found, False if the board is full
    """
    for row_index, row in enumerate(board):
        for col_index, cell in enumerate(row):
            if cell == 0:
                # Report the location through the caller-supplied list.
                coordinates[0] = row_index
                coordinates[1] = col_index
                return True
    # Every cell already holds a digit.
    return False
def solve_sudoku(board_to_solve):
    """
    Solve a Sudoku puzzle in place using recursive backtracking.

    The board is mutated: on success it holds a complete solution; on failure
    every digit tried during the search has been backtracked to 0.

    :param board_to_solve: 9x9 list of lists; 0 marks an empty cell
    :return: True if a complete valid solution was found, False otherwise

    NOTE: the previous `board_to_solve.copy()` was a shallow copy whose row
    lists were shared with the caller's board, so the board was mutated
    anyway. The in-place contract is now explicit; callers that need to keep
    the original must deep-copy before calling.
    """
    # Coordinates of the next unsolved cell, filled in by find_empty_location.
    current_coords = [0, 0]
    # No empty cell left means the puzzle is solved.
    if not find_empty_location(board=board_to_solve, coordinates=current_coords):
        return True
    [row, col] = current_coords
    # Try each candidate digit in the empty cell.
    for digit in range(1, 10):
        if is_placement_possible(y=row, x=col, n=digit, board=board_to_solve):
            board_to_solve[row][col] = digit
            # Recurse: does this placement lead to a full solution?
            if solve_sudoku(board_to_solve=board_to_solve):
                return True
            # Backtrack: undo the placement and try the next digit.
            board_to_solve[row][col] = 0
    # No digit fits here: either an earlier placement was wrong (the caller
    # will backtrack) or the puzzle has no valid solution.
    return False
def is_valid_sudoku(board_to_test):
    """
    Determine whether a board is a complete, valid Sudoku solution.

    Each cell's value is temporarily removed, re-tested for placeability, and
    then restored — so the board is handed back exactly as it was passed in.

    :param board_to_test: a 9x9 list of lists representing a sudoku board
    :return: A dictionary containing the following key value pairs:
        'is_valid': True if the provided board is valid or False if not
        'invalid_row': index of the first invalid row found (else None)
        'invalid_column': index of the first invalid column found (else None)
        'invalid_digit': the offending value (0 for an empty cell, else None)
    """
    for row in range(9):
        for col in range(9):
            # An empty cell means the board is incomplete, hence invalid.
            if board_to_test[row][col] == 0:
                return {'is_valid': False, 'invalid_row': row, 'invalid_column': col, 'invalid_digit': 0}
            # Temporarily blank the cell, then check the value can be re-placed.
            temp = board_to_test[row][col]
            board_to_test[row][col] = 0
            if not is_placement_possible(y=row, x=col, n=temp, board=board_to_test):
                # BUGFIX: restore the cell before returning; previously the
                # caller's board was left with this cell zeroed out.
                board_to_test[row][col] = temp
                return {'is_valid': False, 'invalid_row': row, 'invalid_column': col, 'invalid_digit': temp}
            # Valid here: put the value back and continue scanning.
            board_to_test[row][col] = temp
    # Every cell is filled and consistent with its row, column and subsquare.
    return {'is_valid': True, 'invalid_row': None, 'invalid_column': None, 'invalid_digit': None}
def confirm_puzzle_is_solvable(puzzle):
    """
    Solve and validate a provided puzzle, printing the outcome.

    :param puzzle: 9x9 list of lists; left unmodified by this function
    """
    # BUGFIX: deep-copy the board. The previous list(puzzle) shared the row
    # lists, so solving the "copy" also overwrote the caller's puzzle.
    temp_board = [row[:] for row in puzzle]
    # If the puzzle cannot be solved, report that and stop.
    if not solve_sudoku(board_to_solve=temp_board):
        print("This particular Sudoku puzzle cannot be solved.")
        return
    # The solver filled temp_board in place; validate the result.
    validation = is_valid_sudoku(board_to_test=temp_board)
    if validation['is_valid']:
        print("At least one valid solution for the provided puzzle exists.")
    # A solvable puzzle whose solution fails validation indicates a bug in the
    # solver or validator, so flag it loudly.
    else:
        print("Error! The provided puzzle is possible to solve, but a valid solution was not found.")
def validate_user_submission(user_board):
    """
    Validate a user-submitted board and build the feedback message lines.

    :param user_board: 9x9 list of lists submitted by the user; left unmodified
    :return: list of strings to display to the user
    """
    # BUGFIX: deep-copy the board. user_board.copy() is shallow, so the
    # validator's temporary cell edits could show through to the user's board.
    temp = [row[:] for row in user_board]
    # Perform the validation check
    validation = is_valid_sudoku(board_to_test=temp)
    # Valid board: congratulate the user.
    if validation['is_valid']:
        return ["Solved! Nice work."]
    # Invalid board: point out the first offending cell and value.
    return ["The provided submission is not valid.",
            f"The digit in Row {validation['invalid_row']}, Column {validation['invalid_column']} cannot be {validation['invalid_digit']}."]
# When run as a script, demonstrate the solver on a simple fill-in puzzle.
if __name__ == '__main__':
    # Mostly-blank board with one partially filled centre subsquare.
    fill_in_puzzle = [
        [0,0,0, 0,0,0, 0,0,0],
        [0,0,0, 0,0,0, 0,0,0],
        [0,0,0, 0,0,0, 0,0,0],
        [0,0,0, 1,2,3, 0,0,0],
        [0,0,0, 4,0,6, 0,0,0],
        [0,0,0, 7,8,9, 0,0,0],
        [0,0,0, 0,0,0, 0,0,0],
        [0,0,0, 0,0,0, 0,0,0],
        [0,0,0, 0,0,0, 0,0,0]
    ]
    # Prints True (solvable), then the board — which the solver filled in place.
    print(solve_sudoku(board_to_solve=fill_in_puzzle))
    print(fill_in_puzzle)
asa-leholland/SudokuSolver | test.py | # test.py
# import numpy to handle array board
import numpy as np
# source: https://www.geeksforgeeks.org/print-colors-python-terminal/
# ANSI color print helpers: each wraps its text in the matching SGR color code.
def _colored(code, text):
    """Return *text* wrapped in the given ANSI SGR color *code*."""
    return "\033[{}m {}\033[00m".format(code, text)

def prRed(text): print(_colored(91, text))
def prGreen(text): print(_colored(92, text))
def prYellow(text): print(_colored(93, text))
def prLightPurple(text): print(_colored(94, text))
def prPurple(text): print(_colored(95, text))
# Starter variant: suppresses the trailing newline so output can continue inline.
def prPurpleStarter(text): print(_colored(95, text), end='')
def prCyan(text): print(_colored(96, text))
def prLightGray(text): print(_colored(97, text))
def prBlack(text): print(_colored(98, text))
def npdisplay(board):
    """
    Function to convert the provided puzzle to a numpy array, which is output as a square board
    """
    # NOTE(review): this looks like unfinished scratch code. The per-row loop
    # prints a bare newline for empty cells and never prints the accumulated
    # `row_chars`; only the final np.array(board) dump shows the whole board.
    #    0 1 2 3 4 5 6 7 8 (purple)
    #  0 1 2 3 4 5 6 7 8 9 (purple first char, then white background with black text)
    column_header = ' 0 1 2 3 4 5 6 7 8'
    prPurple(column_header)
    for index, row in enumerate(board):
        # Row label printed without a newline so cells could follow it
        print(str(index), end='')
        row_chars = ''
        for digit in row:
            if digit == 0:
                print()
            else:
                # Accumulated but never printed -- presumably work in progress
                row_chars += str(digit) + ' '
        print(' ')
    print('\n')
    # Final dump of the whole board via numpy
    print(np.array(board))
# Scratch fixture: a mostly-blank board used to exercise npdisplay above.
fill_in_puzzle = [
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 1,2,3, 0,0,0],
    [0,0,0, 4,0,6, 0,0,0],
    [0,0,0, 7,8,9, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0]
]
# Run the experimental display on import/execution of this scratch file.
npdisplay(fill_in_puzzle)
asa-leholland/SudokuSolver | tests.py | <gh_stars>0
# test.py
# Author: <NAME>
import unittest
import main as main_sudoku
# Shared test fixtures. Intended as read-only; tests should work on copies,
# since the solver and validator mutate the boards they are given.
# Puzzle Source: https://dingo.sbs.arizona.edu/~sandiway/sudoku/examples.html
puzzle = [
    [0,0,0, 2,6,0, 7,0,1],
    [6,8,0, 0,7,0, 0,9,0],
    [1,9,0, 0,0,4, 5,0,0],
    [8,2,0, 1,0,0, 0,4,0],
    [0,0,4, 6,0,2, 9,0,0],
    [0,5,0, 0,0,3, 0,2,8],
    [0,0,9, 3,0,0, 0,7,4],
    [0,4,0, 0,5,0, 0,3,6],
    [7,0,3, 0,1,8, 0,0,0]
]
# Solution Source: https://dingo.sbs.arizona.edu/~sandiway/sudoku/examples.html
solution = [
    [4,3,5, 2,6,9, 7,8,1],
    [6,8,2, 5,7,1, 4,9,3],
    [1,9,7, 8,3,4, 5,6,2],
    [8,2,6, 1,9,5, 3,4,7],
    [3,7,4, 6,8,2, 9,1,5],
    [9,5,1, 7,4,3, 6,2,8],
    [5,1,9, 3,2,6, 8,7,4],
    [2,4,8, 9,5,7, 1,3,6],
    [7,6,3, 4,1,8, 2,5,9]
]
# Impossible Puzzle (no valid solution exists)
# Source: https://www.sudokudragon.com/unsolvable.htm
impossible_puzzle = [
    [5,1,6, 8,4,9, 7,3,2],
    [3,0,7, 6,0,5, 0,0,0],
    [8,0,9, 7,0,0, 0,5,6],
    [1,3,5, 0,6,0, 9,0,7],
    [4,7,2, 5,9,1, 0,0,6],
    [9,6,8, 3,7,0, 0,5,0],
    [2,5,3, 1,8,6, 0,7,4],
    [6,8,4, 2,0,7, 5,0,0],
    [7,9,1, 0,5,0, 6,0,8]
]
# Blank Puzzle (every cell empty; many solutions exist)
blank_puzzle = [
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0],
    [0,0,0, 0,0,0, 0,0,0]
]
class SudokuTestMethods(unittest.TestCase):
def test_valid_placement(self):
# Test that is_placement_possible() returns valid input for a Valid placement
self.assertEqual(main_sudoku.is_placement_possible(y=0, x=0, n=3, board=puzzle), True)
def test_invalid_row_placement(self):
# Test that is_placement_possible() returns valid input for a non-valid placement in the same row
self.assertEqual(main_sudoku.is_placement_possible(y=0, x=0, n=2, board=puzzle), False)
def test_invalid_column_placement(self):
# Test that is_placement_possible() returns valid input for a non-valid placement in the same column
self.assertEqual(main_sudoku.is_placement_possible(y=0, x=0, n=8, board=puzzle), False)
def test_invalid_subsquare_placement(self):
# Test that is_placement_possible() returns valid input for a non-valid placement in the same subsquare
self.assertEqual(main_sudoku.is_placement_possible(y=0, x=0, n=9, board=puzzle), False)
def test_valid_solving(self):
# Test that solve_sudoku() correctly solves a puzzle when compared to the actual solution
attempt = puzzle
main_sudoku.solve_sudoku(board_to_solve=attempt)
self.assertEqual(attempt, solution)
def test_impossible_solving(self):
# Test that an impossible sudokue puzzle correctly returns False when attempted to solve
attempt = impossible_puzzle
self.assertEqual(main_sudoku.solve_sudoku(board_to_solve=attempt), False)
def test_solved_solving(self):
# Test that an already solved sudoku puzzle correctly returns True when attempted to solve
attempt = solution
self.assertEqual(main_sudoku.solve_sudoku(board_to_solve=attempt), True)
def test_blank_solving(self):
# Test that solution can be obtained from a fully blank sudoku board when attempted to solve
attempt = blank_puzzle
self.assertEqual(main_sudoku.solve_sudoku(board_to_solve=attempt), True)
def test_validate_valid_board(self):
# Test that a valid board proudces the correct result
attempt = solution
actual_result = main_sudoku.is_valid_sudoku(board_to_test=attempt)['is_valid']
expected_result = True
self.assertEqual(actual_result, expected_result)
def test_validate_incomplete_board(self):
# Test that an incomplete board produces the correct result
incomplete_solution = [
[4,3,5, 2,6,9, 7,8,1],
[6,8,2, 5,7,1, 4,9,3],
[1,9,7, 8,3,4, 5,6,2],
[8,2,6, 1,9,5, 3,4,7],
[3,7,4, 6,0,2, 9,1,5],
[9,5,1, 7,4,3, 6,2,8],
[5,1,9, 3,2,6, 8,7,4],
[2,4,8, 9,5,7, 1,3,6],
[7,6,3, 4,1,8, 2,5,9]
]
actual_result = main_sudoku.is_valid_sudoku(board_to_test=incomplete_solution)
expected_result = {'is_valid': False, 'invalid_row':4, 'invalid_column':4, 'invalid_digit':0}
self.assertEqual(actual_result, expected_result)
    def test_validate_duplicated_number(self):
        # Test that a board with an invalid duplicated number produces the
        # correct result: the 4 at row 1 / column 4 duplicates the 4 later in
        # the same row and must be flagged.
        incomplete_solution = [
            [4,3,5, 2,6,9, 7,8,1],
            [6,8,2, 5,4,1, 4,9,3],
            [1,9,7, 8,3,4, 5,6,2],
            [8,2,6, 1,9,5, 3,4,7],
            [3,7,4, 6,8,2, 9,1,5],
            [9,5,1, 7,4,3, 6,2,8],
            [5,1,9, 3,2,6, 8,7,4],
            [2,4,8, 9,5,7, 1,3,6],
            [7,6,3, 4,1,8, 2,5,9]
        ]
        actual_result = main_sudoku.is_valid_sudoku(board_to_test=incomplete_solution)
        expected_result = {'is_valid': False, 'invalid_row':1, 'invalid_column':4, 'invalid_digit':4}
        self.assertEqual(actual_result, expected_result)
    def test_generate_solution(self):
        # Solve a mostly-blank easy puzzle and verify the solver's output
        # validates cleanly (useful for growing the puzzle library).
        fill_in_puzzle = [
            [0,0,0, 0,0,0, 0,0,0],
            [0,0,0, 0,0,0, 0,0,0],
            [0,0,0, 0,0,0, 0,0,0],
            [0,0,0, 1,2,3, 0,0,0],
            [0,0,0, 4,0,6, 0,0,0],
            [0,0,0, 7,8,9, 0,0,0],
            [0,0,0, 0,0,0, 0,0,0],
            [0,0,0, 0,0,0, 0,0,0],
            [0,0,0, 0,0,0, 0,0,0]
        ]
        # solve_sudoku() fills the board in place.
        main_sudoku.solve_sudoku(board_to_solve=fill_in_puzzle)
        actual_result = main_sudoku.is_valid_sudoku(board_to_test=fill_in_puzzle)
        expected_result = {'is_valid': True, 'invalid_row':None, 'invalid_column':None, 'invalid_digit':None}
        self.assertEqual(actual_result, expected_result)
# When run as a script, discover and run all test cases in this module
if __name__ == '__main__':
    unittest.main()
|
AishikaBanik98/challenges | code.py | <filename>code.py
"""
Load module to filter data on fly
"""
from __future__ import print_function
#import os
#import sys
import pandas as pd
#import numpy as np
#import re
class Load():
    """
    Load class is used for exclusive dataframe filtering
    which takes `df` a csv file and convert into dataframe.

    String-processing helpers operate on the sample text in ``self.data``;
    dataframe helpers operate on ``self.df``.
    """

    def __init__(self, filename):
        # First line of the local VERSION file is the library version string.
        self.version = open('VERSION').readlines()[0]
        self.df = pd.read_csv(filename)
        # Sample raw CSV text used by the string-processing helpers below.
        self.data = """W_A11,2000-02,Moving average,59.66666667,50.92582302,
        68.40751031,Injuries,Number,Assault,Validated,Whole
        pop,All ages,FatalW_A11,2001-03,Moving average,60,10,
        20,30,33,31,12,51.23477459,68.76522541,Injuries,
        Number,Assault,Validated,Whole pop,All ages,Fatale
        50, 50, 60,pop,All ages,Fatal"""

    def _extract_numbers(self):
        """Return every comma-separated token of ``self.data`` that parses as a float.

        Shared by pick_numbers / sum_all_numbers / pick_odd_numbers / get_mean,
        which previously each duplicated this loop.
        """
        numbers = []
        for token in self.data.split(","):
            try:
                numbers.append(float(token))
            except ValueError:
                pass
        return numbers

    def get_version(self):
        """ Get current `version` of library"""
        return self.version

    def pick_numbers(self):
        """
        From self.data extract all numbers as a list (as floats).
        :eg:
        data = "W_A11,2000-02,Moving average,59.66666667,50.92582302,68.40751031,
        Injuries,Number,Assault,Validated,Whole pop,All ages,Fatal"
        :returns: [59.66666667,50.92582302,68.40751031]
        Usage:
        ======
        >> df = Load('data.csv')
        >> df.pick_numbers()
        >> [1,2,3,4,5,6]
        """
        return self._extract_numbers()

    def sum_all_numbers(self):
        """
        From `self.data` extract all numbers and return the sum of all numbers
        :eg:
        data = "W_A11,2000-02,Moving average,59.66666667,50.92582302,68.40751031,
        Injuries,Number,Assault,Validated,Whole pop,All ages,Fatal"
        :returns 179.0
        Usage:
        ======
        >> df = Load('data.csv')
        >> df.sum_all_numbers()
        >> 179.0
        """
        return sum(self._extract_numbers())

    def extract_vowels(self):
        """
        Return all (lowercase) vowels that occur anywhere in `self.data`.
        :returns [] all vowels as list
        Usage:
        ======
        >> df = Load('data.csv')
        >> df.extract_vowels()
        >> ['a', 'e', 'i', 'o']
        """
        # NOTE(review): only lowercase vowels are matched; the original
        # docstring showed uppercase output, which this code never produces.
        return [vowel for vowel in 'aeiou' if vowel in self.data]

    def pick_odd_numbers(self):
        """
        Take the string from `self.data` and extract all odd numbers and return
        list of all odd numbers from the string
        :returns: [1, 3, 5]
        Usage:
        ======
        >> df = Load('data.csv')
        >> df.pick_odd_numbers()
        >> [1, 3, 5]
        """
        # NOTE(review): float modulo is used, so fractional values such as
        # 59.666... have a non-zero remainder and count as "odd" — confirm
        # whether integers-only was intended.
        return [number for number in self._extract_numbers() if number % 2 != 0]

    def get_mean(self):
        """
        Take the string from `self.data` and extract all numbers and return
        the mean of extracted list of numbers.
        :returns: 50
        Usage:
        ======
        >> df = Load('data.csv')
        >> df.get_mean()
        >> 50
        """
        numbers = self._extract_numbers()
        return sum(numbers) / len(numbers)

    def get_all_categorical(self):
        """
        Take the pandas dataframe from `self.df` and return all
        the columns which are categorical variables.
        :returns: All categorical column names.
        :rtype: pandas.Index (list-like)
        Usage:
        ======
        >> df = Load('data.csv')
        >> df.get_all_categorical()
        >> ['Series_reference', 'Type']
        """
        # Non-numeric columns = all columns minus the numeric ones.
        return self.df.drop(columns=self.df._get_numeric_data()).columns

    def get_all_continuous(self):
        """
        Take the pandas dataframe from `self.df` and return all
        the columns which contain continuous (numeric) variables.
        :returns: All continuous column names.
        :rtype: pandas.Index (list-like)
        Usage:
        ======
        >> df = Load('data.csv')
        >> df.get_all_continuous()
        >> ['Lower_CI', 'Upper_CI', 'Units']
        """
        return self.df._get_numeric_data().columns

    @classmethod
    def addition(cls, x, y):
        """
        Take x and y as input and return the sum of both.
        :param x: first number
        :type x: int
        :param y: second number
        :type y: int
        Usage:
        ======
        >> df = Load('data.csv')
        >> df.addition(10, 20)
        >> 30
        """
        return x + y
if __name__ == '__main__':
    # instantiate the object
    # NOTE(review): requires 'data.csv' and a 'VERSION' file in the working
    # directory; Load.__init__ reads both.
    df = Load('data.csv')
    # Exercise each helper and print its result.
    print(df.addition(10, 20))
    print(df.pick_numbers())
    print(df.sum_all_numbers())
    print(df.extract_vowels())
    print(df.pick_odd_numbers())
    print(df.get_mean())
    print(df.get_all_categorical())
    print(df.get_all_continuous())
|
VarunGaikwad-XenStack/Intelligence-Extraction | ETM_CSV/__init__.py | from ETM_CSV.CSV import csv
|
VarunGaikwad-XenStack/Intelligence-Extraction | ETM_CSV/CSV.py | <filename>ETM_CSV/CSV.py
import csv
import pandas as pd
class csv():
    """Interactive helpers to copy a CSV column and split it on '/'.

    NOTE(review): the class keeps its original name ``csv``, which shadows
    the standard-library ``csv`` module at module level (renaming it would
    break ``from ETM_CSV.CSV import csv``).  The methods therefore import
    the module under a local alias.  They take no ``self`` and are meant to
    be called on the class itself, e.g. ``csv.uOpen()``.
    """

    def uOpen():
        # BUGFIX: the class definition shadows the imported stdlib module, so
        # the original `csv.DictReader` resolved to this class and raised
        # AttributeError.  Import the module under a local alias instead.
        import csv as csv_module
        a = input('Enter the name of your input file: ')
        global y
        y = input("Enter the name of column to split: ")
        # BUGFIX: the output filename was prompted (and the file reopened)
        # once per input row, so the writer only ever saw the rows remaining
        # after the first one.  Prompt once and write every row.
        global b
        b = input('Enter new name of csv File: ')
        with open(a, 'r') as csv_file:
            csv_reader = csv_module.DictReader(csv_file)
            with open(b, 'w') as new_file:
                csv_writer = csv_module.writer(new_file, delimiter='\t')
                for line in csv_reader:
                    # NOTE(review): writerow() over a plain string emits one
                    # character per cell; the commented-out alternative
                    # line[y].split('/') is likely what was intended — confirm.
                    #csv_writer.writerow(line[y].split('/'))
                    csv_writer.writerow(line[y])

    def split():
        # NOTE(review): `error_bad_lines` is deprecated (removed in pandas 2.0,
        # replaced by on_bad_lines='skip'); kept for the pandas version this
        # project targets.
        r = pd.read_csv(b, error_bad_lines=False)
        r.columns = [y]
        r.dropna(inplace=True)
        # Split the selected column on '/' into at most five parts.
        new = r[y].str.split("/", n=4, expand=True)
        r[1] = new[0]
        r[2] = new[1]
        r[3] = new[2]
        r[4] = new[3]
        r[5] = new[4]
        r.drop(columns=[y], inplace=True)
        new = new.fillna(0)
        c = input('Enter the Output file name:')
        new.to_csv(c)
|
jainmickey/admin_scraper | scraper.py | <filename>scraper.py
import config
import json
import requests
from bs4 import BeautifulSoup
def get_page_links(bs4_parsed_content):
    """Return anchor tags from the #content-main table headers whose href
    starts with '/' (i.e. site-relative admin links)."""
    header_cells = bs4_parsed_content.find(id="content-main").find_all("th")
    anchors = [cell.find("a") for cell in header_cells]
    return [anchor for anchor in anchors
            if anchor and anchor.get('href')[0] == '/']
def get_parsed_content_from_link(session, headers, link):
    """Fetch ``config.base_url + link`` with the session's cookies.

    Returns a BeautifulSoup tree on HTTP 200, otherwise None.
    """
    url = config.base_url + link
    response = session.get(url, headers=headers, cookies=dict(session.cookies))
    if response.status_code != 200:
        return None
    return BeautifulSoup(response.content, "html.parser")
def get_form_data(session, headers, link):
    """Scrape one admin change-form page into a {field label: value} dict.

    The object id is taken from the second-to-last path segment of `link`.
    <select> fields become {'id': ..., 'name': ...}; other inputs use their
    value attribute, falling back to their text.
    """
    page = get_parsed_content_from_link(session, headers, link)
    values = {'id': link.split('/')[-2]}
    # Django admin renders each field inside a div.form-row.
    fields = page.find_all("div", class_="form-row")
    for field in fields:
        label = field.find("label")
        # The input/select element is linked to the label via its `for` id.
        val = field.find(id=label['for'])
        values[label.get_text()] = ''
        value = ''
        if str(val.name) == 'select':
            # For dropdowns, record the currently selected option (if any).
            val = val.find('option', selected=True)
            if val:
                value = {'id': val.get("value"), "name": val.get_text()}
        else:
            if val:
                value = val.get("value") if val.get("value") else val.get_text()
        values[label.get_text()] = value
    # Debug output of the scraped record.
    print("Values", values)
    return values
def get_all_links_pages(session, headers):
    """Crawl the admin index, follow every model list (including pagination),
    scrape each object's change form, and dump the results to '<model>.txt'
    files as JSON.
    """
    home_page = get_parsed_content_from_link(session, headers, config.admin_url)
    all_links = get_page_links(home_page)
    page_wise_links = dict()
    for link in all_links:
        page = get_parsed_content_from_link(session, headers, link.get('href'))
        paginator = page.find(id="content-main").find(class_="paginator")
        page_wise_links[link.get_text()] = get_page_links(page)
        if paginator and paginator.find_all("a"):
            try:
                # Last paginator anchor carries the highest page number (?p=N).
                num_pages = int(paginator.find_all("a")[-1].get("href").split("p=")[-1])
            except ValueError:
                num_pages = 0
            for page_num in range(1, num_pages + 1):
                new_page = get_parsed_content_from_link(session, headers, link.get('href')+'?p={}'.format(page_num))
                page_wise_links[link.get_text()].extend(get_page_links(new_page))
    # One output file per admin model, each a JSON list of scraped records.
    for key in page_wise_links.keys():
        data = list()
        for link in page_wise_links.get(key):
            data.append({link.get_text(): get_form_data(session, headers, link.get("href"))})
        with open('{}.txt'.format(key), 'w') as outfile:
            json.dump(data, outfile)
def get_logged_in_session():
    """Log in to the admin site and return the authenticated session.

    Returns a dict with keys 'session' (requests.Session), 'headers'
    (including the CSRF header) and 'token' (the csrftoken cookie value).
    """
    s = requests.Session()
    # Initial GET sets the csrftoken cookie required by the login POST.
    s.get(config.base_url + config.login_url)
    token = s.cookies['csrftoken']
    # BUGFIX: the password argument was an invalid '<PASSWORD>' placeholder
    # (a syntax error); use the credential from the config module.
    login_data = dict(username=config.username, password=config.password,
                      csrfmiddlewaretoken=token)
    headers = {"X-CSRFToken": token}
    s.post(config.base_url + config.login_url, data=login_data,
           headers=headers, cookies=dict(s.cookies))
    return {'session': s, 'headers': headers, 'token': token}
if __name__ == '__main__':
session_data = get_logged_in_session()
get_all_links_pages(session_data.get('session'), session_data.get('headers'))
|
jainmickey/admin_scraper | config.py | username=''
password=''
base_url=''
admin_url=''
login_url=''
username=''
password=''
|
spr-networks/super | api/scripts/iw_dev.py | <filename>api/scripts/iw_dev.py
"""jc - JSON Convert `foo` command output parser
<<Short foo description and caveats>>
Usage (cli):
$ iw list | jc --iw_dev
or
$ jc iw dev
Usage (module):
import jc
result = jc.parse('iw_dev', foo_command_output)
Schema:
[
{
"foo": string,
"bar": boolean,
"baz": integer
}
]
Examples:
$ iw list | jc --iw_dev -p
[]
$ foo | jc --iw_dev -p -r
[]
"""
import jc.utils
import jc.parsers.universal
import re
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = 'iw dev command parser'
    author = 'spr'
    author_email = '<EMAIL>'
    details = 'parse iw dev output'
    # Platforms this parser is expected to handle.
    compatible = ['linux', 'freebsd']
    # Commands that select this parser in jc "magic" mode.
    magic_commands = ['iw dev']
# Re-export the parser version at module level (jc convention).
__version__ = info.version
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (Dictionary) raw structured data to process
Returns:
Dictionary structured to conform to the schema.
"""
return proc_data
def post_parse(data):
    """Hook run after raw parsing; delegates schema cleanup to _process."""
    return _process(data)
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function
    Parameters:
        data: (string) text data to parse
        raw: (boolean) unprocessed output if True
        quiet: (boolean) suppress warning messages if True
    Returns:
        Dictionary keyed by phy name (e.g. 'phy0'), each mapping interface
        names to their attributes. Raw or processed structured data.
        NOTE(review): the module docstring advertises a List of Dictionaries,
        but this function returns a dict — confirm which is intended.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)
        #jc.utils.input_type_check(data)
    raw_output = {}
    section = {}            # attributes of the phy currently being parsed
    phy = None              # current 'phy#N' key, normalized by nicekey
    kv_section = None       # name of the key/value table being collected
    kv_keys = None          # column names of that table, once seen
    # Normalize labels into snake_case dict keys.
    nicekey = lambda x: x.lower().replace(' ', '_').replace('-', '_').replace('#', '')
    if jc.utils.has_data(data):
        for line in filter(None, data.splitlines()):
            # New 'phy#N' header: flush the previous phy's section.
            if line.startswith('phy#'):
                if section:
                    raw_output[phy] = section
                    section = {}
                phy = nicekey(line)
                continue
            # 'Interface wlan0' introduces a new interface sub-dict.
            if line.strip().startswith('Interface'):
                iface = line.strip().split(' ')[1]
                section[iface] = {}
                continue
            # 'multicast TXQ:' begins a tab-separated key/value table.
            if line.strip().startswith('multicast TXQ:'):
                kv_section = 'multicast_txq'
                continue
            # Header row of the TXQ table (contains the 'qsz-byt' column).
            if kv_section and 'qsz-byt' in line:
                kv_keys = list(map(nicekey, line.strip().split('\t')))
                continue
            # Value row of the TXQ table: zip with the header row.
            if kv_keys and 'qsz-byt' not in line:
                kv_values = line.strip().split('\t')
                kv_values = filter(lambda x: len(x), kv_values)
                kv_values = map(int, kv_values)
                section[iface][kv_section] = dict(zip(kv_keys, kv_values))
                kv_section = kv_keys = None
                continue
            # NOTE(review): str.find returns -1 (truthy) when no space exists
            # and 0 (falsy) only when the match is at position 0; a stripped
            # line never starts with a space, so this condition is effectively
            # always true and acts as the catch-all 'key value' branch.
            if line.strip().find(' '):
                split_line = line.strip().split(' ')
                section[iface][split_line[0]] = ' '.join(split_line[1:])
                continue
    # Flush the last phy section.
    if section:
        raw_output[phy] = section
    return raw_output if raw else _process(raw_output)
|
spr-networks/super | api/scripts/iw_list.py | """jc - JSON Convert `foo` command output parser
<<Short foo description and caveats>>
Usage (cli):
$ iw list | jc --iw_list
or
$ jc iw list
Usage (module):
import jc
result = jc.parse('iw_list', foo_command_output)
Schema:
[
{
"foo": string,
"bar": boolean,
"baz": integer
}
]
Examples:
$ iw list | jc --iw_list -p
[]
$ foo | jc --iw_list -p -r
[]
"""
import jc.utils
import jc.parsers.universal
import re
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = 'iw list command parser'
    author = 'spr'
    author_email = '<EMAIL>'
    details = 'parse iw list output'
    # Platforms this parser is expected to handle.
    compatible = ['linux', 'freebsd']
    # Commands that select this parser in jc "magic" mode.
    magic_commands = ['iw list']
# Re-export the parser version at module level (jc convention).
__version__ = info.version
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Structured to conform to the schema.
"""
# convert ints and floats for top-level keys
for item in proc_data:
for key in item:
try:
item[key] = int(item[key])
except (Exception):
try:
item[key] = float(item[key])
except (Exception):
pass
# convert ints and floats for lists
if isinstance(item[key], list):
new_list = []
for list_item in item[key]:
try:
new_list.append(int(list_item))
except (Exception):
try:
new_list.append(float(list_item))
except (Exception):
# list of strings
new_list = item[key]
pass
item[key] = new_list
return proc_data
def post_parse(data):
    """Hook run after raw parsing; delegates schema cleanup to _process."""
    return _process(data)
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function
    Parameters:
        data: (string) text data to parse
        raw: (boolean) unprocessed output if True
        quiet: (boolean) suppress warning messages if True
    Returns:
        List of Dictionaries (one per Wiphy). Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)
        #jc.utils.input_type_check(data)
    # NOTE: annotation changed from the unimported `List` to builtin `list`.
    raw_output: list = []
    section = {}            # attributes of the Wiphy currently being parsed
    subsection_key = None   # key (or ['bands', key]) the list below belongs to
    subsection = None       # list of items being accumulated
    # Normalize labels: strip parenthesized suffixes, lowercase, snake_case.
    nicekey = lambda x: re.sub('\(.*', '', x.lower().replace(':', '')).strip().replace(' ', '_').replace('_#_', '_')
    if jc.utils.has_data(data):
        for line in filter(None, data.splitlines()):
            # New 'Wiphy phyN' header: flush any open subsection and section.
            if line.startswith('Wiphy'):
                if section:
                    if subsection_key and subsection:
                        section[subsection_key] = subsection
                        subsection_key = None
                        subsection = None
                    raw_output.append(section)
                    section = {}
                split_line = line.split()
                section['wiphy'] = split_line[1]
                continue
            # 'Band N:' starts a new entry in the section's bands list.
            if line.strip().startswith('Band '):
                if 'bands' not in section:
                    section['bands'] = []
                section['bands'].append({ 'band': line.strip().strip(':') })
                continue
            # Headings whose following '* item' lines form a list value.
            lists = [
                'Supported Ciphers',
                'Supported interface modes',
                'Supported commands',
                'software interface modes (can always be added)',
                'valid interface combinations',
                'HT Capability overrides',
                'Supported TX frame types',
                'Supported RX frame types',
                'Supported extended features',
            ]
            # Headings that belong to the current band rather than the phy.
            lists_bands = [
                'VHT Capabilities',
                'VHT RX MCS set',
                'VHT TX MCS set',
                'Bitrates (non-HT)',
                'Frequencies',
                'Capabilities'
            ]
            lists = lists + lists_bands
            if any(name in line for name in lists):
                # Flush the previous list before starting a new one.
                if subsection and len(subsection):
                    if isinstance(subsection_key, list):
                        # append to last band
                        section[subsection_key[0]][-1][subsection_key[1]] = subsection
                    else:
                        section[subsection_key] = subsection
                    subsection_key = None
                    subsection = None
                subsection = []
                # Capabilities: 0x1ff  — inline value on the heading line.
                split = line.split(': ')
                if len(split) == 2:
                    line = split[0]
                    subsection.append(split[1])
                if any(name in line for name in lists_bands):
                    subsection_key = ['bands', nicekey(line)]
                else:
                    subsection_key = nicekey(line)
                continue
            # subsection array, could use \t * x as index here
            #if line.strip().startswith('* ') and subsection_key:
            if subsection_key and len(line.strip()):
                subsection.append(line.strip().strip('* '))
                continue
            # * #{ managed } <= 2048, #{ AP, mesh point } <= 8, #{ P2P-client, P2P-GO } <= 1,
            #   total <= 2048, #channels <= 1, STA/AP BI must match
            # match the `total`-line here: deeply indented continuation lines
            # are appended to the previous subsection item.
            if re.match(r"^\s{4,}", line) and subsection:
                subsection[len(subsection)-1] = subsection[len(subsection)-1] + ' ' + line.strip()
                continue
            # 'Device supports X.' lines accumulate into device_supports.
            if line.strip().startswith('Device supports '):
                if 'device_supports' not in section:
                    section['device_supports'] = []
                section['device_supports'].append(line.strip().replace('Device supports ', '').strip('.'))
                continue
            # Generic indented 'key: value' attribute line.
            if re.match(r"^\s+.+", line):
                # ignore problematic lines
                #if 'Maximum RX AMPDU length' in line:
                #    continue
                split_line = line.split(':', maxsplit=1)
                if len(split_line) == 2:
                    # A key/value line terminates any open list subsection.
                    if subsection and len(subsection):
                        if isinstance(subsection_key, list):
                            # append to last band
                            section[subsection_key[0]][-1][subsection_key[1]] = subsection
                        else:
                            section[subsection_key] = subsection
                        subsection_key = None
                        subsection = None
                    key = nicekey(split_line[0])
                    # Drop a stray closing paren when no opening one exists.
                    if split_line[1].find('(') < 0:
                        split_line[1] = split_line[1].replace(')', '')
                    section[key] = split_line[1].strip()
                    continue
    # Flush the final section.
    if section:
        raw_output.append(section)
    return raw_output if raw else _process(raw_output)
|
tactile-graphics/3d-printing-tools | scad_generator.py | # Copyright 2022 Adventium Labs
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cv2
import math
def generateScadText(contours, userxmax, userymax, userzheight, resolution):
    """Build an OpenSCAD program extruding the given OpenCV contours.

    Parameters:
        contours: iterable of OpenCV contours (as from cv2.findContours)
        userxmax: maximum X span in mm, or None for no constraint
        userymax: maximum Y span in mm, or None for no constraint
        userzheight: extrusion height in mm
        resolution: source image resolution in pixels per inch, or None

    Returns the complete OpenSCAD source as a string. The most restrictive
    of the x/y/resolution constraints determines the final scale factor.
    """
    print("Scaling to maximum X span " + str(userxmax) + "mm, maximum Y span " + str(userymax) + "mm, resolution " + str(resolution) + "mm, and height " + str(userzheight) + "mm.")
    scad_full_text ="fudge = 0.1;"
    scad_full_text += "\n"
    resolutionScaleFactor = 1
    if resolution is not None:
        # Resolution is in px per inch
        # we need px per mm
        pxPerMm = resolution / 25.4
        resolutionScaleFactor = 1/pxPerMm
    # Find the bounding box of all simplified contours.
    # NOTE(review): extrema start at 0, which assumes non-negative pixel
    # coordinates (true for image contours).
    maxX = 0
    minX = 0
    maxY = 0
    minY = 0
    for contour in contours:
        approx_poly = cv2.approxPolyDP(contour, 0.001 * cv2.arcLength(contour, True),True)
        for pnt in approx_poly:
            if pnt[0][0] > maxX:
                maxX = pnt[0][0]
            if pnt[0][0] < minX:
                minX = pnt[0][0]
            if pnt[0][1] > maxY:
                maxY = pnt[0][1]
            if pnt[0][1] < minY:
                minY = pnt[0][1]
    xSpan = maxX - minX
    ySpan = maxY - minY
    scaleFactor=1
    xscaleFactor=1
    yscaleFactor=1
    if userxmax is not None:
        if xSpan > userxmax:
            xscaleFactor = userxmax / xSpan
    if userymax is not None:
        if ySpan > userymax:
            yscaleFactor = userymax / ySpan
    # The tightest constraint wins.
    scaleFactor = min(scaleFactor, xscaleFactor, yscaleFactor, resolutionScaleFactor)
    print("Using scale factor " + '{0:.3g}'.format(scaleFactor) + "mm per pixel")
    if(scaleFactor == xscaleFactor and userxmax is not None):
        print("Scaling model based on -x (--xmax) constraint of " + str(userxmax) + "mm maximum horizontal size.")
    if(scaleFactor == yscaleFactor and userymax is not None):
        # BUGFIX: this message wrongly said "horizontal" for the Y constraint.
        print("Scaling model based on -y (--ymax) constraint of " + str(userymax) + "mm maximum vertical size.")
    if(scaleFactor == resolutionScaleFactor and resolution is not None):
        print("Scaling model based on requested resolution (-r, --resolution) of " + str(resolution) + "pixels per inch (" + '{0:.3g}'.format((resolution / 25.4)) + " pixels per mm).")
    # Center the model around the origin.
    xOffset = (xSpan // 2) * scaleFactor
    yOffset = (ySpan // 2) * scaleFactor
    scad_modules = []
    i = 0
    for contour in contours:
        approx_poly = cv2.approxPolyDP(contour, 0.001 * cv2.arcLength(contour, True),True)
        i += 1
        scad_modulename = "poly_path_" + str(i)
        scad_modules.append(scad_modulename)
        # The approximate polygon can also work for SCAD
        scad_moduletext = "module " + scad_modulename + "(h)"
        scad_moduletext += "\n{"
        scad_moduletext += "\nscale([1,1,1]) union()"
        scad_moduletext += "\n{"
        scad_moduletext += "linear_extrude(height=h)"
        scad_moduletext += "polygon(points=["
        first = True
        for pnt in approx_poly:
            # Scale, flip Y (image Y grows downward), and re-center.
            xPnt = (pnt[0][0]*scaleFactor)
            xPnt -= xOffset
            yPnt = (-1 * pnt[0][1])*scaleFactor
            yPnt += yOffset
            text = "[" + str(xPnt) + "," + str(yPnt) + "]"
            if first:
                scad_moduletext = scad_moduletext + "\n" + text
                first = False
            else:
                scad_moduletext = scad_moduletext + ",\n" + text
        scad_moduletext = scad_moduletext + "\n]"
        scad_moduletext += ");"
        scad_moduletext += "}"
        scad_moduletext += "}"
        scad_full_text += "\n" + scad_moduletext
    # Top-level module invoking every contour module at the requested height.
    scad_full_text += "module diagonal_line(h)"
    scad_full_text += "\n{"
    for module in scad_modules:
        scad_full_text += "\n" + module + "(h);"
    scad_full_text += "\n}"
    scad_full_text += "\ndiagonal_line(" + str(userzheight) + ");\n"
    return scad_full_text
tactile-graphics/3d-printing-tools | braille_text_converter.py | # Copyright 2022 Adventium Labs
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Taken from https://en.wikipedia.org/wiki/Braille_ASCII
# ASCII -> Unicode Braille mapping, taken from
# https://en.wikipedia.org/wiki/Braille_ASCII
braille_table = {
    "a": "⠁",
    "b": "⠃",
    "c": "⠉",
    "d": "⠙",
    "e": "⠑",
    "f": "⠋",
    "g": "⠛",
    "h": "⠓",
    "i": "⠊",
    "j": "⠚",
    "k": "⠅",
    "l": "⠇",
    "m": "⠍",
    "n": "⠝",
    "o": "⠕",
    "p": "⠏",
    "q": "⠟",
    "r": "⠗",
    "s": "⠎",
    "t": "⠞",
    "u": "⠥",
    "v": "⠧",
    "w": "⠺",
    "x": "⠭",
    "y": "⠽",
    "z": "⠵",
    " ": "⠀",
    "0": "⠴",
    "1": "⠂",
    "2": "⠆",
    "3": "⠒",
    "4": "⠲",
    "5": "⠢",
    "6": "⠖",
    "7": "⠶",
    "8": "⠦",
    "9": "⠔",
    ":": "⠱",
    ";": "⠰",
    "<": "⠣",
    "=": "⠿",
    ">": "⠜",
    "?": "⠹",
    "(": "⠷",
    ")": "⠾",
    "*": "⠡",
    "+": "⠬",
    "-": "⠤"
}


def convert(text):
    """Convert `text` to Braille.

    Input is lowercased first; characters without a Braille mapping are
    silently dropped. Returns "" when `text` is not a string.
    """
    if not isinstance(text, str):
        return ""
    # ''.join avoids the quadratic cost of repeated string concatenation.
    return "".join(braille_table[c] for c in text.lower() if c in braille_table)
|
tactile-graphics/3d-printing-tools | svg_braille_converter.py | # Copyright 2022 Adventium Labs
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import os
from xml.etree import ElementTree as ET
from braille_text_converter import convert
def main():
    """CLI entry point: translate all <text>/<tspan> content of an SVG file to
    Braille and write the result to '<name>-braille.svg' (or overwrite with -f).

    Returns 1 when the input file is missing, otherwise None (exit code 0).
    """
    parser = argparse.ArgumentParser(description="Convert text in a .SVG image to Braille.")
    parser.add_argument('-i', '--infile', dest='inputfile', required=True, help='Input file, SVG required.')
    parser.add_argument('-f', '--overwrite', dest='overwrite', required=False, action='store_true', help='Overwrite existing file')
    parser.add_argument('-d', '--debug', dest='debug', required=False, action='store_true', help='Debug flag to generate additional output.')
    args = parser.parse_args()
    input_filename = args.inputfile
    if not os.path.isfile(input_filename):
        print("Input file " + input_filename + " does not exist or is not a file.")
        return 1
    tree = ET.parse(input_filename)
    root = tree.getroot()
    # TODO is there a better way to check for text in the SVG namespace?
    for elem in root.iter(tag='{http://www.w3.org/2000/svg}text'):
        # TODO check if style is already set, if it is update it rather than replace
        if elem.text is not None:
            if args.debug:
                print("Found text with text " + elem.text)
            elem.text = convert(elem.text)
            # Courier New renders the Braille glyphs with fixed spacing.
            elem.set("style", "font-family:'Courier New'")
        # Nested tspans carry their own text and are converted the same way.
        tspans = elem.findall('{http://www.w3.org/2000/svg}tspan')
        for tspan in tspans:
            if args.debug:
                print("found tspan with text " + tspan.text)
            tspan.text = convert(tspan.text)
            tspan.set("style", "font-family:'Courier New'")
    output_filename = os.path.splitext(input_filename)[0] + "-braille.svg"
    if args.overwrite:
        output_filename = input_filename
    else:
        # Pick a non-clashing '<name>-N-braille.svg' if the default exists.
        i = 0
        while os.path.isfile(output_filename):
            i += 1
            output_filename = os.path.splitext(input_filename)[0] + "-" + str(i) + "-braille.svg"
    if args.debug:
        print("Writing output file to " + output_filename)
    tree.write(output_filename)
if __name__ == '__main__':
sys.exit(main()); |
RiverHeart/Python | extended_features.py | # Provides useful functions
import os
import sys
import argparse
import subprocess
import signal
# Default arguments
# Shared CLI flags available to every script that imports this module.
# NOTE(review): parse_known_args() runs at import time and only consumes the
# flags below; unknown arguments are left in `args` for the importing script.
ex_parser = argparse.ArgumentParser()
ex_parser.add_argument("-V", "--version",action="store_true", help="Show version")
ex_parser.add_argument("-q", "--quiet", action="store_true", help="Disable output")
ex_parser.add_argument("--no-color", action="store_true", help="Disable colored output")
ex_parser.add_argument("--dry-run", action="store_true", help="Run without making changes")
ex_parser.add_argument("-v", "--verbose", action="store_true", help="Show verbose output")
ex_parser.add_argument("-d", "--debug", action="store_true", help="Show debug output")
ex_parser.add_argument("--no-warn", action="store_true", help="Disable warnings")
ex_parser.add_argument("--no-error", action="store_true", help="Disable errors")
# Allow the user to define more arguments in the main script.
options, args = ex_parser.parse_known_args()
# Bash Terminal Colors (ANSI escape sequences)
bcolors = {
    "red"    : "\033[31m",
    "green"  : "\033[32m",
    "yellow" : "\033[33m",
    "blue"   : "\033[34m",
    "purple" : "\033[35m",
    "cyan"   : "\033[36m",
    "normal" : "\033[0m"
}

# Instead of checking the args on every call, check once
# and if it shouldn't run, define it as a do nothing function.
# Modifying flags during the running of the program will no
# re-enable these functions.
if options.quiet:
    def print_color(*args, **kwargs):
        # Quiet mode: swallow all output.
        pass
elif options.no_color:
    # NOTE(review): this variant accepts **kwargs while the colored variant
    # takes a keyword-only `color` — signatures differ; confirm intended.
    def print_color(*args, **kwargs):
        for arg in args:
            print(arg)
else:
    def print_color(*args, color="normal"):
        # Get color or default to normal.
        color = bcolors.get(color, bcolors["normal"])
        for arg in args:
            print(color + arg + bcolors["normal"])

# FYI, these functions depend on print_color
if options.verbose:
    def print_verbose(*args):
        for arg in args:
            print_color("VERBOSE: {}".format(arg), color="normal")
else:
    def print_verbose(*args):
        pass

if options.debug:
    def print_debug(*args):
        for arg in args:
            print_color("DEBUG: {}".format(arg), color="cyan")
else:
    def print_debug(*args):
        pass

if options.no_warn:
    def print_warn(*args):
        pass
else:
    def print_warn(*args):
        for arg in args:
            print_color("WARN: {}".format(arg), color="yellow")

if options.no_error:
    def print_error(*args):
        pass
else:
    def print_error(*args):
        for arg in args:
            print_color("ERROR: {}".format(arg), color="red")
# Version Check
def RequireVersion(version):
    """Exit with status 1 unless the interpreter's major version is at least `version`."""
    if sys.version_info[0] >= version:
        return
    print_error("Python {} or higher is required.".format(version))
    sys.exit(1)
# Root Check
def require_root():
    """Exit with status -1 unless running as root (effective uid 0)."""
    if os.geteuid() == 0:
        return
    print_error("This script must be run as root")
    sys.exit(-1)
def require_file(path, type="file", provider="none"):
    """
    Checks if a required file is present.
    Allows additional information such as filetype (file, deb, csv)
    and provider (apt, github)
    """
    # NOTE(review): `type` and `provider` are currently unused metadata hooks
    # (and `type` shadows the builtin); renaming them would break keyword
    # callers, so they are kept as-is.
    if not os.path.isfile(path):
        print_error("File Required: {}".format(path))
        sys.exit(-1)
def require_single_instance(process_name):
    """
    Checks if a python process of the same name already exists and terminates
    (exit -1) if one does.
    """
    # Restrict results to only python processes; exclude grep itself and this
    # process's own pid.
    # BUGFIX: a comma was missing between .format(...) and shell=True, which
    # made this function a SyntaxError.
    child = subprocess.Popen("""pgrep -lf python |
                                grep {} |
                                grep -v grep |
                                grep -v {}""".format(process_name, os.getpid()),
                             shell=True, stdout=subprocess.PIPE)
    child.communicate()[0]
    # pgrep/grep pipeline exits 0 only when a matching process was found.
    if (child.returncode == 0):
        print_warn("Process already running. Terminating.")
        sys.exit(-1)
def lock_file(path, message=None):
    """Create `<path>.lock` containing `message` (default: current pid).

    Warns (without raising) if the lockfile already exists; I/O errors are
    reported via print_error.

    BUGFIX: the default was `message=os.getpid()`, which is evaluated once at
    import time; a None sentinel defers the pid lookup to call time.
    """
    if message is None:
        message = os.getpid()
    lockfile = "{0}.lock".format(path)
    if not os.path.isfile(lockfile):
        try:
            # Context manager guarantees the handle is closed on error.
            with open(lockfile, "w") as f:
                f.write(str(message))
        except OSError as e:
            print_error(e)
    else:
        print_warn("Lockfile already exists")
def unlock_file(path):
    """Remove `<path>.lock` if it exists; report removal errors via print_error.

    BUGFIX: the original read `is os.path.isfile(lockfile):` — a SyntaxError —
    where `if` was clearly intended.
    """
    lockfile = "{0}.lock".format(path)
    if os.path.isfile(lockfile):
        try:
            os.remove(lockfile)
        except OSError as e:
            print_error(e)
def test_lockfile(path):
    """Return True when `<path>.lock` exists, else False."""
    return os.path.isfile("{0}.lock".format(path))
class GracefulKiller:
    """
    Ref: https://stackoverflow.com/a/31464349/5339918/
    Catches terminations and interrupts that can be tested for
    at regular intervals and allow graceful process shutdown.
    """
    # Class-level defaults; instances overwrite these on signal/init.
    kill_now = False
    warn = False

    def __init__(self, warn=False):
        self.warn = warn
        # Route both Ctrl-C and `kill` default signal to the graceful handler.
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self.exit_gracefully)

    def exit_gracefully(self, signum, frame):
        if self.warn:
            print_warn("Termination signal caught. Stopping...")
        self.kill_now = True
|
BrainInnovation/bvbabel | bvbabel/vtc.py | <reponame>BrainInnovation/bvbabel
"""Read, write, create Brainvoyager VTC file format."""
import struct
import numpy as np
from bvbabel.utils import read_variable_length_string
from bvbabel.utils import write_variable_length_string
# =============================================================================
def read_vtc(filename):
    """Read Brainvoyager VTC file.
    Parameters
    ----------
    filename : string
        Path to file.
    Returns
    -------
    header : dictionary
        Pre-data and post-data headers.
    data : 4D numpy.array
        Image data (X, Y, Z, T in Talairach axis order).
    """
    header = dict()
    with open(filename, 'rb') as f:
        # Expected binary data: short int (2 bytes)
        data, = struct.unpack('<h', f.read(2))
        header["File version"] = data

        # Expected binary data: variable-length string
        data = read_variable_length_string(f)
        header["Source FMR name"] = data

        # Expected binary data: short int (2 bytes)
        data, = struct.unpack('<h', f.read(2))
        header["Protocol attached"] = data

        if header["Protocol attached"] > 0:
            # Expected binary data: variable-length string
            data = read_variable_length_string(f)
            header["Protocol name"] = data
        else:
            header["Protocol name"] = ""

        # Expected binary data: short int (2 bytes)
        data, = struct.unpack('<h', f.read(2))
        header["Current protocol index"] = data
        data, = struct.unpack('<h', f.read(2))
        header["Data type (1:short int, 2:float)"] = data
        data, = struct.unpack('<h', f.read(2))
        header["Nr time points"] = data
        data, = struct.unpack('<h', f.read(2))
        header["VTC resolution relative to VMR (1, 2, or 3)"] = data
        data, = struct.unpack('<h', f.read(2))
        header["XStart"] = data
        data, = struct.unpack('<h', f.read(2))
        header["XEnd"] = data
        data, = struct.unpack('<h', f.read(2))
        header["YStart"] = data
        data, = struct.unpack('<h', f.read(2))
        header["YEnd"] = data
        data, = struct.unpack('<h', f.read(2))
        header["ZStart"] = data
        data, = struct.unpack('<h', f.read(2))
        header["ZEnd"] = data

        # Expected binary data: char (1 byte)
        data, = struct.unpack('<B', f.read(1))
        header["L-R convention (0:unknown, 1:radiological, 2:neurological)"] = data
        data, = struct.unpack('<B', f.read(1))
        header["Reference space (0:unknown, 1:native, 2:ACPC, 3:Tal)"] = data

        # Expected binary data: float (4 bytes)
        data, = struct.unpack('<f', f.read(4))
        header["TR (ms)"] = data

        # ---------------------------------------------------------------------
        # Read VTC data
        # ---------------------------------------------------------------------
        # NOTE(Users Guide 2.3): Each data element (intensity value) is
        # represented in 2 bytes (unsigned short) or 4 bytes (float) as
        # specified in "data type" entry. The data is organized in four loops:
        #   DimZ
        #       DimY
        #           DimX
        #               DimT
        #
        # The axes terminology follows the internal BrainVoyager (BV) format.
        # The mapping to Talairach axes is as follows:
        #   BV (X front -> back) [axis 2 after np.reshape] = Y in Tal space
        #   BV (Y top -> bottom) [axis 1 after np.reshape] = Z in Tal space
        #   BV (Z left -> right) [axis 0 after np.reshape] = X in Tal space

        # Prepare dimensions of VTC data array
        VTC_resolution = header["VTC resolution relative to VMR (1, 2, or 3)"]
        DimX = (header["XEnd"] - header["XStart"]) // VTC_resolution
        DimY = (header["YEnd"] - header["YStart"]) // VTC_resolution
        DimZ = (header["ZEnd"] - header["ZStart"]) // VTC_resolution
        DimT = header["Nr time points"]
        nr_values = DimZ * DimY * DimX * DimT

        # PERF: bulk-read the voxel block with np.fromfile instead of one
        # struct.unpack call per element (orders of magnitude faster on
        # full-size volumes); '<i2'/'<f4' match the '<h'/'<f' formats used
        # previously.
        if header["Data type (1:short int, 2:float)"] == 1:
            data_img = np.fromfile(f, dtype='<i2', count=nr_values)
        elif header["Data type (1:short int, 2:float)"] == 2:
            data_img = np.fromfile(f, dtype='<f4', count=nr_values)
        else:
            # BUGFIX: `raise("...")` raises a TypeError (string is not an
            # exception); raise a real exception with the message instead.
            raise ValueError("Unrecognized VTC data type.")
        # Match the original float64 working dtype.
        data_img = data_img.astype(np.float64)

        data_img = np.reshape(data_img, (DimZ, DimY, DimX, DimT))
        data_img = np.transpose(data_img, (0, 2, 1, 3))  # BV to Tal
        data_img = data_img[::-1, ::-1, ::-1, :]  # Flip BV axes

    return header, data_img
# =============================================================================
def write_vtc(filename, header, data_img):
    """Protocol to write BrainVoyager VTC file.

    Parameters
    ----------
    filename : string
        Output path of the VTC file.
    header : dictionary
        Pre-data header fields (see `read_vtc` / `generate_vtc` for the keys).
    data_img : 4D numpy.array
        Image data in Talairach axis order; flipped/transposed back to the
        internal BrainVoyager axis order before writing.

    Raises
    ------
    ValueError
        If ``header["Data type (1:short int, 2:float)"]`` is neither 1 nor 2.
    """
    with open(filename, 'wb') as f:
        # Expected binary data: short int (2 bytes)
        f.write(struct.pack('<h', header["File version"]))
        # Expected binary data: variable-length string
        write_variable_length_string(f, header["Source FMR name"])
        # Expected binary data: short int (2 bytes)
        f.write(struct.pack('<h', header["Protocol attached"]))
        if header["Protocol attached"] > 0:
            # Expected binary data: variable-length string
            write_variable_length_string(f, header["Protocol name"])
        # Expected binary data: short int (2 bytes). NOTE: written in this
        # exact order to match the VTC file format / `read_vtc`.
        for key in ["Current protocol index",
                    "Data type (1:short int, 2:float)",
                    "Nr time points",
                    "VTC resolution relative to VMR (1, 2, or 3)",
                    "XStart", "XEnd",
                    "YStart", "YEnd",
                    "ZStart", "ZEnd"]:
            f.write(struct.pack('<h', header[key]))
        # Expected binary data: char (1 byte)
        f.write(struct.pack(
            '<B', header["L-R convention (0:unknown, 1:radiological, 2:neurological)"]))
        f.write(struct.pack(
            '<B', header["Reference space (0:unknown, 1:native, 2:ACPC, 3:Tal)"]))
        # Expected binary data: float (4 bytes)
        f.write(struct.pack('<f', header["TR (ms)"]))

        # ---------------------------------------------------------------------
        # Write VTC data
        # ---------------------------------------------------------------------
        data_img = data_img[::-1, ::-1, ::-1, :]  # Flip BV axes
        data_img = np.transpose(data_img, (0, 2, 1, 3))  # Tal to BV
        data_img = np.reshape(data_img, data_img.size)
        if header["Data type (1:short int, 2:float)"] == 1:
            # '<i2' is an explicit little-endian short int, matching the old
            # per-element struct.pack('<h', ...) loop but written in one bulk
            # call (orders of magnitude faster for typical VTC sizes).
            f.write(data_img.astype('<i2', copy=False).tobytes())
        elif header["Data type (1:short int, 2:float)"] == 2:
            f.write(data_img.astype('<f4', copy=False).tobytes())
        else:
            # BUG FIX: the original `raise("...")` raised a plain string,
            # which is itself a TypeError in Python 3. Raise a real exception.
            raise ValueError("Unrecognized VTC data_img type.")
def generate_vtc():
    """Generate a BrainVoyager VTC header and data array with default values.

    Returns
    -------
    header : dictionary
        Pre-data header fields.
    data : 4D numpy.array
        Random image data of shape (100, 100, 100, 10), short int type.
    """
    header = {
        # Expected binary data: short int (2 bytes)
        "File version": 3,
        # Expected binary data: variable-length string
        "Source FMR name": "",
        # Expected binary data: short int (2 bytes)
        "Protocol attached": 0,
        # NOTE: "Protocol name" is only written when a protocol is attached,
        # so no default is provided here.
        "Current protocol index": 0,
        # NOTE: float vtc does not seem to work in BV so short int is used
        "Data type (1:short int, 2:float)": 1,
        "Nr time points": 10,
        "VTC resolution relative to VMR (1, 2, or 3)": 1,
        "XStart": 100,
        "XEnd": 200,
        "YStart": 100,
        "YEnd": 200,
        "ZStart": 100,
        "ZEnd": 200,
        # Expected binary data: char (1 byte)
        "L-R convention (0:unknown, 1:radiological, 2:neurological)": 1,
        "Reference space (0:unknown, 1:native, 2:ACPC, 3:Tal)": 1,
        # Expected binary data: float (4 bytes)
        "TR (ms)": 1,
    }
    # -------------------------------------------------------------------------
    # Create random data; 225 is used as the maximum for BV visualization.
    dims = (100, 100, 100, 10)
    data = np.random.random(np.prod(dims)) * 225
    data = data.reshape(dims)
    # NOTE: float vtc does not seem to work in BV, hence the short int cast.
    return header, data.astype(np.short)
|
BrainInnovation/bvbabel | wip/batch_convert-neuroparc.py | <reponame>BrainInnovation/bvbabel<filename>wip/batch_convert-neuroparc.py
"""Convert Nifti images with labels (integers only) to BrainVoyager VOI.
TODO:
[ ] Read labels from csvs
[ ] Read anatomical nifti to adjust voi headers
"""
import os
import csv
import numpy as np
import nibabel as nb
import matplotlib.pyplot as plt
NII_FILE = "/home/faruk/Downloads/neuroparc/atlases/label/Human/Yeo-7_space-MNI152NLin6_res-1x1x1.nii.gz"
COLORMAP = plt.cm.get_cmap('Spectral')
# =============================================================================
# Read nifti data
print("Reading nifti data...")
nii = nb.load(NII_FILE)
data = np.asarray(nii.dataobj)
# Adjust origin
data = np.transpose(data, [2, 1, 0])
data = data[::-1, :, :]
labels = np.unique(data)[1:] # Do not include zeros
nr_voi = len(labels)
print(" Min: {} | Max : {}".format(data.min(), data.max()))
print(" Data type: {}".format(data.dtype))
print(" Nr. labels: {}".format(nr_voi))
print("Writing voi file...")
basename = NII_FILE.split(os.extsep, 1)[0]
voi_file = open(r"{}.voi".format(basename), "w")
voi_file.write("FileVersion: 4\n")
voi_file.write("\n")
voi_file.write("ReferenceSpace: BV\n")
voi_file.write("\n")
voi_file.write("OriginalVMRResolutionX: 1\n")
voi_file.write("OriginalVMRResolutionY: 1\n")
voi_file.write("OriginalVMRResolutionZ: 1\n")
voi_file.write("OriginalVMROffsetX: 0\n")
voi_file.write("OriginalVMROffsetY: 0\n")
voi_file.write("OriginalVMROffsetZ: 0\n")
voi_file.write("OriginalVMRFramingCubeDim: 256\n")
voi_file.write("\n")
voi_file.write("LeftRightConvention: 1\n")
voi_file.write("\n")
voi_file.write("SubjectVOINamingConvention: <VOI>_<SUBJ>\n")
voi_file.write("\n")
voi_file.write("\n")
voi_file.write("NrOfVOIs: {}\n".format(nr_voi))
voi_file.write("\n")
for n, i in enumerate(labels):
label = "Label {}".format(str(i))
color = COLORMAP((n / nr_voi) * 255)
# Find voxel indices
idx_voxels = np.argwhere(data == i)
nr_voxels = idx_voxels.shape[0]
voi_file.write("NameOfVOI: {}\n".format(label))
voi_file.write("ColorOfVOI: {} {} {}\n".format(*color))
voi_file.write("\n")
voi_file.write("NrOfVoxels: {}\n".format(nr_voxels))
for indices in idx_voxels:
voi_file.write("{} {} {}\n".format(*indices))
voi_file.write("\n")
voi_file.close()
print('Finished.')
|
BrainInnovation/bvbabel | wip/read_gii_fsaverage_write_obj.py | <filename>wip/read_gii_fsaverage_write_obj.py
"""Convert Freesurfer *.gii triangular mesh surface to Wavefront .obj file."""
import os
import numpy as np
import nibabel as nb
import bvbabel
import timeit
FILE = "/home/faruk/Documents/temp_bv_fsaverage/white_left.gii"
# -----------------------------------------------------------------------------
# Load data
gii = nb.load(FILE)
basename = FILE.split(os.extsep, 1)[0]
def compute_vertex_normals(verts, faces):
    """Compute unit vertex normals of a triangular mesh.

    Parameters
    ----------
    verts : 2d numpy array, shape [nvertices, 3]
        Coordinates of vertices.
    faces : 2d numpy array, shape [nfaces, 3]
        Vertex indices forming triangles.

    Returns
    -------
    normals : 2d numpy array, shape [nvertices, 3]
        Unit vector vertex normals.

    Reference
    ---------
    https://sites.google.com/site/dlampetest/python/calculating-normals-of-a-triangle-mesh-using-numpy
    """
    def to_unit(vectors):
        """Scale each row of an (n, 3) array to unit length, in place."""
        lengths = np.sqrt(vectors[:, 0] ** 2. + vectors[:, 1] ** 2. + vectors[:, 2] ** 2.)
        vectors /= lengths[:, None]
        return vectors

    # Gather the three corner coordinates of every triangle.
    corners = verts[faces]
    # Face normals: cross product of two triangle edge vectors (v1-v0, v2-v0).
    face_normals = to_unit(np.cross(corners[:, 1] - corners[:, 0],
                                    corners[:, 2] - corners[:, 0]))
    # Spread face normals onto their corner vertices, then renormalize.
    vertex_normals = np.zeros(verts.shape, dtype=verts.dtype)
    vertex_normals[faces[:, 0]] += face_normals
    vertex_normals[faces[:, 1]] += face_normals
    vertex_normals[faces[:, 2]] += face_normals
    return to_unit(vertex_normals)
# =============================================================================
# Extract vertices and faces from the GIFTI data arrays
verts = gii.darrays[0].data
faces = gii.darrays[1].data
# Swap two index columns, which reverses triangle winding (flips normals).
faces = faces[:, [0, 2, 1]]
norms = compute_vertex_normals(verts, faces)
nr_verts = verts.shape[0]
nr_faces = faces.shape[0]

# -----------------------------------------------------------------------------
# Save OBJ
basename = FILE.split(os.extsep, 1)[0]  # Path without any extensions
outname = "{}_bvbabel.obj".format(basename)
print("Writing OBJ file...")
bvbabel.obj.write_obj(outname, verts, norms, faces)
print("Finished.")
BrainInnovation/bvbabel | wip/read_freesurfer_surface_map_write_smp.py | <gh_stars>1-10
"""Convert Freesurfer *.pct.mgh file into BrainVoyager SMP format."""
import os
import numpy as np
import nibabel as nb
import bvbabel
# NOTE: Full path to `<subjid>/surf/?h.w-g.pct.mgh` file. This file is a
# surface map that has gray to white matter signal intensity ratios for each
# vertex (ref: <https://surfer.nmr.mgh.harvard.edu/fswiki/pctsurfcon>)
FILE = "/home/faruk/Documents/temp_bvbabel_mgh/lh.w-g.pct.mgh"
# -----------------------------------------------------------------------------
# Read Freesurfer `*.w-g.pct.mgh` surface map
mgh = nb.load(FILE)
mgh_data = np.squeeze(np.asarray(mgh.dataobj))
nr_vertices = mgh_data.shape[0]
# Generate dummy SMP file
smp_header, smp_data = bvbabel.smp.generate_smp(nr_vertices=nr_vertices)
# Update some fields with mgh information
smp_header["Map"][0]["Threshold min"] = np.percentile(mgh_data, 5)
smp_header["Map"][0]["Threshold max"] = np.percentile(mgh_data, 95)
# Determine output name
basename = FILE[:-4] # get rid of mgh extension
basename = basename.replace(".", "_")
outname = "{}_bvbabel.smp".format(basename)
# Save SMP file while using the freesurfer MGH data
bvbabel.smp.write_smp(outname, smp_header, mgh_data[:, None])
print('Finished.')
|
BrainInnovation/bvbabel | wip/smp_compute_cortical_magnification.py | """Read Brainvoyager srf & smp files to compute cortical magnification."""
import os
import numpy as np
from copy import copy
import bvbabel

# Input BrainVoyager surface and surface-map files (hard-coded paths).
FILE_SRF = "/home/faruk/Documents/test_bvbabel/SRF/surface.srf"
FILE_SMP = "/home/faruk/Documents/test_bvbabel/SRF/maps.smp"

# These values are required to compute vertex-wise distance in mm
VMR_IMAGE_DIMS = 512  # Stands for e.g. 512 x 512 x 512, or 256 x 256 x 256
VMR_VOXEL_DIMS = 0.4  # Stands for e.g. 0.4 x 0.4 x 0.4 mm^3 or 1 x 1 x 1 mm^3

# =============================================================================
# Load files
header_srf, data_srf = bvbabel.srf.read_srf(FILE_SRF)
header_smp, data_smp = bvbabel.smp.read_smp(FILE_SMP)

# Get vertex coordinates (2D numpy array)
vtx = data_srf["vertices"]
# Get vertex neighbors (python list)
nbr = data_srf["vertex neighbors"]

# Get PRF mapping visual field x & y coordinates
# NOTE(review): assumes SMP maps 1 and 2 hold the PRF x/y estimates -- the
# printed map names let the user verify this at run time.
print(header_smp["Map"][1]["Name"])
print(header_smp["Map"][2]["Name"])
prf_xy = data_smp[:, 1:3]

# -----------------------------------------------------------------------------
print("Computing cortical magnification factors...")
# Prepare useful variables
nr_vtx = header_srf["Nr vertices"]
map_cmf = np.zeros(nr_vtx)

# Compute cortical magnification for each vertex
for v in range(nr_vtx):
    # Only vertices with a non-zero PRF x and y estimate are processed
    if prf_xy[v, 0] != 0 and prf_xy[v, 1] != 0:
        cmf_sum = 0
        n_count = 0
        for n in nbr[v][1:]:  # Loop over neighbor vertices
            # Compute vertex to vertex mesh distance
            dist_cortex = np.linalg.norm(vtx[v, :] - vtx[n, :])
            # Convert vertex to vertex mesh distance to millimeters
            dist_cortex *= (VMR_IMAGE_DIMS / 256) * VMR_VOXEL_DIMS
            # Compute vertex to vertex PRF xy coordinates distance
            dist_vfield = np.linalg.norm(prf_xy[v, :] - prf_xy[n, :])
            # Compute cortical magnification factor (CMF)
            # NOTE: CMF = "mm of cortical surface" / "degree of visual angle"
            if dist_vfield > 0:
                cmf_sum += dist_cortex / dist_vfield
                n_count += 1
        # Normalize cumulative CMF with the number of non-zero neighbours
        if cmf_sum > 0:
            cmf = cmf_sum / n_count
            # Put the vertex-wise average CMF into smp map format
            map_cmf[v] = cmf
        else:
            map_cmf[v] = 0

# -----------------------------------------------------------------------------
# Prepare new SMP map (header copied from an existing map as a template)
header_smp["Nr maps"] += 1
header_smp["Map"].append(copy(header_smp["Map"][4]))
header_smp["Map"][-1]["Name"] = "CMF, UseThreshMap: R"
header_smp["Map"][-1]["Threshold min"] = 0.001
header_smp["Map"][-1]["Threshold max"] = 5.
header_smp["Map"][-1]["LUT file"] = "default_v21_inv.olt"
data_smp = np.hstack([data_smp, map_cmf[:, None]])

# Add reciprocal of CMF as it linearly increases with eccentricity
header_smp["Nr maps"] += 1
header_smp["Map"].append(copy(header_smp["Map"][4]))
header_smp["Map"][-1]["Name"] = "CMF reciprocal, UseThreshMap: R"
header_smp["Map"][-1]["Threshold min"] = 0.001
header_smp["Map"][-1]["Threshold max"] = 1.5
header_smp["Map"][-1]["LUT file"] = "default_v21_inv.olt"
map_cmf[map_cmf > 0] = 1 / map_cmf[map_cmf > 0]  # Reciprocal of non-zeros
data_smp = np.hstack([data_smp, map_cmf[:, None]])

# Save SMP
basename = FILE_SMP.split(os.extsep, 1)[0]
outname = "{}_bvbabel-CMF.smp".format(basename)
bvbabel.smp.write_smp(outname, header_smp, data_smp)
print("Finished.")
|
BrainInnovation/bvbabel | wip/read_gii_fsaverage_write_srf.py | <filename>wip/read_gii_fsaverage_write_srf.py<gh_stars>0
"""Convert Freesurfer *.gii triangular mesh surface to Wavefront .obj file."""
import os
import numpy as np
import nibabel as nb
import bvbabel
import timeit
FILE = "/home/faruk/Documents/temp_bv_fsaverage/white_left.gii"
# -----------------------------------------------------------------------------
# Load data
gii = nb.load(FILE)
basename = FILE.split(os.extsep, 1)[0]
def compute_vertex_normals(verts, faces):
    """Compute unit vertex normals of a triangular mesh.

    Parameters
    ----------
    verts : 2d numpy array, shape [nvertices, 3]
        Coordinates of vertices.
    faces : 2d numpy array, shape [nfaces, 3]
        Vertex indices forming triangles.

    Returns
    -------
    normals : 2d numpy array, shape [nvertices, 3]
        Unit vector vertex normals.

    Reference
    ---------
    https://sites.google.com/site/dlampetest/python/calculating-normals-of-a-triangle-mesh-using-numpy
    """
    def _normalize_rows(vec):
        """Divide each 3-component row of a (n, 3) array by its length."""
        mag = np.sqrt(vec[:, 0] ** 2. + vec[:, 1] ** 2. + vec[:, 2] ** 2.)
        vec[:, 0] /= mag
        vec[:, 1] /= mag
        vec[:, 2] /= mag
        return vec

    # Indexed view of triangle corner coordinates.
    tri = verts[faces]
    # Face normals via the cross product of two edge vectors.
    face_n = _normalize_rows(np.cross(tri[:, 1] - tri[:, 0],
                                      tri[:, 2] - tri[:, 0]))
    # Accumulate onto vertices and normalize once more.
    out = np.zeros(verts.shape, dtype=verts.dtype)
    for col in range(3):
        out[faces[:, col]] += face_n
    return _normalize_rows(out)
# =============================================================================
# Extract vertices and faces
print("Converting vertices and faces...")
verts = gii.darrays[0].data
faces = gii.darrays[1].data
nr_verts = verts.shape[0]
nr_faces = faces.shape[0]

# Manipulate coordinates to fit BrainVoyager's format (axis reorder + flip)
verts = np.stack((verts[:, 1], verts[:, 2], verts[:, 0]), axis=1)
verts[:, 1] *= -1
# faces = faces[:, [0, 2, 1]]  # change winding (BV normals point inward)
norms = compute_vertex_normals(verts, faces)

# Recentering experiments, kept for reference:
# center = 127.75;
# range = verts.max() - verts.min()
# mid = np.mean(verts, axis=0)
# verts[:, 0] = verts[:, 0] + center - mid[0];
# verts[:, 1] = verts[:, 1] + center - mid[1];
# verts[:, 2] = verts[:, 2] + center - mid[2];

# -----------------------------------------------------------------------------
# Compute vertex neighbours
print("Finding vertex neighbors...")
# TODO: Convert this into a function.
start_time = timeit.default_timer()
nn = []
temp = faces.flatten()  # This is done for speeding up argwhere
for i in range(nr_verts):  # loop over each vertex
    # Find faces that contain a given vertex id (integer division recovers
    # the face index from the flattened position)
    idx_faces = np.argwhere(temp == i) // 3
    # Reduce to unique vertex ids with consistent winding
    # NOTE: If this part is wrong black triangles will appear when Brainvoyager
    # updates the vertex normals.
    temp_faces = np.squeeze(faces[idx_faces])
    nr_neighbors = temp_faces.shape[0]
    # Derive ordered edges: the three directed edges of every incident face
    ord_edges = np.zeros((nr_neighbors, 3, 2), dtype=int)
    for j, t in enumerate(temp_faces):
        ord_edges[j, 0, :] = t[0:2]
        ord_edges[j, 1, :] = t[1:3]
        ord_edges[j, 2, :] = t[2], t[0]
    # Remove edges that include the reference vertex itself
    x = ord_edges != i
    x = x * x[:, :, ::-1]  # Boolean cast: keep edges where both ends != i
    edges = ord_edges[x].reshape((nr_neighbors, 2))
    # Step through ordered edges: follow the directed edge chain so the
    # neighbors come out in consistent winding order around vertex i
    idx_verts = []
    idx_verts.append(edges[0, 0])
    idx_verts.append(edges[0, 1])
    edges_0 = edges[:, 0]
    edges_1 = edges[:, 1]
    n = 0
    while n < nr_neighbors - 2:
        j = edges_1[edges_0 == idx_verts[-1]][0]
        idx_verts.append(j)
        n += 1
    # Construct nearest neighbour array that starts with nr of neighbours
    idx_verts.insert(0, nr_neighbors)
    nn.append(idx_verts)
elapsed = timeit.default_timer() - start_time
print(elapsed)

# -----------------------------------------------------------------------------
# Save SRF (header defaults mirror BrainVoyager's own curvature colors)
print("Writing SRF file...")
header = dict()
header["File version"] = 4
header["Surface type"] = 2
header["Nr vertices"] = nr_verts
header["Nr triangles"] = nr_faces
header["Mesh center X"] = np.mean(verts[:, 0])
header["Mesh center Y"] = np.mean(verts[:, 1])
header["Mesh center Z"] = np.mean(verts[:, 2])
header["Vertex convex curvature R"] = 0.11999999731779099
header["Vertex convex curvature G"] = 0.38999998569488525
header["Vertex convex curvature B"] = 0.6499999761581421
header["Vertex convex curvature A"] = 1.0
header["Vertex concave curvature R"] = 0.05999999865889549
header["Vertex concave curvature G"] = 0.19499999284744263
header["Vertex concave curvature B"] = 0.32499998807907104
header["Vertex concave curvature A"] = 1.0
header["Nr triangle strip elements"] = 0
header["MTC name"] = ''
mesh_data = dict()
mesh_data["vertices"] = verts
mesh_data["vertex normals"] = norms
mesh_data["faces"] = faces
mesh_data["vertex colors"] = np.ones((verts.shape[0]))
mesh_data["vertex neighbors"] = nn
outname = "{}_bvbabel.srf".format(basename)
bvbabel.srf.write_srf(outname, header, mesh_data)
mesh_data["vertex neighbors"]  # NOTE: no-op expression, likely leftover debug
print("Finished.")
|
BrainInnovation/bvbabel | examples/create_vtc.py | """Create Brainvoyager VTC file."""
import bvbabel

# Output path for the generated default VTC file (edit before running).
OUTNAME = "/home/faruk/Documents/test_bvbabel/vtc/default_bvbabel.vtc"

# -----------------------------------------------------------------------------
# Generate a default header + data pair and write it straight to disk.
bvbabel.vtc.write_vtc(OUTNAME, *bvbabel.vtc.generate_vtc())
print("Finished.")
|
BrainInnovation/bvbabel | wip/read_mgz_write_voi.py | <reponame>BrainInnovation/bvbabel
"""Convert mgz to nifti.
Dependencies
------------
- Freesurfer color look up table text file from:
<https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/AnatomicalROI/FreeSurferColorLUT>
- `aparc+aseg.mgz` file from Freesurfer.
"""
import os
import csv
import numpy as np
import nibabel as nb
NII_FILE = "/home/faruk/Documents/temp_bvbabel_mgh/aseg.mgz"
FREESURFER_LUT = "/home/faruk/Documents/temp_bvbabel_mgh/freesurfer_LUT.txt"
# =============================================================================
# Read freesurfer color look up table into dictionary
print("Reading freesurfer look up table...")
freesurfer_lut = {}
with open(FREESURFER_LUT) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if row: # non-empty list
temp = row[0].split()
if temp[0][0] == "#": # Comment
pass
else:
freesurfer_lut.update({temp[0]: temp[1:]})
# =============================================================================
# Read nifti data
print("Reading nifti data...")
nii = nb.load(NII_FILE)
data = np.asarray(nii.dataobj)
# Adjust origin
data = np.transpose(data, [2, 1, 0])
data = data[::-1, :, :]
labels = np.unique(data)[1:] # Do not include zeros
nr_voi = len(labels)
print(" Min: {} | Max : {}".format(data.min(), data.max()))
print(" Data type: {}".format(data.dtype))
print(" Nr. labels: {}".format(nr_voi))
print("Writing voi file...")
basename = NII_FILE.split(os.extsep, 1)[0]
voi_file = open(r"{}.voi".format(basename), "w")
voi_file.write("FileVersion: 4\n")
voi_file.write("\n")
voi_file.write("ReferenceSpace: BV\n")
voi_file.write("\n")
voi_file.write("OriginalVMRResolutionX: 1\n")
voi_file.write("OriginalVMRResolutionY: 1\n")
voi_file.write("OriginalVMRResolutionZ: 1\n")
voi_file.write("OriginalVMROffsetX: 0\n")
voi_file.write("OriginalVMROffsetY: 0\n")
voi_file.write("OriginalVMROffsetZ: 0\n")
voi_file.write("OriginalVMRFramingCubeDim: 256\n")
voi_file.write("\n")
voi_file.write("LeftRightConvention: 1\n")
voi_file.write("\n")
voi_file.write("SubjectVOINamingConvention: <VOI>_<SUBJ>\n")
voi_file.write("\n")
voi_file.write("\n")
voi_file.write("NrOfVOIs: {}\n".format(nr_voi))
voi_file.write("\n")
for i in labels:
label = freesurfer_lut[str(i)][0]
color = freesurfer_lut[str(i)][1:4]
# Find voxel indices
idx_voxels = np.argwhere(data == i)
nr_voxels = idx_voxels.shape[0]
voi_file.write("NameOfVOI: {}\n".format(label))
voi_file.write("ColorOfVOI: {} {} {}\n".format(*color))
voi_file.write("\n")
voi_file.write("NrOfVoxels: {}\n".format(nr_voxels))
for indices in idx_voxels:
voi_file.write("{} {} {}\n".format(*indices))
voi_file.write("\n")
voi_file.close()
print('Finished.')
|
wrighting/upgrade-assist | src/java_compare.py | import os
import getopt
import sys
import re
import xml.etree.ElementTree as ET
import string
import difflib
import xmlUtil
import copy
import fnmatch
import filecmp
import json
import report
class JavaCompare(object):
    """Compare Spring bean definitions (and the Java classes behind them)
    between a customized code tree and two product versions."""

    # XML namespace used by Spring bean context files.
    nsbeansuri = 'http://www.springframework.org/schema/beans'
    nsbeans = '{' + nsbeansuri + '}'

    def __init__(self, customPath, reporter):
        """Remember paths/reporter and load the optional
        `upgrade-assist.mapping.json` from the custom tree."""
        self.reporter = reporter
        self.mappings = {}
        # print (os.path.join(customPath, "upgrade-assist.mapping.json"))
        self.customPath = customPath
        mappingFile = os.path.join(customPath, "upgrade-assist.mapping.json")
        if os.path.isfile(mappingFile):
            with (open(mappingFile)) as json_data:
                self.mappings = json.load(json_data)

    def findDef(self, defList, defName):
        """Look up a bean definition by id, falling back to a
        `reference-bean-id` configured in the mappings file."""
        beanDef = None
        if defName in defList:
            beanDef = defList[defName]
        if not beanDef:
            if "beans" in self.mappings and defName in self.mappings["beans"]:
                self.reporter.info("Found mapping config")
                mappedBean = self.mappings["beans"][defName]
                if "reference-bean-id" in mappedBean and mappedBean["reference-bean-id"] in defList:
                    self.reporter.info("Using " + mappedBean["reference-bean-id"] + " instead of " + defName)
                    beanDef = defList[mappedBean["reference-bean-id"]]
        return beanDef

    def compareJava(self, oldPath, newPath, myClass, oldClass, newClass):
        """Locate the Java source of a customized class in both product
        versions and print a context diff when it changed."""
        self.reporter.info("Custom class is being used:" + myClass)
        self.reporter.info("Checking java for:" + oldClass + " " + newClass)
        # locateClass is a generator; keep the last match found.
        oldSource = ""
        for c in self.locateClass(oldClass, oldPath):
            oldSource = c
        newSource = ""
        for c in self.locateClass(newClass, newPath):
            newSource = c
        if oldSource == "":
            self.reporter.error("Cannot find java for class:" + oldClass)
        if newSource == "":
            self.reporter.error("Cannot find java for class:" + newClass)
        try:
            if filecmp.cmp(oldSource, newSource):
                self.reporter.info("Same java:" + oldSource + " " + newSource)
            else:
                mySource = ""
                for c in self.locateClass(myClass, self.customPath):
                    mySource = c
                if mySource == "":
                    self.reporter.error("Cannot find java for class:" + myClass)
                self.reporter.actionRequired("Different java between versions", mySource, oldSource, newSource)
                fromfile = oldSource
                tofile = newSource
                with open(fromfile) as fromf, open(tofile) as tof:
                    fromlines, tolines = list(fromf), list(tof)
                    diff = difflib.context_diff(fromlines, tolines, fromfile=oldSource, tofile=newSource)
                    sys.stdout.writelines(diff)
        except OSError as ose:
            # e.g. missing source files discovered above; report and continue.
            print(ose)

    def compareBeanDefs(self, oldPath, newPath):
        """Compare every bean id found in the custom tree against the old and
        new product trees; returns all collected id/xml maps."""
        myIdList, myOtherXML = self.collectBeanIds(self.customPath)
        # print str(myIdList)
        oldIdList, oldOtherXML = self.collectBeanIds(oldPath)
        # print str(oldIdList)
        newIdList, newOtherXML = self.collectBeanIds(newPath)
        for beanDef in myIdList:
            myDef = self.findDef(myIdList, beanDef)
            oldDef = self.findDef(oldIdList, beanDef)
            newDef = self.findDef(newIdList, beanDef)
            if oldDef and newDef:
                self.reporter.info("BeanDef in all versions:" + beanDef)
                # cmp_el returns 0 when the XML elements are equivalent.
                if self.compareBeans(oldDef['element'], newDef['element'], oldDef['path'], newDef['path']) == 0:
                    myClass = myDef['element'].get('class')
                    oldClass = oldDef['element'].get('class')
                    newClass = newDef['element'].get('class')
                    if myClass == newClass and newClass == oldClass:
                        self.reporter.info("No change so can keep same customization for:" + beanDef)
                    else:
                        self.compareJava(oldPath, newPath, myClass, oldClass, newClass)
                else:
                    self.reporter.actionRequired("Different bean definition:", myDef['path'], oldDef['path'], newDef['path'])
                    self.compareBeans(myDef['element'], oldDef['element'], myDef['path'], oldDef['path'])
            elif oldDef:
                self.reporter.info("BeanDef not in new version:" + beanDef)
                self.reporter.info("Custom file:" + myDef['path'])
                self.reporter.info("Old version file:" + oldDef['path'])
                self.compareBeans(myDef['element'], oldDef['element'], myDef['path'], oldDef['path'])
            elif newDef:
                self.reporter.info("BeanDef not in old version:" + beanDef)
                self.reporter.info("Custom file:" + myDef['path'])
                self.reporter.info("New version file:" + newDef['path'])
                self.compareBeans(myDef['element'], newDef['element'], myDef['path'], newDef['path'])
            else:
                self.reporter.info("BeanDef only in custom code:" + beanDef + ":" + myDef['path'])
        return myIdList, myOtherXML, oldIdList, oldOtherXML, newIdList, newOtherXML

    def compareAspects(self, oldPath, newPath):
        """Compare the Java sources behind each aspect mapping from the
        mappings file (no-op when no mappings are configured)."""
        if not self.mappings:
            return
        if 'aspects' not in self.mappings:
            print(self.mappings)
            return
        for mapping in self.mappings['aspects']:
            for aClass in self.mappings['aspects'][mapping]['classes']:
                self.compareJava(oldPath, newPath, mapping, aClass, aClass)

    def parseContextFile(self, tree, xpath, namespaces):
        """Collect bean elements matching `xpath`, keyed by the first of
        id/name/class; beans with only `parent` are skipped."""
        ids = []
        for bean in tree.findall(xpath, namespaces):
            if 'id' in bean.attrib:
                ids.append({'id': bean.attrib['id'], 'element': copy.deepcopy(bean)})
            elif 'name' in bean.attrib:
                ids.append({'id': bean.attrib['name'], 'element': copy.deepcopy(bean)})
            elif 'class' in bean.attrib:
                ids.append({'id': bean.attrib['class'], 'element': copy.deepcopy(bean)})
            elif 'parent' in bean.attrib:
                pass
            else:
                print("Bean missing Id" + str(ET.tostring(bean)))
        return ids

    def addBeanToList(self, filePath, idList, bean):
        """Register a parsed bean under its id, warning about duplicates."""
        bean['path'] = filePath
        if 'id' in bean:
            beanId = bean['id']
            if beanId in idList:
                self.reporter.info("multiple definition for " + beanId + " in " + filePath + " and " + idList[beanId]['path'])
            idList[beanId] = copy.deepcopy(bean)
        # Special case for post processing - see https://github.com/wrighting/upgrade-assist/issues/1
        for props in bean['element'].findall("./property[@name='targetBeanName']"):
            # The target bean id may be an attribute or a nested <value> text.
            beanId = None
            if 'value' in props.attrib:
                beanId = props.attrib['value']
            else:
                for value in props.iter('value'):
                    if value.text:
                        beanId = value.text
            className = None
            for props in bean['element'].findall("./property[@name='replacementClassName']"):
                if 'value' in props.attrib:
                    className = props.attrib['value']
                else:
                    for value in props.iter('value'):
                        if value.text:
                            className = value.text
            if beanId:
                # Register the post-processor under the bean it replaces.
                idList[beanId] = copy.deepcopy(bean)
                if className:
                    idList[beanId]['element'].set('class', className)

    def collectBeansFromFile(self, filePath):
        """Parse one XML file into an id->bean dict; returns {} for
        unparseable (non-XML) content."""
        idList = {}
        try:
            tree = ET.parse(filePath)
        except ET.ParseError:
            # print "Parse error:" + filePath
            return idList
        # Needs to be done twice because of the different ways the XML is
        # defined (with and without the Spring beans namespace).
        # There may be a better way...
        for bean in self.parseContextFile(tree, self.nsbeans + 'bean', {}):
            self.addBeanToList(filePath, idList, bean)
        for bean in self.parseContextFile(tree, 'bean', {}):
            self.addBeanToList(filePath, idList, bean)
        return idList

    def collectBeanIds(self, startDir):
        """Walk `startDir` collecting bean ids from all *.xml files; XML
        files without beans are returned separately in `otherXML`."""
        idList = {}
        otherXML = {}
        ET.register_namespace('bean', self.nsbeansuri)
        for dirName, subdirList, fileList in os.walk(startDir):
            # Skip build output and VCS/data directories.
            if re.match('.*/(target|\.svn|alf_data_dev)/.*', dirName):
                continue
            if True:  # re.match('^(.*/templates/.*)', dirName):
                for fileName in fileList:
                    if re.match('.*\.xml$', fileName):
                        filePath = os.path.join(dirName, fileName)
                        beans = self.collectBeansFromFile(filePath)
                        if beans:
                            idList.update(beans)
                        else:
                            key = filePath.replace(startDir, '')
                            otherXML[key] = {'path': filePath}
                            # print ("No beans in:" + key + ':' + filePath)
                # print "Parsing:" + filePath
        # print str(idList)
        return idList, otherXML

    def locateClass(self, className, root=os.curdir):
        '''Locate all files matching supplied filename pattern in and below
        supplied root directory.'''
        for path, dirs, files in os.walk(os.path.abspath(root)):
            # Match on the simple class name + .java extension.
            pattern = className.split(".")[-1].rstrip() + '.java'
            # for filename in fnmatch.filter(files, pattern):
            if pattern in files:
                yield os.path.join(path, pattern)

    def preDiffProcess(self, inString):
        """Hook to normalize lines before diffing (currently identity)."""
        return inString
        # Less results but harder to read
        # return inString.translate(None, string.whitespace)

    def compareBeans(self, bean1, bean2, file1, file2):
        """Compare two bean XML elements; print a context diff when they
        differ. Returns the xmlUtil.cmp_el result (0 means equal)."""
        cmpResult = xmlUtil.cmp_el(bean1, bean2)
        if cmpResult != 0:
            myAsString = ET.tostring(bean1, encoding="unicode")
            newAsString = ET.tostring(bean2, encoding="unicode")
            for line in difflib.context_diff(
                    list(map(self.preDiffProcess, myAsString.splitlines(True))),
                    list(map(self.preDiffProcess, newAsString.splitlines(True))),
                    fromfile=file1, tofile=file2):
                sys.stdout.write(line)
            print()
        else:
            self.reporter.info("Bean definitions match")
        return cmpResult
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], 'v', ['version'])
except getopt.GetOptError:
sys.exit(1)
customPath = args[0]
oldPath = os.path.join(args[1], args[2])
newPath = os.path.join(args[1], args[3])
jc = JavaCompare(customPath, Report())
jc.compareBeanDefs(oldPath, newPath)
jc.compareAspects(oldPath, newPath)
|
wrighting/upgrade-assist | src/compare_alfresco.py | <gh_stars>1-10
import getopt
import sys
import os
import re
import filecmp
import difflib
import sys
import json
import report
import java_compare
import comparitor
class ScriptChecker():
def collectExtensions(self, startDir, extHomes):
srcFileList = {}
for dirName, subdirList, fileList in os.walk(startDir):
if re.match('.*/(target|\.svn|\.git|alf_data_dev)/.*', dirName):
continue
srcRoot = None
for extHome in extHomes:
match = re.match('(.*/' + extHome + ')(/.*)', dirName)
if match:
srcRoot = match.group(2)
if not srcRoot:
continue
for fileName in fileList:
srcFile = os.path.join(srcRoot, fileName)
if srcFile in srcFileList:
self.reporter.actionRequired("Conflicting customization", "", srcFileList[srcFile]["path"],os.path.join(dirName, fileName))
else:
srcFileList[srcFile] = {
"path": os.path.join(dirName, fileName),
"dir": dirName,
"file": fileName,
"srcRoot": srcRoot
}
return srcFileList
def collectOriginals(self, startDir, customFiles):
srcFileList = {}
for dirName, subdirList, fileList in os.walk(startDir):
if re.match('.*/(target|\.svn|alf_data_dev)/.*', dirName):
continue
found = False
for key, value in customFiles.items():
match = re.match('.*'+value["srcRoot"], dirName)
if match:
found = True
if not found:
continue
for fileName in fileList:
srcFile = os.path.join(value["srcRoot"], fileName)
orig = os.path.join(dirName, fileName)
for custom in customFiles:
if orig.endswith(custom):
srcFileList[custom] = {
"path": orig,
"dir": dirName,
"file": fileName,
}
return srcFileList
def compareFiles(self, customFiles, oldFiles, newFiles):
for customFile in customFiles:
if customFile in oldFiles and customFile in newFiles:
self.reporter.info("file in all versions:" + customFile)
if filecmp.cmp(oldFiles[customFile]['path'], newFiles[customFile]['path']):
self.reporter.info("No change between versions:" + customFile)
else:
msg = "Different file:"
if "bean" in customFiles[customFile]:
msg = "Implementation in bean:" + customFiles[customFile]['bean'] + " defined in " + customFiles[customFile]['beanDef']['path']
self.reporter.actionRequired(msg, customFiles[customFile]['path'], oldFiles[customFile]['path'], newFiles[customFile]['path'])
fromfile = oldFiles[customFile]['path']
tofile = newFiles[customFile]['path']
with open(fromfile) as fromf, open(tofile) as tof:
fromlines, tolines = list(fromf), list(tof)
diff = difflib.context_diff(fromlines, tolines, fromfile=fromfile, tofile=tofile)
sys.stdout.writelines(diff)
elif customFile in oldFiles:
self.reporter.info("file not in new version:" + customFile)
elif customFile in newFiles:
self.reporter.info("file not in old version:" + customFile)
else:
self.reporter.info("file only in custom code:" + customFile + ":" + customFiles[customFile]['path'])
def findDef(self, defList, defName, mapping):
fileDef = None
if defName in defList:
fileDef = defList[defName]
if not fileDef:
if "files" in mapping and defName in mapping["files"]:
self.reporter.info("Found mapping config")
mappedFile = mapping["files"][defName]
if "reference-file" in mappedFile and mappedFile["reference-file"] in defList:
self.reporter.info("Using " + mappedFile["reference-file"] + " instead of " + defName)
fileDef = defList[mappedFile["reference-file"]]
return fileDef
    def run(self, customPath, oldPath, newPath):
        """Drive the whole upgrade check: load the optional mapping config,
        collect customisations, compare bean definitions, then compare each
        customised file's originals between the two product versions.

        Args:
            customPath: root of the customer's customisation source tree.
            oldPath: unpacked source tree of the old product version.
            newPath: unpacked source tree of the new product version.
        """
        mappings = {}
        # Optional user-supplied mapping of custom files/beans to reference files.
        mappingsFile = os.path.join(customPath, "upgrade-assist.mapping.json")
        if os.path.isfile(mappingsFile):
            with (open(mappingsFile)) as json_data:
                mappings = json.load(json_data)
        self.reporter = report.Report()
        # Standard Alfresco extension locations inside an AMP/JAR project.
        extHomes = [ "src/main/amp/config/alfresco/extension", "src/main/resources/alfresco/extension"]
        customFiles = self.collectExtensions(customPath, extHomes)
        jc = java_compare.JavaCompare(customPath, self.reporter)
        self.comparitor = comparitor.Comparitor(self.reporter)
        myIdList, myOtherXML, oldIdList, oldOtherXML, newIdList, newOtherXML = jc.compareBeanDefs(oldPath, newPath)
        jc.compareAspects(oldPath, newPath)
        # Pull in extra files referenced by mapped beans so they get compared too.
        for bean in myIdList:
            beanDef = myIdList[bean]
            if not "beans" in mappings:
                continue
            if bean in mappings["beans"]:
                self.reporter.info("Found mapping config")
                mapping = mappings["beans"][bean]
                if 'files' in mapping:
                    for mappedFile in mapping['files']:
                        self.reporter.info("bean " + bean + " script " + mappedFile)
                        customFiles[mappedFile] = {
                            "path": mappedFile,
                            "srcRoot": os.path.dirname(mappedFile),
                            "bean": bean,
                            "beanDef": beanDef
                        }
        oldFiles = self.collectOriginals(oldPath, customFiles)
        newFiles = self.collectOriginals(newPath, customFiles)
        self.compareFiles(customFiles, oldFiles, newFiles)
if __name__ == "__main__":
    # CLI: upgrade-assist <customPath> <baseDir> <oldVersionDir> <newVersionDir>
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'v', ['version'])
    # BUG FIX: the exception class is getopt.GetoptError; the previous name
    # getopt.GetOptError does not exist, so a bad option raised
    # AttributeError instead of exiting cleanly.
    except getopt.GetoptError:
        sys.exit(1)
    customPath = args[0]
    oldPath = os.path.join(args[1], args[2])
    newPath = os.path.join(args[1], args[3])
    checker = ScriptChecker()
    checker.run(customPath, oldPath, newPath)
    # Well-known XML files that are compared outside of the bean/script scan.
    myXML = {
        'repo-web.xml': { 'path': ''},
        'share-web.xml': { 'path': ''},
        'share-config-custom.xml': { 'path': ''}
    }
    oldXML = {
        'repo-web.xml': { 'path': os.path.join(oldPath,'repo/root/projects/web-client/source/web/WEB-INF/web.xml')},
        # 'share-web.xml': { 'path': os.path.join(oldPath,'share/share/src/main/webapp/WEB-INF/web.xml')},
        'share-web.xml': { 'path': os.path.join(oldPath,'share/WEB-INF/web.xml')},
        'share-config-custom.xml': { 'path': os.path.join(oldPath,'repo/root/packaging/installer/src/main/resources/bitrock/bitrock/alfresco/shared/web-extension/share-config-custom.xml')}
    }
    newXML = {
        'repo-web.xml': { 'path': os.path.join(newPath,'repo/root/projects/web-client/source/web/WEB-INF/web.xml')},
        'share-web.xml': { 'path': os.path.join(newPath,'share/share/src/main/webapp/WEB-INF/web.xml')},
        'share-config-custom.xml': { 'path': os.path.join(newPath,'repo/root/packaging/installer/src/main/resources/bitrock/bitrock/alfresco/shared/web-extension/share-config-custom.xml')}
    }
    checker.comparitor.compareXML(myXML, oldXML, newXML)
|
wrighting/upgrade-assist | src/xmlUtil.py | import string
def cmp_to_key(mycmp):
    """Convert a cmp= style comparison function into a key= function.

    Delegates to the standard library's C-accelerated implementation,
    which provides the same wrapper-object semantics as the hand-rolled
    class this function previously defined (available on every Python 3,
    and on 2.7, which this module's str.maketrans usage already exceeds).
    """
    import functools  # local import keeps the module's top-level imports unchanged
    return functools.cmp_to_key(mycmp)
def cmp_el(a, b):
    """Three-way compare two ElementTree elements for a stable canonical
    ordering: tag, tail text, text, attributes, then children (recursively,
    each side's children pre-sorted by this same comparison).

    Text is compared with all whitespace stripped so formatting-only
    differences do not affect the ordering.  Returns -1, 0 or 1.
    """
    if a.tag != b.tag:
        return -1 if a.tag < b.tag else 1
    # Translation table that deletes every whitespace character.
    strip_ws = str.maketrans({ch: None for ch in string.whitespace})
    # Tail is compared before text, and only when both sides have a value.
    for left, right in ((a.tail, b.tail), (a.text, b.text)):
        if left is not None and right is not None:
            lval = left.translate(strip_ws)
            rval = right.translate(strip_ws)
            if lval != rval:
                return -1 if lval < rval else 1
    # Attributes compare as sorted (name, value) pairs.
    left_attrs = sorted(a.attrib.items())
    right_attrs = sorted(b.attrib.items())
    if left_attrs != right_attrs:
        return -1 if left_attrs < right_attrs else 1
    # Recurse into the children, each list sorted into canonical order first.
    # NOTE: like the original implementation, zip() stops at the shorter
    # child list, so a surplus of children on one side is not detected here.
    left_children = sorted(list(a), key=cmp_to_key(cmp_el))
    right_children = sorted(list(b), key=cmp_to_key(cmp_el))
    for lchild, rchild in zip(left_children, right_children):
        outcome = cmp_el(lchild, rchild)
        if outcome:
            return outcome
    # must be equal
    return 0
|
wrighting/upgrade-assist | src/report.py | <reponame>wrighting/upgrade-assist<filename>src/report.py
class Report(object):
    """Console reporter used by the upgrade-assist tools.

    All output goes to stdout; warnings are currently suppressed.
    """

    def actionRequired(self, message, customFile, oldFile, newFile):
        """Emit a visually separated block telling the user which
        customised file needs a manual review, and why."""
        for line in (
            "",
            "You need to check: " + customFile,
            message,
            "Old file:" + oldFile,
            "New file:" + newFile,
            "",
        ):
            print(line)

    def info(self, message):
        """Informational progress output."""
        print(message)

    def warning(self, message):
        """Warnings are deliberately silenced for now."""

    def error(self, message):
        """Errors are printed like info messages."""
        print(message)
|
wrighting/upgrade-assist | src/comparitor.py | <filename>src/comparitor.py
import filecmp
import difflib
import sys
import os
class Comparitor(object):
    """Compares well-known XML files between the old and new product
    versions and reports which customisations need manual review."""

    def __init__(self, reporter):
        self.reporter = reporter

    def compareXML(self, myOtherXML, oldOtherXML, newOtherXML):
        """For each customised XML file, diff the old and new product copies.

        Each argument maps a logical file name to {'path': <filesystem path>}.

        BUG FIX: the old/new paths are now looked up with .get() — previously
        `oldOtherXML[xmlFile]['path']` was evaluated before the membership
        checks, so a file missing from either version raised KeyError and the
        elif reporting branches below were unreachable.
        """
        for xmlFile in myOtherXML:
            oldEntry = oldOtherXML.get(xmlFile)
            newEntry = newOtherXML.get(xmlFile)
            oldPath = oldEntry['path'] if oldEntry else None
            newPath = newEntry['path'] if newEntry else None
            if oldPath and os.path.isfile(oldPath) and newPath and os.path.isfile(newPath):
                self.reporter.info("XML file in all versions:" + xmlFile)
                if filecmp.cmp(oldPath, newPath):
                    self.reporter.info("No change between versions:" + xmlFile)
                else:
                    self.reporter.actionRequired("Different XML file:", myOtherXML[xmlFile]['path'], oldPath, newPath)
                    # Show a context diff of old vs new so the user can see
                    # what changed between the shipped versions.
                    with open(oldPath) as fromf, open(newPath) as tof:
                        fromlines, tolines = list(fromf), list(tof)
                    diff = difflib.context_diff(fromlines, tolines, fromfile=oldPath, tofile=newPath)
                    sys.stdout.writelines(diff)
            elif xmlFile in oldOtherXML:
                self.reporter.info("XML file not in new version:" + xmlFile)
            elif xmlFile in newOtherXML:
                self.reporter.info("XML file not in old version:" + xmlFile)
            else:
                self.reporter.info("XML file only in custom code:" + xmlFile + ":" + myOtherXML[xmlFile]['path'])
|
wrighting/upgrade-assist | src/test/XMLutilTest.py | import unittest
import os
import xmlUtil
import xml.etree.ElementTree as ET
import java_compare
import report
import copy
class XMLutilTest(unittest.TestCase):
    """Unit tests for xmlUtil.cmp_el; relies on XML fixture files under test/."""
    def testCompareDifferent(self):
        """Two beans with different id attributes must compare unequal."""
        filePath = 'test/bean-diff.xml'
        tree = ET.parse(filePath)
        bean1 = {}
        bean2 = {}
        # Always different because of the different id attributes
        for bean in tree.findall("bean[@id='SiteService_security_4.2.f']"):
            bean1 = bean
        for bean in tree.findall("bean[@id='SiteService_security_5.0.d']"):
            bean2 = bean
        cmpResult = xmlUtil.cmp_el(bean1, bean2)
        self.assertEqual(cmpResult,-1)
    def testCompareSame(self):
        """The same bean element must compare equal to itself."""
        filePath = 'test/bean-diff.xml'
        tree = ET.parse(filePath)
        bean1 = {}
        bean2 = {}
        for bean in tree.findall("bean[@id='SiteService_security_5.0.d']"):
            bean1 = bean
        for bean in tree.findall("bean[@id='SiteService_security_5.0.d']"):
            bean2 = bean
        cmpResult = xmlUtil.cmp_el(bean1, bean2)
        self.assertEqual(cmpResult,0)
    def testCollectAndCompareDifferent(self):
        """Beans collected from the old/new fixture files must differ."""
        filePath1 = 'test/old/public-services-security-context.xml'
        filePath2 = 'test/new/public-services-security-context.xml'
        reporter = report.Report()
        # NOTE(review): JavaCompare is constructed with one argument here but
        # with (customPath, reporter) in the checker's run() — confirm signature.
        jc = java_compare.JavaCompare(reporter)
        beans1 = jc.collectBeansFromFile(filePath1)
        beans2 = jc.collectBeansFromFile(filePath2)
        bean1 = beans1['SiteService_security']
        bean2 = beans2['SiteService_security']
        cmpResult = xmlUtil.cmp_el(bean1['element'], bean2['element'])
        print (bean1['path'])
        print (ET.tostring(bean1['element']))
        print (bean2['path'])
        print (ET.tostring(bean2['element']))
        self.assertEqual(cmpResult,-1)
# Allow running the tests directly: python XMLutilTest.py
if __name__ == '__main__':
    unittest.main()
|
ipspace/ansible_helpers | filter_plugins/confparse.py | <reponame>ipspace/ansible_helpers
from __future__ import unicode_literals
from __future__ import print_function
from ciscoconfparse import CiscoConfParse
def confparse_parent(config, parent, child):
    """Return a list of (match, parent_line, child_line) tuples, one per
    parent line matched by the *parent* regex.

    match is a boolean indicating whether *child* matched under that parent.
    parent_line is the text of the matched parent line.
    child_line is the text of the matched child line, or None when match is
    False.

    Raises ValueError if more than one child line matches a single parent.
    """
    results = []
    try:
        # ConfParse requires a list, not a string
        config = config.splitlines()
    except AttributeError:
        pass
    try:
        # Automatically handle if 'show run' from _command module
        config = config['stdout_lines'][0]
    except (KeyError, IndexError, TypeError):
        pass
    cfg_obj = CiscoConfParse(config)
    search_results = cfg_obj.find_objects(parent)
    for parent_line in search_results:
        child_results = parent_line.re_search_children(child)
        if child_results:
            if len(child_results) > 1:
                raise ValueError("Currently only a single child match is supported")
            results.append((True, parent_line.text, child_results[0].text))
        else:
            # Parent matched but no matching child underneath it.
            results.append((False, parent_line.text, None))
    return results
class FilterModule(object):
    """Ansible filter-plugin entry point: exposes confparse_parent as a
    Jinja2 filter."""
    def filters(self):
        # Maps the filter name used in playbooks to its implementation.
        return {
            'confparse_parent': confparse_parent,
        }
# Ad-hoc manual test: run this module directly against a local config dump.
if __name__ == "__main__":
    # Test code
    with open("config.txt") as f:
        config = f.read()
    confparse_parent(config, parent=r"^interface", child=r"switchport access vlan 100")
|
fferen/makeheaders | __main__.py | """
Copyright (c) 2012, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import string
import itertools
import re
import glob
import argparse
from collections import namedtuple
from parsing import utils as putils
# Match a whole-line C block comment (/* ... */) that may directly precede a
# function declaration/definition we want to harvest.  re.M anchors ^/$ per
# line; re.DOTALL lets the comment body span multiple lines.
COMMENT_RE = re.compile(r'^\s*(/\*.+?\*/)\s*$', flags=re.M | re.DOTALL)
class FuncData:
    """Value object describing one parsed C function.

    name   -- the function's identifier
    header -- everything from the return type through the closing paren
    endI   -- index just past the declaration/definition in the source
    cmt    -- the /* ... */ comment attached to it, if any
    """

    def __init__(self, name, header, endI, cmt=None):
        self.name = name
        self.header = header
        self.endI = endI
        self.cmt = cmt

    def __repr__(self):
        parts = [repr(self.name), repr(self.header), repr(self.endI)]
        if self.cmt is not None:
            parts.append(repr(self.cmt))
        return '(' + ', '.join(parts) + ')'
def printV(*_args):
    """Verbose print: echo the arguments space-joined unless --quiet was given.

    NOTE(review): depends on the module-level `args` namespace parsed below;
    calling this before argument parsing raises NameError.
    """
    if args.verbose:
        # print(...) with a single argument is valid on both Python 2 and
        # Python 3, unlike the previous `print x` statement form.
        print(' '.join(str(arg) for arg in _args))
def isWS(c):
    """True when *c* occurs in (or is a substring of) string.whitespace."""
    return string.whitespace.find(c) != -1
def getFuncData(code, i, fromDefn=False):
    """
    Return function declaration data starting from i, or None if i does not mark
    the start of a function declaration.

    If fromDefn is True, i must mark the start of a definition instead, and the
    return value is a FuncData object:

    (name=<function name>, header=<function header>, endI=<index after closing brace>)

    If False, return value looks like this:

    (name=<function name>, header=<function header>, endI=<index after semicolon>)

    >>> getFuncData('int main();', 0)
    ('main', 'int main()', 10)
    >>> getFuncData('int main() {return 0;}', 0, fromDefn=True)
    ('main', 'int main()', 22)
    >>> print getFuncData('int main();', 0, fromDefn=True)
    None
    """
    # Locate the parameter-list parentheses; no parens means no function here.
    try:
        parenStartI, parenEndI = putils.findMatching(code, '(', ')', startI=i)
    except ValueError:
        return None
    header = code[i:parenEndI].strip()
    if not header[0].isalpha():
        return None
    # braces not allowed in function declarations; this distinguishes from
    # struct, etc.
    if '{' in header or '}' in header:
        return None
    # check for control flow statements (if, while, etc): a real function has
    # at least a return type token before the name.
    if len(code[i:parenStartI].split()) <= 1:
        return None
    # Last token before '(' is the name; strip pointer stars from it.
    name = code[i:parenStartI].split()[-1].lstrip('*')
    if not fromDefn:
        # NOTE(review): if no ';' follows, find() returns -1 and endI becomes
        # 0 — this case looks unhandled.
        return FuncData(
            name=name,
            header=header,
            endI=code.find(';', parenEndI) + 1
        )
    # check if next char after parentheses is '{'
    # NOTE(review): iterator .next() is Python 2 only; the rest of this
    # module also targets Python 2 (print statements).
    if itertools.dropwhile(isWS, code[parenEndI:]).next() != '{':
        return None
    return FuncData(
        name=name,
        header=header,
        endI=putils.findMatching(code, '{', '}', startI=parenEndI)[1]
    )
def makeDecl(fData):
    """Render *fData* as a header-file declaration, with its source comment
    (when present) on the line above."""
    prefix = fData.cmt + '\n' if fData.cmt else ''
    return prefix + fData.header + ';'
# Command-line interface: one or more glob patterns of C source files, plus
# an optional -q/--quiet switch that silences progress output (printV).
parser = argparse.ArgumentParser(
    description='Given C source file(s) as input, creates/updates header files with relevant function declarations.'
)
parser.add_argument(
    'files',
    metavar='PATTERN',
    nargs='+',
    help='source files to process'
)
parser.add_argument(
    '-q',
    '--quiet',
    dest='verbose',
    default=True,
    action='store_false',
    help="don't print what it's doing"
)
args = parser.parse_args()
# Main driver: for every matched .c file, harvest commented function
# definitions and create/refresh the sibling .h file in place.
for path in itertools.chain(*(glob.glob(pat) for pat in args.files)):
    # Sibling header path: foo.c -> foo.h
    newPath = path.rsplit('.', 1)[0] + '.h'
    code = open(path).read()
    printV('opened', path)
    # extract function definitions with documentation
    defnData = []
    startI = 0
    while True:
        match = COMMENT_RE.search(code, startI)
        if not match:
            break
        cmt = match.group(1)
        # A comment only counts if a function definition follows it directly.
        fData = getFuncData(code, match.end(), fromDefn=True)
        if fData is None:
            startI = match.end()
        else:
            fData.cmt = cmt
            defnData.append(fData)
            startI = fData.endI
    # remove local (private) functions, signified by starting with '_'
    # (static functions are private too and are excluded likewise)
    defnData = [d for d in defnData \
            if not d.name.startswith('_') and not d.header.startswith('static')]
    # mapping of name to FuncData
    nameToDefn = dict((data.name, data) for data in defnData)
    printV('read %d function definitions' % len(defnData))
    ## from pprint import pprint
    ## pprint(defnData)
    ## print len(defnData)
    changed = False
    if os.path.isfile(newPath):
        headCode = open(newPath, 'r').read()
        printV('opened', newPath)
    else:
        # New header: seed it with an include guard.
        changed = True
        headCode = '#pragma once\n'
    # Walk the existing header, updating/deleting declarations in place.
    startI = 0
    while True:
        match = COMMENT_RE.search(headCode, startI)
        if not match:
            break
        cmt, decl = match.group(1), getFuncData(headCode, match.end())
        if decl is None:
            startI = match.end()
            continue
        if decl.name in nameToDefn:
            printV('found', decl.name)
            decl.cmt = cmt
            defn = nameToDefn[decl.name]
            # Re-emit the declaration if its comment or header changed.
            if decl.cmt != defn.cmt or decl.header != defn.header:
                changed = True
                printV('updating', decl.name)
                headCode = '%s%s%s' \
                        % (headCode[:match.start(1)], makeDecl(defn), headCode[decl.endI:])
            # Consume the definition so only unseen ones get appended below.
            del nameToDefn[decl.name]
        else:
            # Declared in the header but gone from the source: drop it.
            printV('%s not found in %s, deleting' % (decl.name, path))
            headCode = '%s%s' \
                    % (headCode[:match.start(1)], headCode[decl.endI:])
        startI = decl.endI
    # Anything left over is new — append it to the header.
    for defn in nameToDefn.values():
        changed = True
        printV('adding', defn.name)
        headCode += '\n' + makeDecl(defn) + '\n'
    if changed:
        open(newPath, 'w').write(headCode)
        printV('wrote', newPath)
    else:
        printV('nothing changed')
handsomeWeiye/TaobaoSpider | goods_spider.py | import os
import re
import json
import time
import random
import requests
import pandas as pd
from retrying import retry
from taobao_login import TaoBaoLogin
# Search keyword (Chinese for "snacks").
goodsSearchName = '零食'
# Silence urllib3 TLS warnings (requests are made with verify=False).
requests.packages.urllib3.disable_warnings()
# Login and scraping must share the same Session object (cookies).
req_session = requests.Session()
# Save path for the scraped-goods Excel file.
GOODS_EXCEL_PATH = 'taobao_goods_{}.xlsx'.format(goodsSearchName)
class GoodsSpider:
    """Taobao goods-search spider: logs in once, scrapes paginated search
    results and appends them to an Excel workbook."""

    def __init__(self, goodsSearchName):
        self.q = goodsSearchName
        # request timeout, seconds
        self.timeout = 15
        self.goods_list = []
        # Taobao login; must reuse the shared module-level session.
        tbl = TaoBaoLogin(req_session)
        tbl.login()

    @retry(stop_max_attempt_number=3)
    def spider_goods(self, page):
        """
        Fetch one result page and append its records to the Excel file.

        :param page: zero-based Taobao result-page index
        :return: None
        :raises RuntimeError: when the goods JSON cannot be found in the page
        """
        s = page * 44
        # Search URL: q is the keyword, s=page*44 is the record offset.
        search_url = f'https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&q={self.q}&suggest=history_1&_input_charset=utf-8&wq=biyunt&suggest_query=biyunt&source=suggest&bcoffset=4&p4ppushleft=%2C48&s={s}&data-key=s&data-value={s + 44}'
        # NOTE(review): Taobao is aggressive about anti-scraping; if requests
        # start failing, route them through (frequently rotated) proxies via
        # the `proxies=` argument of req_session.get below.
        headers = {
            'referer': 'https://www.taobao.com/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
        }
        response = req_session.get(search_url, headers=headers,
                                   verify=False, timeout=self.timeout)
        # The goods data is embedded in the page as a JS assignment.
        goods_match = re.search(r'g_page_config = (.*?)}};', response.text)
        # no data matched in the page
        if not goods_match:
            print('提取页面中的数据失败!')
            print(response.text)
            raise RuntimeError
        goods_str = goods_match.group(1) + '}}'
        goods_list = self._get_goods_info(goods_str)
        self._save_excel(goods_list)

    def _get_goods_info(self, goods_str):
        """
        Parse the embedded JSON and extract title, price, seller location,
        sales volume, shop info and the review URL for each item.

        :param goods_str: JSON data as a string
        :return: list of goods dicts
        """
        goods_json = json.loads(goods_str)
        goods_items = goods_json['mods']['itemlist']['data']['auctions']
        goods_list = []
        for goods_item in goods_items:
            goods = {
                'goodsId': goods_item["nid"],
                'title': goods_item['raw_title'],
                'price': goods_item['view_price'],
                'location': goods_item['item_loc'],
                'sales': goods_item['view_sales'],
                "shop_id": goods_item["user_id"],
                "shop_name": goods_item["nick"],
                "comment_count": goods_item["comment_count"],
                'comment_url': goods_item['comment_url']}
            goods_list.append(goods)
        return goods_list

    def _save_excel(self, goods_list):
        """
        Append the scraped records to the Excel workbook.

        :param goods_list: list of goods dicts scraped from one page
        :return: None
        """
        # pandas has no Excel append mode: read the existing sheet, extend
        # it, then rewrite the whole file.
        if os.path.exists(GOODS_EXCEL_PATH):
            df = pd.read_excel(GOODS_EXCEL_PATH)
            # BUG FIX: DataFrame.append was removed in pandas 2.0; concat is
            # the supported equivalent.
            df = pd.concat([df, pd.DataFrame(goods_list)], ignore_index=True)
        else:
            df = pd.DataFrame(goods_list)
        # BUG FIX: ExcelWriter.save() and to_excel(encoding=...) were removed
        # in pandas 2.0/1.5; the context manager saves and closes the file.
        # `columns` fixes the column order of the output sheet.
        with pd.ExcelWriter(GOODS_EXCEL_PATH) as writer:
            df.to_excel(excel_writer=writer,
                        columns=['goodsId', 'title', 'price', 'location', 'sales', 'shop_id', 'shop_name', 'comment_count', 'comment_url'],
                        index=False, sheet_name='Sheet')

    def patch_spider_goods(self):
        """
        Scrape Taobao goods in bulk; if scraping stalls after ~20 pages,
        run it again in smaller page ranges.

        :return: None
        """
        # When restarting from scratch, delete GOODS_EXCEL_PATH first so old
        # rows are not duplicated.
        # For a first try, start with just 3 pages instead of 100.
        for i in range(0, 100):
            print('第%d页' % (i + 1))
            self.spider_goods(i)
            # random pause between pages to look less like a bot
            time.sleep(random.randint(10, 15))
if __name__ == '__main__':
    try:
        gs = GoodsSpider(goodsSearchName)
        gs.patch_spider_goods()
    # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit,
    # making the spider impossible to stop cleanly with Ctrl-C.
    except Exception:
        print('遭到淘宝的反爬虫测试,目前爬取的数据已保存,可进行分析')
|
shuntingyard/flowproc | src/flowproc/v9_fieldtypes.py | <filename>src/flowproc/v9_fieldtypes.py
# -*- coding: utf-8 -*-
"""
Code snippet copied from:
https://raw.githubusercontent.com/bitkeks/python-netflow-v9-softflowd/master/src/netflow/collector_v9.py
Copyright 2017, 2018 <NAME> <<EMAIL>>
Licensed under MIT License. See LICENSE.
Noted by shuntingyard:
In resource
https://www.cisco.com/en/US/technologies/tk648/tk362/technologies_white_paper09186a00800a3db9.html
Cisco states:
For the information on the field types with the numbers
between 128 and 32768, please refer to the IANA registry of
IPFIX information elements at
https://www.iana.org/assignments/ipfix/ipfix.xhtml.
"""
# NetFlow v9 field-type number -> human-readable label.
LABEL = {
    # Cisco specs for NetFlow v9
    # https://tools.ietf.org/html/rfc3954
    # https://www.cisco.com/en/US/technologies/tk648/tk362/technologies_white_paper09186a00800a3db9.html
    1: 'IN_BYTES',
    2: 'IN_PKTS',
    3: 'FLOWS',
    4: 'PROTOCOL',
    5: 'SRC_TOS',
    6: 'TCP_FLAGS',
    7: 'L4_SRC_PORT',
    8: 'IPV4_SRC_ADDR',
    9: 'SRC_MASK',
    10: 'INPUT_SNMP',
    11: 'L4_DST_PORT',
    12: 'IPV4_DST_ADDR',
    13: 'DST_MASK',
    14: 'OUTPUT_SNMP',
    15: 'IPV4_NEXT_HOP',
    16: 'SRC_AS',
    17: 'DST_AS',
    18: 'BGP_IPV4_NEXT_HOP',
    19: 'MUL_DST_PKTS',
    20: 'MUL_DST_BYTES',
    21: 'LAST_SWITCHED',
    22: 'FIRST_SWITCHED',
    23: 'OUT_BYTES',
    24: 'OUT_PKTS',
    25: 'MIN_PKT_LNGTH',
    26: 'MAX_PKT_LNGTH',
    27: 'IPV6_SRC_ADDR',
    28: 'IPV6_DST_ADDR',
    29: 'IPV6_SRC_MASK',
    30: 'IPV6_DST_MASK',
    31: 'IPV6_FLOW_LABEL',
    32: 'ICMP_TYPE',
    33: 'MUL_IGMP_TYPE',
    34: 'SAMPLING_INTERVAL',
    35: 'SAMPLING_ALGORITHM',
    36: 'FLOW_ACTIVE_TIMEOUT',
    37: 'FLOW_INACTIVE_TIMEOUT',
    38: 'ENGINE_TYPE',
    39: 'ENGINE_ID',
    40: 'TOTAL_BYTES_EXP',
    41: 'TOTAL_PKTS_EXP',
    42: 'TOTAL_FLOWS_EXP',
    # 43 vendor proprietary
    44: 'IPV4_SRC_PREFIX',
    45: 'IPV4_DST_PREFIX',
    46: 'MPLS_TOP_LABEL_TYPE',
    47: 'MPLS_TOP_LABEL_IP_ADDR',
    48: 'FLOW_SAMPLER_ID',
    49: 'FLOW_SAMPLER_MODE',
    # BUG FIX: was the truncated string 'NTERVAL'; Cisco defines field 50 as
    # FLOW_SAMPLER_RANDOM_INTERVAL.
    50: 'FLOW_SAMPLER_RANDOM_INTERVAL',
    # 51 vendor proprietary
    52: 'MIN_TTL',
    53: 'MAX_TTL',
    54: 'IPV4_IDENT',
    55: 'DST_TOS',
    56: 'IN_SRC_MAC',
    57: 'OUT_DST_MAC',
    58: 'SRC_VLAN',
    59: 'DST_VLAN',
    60: 'IP_PROTOCOL_VERSION',
    61: 'DIRECTION',
    62: 'IPV6_NEXT_HOP',
    # NOTE(review): 'BPG' follows Cisco's white paper verbatim (their typo
    # for BGP); kept as-is since downstream consumers may match on it.
    63: 'BPG_IPV6_NEXT_HOP',
    64: 'IPV6_OPTION_HEADERS',
    # 65-69 vendor proprietary
    70: 'MPLS_LABEL_1',
    71: 'MPLS_LABEL_2',
    72: 'MPLS_LABEL_3',
    73: 'MPLS_LABEL_4',
    74: 'MPLS_LABEL_5',
    75: 'MPLS_LABEL_6',
    76: 'MPLS_LABEL_7',
    77: 'MPLS_LABEL_8',
    78: 'MPLS_LABEL_9',
    79: 'MPLS_LABEL_10',
    80: 'IN_DST_MAC',
    81: 'OUT_SRC_MAC',
    82: 'IF_NAME',
    83: 'IF_DESC',
    84: 'SAMPLER_NAME',
    85: 'IN_PERMANENT_BYTES',
    86: 'IN_PERMANENT_PKTS',
    # 87 vendor property
    88: 'FRAGMENT_OFFSET',
    89: 'FORWARDING_STATUS',
    90: 'MPLS_PAL_RD',
    91: 'MPLS_PREFIX_LEN',  # Number of consecutive bits in the MPLS prefix length.
    92: 'SRC_TRAFFIC_INDEX',  # BGP Policy Accounting Source Traffic Index
    93: 'DST_TRAFFIC_INDEX',  # BGP Policy Accounting Destination Traffic Index
    94: 'APPLICATION_DESCRIPTION',  # Application description
    95: 'APPLICATION_TAG',  # 8 bits of engine ID, followed by n bits of classification
    96: 'APPLICATION_NAME',  # Name associated with a classification
    98: 'postipDiffServCodePoint',  # The value of a Differentiated Services Code Point (DSCP) encoded in the Differentiated Services Field, after modification
    99: 'replication_factor',  # Multicast replication factor
    100: 'DEPRECATED',  # DEPRECATED
    102: 'layer2packetSectionOffset',  # Layer 2 packet section offset. Potentially a generic offset
    103: 'layer2packetSectionSize',  # Layer 2 packet section size. Potentially a generic size
    104: 'layer2packetSectionData',  # Layer 2 packet section data
    # 105-127 reserved for future use by Cisco
    # ASA extensions
    # https://www.cisco.com/c/en/us/td/docs/security/asa/special/netflow/guide/asa_netflow.html
    148: 'NF_F_CONN_ID',  # An identifier of a unique flow for the device
    176: 'NF_F_ICMP_TYPE',  # ICMP type value
    177: 'NF_F_ICMP_CODE',  # ICMP code value
    178: 'NF_F_ICMP_TYPE_IPV6',  # ICMP IPv6 type value
    179: 'NF_F_ICMP_CODE_IPV6',  # ICMP IPv6 code value
    225: 'NF_F_XLATE_SRC_ADDR_IPV4',  # Post NAT Source IPv4 Address
    226: 'NF_F_XLATE_DST_ADDR_IPV4',  # Post NAT Destination IPv4 Address
    227: 'NF_F_XLATE_SRC_PORT',  # Post NATT Source Transport Port
    228: 'NF_F_XLATE_DST_PORT',  # Post NATT Destination Transport Port
    281: 'NF_F_XLATE_SRC_ADDR_IPV6',  # Post NAT Source IPv6 Address
    282: 'NF_F_XLATE_DST_ADDR_IPV6',  # Post NAT Destination IPv6 Address
    233: 'NF_F_FW_EVENT',  # High-level event code
    33002: 'NF_F_FW_EXT_EVENT',  # Extended event code
    323: 'NF_F_EVENT_TIME_MSEC',  # The time that the event occurred, which comes from IPFIX
    152: 'NF_F_FLOW_CREATE_TIME_MSEC',
    231: 'NF_F_FWD_FLOW_DELTA_BYTES',  # The delta number of bytes from source to destination
    232: 'NF_F_REV_FLOW_DELTA_BYTES',  # The delta number of bytes from destination to source
    33000: 'NF_F_INGRESS_ACL_ID',  # The input ACL that permitted or denied the flow
    33001: 'NF_F_EGRESS_ACL_ID',  # The output ACL that permitted or denied a flow
    40000: 'NF_F_USERNAME',  # AAA username
    # PaloAlto PAN-OS 8.0
    # https://www.paloaltonetworks.com/documentation/80/pan-os/pan-os/monitoring/netflow-monitoring/netflow-templates
    346: 'PANOS_privateEnterpriseNumber',
    56701: 'PANOS_APPID',
    56702: 'PANOS_USERID'
}
# Scope field types used by NetFlow v9 options templates.
SCOPE_LABEL = {
    # https://tools.ietf.org/html/rfc3954#section-6.1
    1: "System",
    2: "Interface",
    3: "Line Card",
    4: "Cache",
    5: "Template",
}
|
shuntingyard/flowproc | src/flowproc/testasync.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Experimental
"""
import json
import logging
from datetime import datetime
from flowproc import __version__
from flowproc.collector_state import Collector
from flowproc.util import stopwatch
from flowproc.v9_classes import OptionsTemplate
from flowproc.v9_classes import Template
from flowproc import v9_fieldtypes
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
# globals
logger = logging.getLogger(__name__)
class TreeVisitor:
    """
    Return collector state nested in JSON format

    Visitor over the Collector -> Exporter -> ObservationDomain tree; each
    visit_* method is invoked via the node's accept(self).
    """
    @stopwatch
    def visit_Collector(self, host):
        """Serialize the whole collector state to a JSON string."""
        collector = {
            "flowproc": __version__,
            "at": str(datetime.utcnow()),  # TODO add timezone info
            # NOTE(review): "someting" typo is emitted verbatim in the JSON;
            # fixing it would change the output consumed by clients.
            "parser": "V9 someting...",
            # "transport": "UDP",
            # "security": None,
        }
        exp = {}
        collector["exporters"] = exp
        for child in host.children.values():
            exp[child.ipa] = []
            exp[child.ipa].append(child.accept(self))
        # return as is, pretty-printing to be done on client side
        return json.dumps(collector)
    def visit_Exporter(self, host):
        """Build the per-observation-domain dict for one exporter."""
        domain = {}
        for child in host.children.values():
            attr = {}
            domain[child.odid] = attr
            # None entries are skipped; see the parser's register_optrec path.
            attr["options_records"] = [rec for rec in child.optrecs if rec is not None]
            attr["templates"] = child.accept(self)
        return domain
    def visit_ObservationDomain(self, host):
        """Describe every (options) template known to one observation domain."""
        templates = {}
        for child in host.children.values():
            attr = {}
            templates[child.tid] = attr
            # Format returned by str( timedelta ) e.g '0:02:41.411545' -
            # we just show the first part and discard fractions of seconds.
            attr["age"] = str(datetime.utcnow() - child.lastwrite).split(".")[
                0
            ]
            if isinstance(child, Template):
                attr["types"] = [
                    v9_fieldtypes.LABEL.get(n, n) for n in child.types
                ]
            if isinstance(child, OptionsTemplate):
                attr["scope_types"] = [
                    v9_fieldtypes.SCOPE_LABEL.get(n, n)
                    for n in child.scope_types
                ]
                attr["option_types"] = [
                    v9_fieldtypes.LABEL.get(n, n) for n in child.option_types
                ]
                attr["option_lengths"] = child.option_lengths
        return templates
def stats():
    """Return a multi-line string with basic collector statistics."""
    rows = [
        "Collector version: {}".format(__version__),
        "Collector started: {}".format(Collector.created),
        "Packets processed: {:9d}".format(Collector.packets),
        "Headers record count: {:9d}".format(Collector.count),
        "Records processed: {:9d}".format(Collector.record_count),
        "Records diff: {:9d}".format(Collector.count - Collector.record_count),
    ]
    return "\n".join(rows)
|
shuntingyard/flowproc | src/flowproc/v9_parser.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Parser for NetFlow V9 packets
"""
import logging
import struct
from ipaddress import ip_address
from flowproc import util
from flowproc import v9_fieldtypes
from flowproc.collector_state import Collector
from flowproc.v9_classes import OptionsTemplate
from flowproc.v9_classes import Template
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
# global settings
logger = logging.getLogger(__name__)
LIM = 1800  # out of sequence tdiff limit for discarding templates
# Mutable working copy of the limit; LIM stays as the configured default.
lim = LIM
@util.stopwatch
def parse_options_data_records(ipa, odid, template, flowset):
    """
    Responsibility: parse Data FlowSet with Options Data Records

    Args:
        ipa      `str`: ip address of exporter
        odid     `int`: Observation Domain ID (aka Source ID)
        template `OptionsTemplate`
        flowset  `bytes`: the DataFlowSet

    Return:
        number of records processed
    """
    # --- scope fields ---
    scopes = {}
    unpacked = []
    start = 0
    stop = start
    for length in template.scope_lengths:
        stop += length
        unpacked.append(
            struct.unpack(util.ffs(length), flowset[start:stop])[0]
        )
        start = stop
    labels = [
        v9_fieldtypes.SCOPE_LABEL.get(n, n) for n in template.scope_types
    ]
    scopes.update(list(zip(labels, unpacked)))
    # --- option fields ---
    options = {}
    unpacked = []
    # To fix a few bad field lengths, we work with length/type-pairs
    pair = list(zip(template.option_lengths, template.option_types))
    for length, ftype in pair:
        stop += length
        try:
            unpacked.append(
                struct.unpack(
                    util.ffs(length, ftype=ftype), flowset[start:stop]
                )[0]
            )
        except KeyError:
            # remove from 1st trailing \x00 and decode to `str`
            unpacked.append(flowset[start:stop].partition(b"\0")[0].decode())
        start = stop
    labels = [v9_fieldtypes.LABEL.get(n, n) for n in template.option_types]
    options.update(list(zip(labels, unpacked)))
    # register record with corresponding odid
    # BUG FIX: dict.update() returns None, so the previous
    # `optrec = scopes.update(options)` registered and printed None instead
    # of the merged record (the downstream `rec is not None` filter then
    # dropped every entry).
    scopes.update(options)
    optrec = scopes
    Collector.register_optrec(ipa, odid, optrec)
    print("OptionsDataRec: {}".format(optrec))
    reclen = sum(template.scope_lengths) + sum(template.option_lengths)
    record_count = len(flowset) // reclen  # divide // to rule out padding
    return record_count
@util.stopwatch
def parse_data_flowset(ipa, odid, tid, flowset):
    """
    Responsibility: parse Data FlowSet

    Args:
        ipa     `str`: ip address of exporter
        odid    `int`: Observation Domain ID (aka Source ID)
        tid     `int`: the setid here IS the tid (aka Template ID)
        flowset `bytes`: the DataFlowSet

    Return:
        number of records processed (0 when no matching template is known yet)
    """
    record_count = 0
    # Look up the template previously registered for this exporter/domain.
    template = Collector.get_qualified(ipa, odid, tid)
    if template:
        if isinstance(template, OptionsTemplate):
            # Options data uses its own record layout.
            return parse_options_data_records(ipa, odid, template, flowset)
        else:
            record = {}
            unpacked = []
            start = 0
            stop = start
            # Slice the flowset into fields using the template's lengths.
            for length in template.lengths:
                stop += length
                unpacked.append(util.vunpack(flowset[start:stop]))
                start = stop
            labels = [v9_fieldtypes.LABEL.get(n, n) for n in template.types]
            record.update(list(zip(labels, unpacked)))
            # replace ont the fly, just for testing/ plausibility checking
            # TODO Remove later!
            for k, v in record.items():
                if k in [
                    "IPV4_SRC_ADDR",
                    "IPV4_DST_ADDR",
                    "IPV4_NEXT_HOP",
                    "IPV6_SRC_ADDR",
                    "IPV6_DST_ADDR",
                    "IPV6_NEXT_HOP",
                ]:
                    record[k] = ip_address(v).exploded
            print("DataRec: {}".format(record))
            reclen = sum(template.lengths)
            record_count = len(flowset) // reclen  # divide // to rule out padding
    else:
        # TODO Stash all these away for later processing!
        # NOTE(review): flowsets arriving before their template are silently
        # dropped here.
        pass
    return record_count
@util.stopwatch
def parse_options_template_flowset(ipa, odid, packed):
    """
    Responsibility: parse Options Template FlowSet

    Args:
        ipa    `str`: ip address of exporter
        odid   `int`: Observation Domain ID (aka Source ID)
        packed `bytes`: data to parse

    Return:
        number of records processed
    """
    record_count = 0
    start = 0
    # If padding, its length is 2, if not, at least 6 bytes are required.
    while start < len(packed) - 2:
        stop = start + 6
        # next Template ID, Option Scope Length and Option Length
        tid, scopelen, optionlen = struct.unpack("!HHH", packed[start:stop])
        start = stop
        # scope data
        stop += scopelen
        # NOTE(review): these asserts disappear under `python -O`; an explicit
        # length check raising an error would be more robust.
        assert scopelen % 4 == 0  # assert before division and cast to `int`
        scopes = struct.unpack(
            "!" + "HH" * (scopelen // 4), packed[start:stop]
        )
        start = stop
        # option data
        stop += optionlen
        assert optionlen % 4 == 0  # assert before division and cast to `int`
        options = struct.unpack(
            "!" + "HH" * (optionlen // 4), packed[start:stop]
        )
        start = stop
        # Registering the template with the collector is a constructor side effect.
        OptionsTemplate(ipa, odid, tid, scopes, options)
        record_count += 1
    return record_count
@util.stopwatch
def parse_template_flowset(ipa, odid, packed):
    """
    Responsibility: parse Template FlowSet

    Args:
        ipa    `str`: ip address of exporter
        odid   `int`: Observation Domain ID (aka Source ID)
        packed `bytes`: data to parse

    Return:
        number of records processed
    """
    record_count = 0
    offset = 0
    total = len(packed)  # simple loop bound: no padding at end of set
    while offset < total:
        # Next Template ID and Field Count, then fieldcount (type, length) pairs.
        tid, fieldcount = struct.unpack_from("!HH", packed, offset)
        offset += 4
        tdata = struct.unpack_from("!" + "HH" * fieldcount, packed, offset)
        offset += fieldcount * 4
        # Registering the template with the collector is a constructor side effect.
        Template(ipa, odid, tid, tdata)
        record_count += 1
    return record_count
def dispatch_flowset(ipa, odid, setid, packed):
    """
    Responsibility: dispatch FlowSet data to the appropriate parser

    Args:
        ipa `str`: ip address of exporter
        odid `int`: Observation Domain ID (aka Source ID)
        setid `int`: the setid here IS the tid (aka Template ID)
        packed `bytes`: data to dispatch
    Return:
        number of records processed
    """
    # setid 0: Template FlowSet
    if setid == 0:
        return parse_template_flowset(ipa, odid, packed)
    # setid 1: Options Template FlowSet
    if setid == 1:
        return parse_options_template_flowset(ipa, odid, packed)
    # setid > 255: Data FlowSet
    if 255 < setid < 65536:
        return parse_data_flowset(ipa, odid, setid, packed)
    # interval [2, 255] is reserved/unhandled
    logger.error(
        "No implementation for unknown ID {:3d} - {}".format(setid, packed)
    )
    return 0
def parse_packet(datagram, ipa):
    """
    Responsibility: parse UDP packet received from NetFlow V9 exporter

    Args:
        datagram `bytes`: next packet to parse
        ipa `str` or `int`: ip addr to use for exporter identification
    """
    record_count = 0
    # V9 packet header: version, count, sysUptime, unixSecs, seq, sourceID
    header = struct.unpack("!HHIIII", datagram[:20])
    ver, count, up, unixsecs, seq, odid = header
    # NOTE(review): `ver` is never checked here ("WITHOUT checks" below);
    # `up`/`unixsecs`/`seq` are currently unused in this function.
    packed = datagram[20:]
    while len(packed) > 0:
        # FlowSet header
        setid, setlen = struct.unpack("!HH", packed[:4])
        # data
        data = packed[4:setlen]
        record_count += dispatch_flowset(ipa, odid, setid, data)
        # advance to the next FlowSet (setlen includes the 4-byte header)
        packed = packed[setlen:]
    if count:
        if count != record_count:
            logger.warning(
                "Record account not balanced {}/{}".format(record_count, count)
            )
    logger.info(
        "Parsed {} WITHOUT checks, {}/{} recs processed from {}".format(
            header, record_count, count, ipa
        )
    )
    # stats
    Collector.packets += 1
    Collector.count += count
    if record_count:
        Collector.record_count += record_count
def parse_file(fh, ipa):
    """
    Responsibility: parse raw NetFlow V9 data from disk.

    Reads the stream as a sequence of FlowSets; a 2-byte value of 9 at a
    FlowSet boundary is interpreted as the start of the next packet header.

    Args:
        fh `BufferedReader`, BytesIO` etc: input file handle
        ipa `str` or `int`: ip addr to use for exporter identification
    """
    lastseq = None
    lastup = None
    count = None
    record_count = 0
    odid = None
    while True:
        pos = fh.tell()
        packed = fh.read(4)
        try:
            assert len(packed) == 4
        except AssertionError:
            # EOF
            return
        # Unpack, expecting the next FlowSet.
        setid, setlen = struct.unpack("!HH", packed)
        if setid != 9:
            packed = fh.read(setlen - 4)
            assert len(packed) == setlen - 4
            record_count += dispatch_flowset(ipa, odid, setid, packed)
        else:
            # value 9 at FlowSet position == version field of a new header
            # for completeness' sake
            if count:
                if count != record_count:
                    logger.warning(
                        "Record account not balanced {}/{}".format(
                            record_count, count
                        )
                    )
                else:
                    logger.debug("Processed {} records".format(count))
            # next packet header
            fh.seek(pos)
            packed = fh.read(20)
            assert len(packed) == 20
            header = struct.unpack("!HHIIII", packed)
            ver, count, up, unixsecs, seq, odid = header
            logger.info(header)
            # stats
            Collector.packets += 1
            Collector.count += count
            if record_count:
                Collector.record_count += record_count
            # sequence checks
            if lastup and lastseq:
                if seq != lastseq + 1:
                    updiff = up - lastup
                    logger.warning(
                        "Out of seq, lost {}, tdiff {:.1f} s".format(
                            seq - lastseq, round(updiff / 1000, 1)
                        )
                    )
                    # NOTE(review): `lim` is not defined in this function —
                    # presumably a module-level threshold in seconds; confirm.
                    if updiff > lim * 1000:
                        logger.warning("Discarding templates")
                        Template.discard_all()
            lastup = up
            lastseq = seq
            count = header[1]
            record_count = 0
|
shuntingyard/flowproc | src/flowproc/collector_state.py | <reponame>shuntingyard/flowproc<gh_stars>0
# -*- coding: utf-8 -*-
"""
Experiment on dealing with state
"""
import logging
from abc import ABC
from abc import abstractmethod
from collections import deque
from datetime import datetime
from ipaddress import ip_address
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
# globals
logger = logging.getLogger(__name__)
class AbstractTemplate(ABC):
    """
    The things every template class should implement
    """
    @abstractmethod
    def get_tid(self):
        # concrete templates return their Template ID (`int`)
        pass
class Visitable(ABC):
    """
    Use a generic way to make classes visitable (thanks Wikipedia)
    """
    def accept(self, visitor):
        # Dispatch to `visitor.visit_<QualifiedClassName>` — dots in the
        # qualname are flattened to underscores (e.g. nested classes).
        lookup = "visit_" + type(self).__qualname__.replace(".", "_")
        return getattr(visitor, lookup)(self)
class Collector:
    """
    Not to instantiate - contains exclusively class- and static methods.

    Root of the collector tree: Collector -> Exporter -> ObservationDomain
    -> template objects, plus a few running statistics counters.
    """

    children = {}  # ipa (`str`) -> Exporter
    created = datetime.now()
    packets = 0        # export packets seen
    count = 0          # records announced in packet headers
    record_count = 0   # records actually processed

    @classmethod
    def accept(cls, visitor):
        """Dispatch to the visitor method named after this class."""
        lookup = "visit_" + cls.__qualname__.replace(".", "_")
        return getattr(visitor, lookup)(cls)

    @classmethod
    def check_header(cls, ipa, packet):
        """
        Run sequence checks, restart checks etc. and manage templates
        behind the scenes.
        """
        pass

    @classmethod
    def get_qualified(cls, *args):
        """
        Get an object under this collector.

        Args (all optional, positional): ipa, odid, tid — the deeper the
        path, the deeper the returned node; `None` if the path is missing.
        """
        # A simple idiom to fill a fixed len list from (variable len) *args:
        path = [args[i] if i < len(args) else None for i in range(3)]
        return cls.accept(RetrievingVisitor(*path))

    @classmethod
    def register(cls, ipa, odid, template):
        """
        Create, update or replace anything implementing `AbstractTemplate`
        """
        cls.accept(RegisteringVisitor(ipa, odid, template))

    @classmethod
    def register_optrec(cls, ipa, odid, optrec):
        """
        Register an Options Data Record with the `ObservationDomain`
        given by path.

        Args:
            optrec `dict`: the record (param renamed from `dict`, which
                shadowed the builtin)
        Return:
            `True` on success, `False` when the path does not exist
        """
        try:
            cls.children[ipa].children[odid].optrecs.append(optrec)
            return True
        except KeyError:
            return False

    @classmethod
    def unregister(cls, ipa, odid):
        """
        Remove rightmost element in path (and its child nodes)
        """
        # TODO not implemented yet
class Exporter(Visitable):
    """
    One exporting device, identified by its (exploded) IP address.

    TODO Clarify relation to observation domains (and for V10) transport
    protocols.
    """

    def __init__(self, ipa):
        # normalize to the full exploded textual form
        self.ipa = ip_address(ipa).exploded
        self.children = {}  # odid -> ObservationDomain

    def __repr__(self):
        return self.ipa
class ObservationDomain(Visitable):
    """
    One observation domain within an exporter.

    TODO Clarify relation to exporters (and for V10) transport protocols.
    """

    def __init__(self, odid, bufsize=8):
        self.odid = int(odid)
        self.children = {}  # tid -> template object
        # bounded ring buffer of option data records collected
        self.optrecs = deque(maxlen=bufsize)

    def __repr__(self):
        return str(self.odid)
class RetrievingVisitor:
"""
Return the object on the path defined by ID values received as
'__init__' args if it exists, else return 'None'.
Args:
ipa `str`, `int`: ip address of exporter
odid `int`: Observation Domain ID
tid `int`: Template ID
TODO Document collector's class methods with similar text (and maybe don't
even document visitors this extensively, since they're implementation
internal).
"""
def __init__(self, ipa, odid, tid):
self.ipa = ipa
self.odid = odid
self.tid = tid
def visit_Collector(self, host):
if self.ipa is not None: # if next element in path
try:
return host.children[self.ipa].accept(self)
except KeyError:
return None
return host
def visit_Exporter(self, host):
if self.odid is not None: # if next element in path
try:
return host.children[self.odid].accept(self)
except KeyError:
return None
return host
def visit_ObservationDomain(self, host):
if self.tid is not None: # if next element in path
try:
# Template level, just get - don't visit!
return host.children[self.tid]
except KeyError:
return None
return host
class RegisteringVisitor:
    """
    Store the template object received as '__init__' arg and create other
    objects on the path (Collector -> Exporter -> ObservationDomain) to it.
    """
    def __init__(self, ipa, odid, template_obj):
        self.ipa = ipa
        self.odid = odid
        self.template = template_obj
    def visit_Collector(self, host):
        # get-or-create the Exporter node, then descend
        exporter = host.children.get(self.ipa, Exporter(self.ipa))
        host.children[self.ipa] = exporter
        exporter.accept(self)
    def visit_Exporter(self, host):
        # get-or-create the ObservationDomain node, then descend
        domain = host.children.get(self.odid, ObservationDomain(self.odid))
        host.children[self.odid] = domain
        domain.accept(self)
    def visit_ObservationDomain(self, host):
        tid = self.template.get_tid()
        try:
            val = host.children[self.template.get_tid()]
            # Distinguish refresh (same definition, new timestamp) from a
            # genuine replacement by comparing reprs, which are expected to
            # exclude the timestamp.
            # hope this helps us work around the timestamp of refreshment
            if val.__repr__() == self.template.__repr__():
                logger.debug(
                    "Updating {} with tid {:d}".format(
                        type(self.template), tid
                    )
                )
            else:
                logger.warning(
                    "Replacing {} with tid {:d}".format(
                        type(self.template), tid
                    )
                )
        except KeyError:
            logger.info(
                "Creating {} with tid {:d}".format(type(self.template), tid)
            )
        # at last DO it
        host.children[tid] = self.template
class TraversingVisitor:
    """
    The precursor to stats, should it have a return value or just do something
    (like printing)?
    """

    def _walk(self, host, recurse):
        # print each child; optionally descend one level further
        for child in host.children.values():
            print(child)
            if recurse:
                child.accept(self)

    def visit_Collector(self, host):
        self._walk(host, True)

    def visit_Exporter(self, host):
        self._walk(host, True)

    def visit_ObservationDomain(self, host):
        # leaf level: list templates, do not descend
        self._walk(host, False)
class _TraversingDictVisitor:
"""
Mainly written for debugging
"""
def visit_Collector(self, host):
[print(k, type(k), v, type(v)) for k, v in host.children.items()]
for child in host.children.values():
child.accept(self)
def visit_Exporter(self, host):
[print(k, type(k), v, type(v)) for k, v in host.children.items()]
for child in host.children.values():
child.accept(self)
def visit_ObservationDomain(self, host):
[print(k, type(k), v, type(v)) for k, v in host.children.items()]
|
shuntingyard/flowproc | src/flowproc/flowprocctrl.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The flow collector daemon control program
"""
import argparse
import asyncio
import json
import pprint
import sys
from flowproc import __version__
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
def parse_args(args):
    """
    Parse command line parameters

    Args:
        args ([str]): argv-style list (without program name)
    Return:
        `argparse.Namespace` with `cmd` and `sock`
    """
    parser = argparse.ArgumentParser(
        description="Communicate with testlistener over UNIX socket"
    )
    parser.add_argument(
        dest="cmd", type=str, metavar="command", help="ping, stats"
    )
    parser.add_argument(
        "-s",
        "--sock",
        action="store",
        type=str,
        help="unix socket path for control",
    )
    parser.add_argument(
        "-V",
        action="version",
        version="flowproc {ver}".format(ver=__version__),
    )
    return parser.parse_args(args)
async def unix_socket_client(command, socketpath):
    """
    Send `command` over the UNIX control socket and print the reply.

    Rewritten from the deprecated ``@asyncio.coroutine`` / ``yield from``
    style (removed in Python 3.11) to a native coroutine; callers using
    ``loop.run_until_complete(...)`` are unaffected.

    Args:
        command `str`: command to send (read(1024) in the other end)
        socketpath `str`: filesystem path of the UNIX domain socket
    """
    reader, writer = await asyncio.open_unix_connection(socketpath)
    # command += "\n"  # readline() in the other end!
    # read(1024) on the other end
    writer.write(command.encode())
    data = await reader.read(-1)
    msg = data.decode()
    # If JSON, we might pretty-print:
    try:
        msg = json.loads(msg)
        # pprint.PrettyPrinter(indent=1, width=80, compact=True)
        pprint.pprint(msg, width=172, compact=True)
    except json.decoder.JSONDecodeError:
        print(msg)
    writer.close()
def main(args):
    """
    Main entry point allowing external calls

    Args:
        args ([str]): argv-style list (without program name)
    """
    args = parse_args(args)
    # NOTE(review): get_event_loop()/run_until_complete is the legacy
    # pattern; asyncio.run() is the modern equivalent — confirm the minimum
    # supported Python before changing.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(unix_socket_client(args.cmd, args.sock))
    loop.close()
def run():
    """
    Entry point for console_scripts
    """
    main(sys.argv[1:])
# allow direct execution as a script as well
if __name__ == "__main__":
    run()
|
shuntingyard/flowproc | src/flowproc/__init__.py | <reponame>shuntingyard/flowproc
# -*- coding: utf-8 -*-
"""
tbd
"""
import logging
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
logger = logging.getLogger(__name__)
# retrieve version info from the installed distribution metadata;
# falls back to "unknown" when the package is not installed (e.g. run
# straight from a source checkout)
try:
    dist_name = __name__
    __version__ = get_distribution(dist_name).version
except DistributionNotFound:
    __version__ = "unknown"
finally:
    # keep the module namespace clean
    del get_distribution, DistributionNotFound
|
shuntingyard/flowproc | src/flowproc/util.py | <reponame>shuntingyard/flowproc
# -*- coding: utf-8 -*-
"""
Various utility functions to convert port numbers, tcpflags, ICMP-type and code
to text
A `dict` to look up textual labels for protocol numbers and
a stopwatch decorator function
A class to reflect netflow exporter attributes and options
"""
import functools
import logging
import socket
import time
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
# globals
logger = logging.getLogger(__name__)
def stopwatch(fn):
    """
    Log (DEBUG) how much time is spent in decorated fn.
    """

    @functools.wraps(fn)
    def timed(*args, **kwargs):
        t0 = time.perf_counter()
        result = fn(*args, **kwargs)
        elapsed_ms = (time.perf_counter() - t0) * 1000
        # log via the decorated function's own module-level logger, if any
        log = fn.__globals__.get("logger", None)
        if log:
            log.debug(
                "{elapsed:8.3f} msec elapsed in '{name}'".format(
                    elapsed=round(elapsed_ms, 3), name=fn.__qualname__
                )
            )
        return result

    return timed
def port_to_str(port):
    """
    Look up the service name registered for `port`.

    Return:
        service name `str`, or `None` when the port is not registered
    """
    try:
        name = socket.getservbyport(port)
    except OSError:
        name = None
    return name
# ----- [ flag ] = label  (bit position -> TCP flag name)
TCPFLAGS = {
    1 << 0: "fin",
    1 << 1: "syn",
    1 << 2: "rst",
    1 << 3: "psh",
    1 << 4: "ack",
    1 << 5: "urg",
    1 << 6: "ecn",
    1 << 7: "cwr",
}


def tcpflags_to_str(flags, brief=False):
    """Return TCP flags represented for humans.

    Args:
        flags byte, binary value representing TCP flags
        brief if true: short (8 byte `str`) representation,
              else: more verbose `list` representation
    Return:
        `str` or `list`
    """
    if brief:
        # fixed 8-column string: flag initial when set, space otherwise
        return "".join(
            label[:1].upper() if bit & flags == bit else " "
            for bit, label in TCPFLAGS.items()
        )
    return [label for bit, label in TCPFLAGS.items() if bit & flags == bit]
# @stopwatch
def fqdnlookup(ipa_str):
    """
    Return the fully qualified domain name for `ipa_str`; per
    `socket.getfqdn`, the input is returned unchanged when no name
    can be resolved.
    """
    return socket.getfqdn(ipa_str)
# https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
PROTO = {
0: "HOPOPT",
1: "ICMP",
2: "IGMP",
3: "GGP",
4: "IPv4",
5: "ST",
6: "TCP",
7: "CBT",
8: "EGP",
9: "IGP",
10: "BBN-RCC-MON",
11: "NVP-II",
12: "PUP",
13: "ARGUS (deprecated)",
14: "EMCON",
15: "XNET",
16: "CHAOS",
17: "UDP",
18: "MUX",
19: "DCN-MEAS",
20: "HMP",
21: "PRM",
22: "XNS-IDP",
23: "TRUNK-1",
24: "TRUNK-2",
25: "LEAF-1",
26: "LEAF-2",
27: "RDP",
28: "IRTP",
29: "ISO-TP4",
30: "NETBLT",
31: "MFE-NSP",
32: "MERIT-INP",
33: "DCCP",
34: "3PC",
35: "IDPR",
36: "XTP",
37: "DDP",
38: "IDPR-CMTP",
39: "TP++",
40: "IL",
41: "IPv6",
42: "SDRP",
43: "IPv6-Route",
44: "IPv6-Frag",
45: "IDRP",
46: "RSVP",
47: "GRE",
48: "DSR",
49: "BNA",
50: "ESP",
51: "AH",
52: "I-NLSP",
53: "SWIPE (deprecated)",
54: "NARP",
55: "MOBILE",
56: "TLSP",
57: "SKIP",
58: "IPv6-ICMP",
59: "IPv6-NoNxt",
60: "IPv6-Opts",
61: "NaN",
62: "CFTP",
63: "NaN",
64: "SAT-EXPAK",
65: "KRYPTOLAN",
66: "RVD",
67: "IPPC",
68: "NaN",
69: "SAT-MON",
70: "VISA",
71: "IPCV",
72: "CPNX",
73: "CPHB",
74: "WSN",
75: "PVP",
76: "BR-SAT-MON",
77: "SUN-ND",
78: "WB-MON",
79: "WB-EXPAK",
80: "ISO-IP",
81: "VMTP",
82: "SECURE-VMTP",
83: "VINES",
84: "TTP or IPTM",
85: "NSFNET-IGP",
86: "DGP",
87: "TCF",
88: "EIGRP",
89: "OSPFIGP",
90: "Sprite-RPC",
91: "LARP",
92: "MTP",
93: "AX.25",
94: "IPIP",
95: "MICP (deprecated)",
96: "SCC-SP",
97: "ETHERIP",
98: "ENCAP",
99: "NaN",
100: "GMTP",
101: "IFMP",
102: "PNNI",
103: "PIM",
104: "ARIS",
105: "SCPS",
106: "QNX",
107: "A/N",
108: "IPComp",
109: "SNP",
110: "Compaq-Peer",
111: "IPX-in-IP",
112: "VRRP",
113: "PGM",
114: "NaN",
115: "L2TP",
116: "DDX",
117: "IATP",
118: "STP",
119: "SRP",
120: "UTI",
121: "SMP",
122: "SM (deprecated)",
123: "PTP",
124: "ISIS over IPv4",
125: "FIRE",
126: "CRTP",
127: "CRUDP",
128: "SSCOPMCE",
129: "IPLT",
130: "SPS",
131: "PIPE",
132: "SCTP",
133: "FC",
134: "RSVP-E2E-IGNORE",
135: "Mobility Header",
136: "UDPLite",
137: "MPLS-in-IP",
138: "manet",
139: "HIP",
140: "Shim6",
141: "WESP",
142: "ROHC",
255: "Reserved",
}
def dstport_to_icmptc(dstport):
    """
    Destination port to ICMP type- and code - definition taken from
    https://www.erg.abdn.ac.uk/users/gorry/course/inet-pages/icmp-code.html
    https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml

    Args:
        dstport `int` destination port number (non-negative)
    Return:
        `int` icmp type, `int` icmp code
    """
    # divmod avoids the float round-trip of int(dstport / 256) and is exact
    # for all 16-bit port values
    return divmod(dstport, 256)
# TODO This is work in progress...
ICMPTEXT = {
(0, 0): "Echo Reply",
(3, 0): "Net Unreachable",
(3, 1): "Host Unreachable",
(3, 2): "Protocol Unreachable",
(3, 3): "Port Unreachable",
(3, 4): "Fragmentation Needed and Don't Fragment was Set",
(3, 5): "Source Route Failed",
(3, 6): "Destination Network Unknown",
(3, 7): "Destination Host Unknown",
(3, 8): "Source Host Isolated",
(
3,
9,
): "Communication with Destination Network is Administratively Prohibited",
(
3,
10,
): "Communication with Destination Host is Administratively Prohibited",
(3, 11): "Destination Network Unreachable for Type of Service",
(3, 12): "Destination Host Unreachable for Type of Service",
(3, 13): "Communication Administratively Prohibited ",
(3, 14): "Host Precedence Violation",
(3, 15): "Precedence cutoff in effect",
(5, 0): "Redirect Datagram for the Network (or subnet)",
(5, 1): "Redirect Datagram for the Host",
(5, 2): "Redirect Datagram for the Type of Service and Network",
(5, 3): "Redirect Datagram for the Type of Service and Host",
(7, 0): "Unassigned",
(8, 0): "Echo",
(11, 0): "Time to Live exceeded in Transit",
(11, 1): "Fragment Reassembly Time Exceeded",
}
def ffs(length, ftype=None):
    """Format Field String (ffs)

    Args:
        length `int`: the field length (1, 2, 4 or 8)
        ftype `int`: field type
    Return:
        Format string for 'struct.unpack' w. prefix '!' (network byte order)
    Raises:
        KeyError for unsupported lengths (unless `ftype` is special-cased)
    """
    # TODO Revise/ improve!
    # Problematic field types: exporters send length=8 for these, but we
    # read a 4-byte unsigned int followed by 4 pad bytes.
    special = {
        40: "Ixxxx",  # these 4 types (from TOTAL_BYTES_EXP and 163
        41: "Ixxxx",  # send lengt=8
        42: "Ixxxx",
        163: "Ixxxx",
        153: "Ixxxx",  # 153, 160, NF_F_FLOW_CREATE_TIME_MSEC
        160: "Ixxxx",
        152: "Ixxxx",
    }
    fmt = special.get(ftype)  # lookup for problematic fields
    if fmt is not None:
        return fmt
    # regular case: one unsigned format character per supported length
    by_length = {1: "B", 2: "H", 4: "I", 8: "Q"}
    return by_length[length]
def vunpack(dataslice):
    """
    Variable (length) unpack

    Interpret `dataslice` as a big-endian unsigned integer of arbitrary
    length — a better solution than struct.unpack with variable field
    length (idea credited to bitkeks).

    Args:
        dataslice: bytes-like object (or iterable of ints in range(256))
    Return:
        `int` (0 for an empty slice)
    """
    # int.from_bytes performs the same shift-and-add loop in C
    return int.from_bytes(dataslice, "big")
|
shuntingyard/flowproc | codesnippets/asyncio_echo_C35.py | import asyncio
@asyncio.coroutine
def tcp_echo_client(message, loop):
reader, writer = yield from asyncio.open_unix_connection("./echosocket")
print('Send: %r' % message)
writer.write(message.encode())
data = yield from reader.read(100)
print('Received: %r' % data.decode())
print('Close the socket')
writer.close()
message = 'Hello World!'
loop = asyncio.get_event_loop()
loop.run_until_complete(tcp_echo_client(message, loop))
loop.close()
|
shuntingyard/flowproc | src/flowproc/fluent.py | # https://gist.github.com/thinkingserious/d1b06ee12a3613c0dc3b
import sys
from ipaddress import ip_address
class Fluent:
    """Minimal fluent-interface experiment: attribute access chains
    callables, `process` runs them over a value."""

    def __init__(self, cache=None):
        # NOTE: `cache or []` treats an explicitly passed empty list the
        # same as None, which is fine here (both mean "no steps yet")
        self._cache = cache or []

    # Build the cache, and handle special cases
    def _(self, name):
        # Enables method chaining
        return Fluent(self._cache + [name])

    # Final method call accepting an argument
    def process(self, data):
        for func in self._cache:
            # Cool, yep - just inspect the cache and check if sequence found
            # is allowed consulting a state transition table :))
            data = func(data)
        # BUGFIX: return the processed value instead of dropping it
        # (backward-compatible: callers ignoring the result are unaffected)
        return data

    # Reflection to get an object
    def __getattr__(self, name):
        obj = globals()[name]
        return self._(obj)

    # Called with the object is deleted
    def __del__(self):
        print('Deleting', self)
def to_ip(data):
    """Coerce a raw numeric value to an `ipaddress` address object."""
    numeric = int(data)
    return ip_address(numeric)
def print_ip(data):
    """Print `data` and pass it through unchanged (pipeline step)."""
    print(data)
    return data
# Demo: build a two-step pipeline via attribute chaining, then run it on
# the first CLI argument (default 0).
fluent = Fluent()
chain = fluent.to_ip.print_ip
chain.process(sys.argv[1] if len(sys.argv) > 1 else 0)
|
shuntingyard/flowproc | src/flowproc/netflowV5_lab.py | <reponame>shuntingyard/flowproc<gh_stars>0
# -*- coding: utf-8 -*-
"""
NetFlow v5 collector implementation
"""
import logging
import struct
from collections import namedtuple
from datetime import datetime
from flowproc import util
from ipaddress import ip_address
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
# global
logger = logging.getLogger(__name__)
RECORD_LENGTH = 48
class Collector(util.AbstractCollector):
    """
    NetFlow v5 collector: unpacks export packets into named flow records.

    NOTE(review): `util.AbstractCollector`, `util.get_header_version`,
    `util.tcpflags_h` and `util.proto_to_str` are not present in the
    `util` module shipped alongside this lab module — presumably a
    different revision of `util`; confirm before use.
    """
    # field name -> (byte offset, byte length, struct format char)
    STRUCT_NFV5 = {
        "srcaddr": (0, 4, "I"),
        "dstaddr": (4, 4, "I"),
        "nexthop": (8, 4, "I"),
        "input": (12, 2, "H"),
        "output": (14, 2, "H"),
        "dPkts": (16, 4, "I"),
        "dOctets": (20, 4, "I"),
        "First": (24, 4, "I"),
        "Last": (28, 4, "I"),
        "srcport": (32, 2, "H"),
        "dstport": (34, 2, "H"),
        "pad1": (36, 1, "x"),
        "tcp_flags": (37, 1, "B"),
        "prot": (38, 1, "B"),
        "tos": (39, 1, "B"),
        "src_as": (40, 2, "H"),
        "dst_as": (42, 2, "H"),
        "src_mask": (44, 1, "B"),
        "dst_mask": (45, 1, "B"),
        "pad2": (46, 2, "x"),
    }
    # SiLK-style field label -> NetFlow v5 field label
    SILK_TO_NFV5 = {
        "sIP": "srcaddr",
        "dIP": "dstaddr",
        "nhIP": "nexthop",
        "inNic": "input",  # altered from 'in' for clarification
        "outNic": "output",  # altered from 'out' for clarification
        "packets": "dPkts",
        "bytes": "dOctets",
        "sTime": "First",
        "eTime": "Last",
        "sPort": "srcport",
        "dPort": "dstport",
        "flags": "tcp_flags",
        "protocol": "prot",
        "tos": "tos",  # not SiLK-like
        "sASN": "src_as",  # not SiLK-like
        "dASN": "dst_as",  # not SiLK-like
    }
    @classmethod
    def _compose_format(cls, fields):
        """Build the `struct.unpack` format selecting only `fields`."""
        # Translate (SiLKy) fields to list of NetFlow v5 field labels.
        selection = [v for k, v in cls.SILK_TO_NFV5.items() if k in fields]
        format_string = "!"  # starting with network big endian
        for k, v in cls.STRUCT_NFV5.items():
            if k in selection:
                # Append format character for field.
                format_string += v[2]
            else:
                # Add number of padding characters required to skip
                # unwanted field.
                format_string += v[1] * "x"
        return format_string
    def __init__(self, fields):
        """
        Args:
            fields either "all" or a subset of keys in SILK_TO_NFV5
        """
        # FIXME When fields not enumerated in sequential order given by struct,
        # they DO label the wrong fields (i.e. values)!
        self.fields = (
            [k for k in Collector.SILK_TO_NFV5.keys()]
            if fields == "all"
            else fields
        )
        # format string for `struct.unpack`
        self.format_string = Collector._compose_format(self.fields)
        # transform_pretty for now
        self.TRANSFORM_NFV5 = {
            "sIP": lambda x: str(ip_address(x)),
            "dIP": lambda x: str(ip_address(x)),
            "nhIP": lambda x: str(ip_address(x)),
            "inNic": lambda x: x,
            "outNic": lambda x: x,
            "packets": lambda x: x,
            "bytes": lambda x: x,
            "sTime": lambda x: self._abs_time(x),
            "eTime": lambda x: self._abs_time(x),
            "sPort": lambda x: util.port_to_str(x),
            "dPort": lambda x: util.port_to_str(x),
            "flags": lambda x: util.tcpflags_h(x, brief=False),
            "protocol": lambda x: util.proto_to_str(x),
            "tos": lambda x: x,
            "sASN": lambda x: x,
            "dASN": lambda x: x,
        }
        # list of functions for transforming unpacked structure
        self.xform_list = [
            func for k, func in self.TRANSFORM_NFV5.items() if k in self.fields
        ]
        # named tuple for flow records to return
        self.FlowRec = namedtuple("FlowRec", self.fields)
    def __repr__(self):
        return str(self.fields)
    @staticmethod
    def _unpack_header(packet):
        """Unpack the 24-byte v5 export header into a labeled dict."""
        # header contents as enumerated in
        # https://www.cisco.com/c/en/us/td/docs/net_mgmt/netflow_collection_engine/3-6/user/guide/format.html#wp1006108
        contents = [
            "version",
            "count",
            "SysUptime",
            "unix_secs",
            "unix_nsecs",
            "flow_sequence",
            "engine_type",
            "engine_id",
            "sampling_interval",
        ]
        unpacked = struct.unpack("!HHIIIIBBH", packet[:24])
        assert len(contents) == len(unpacked)
        return dict(zip(contents, unpacked))
    def collect(self, client_addr, export_packet):
        """See `flowproc.util.AbstractCollector.collect`"""
        # entry level test
        ver = util.get_header_version(export_packet)
        if ver != 5:
            logger.error("Cannot process header version {}".format(ver))
            return
        # get header
        header = Collector._unpack_header(export_packet)
        # log export packet summary
        logger.debug(
            "Received {:4d} bytes from observation dom {:d} at {}".format(
                len(export_packet), header["engine_id"], client_addr
            )
        )
        # prepare variables for record processing
        counter = header["count"]
        # reconstruct the exporter's boot time (epoch seconds) so relative
        # First/Last timestamps can be made absolute in _abs_time
        self.exporter_start_t = (
            header["unix_secs"]
            + round(header["unix_nsecs"] / 10 ** 6, 3)
            - header["SysUptime"] / 1000
        )
        logger.debug(
            "Exporter started on {}".format(
                datetime.fromtimestamp(self.exporter_start_t).strftime(
                    "%b %m %Y %H:%M:%S"
                )
            )
        )
        flowrec_iterable = []
        # loop over records
        data_offset = 24
        for i in range(counter):
            ptr = data_offset + i * RECORD_LENGTH
            record = export_packet[ptr : ptr + RECORD_LENGTH]
            unpacked = struct.unpack(self.format_string, record)
            transformed = list(
                map(lambda f, y: f(y), self.xform_list, unpacked)
            )
            flowrec = self.FlowRec(*transformed)
            flowrec_iterable.append(flowrec)
        for rec in flowrec_iterable:
            # For now just print every single record, prefixed with
            # observation domain attributes.
            print((client_addr, header["engine_id"]), rec)
    # methods to be moved to prettyfiers
    def _abs_time(self, rel_t):
        # rel_t is milliseconds since exporter boot; requires collect()
        # to have set self.exporter_start_t first
        return datetime.fromtimestamp(
            self.exporter_start_t + rel_t / 1000
        ).strftime("%b %m %Y %H:%M:%S.%f")
|
shuntingyard/flowproc | tests/test_collector_state.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Tests for 'util' module
"""
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
import logging
import time
from datetime import datetime
from flowproc import testasync
from flowproc.collector_state import AbstractTemplate
from flowproc.collector_state import Collector
# globals
logger = logging.getLogger().setLevel(logging.DEBUG)
class T(AbstractTemplate):
    """
    Helper template class for testing
    """

    def __init__(self, tid):
        self.tid = tid
        # some simulation of real-life attrs
        self.attr = [self.tid * factor for factor in [18, 22, 0.77, 3, 5]]
        self.lastwrite = datetime.utcnow()

    def get_tid(self):
        return self.tid

    def __str__(self):
        return "T({}, {})".format(self.tid, self.lastwrite)

    def __repr__(self):
        return "T({}, {})".format(self.tid, self.attr)
# register some objects before testing
# NOTE(review): module-level side effects — these run at import time, so
# test order within this module matters.
Collector.register("127.0.0.1", 0, T(300))
Collector.register("127.0.0.1", 0, T(387))
Collector.register("127.0.0.1", 1, T(387))
Collector.register("fc00:e968:6179::de52:7100", 0, T(326))
Collector.register("8.8.4.4", 3, T(300))
time.sleep(0.3)  # updated template example:
Collector.register("127.0.0.1", 0, T(300))
def test_Collector_get():
    """Exercise Collector.get_qualified over all path depths."""
    # empty path
    assert Collector.get_qualified().__name__ == "Collector"
    # paths with ipa (ip address) only
    assert str(Collector.get_qualified("127.0.0.1")) == "127.0.0.1"  # existent
    assert str(Collector.get_qualified("8.8.4.4")) == "8.8.4.4"  # existent
    # NOTE(review): the expected value below does not match the exploded
    # form of the registered address — looks like an address-anonymization
    # artifact; confirm against the original fixture addresses.
    assert (
        str(Collector.get_qualified("fc00:e968:6179::de52:7100"))
        == "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"
    )
    assert Collector.get_qualified("8.8.8.8") is None  # missing
    # paths ipa, odid (observation domain ID)
    assert str(Collector.get_qualified("fc00:e968:6179::de52:7100", 0)) == "0"
    assert (
        Collector.get_qualified("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b", 0) is None
    )  # missing
    assert (
        Collector.get_qualified("fc00:e968:6179::de52:7100", 1) is None
    )  # missing
    # paths ipa, odid, tid (Template ID)
    assert Collector.get_qualified("127.0.0.1", 0, 300).get_tid() == 300
    assert Collector.get_qualified("127.0.0.1", 1, 300) is None  # missing
    assert Collector.get_qualified("127.0.0.1", 0, 301) is None  # missing
    # see if the rest above has been registered
    assert None not in (
        Collector.get_qualified("127.0.0.1", 0),
        Collector.get_qualified("127.0.0.1", 1),
        Collector.get_qualified("127.0.0.1", 0, 387),
        Collector.get_qualified("127.0.0.1", 1, 387),
        Collector.get_qualified("8.8.4.4", 3),
        Collector.get_qualified("fc00:e968:6179::de52:7100", 0, 326),
        Collector.get_qualified("8.8.4.4", 3, 300),
    )
    # TODO Collector.check_header()
    # TODO Collector.unregister()
def test_Collector_register_optrec():
    """Option data records attach only to existing (ipa, odid) paths."""
    rec = {
        "System": 2130706433,
        "INPUT_SNMP": 128,
        "IF_NAME": "lo",
        "IF_DESC": "Loopback",
    }
    # path not existing: False
    assert not Collector.register_optrec("127.0.0.1", 2, rec)
    # existing: True
    assert Collector.register_optrec("127.0.0.1", 1, rec)
    print(Collector.accept(testasync.TreeVisitor()))
|
shuntingyard/flowproc | codesnippets/example_udp_run_for_5.py | loop = asyncio.get_running_loop()
transport, protocol = await loop.create_datagram_endpoint(
lambda: EchoServerProtocol(),
local_addr=('127.0.0.1', 9999))
try:
await asyncio.sleep(5) # Serve for 5 seconds.
finally:
transport.close()
|
shuntingyard/flowproc | src/flowproc/v9_classes.py | """
Template classes for NetFlow V9 parsing
"""
import logging
from datetime import datetime
from flowproc import v9_fieldtypes
from flowproc.collector_state import AbstractTemplate
from flowproc.collector_state import Collector
# global settings
logger = logging.getLogger(__name__)
class Template(AbstractTemplate):
    """
    Responsibility: represent Template Record
    """

    # Class-level registry used by get()/discard_all(). Previously this
    # attribute only came into existence when discard_all() ran, so get()
    # raised AttributeError instead of the documented KeyError.
    tdict = {}

    def __init__(self, ipa, odid, tid, tdata):
        """
        Args:
            ipa `str`: ip address of exporter
            odid `int`: Observation Domain ID (aka Source ID)
            tid `int`: Template ID
            tdata `tuple`: alternating field types and field lengths
        """
        self.tid = tid
        self.tdata = tdata
        self.lastwrite = datetime.utcnow()  # TODO add timezone info
        # registration with the collector tree is a constructor side effect
        Collector.register(ipa, odid, self)

    @classmethod
    def get(cls, tid):
        """
        Return:
            `Template` or raise `KeyError`
        """
        return cls.tdict[tid]

    @classmethod
    def discard_all(cls):
        """
        Discard all templates
        """
        cls.tdict = {}

    @property
    def types(self):
        return self.tdata[0::2]  # using start::step for all field types

    @property
    def lengths(self):
        return self.tdata[1::2]  # same for all field lengths

    def __str__(self):
        return "{:d} age={} types={}".format(
            self.tid,
            datetime.utcnow() - self.lastwrite,
            list(zip(
                [v9_fieldtypes.LABEL.get(n, n) for n in self.types],
                self.lengths
            )),
        )

    def __repr__(self):
        # BUGFIX: __repr__ must return a str — it previously returned a
        # tuple (flagged by the original TODO), which raises TypeError
        # whenever repr() is actually taken.
        return "Template({!r}, {!r})".format(self.tid, self.tdata)

    def get_tid(self):
        return self.tid
class OptionsTemplate(AbstractTemplate):
    """
    Responsibility: represent Options Template Record attributes
    """

    # Class-level registry referenced by get(); without it get() raised
    # AttributeError instead of the documented KeyError.
    tdict = {}

    def __init__(self, ipa, odid, tid, scopes, options):
        """
        Args:
            ipa `str`: ip address of exporter
            odid `int`: Observation Domain ID (aka Source ID)
            tid `int`: Template ID
            scopes `tuple`: alternating scope field types and lengths
            options `tuple`: alternating option field types and lengths
        """
        self.tid = tid
        self.scopes = scopes
        self.options = options
        self.lastwrite = datetime.utcnow()  # TODO add timezone info
        # registration with the collector tree is a constructor side effect
        Collector.register(ipa, odid, self)

    @classmethod
    def get(cls, tid):
        """
        Return:
            `Template` or raise `KeyError`
        """
        return cls.tdict[tid]

    @property
    def scope_types(self):
        return self.scopes[0::2]  # using start::step for all field types

    @property
    def scope_lengths(self):
        return self.scopes[1::2]  # same for all field lengths

    @property
    def option_types(self):
        return self.options[0::2]  # as above

    @property
    def option_lengths(self):
        return self.options[1::2]  # as above

    def __str__(self):
        return "{:d} age={} scopes={} options={}".format(
            self.tid,
            datetime.utcnow() - self.lastwrite,
            list(zip(
                [
                    v9_fieldtypes.SCOPE_LABEL.get(n, n)
                    for n in self.scope_types
                ],
                self.scope_lengths
            )),
            list(zip(
                [v9_fieldtypes.LABEL.get(n, n) for n in self.option_types],
                self.option_lengths
            )),
        )

    def __repr__(self):
        # BUGFIX: __repr__ must return a str — it previously returned a
        # tuple (flagged by the original TODO).
        return "OptionsTemplate({!r}, {!r}, {!r})".format(
            self.tid, self.scopes, self.options
        )

    def get_tid(self):
        return self.tid
|
shuntingyard/flowproc | src/flowproc/testreader.py | <reponame>shuntingyard/flowproc
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test parsers using raw NetFlow/IPFIX input from disk
"""
import argparse
import struct
import sys
import logging
from flowproc import __version__
from flowproc import testasync
# from flowproc import v5_parser
from flowproc import v9_parser
# from flowproc import v10_parser
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
# global settings
logger = logging.getLogger() # root
fmt = logging.Formatter("%(levelname)-8s %(name)s: %(message)s")
sh = logging.StreamHandler(sys.stderr)
sh.setFormatter(fmt)
# logger.setLevel(logging.DEBUG)
logger.addHandler(sh)
def parse_args(args):
    """Parse command line parameters

    Args:
        args ([str]): command line parameters as list of strings

    Returns:
        :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(
        description="Test parsers using raw NetFlow/IPFIX input from disk"
    )
    parser.add_argument(
        "-V",
        action="version",
        version="flowproc {ver}".format(ver=__version__),
    )
    parser.add_argument(
        dest="infile", help="input file to use", type=str, metavar="INPUT_FILE"
    )
    # the four log-level switches differ only in flags, constant and help
    level_switches = (
        (["-d"], logging.DEBUG, "set loglevel to DEBUG"),
        (["-i"], logging.INFO, "set loglevel to INFO"),
        (
            ["-w", "--verbose"],
            logging.WARNING,
            "set loglevel to WARNING (default)",
        ),
        (["-e"], logging.ERROR, "set loglevel to ERROR"),
    )
    for flags, level, helptext in level_switches:
        parser.add_argument(
            *flags,
            dest="loglevel",
            help=helptext,
            action="store_const",
            const=level,
        )
    return parser.parse_args(args)
def main(args):
    """Main entry point allowing external calls

    Args:
        args ([str]): command line parameter list
    """
    args = parse_args(args)
    # plain if/else instead of a conditional expression used for its side
    # effect only (the expression's value was discarded)
    if args.loglevel:
        logger.setLevel(args.loglevel)
    else:
        logger.setLevel(logging.WARNING)
    try:
        with open(args.infile, "rb") as fh:
            # peek at the version field to pick a parser
            ver = struct.unpack("!H", fh.read(2))[0]
            fh.seek(0)  # reset
            if ver == 9:
                v9_parser.parse_file(fh, "0.0.0.0")
            else:
                print(
                    "Not equipped to parse ver {:d}, giving up...".format(ver)
                )
    except KeyboardInterrupt:
        print()  # newline
        print("Closing infile...")
        print(testasync.stats())
def run():
    """
    Entry point for console_scripts
    """
    # Strip the program name; main() expects only the actual arguments.
    main(sys.argv[1:])
if __name__ == "__main__":
    run()
|
shuntingyard/flowproc | src/flowproc/testlistener.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test UDP listener
"""
import argparse
import asyncio
import sys
import logging
import os
from importlib import reload
from flowproc import __version__
from flowproc import testasync
# from flowproc import v5_parser
from flowproc import v9_classes
from flowproc import v9_fieldtypes
from flowproc import v9_parser
# from flowproc import v10_parser
from flowproc.collector_state import Collector
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
# global settings
logger = logging.getLogger() # root
fmt = logging.Formatter("[%(asctime)s] %(levelname)-8s %(name)s: %(message)s")
sh = logging.StreamHandler(sys.stderr)
sh.setFormatter(fmt)
logger.addHandler(sh)
def parse_args(args):
    """
    Parse command line parameters into an argparse Namespace.
    """
    parser = argparse.ArgumentParser(
        description="Test parsers with UDP packets coming in"
    )
    parser.add_argument(
        dest="parser",
        help="set parser to use (values are: 'v5', 'v9' or 'ipfix'",
        type=str,
    )
    parser.add_argument(
        "-p",
        "--port",
        help="set port to listen on (defaults: NetFlow 2055, IPFIX 4739)",
        type=int,
        action="store",
    )
    parser.add_argument(
        "-s",
        "--sock",
        help="unix socket path for control",
        type=str,
        action="store",
    )
    # Verbosity switches: each stores a logging constant into `loglevel`.
    for flag, help_text, level in (
        ("-d", "set loglevel to DEBUG", logging.DEBUG),
        ("-i", "set loglevel to INFO", logging.INFO),
        ("-w", "set loglevel to WARNING (default)", logging.WARNING),
        ("-e", "set loglevel to ERROR", logging.ERROR),
    ):
        parser.add_argument(
            flag,
            dest="loglevel",
            help=help_text,
            action="store_const",
            const=level,
        )
    parser.add_argument(
        "-V",
        action="version",
        version="flowproc {ver}".format(ver=__version__),
    )
    return parser.parse_args(args)
def start(parser, host, port, socketpath):
    """
    Fire up an asyncio event loop: a UDP endpoint receiving flow export
    packets and, optionally, a Unix socket accepting control commands.

    Args:
        parser:      module exposing parse_packet(datagram, exporter_ip)
        host (str):  address for the UDP endpoint
        port (int):  UDP port to listen on
        socketpath:  control-socket path, or falsy to disable control
    """

    class NetFlow:  # the protocol definition
        def connection_made(self, transport):
            self.transport = transport

        def datagram_received(self, datagram, addr):
            # addr is (ip, port); the parser only needs the exporter's IP
            parser.parse_packet(datagram, addr[0])

        def connection_lost(self, exc):
            pass

    async def callback(reader, writer):  # callback function for Unix Sockets
        # Fix: rewritten as a native coroutine -- the original used
        # @asyncio.coroutine / `yield from`, which was removed in Python 3.11.
        data = await reader.read(1024)
        msg = data.decode()
        msg = run_command(msg.split())  # seems to work on multiple whitespace
        writer.write(str(msg).encode())
        await writer.drain()
        writer.close()

    def load():
        # Hot-reload the parser modules so code changes apply without restart.
        modules = (testasync, v9_classes, v9_fieldtypes, v9_parser)
        [reload(m) for m in modules]
        logger.info("Reloaded {}".format(modules))
        return "reloaded {}".format([m.__name__ for m in modules])

    def setloglevel(level):
        logger.setLevel(int(level))  # Int because args are split-up `str`.
        return logger.level

    def stop():
        loop.stop()
        return "stopping event loop..."

    def run_command(args):
        """
        Reply to the few commands existing
        """
        run = {
            "ping": lambda: "pong",
            "getloglevel": lambda: logger.level,
            "setloglevel": setloglevel,
            "stats": testasync.stats,
            "tree": lambda: Collector.accept(testasync.TreeVisitor()),
            "reload": load,
            "shutdown": stop,
            "help": lambda: "Command must be one of {}".format(
                [c for c in run.keys()]
            ),
        }
        logger.info("Ctrl: {}".format(args))
        cmd = args[0]
        if cmd not in run.keys():
            return "Command '{}' unknown".format(cmd)
        else:
            try:
                if len(args) == 1:
                    return run[cmd]()
                else:
                    return run[cmd](*args[1:])
            except Exception as e:
                # Report the failure over the control socket instead of
                # crashing the server.
                return e

    loop = asyncio.get_event_loop()

    # UDP
    logger.info("Starting UDP server on host {} port {}".format(host, port))
    coro = loop.create_datagram_endpoint(NetFlow, local_addr=(host, port))
    transport, protocol = loop.run_until_complete(coro)

    # Unix Sockets (ctrl)
    if socketpath:
        logger.info("Starting Unix Socket on {}".format(socketpath))
        # Fix: the `loop` keyword argument was removed in Python 3.10; the
        # server binds to the current event loop implicitly.
        coro = asyncio.start_unix_server(callback, socketpath)
        socketserver = loop.run_until_complete(coro)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        print()  # newline for ^C
        logger.info("Shutting down...")
        transport.close()
        if socketpath:
            socketserver.close()
            os.remove(socketpath)
        loop.close()
def main(args):
    """
    Main entry point allowing external calls.

    Args:
        args ([str]): command line parameter list
    """
    parser = None
    port = None
    args = parse_args(args)
    # Fix: plain call instead of a conditional expression used as a statement
    # for its side effect; WARNING is the default verbosity.
    logger.setLevel(args.loglevel if args.loglevel else logging.WARNING)
    logger.debug(args)

    # configure
    socketpath = args.sock
    if args.parser.lower() == "v5":
        # parser = v5_parser  # not implemented yet
        port = 2055 if not args.port else args.port
    elif args.parser.lower() == "v9":
        parser = v9_parser
        port = 2055 if not args.port else args.port
    elif args.parser.lower() == "ipfix":
        # parser = ipfix_parser  # not implemented yet
        port = 4739 if not args.port else args.port

    if not parser:
        print("No suitable parser configured, giving up...")
        # Fix: sys.exit instead of the site-module-only exit() helper.
        sys.exit(1)

    # fire up event loop
    start(parser, "0.0.0.0", port, socketpath)
def run():
    """
    Entry point for console_scripts
    """
    # Strip the program name; main() expects only the actual arguments.
    main(sys.argv[1:])
if __name__ == "__main__":
    run()
|
shuntingyard/flowproc | src/flowproc/flowprocd.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The flow collector daemon
"""
import argparse
import logging
import socketserver
import sys
from flowproc import process
from flowproc import __version__
__author__ = "<NAME>"
__copyright__ = "Tobias Frei"
__license__ = "mit"
logger = logging.getLogger(__name__)
def parse_args(args):
    """Parse command line parameters.

    Args:
        args ([str]): command line parameters as list of strings

    Returns:
        :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(
        description="Flow collector daemon command line interface",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        dest="socket",
        choices=["udp", "tcp"],
        default="udp",
        help="select server socket type (future should bring sctp)",
    )
    parser.add_argument(
        "--host",
        default="0.0.0.0",
        help="set address to listen on",
        action="store",
        metavar="ipaddr",
    )
    parser.add_argument(
        "-p",
        "--port",
        default=2055,
        help="set port to listen on",
        type=int,
        action="store",
        metavar="int",
    )
    parser.add_argument(
        "--logfile",
        default="stderr",
        help="set file to log to",
        action="store",
        metavar="path",
    )
    # -v / -vv store increasingly verbose logging constants into `loglevel`.
    for flag, help_text, level in (
        ("-v", "set loglevel INFO", logging.INFO),
        ("-vv", "set loglevel DEBUG", logging.DEBUG),
    ):
        parser.add_argument(
            flag,
            dest="loglevel",
            help=help_text,
            action="store_const",
            const=level,
        )
    parser.add_argument(
        "-V",
        "--version",
        help="print version and exit",
        action="version",
        version="flowproc {ver}".format(ver=__version__),
    )
    # TODO add options to select output processing
    return parser.parse_args(args)
class _NetFlowUDPHandler(socketserver.DatagramRequestHandler):
    """Per-datagram handler: hands each export packet to `process`."""

    def handle(self):
        client_addr = self.client_address[0]  # [1] contains the port.
        export_packet = self.request[0]  # raw datagram bytes
        # collecting and output processing
        # NOTE(review): `process` comes from `from flowproc import process`;
        # presumably a callable, not a module -- confirm.
        process(client_addr, export_packet, None)
def start_listener(socket_type, addr):
    """Start a socketserver and serve until interrupted.

    Args:
        socket_type (str): for the time being just "udp" is supported
        addr (tuple): (host, port) pair to bind to
    """
    if socket_type.upper() == "UDP":
        # Fix: the server socket was never closed; the context manager
        # guarantees shutdown/close even when serve_forever() is interrupted.
        with socketserver.UDPServer(addr, _NetFlowUDPHandler) as s:
            s.serve_forever()
    else:
        logger.error("There's no TCP without IPFIX support, exiting...")
def setup_logging(loglevel):
    """Setup basic logging.

    Args:
        loglevel (int): minimum loglevel for emitting messages
    """
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format="[%(asctime)s] %(levelname)-9s %(name)s: %(message)s",
        datefmt="%b %d %Y %H:%M:%S",
    )
def main(args):
    """Main entry point allowing external calls

    Args:
        args ([str]): command line parameter list
    """
    args = parse_args(args)
    # NOTE(review): args.loglevel is None when no -v/-vv given; presumably
    # basicConfig then leaves the root logger at its default -- confirm.
    setup_logging(args.loglevel)
    logger.info("Starting version {}".format(__version__,))
    logger.info("Args {}".format(vars(args)))
    try:
        start_listener(args.socket, (args.host, args.port))
    except KeyboardInterrupt:
        logger.info("Shutting down...")
def run():
    """Entry point for console_scripts
    """
    # Strip the program name; main() expects only the actual arguments.
    main(sys.argv[1:])
if __name__ == "__main__":
    run()
|
shuntingyard/flowproc | tests/test_util.py | # -*- coding: utf-8 -*-
"""
Tests for 'util' module
"""
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
import logging
from flowproc import util
# globals
# Fix: setLevel() returns None, so the original one-liner
# `logger = logging.getLogger().setLevel(logging.DEBUG)` bound `logger`
# to None instead of the root logger.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
@util.stopwatch
def test_watch():
    # Smoke test: just exercises the stopwatch decorator on a no-op body.
    pass
def test_port():
    # Port 443 must resolve to its well-known service name.
    assert util.port_to_str(443) == "https"
def test_tcpflags():
    # 17 = 0b10001: both FIN (0x01) and ACK (0x10) bits set.
    flags = util.tcpflags_to_str(17)
    assert "fin" in flags
    assert "ack" in flags
def test_proto():
    # IP protocol number 132 is SCTP.
    assert util.PROTO[132] == "SCTP"
def test_to_icmptc():
    # 769 = 3 * 256 + 1 -> ICMP type 3, code 1.
    assert util.dstport_to_icmptc(769) == (3, 1,)  # host unreachable
|
zyq-zhang/test001 | test/login.py | <reponame>zyq-zhang/test001
hahahaha
num1 = 10
num = 20
num = 300
num = 30
num = 40
num = 50
|
smamidi15/two-player-uno | Uno.py | # CS Seminar Quarter Project: A Game of UNO
# Author: <NAME>
# Date: 3/14/2021
import random
# Allows the user to play UNO with the computer -- through the terminal!
# Allows the user to play UNO with the computer -- through the terminal!
class Uno:
    """A two-player game of UNO played against the computer in the terminal."""

    # Constructor for the Uno class
    def __init__(self, num_cards):
        """Create a game dealing `num_cards` cards to each player."""
        self.cards_per_player = num_cards
        # Fix: these used to be *class* attributes, so every Uno instance
        # shared the same card lists and flags; game state is per-instance.
        self.user_turn = True
        self.user_cards = []
        self.comp_cards = []
        self.discard_pile = []
        self.turns = 0
        self.current_card = list()
        self.user_won = False
        self.comp_won = False

    def __str__(self):
        intro = "Welcome to UNO in Python!\nDesigned by <NAME>\nHere, you can play UNO with your computer as long as you like! Enjoy!\n"
        return(intro)

    # Chooses a random color to be used by the computer
    def choose_random_color(self):
        # Fix: "red " had a trailing space, which broke the substring color
        # match in check_if_cards_are_equal() after a user-chosen wild color.
        colors = ["red", "green", "blue", "yellow"]
        color = random.choice(colors)
        return(color)

    # Uses the standard numbers from 0 to 9
    # Considers a few special cards: reverse = "R", skip = "S", and +2 = "+2"
    # References some wild cards: wild = "W" and +4 = "+4"
    def draw_a_card(self):
        """Return a random (face, color) pair; wilds get color "special"."""
        cards = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "R", "S", "+2", "W", "+4"]
        wild_cards = ["W", "+4"]
        card = random.choice(cards)
        color = self.choose_random_color()
        if card in wild_cards:
            return(card, "special")
        else:
            return(card, color)

    # Generates a set of random cards for each player
    def generate_cards(self):
        print("Generating cards...")
        for i in range(self.cards_per_player):
            self.user_cards.append(list(self.draw_a_card()))
            self.comp_cards.append(list(self.draw_a_card()))
        self.display_user_cards()

    # Begins the game, ensures that the game doesn't begin with a wild
    def begin_game(self):
        while True:
            first_card = list(self.draw_a_card())
            if first_card[0] not in ["S", "R", "+2", "W", "+4"]:
                self.current_card = first_card
                break
        print("Deciding who gets to play first...")
        user_input = input("Choose either the number 0 or 1: ")
        rand = random.randrange(0, 2)
        if int(user_input) == rand:
            print("Yay! You chose the right number!\nYou get to play first!")
        else:
            print("Unfortunately, you chose the wrong number\nThe computer plays first!")
            self.user_turn = False
        print("Let the game begin!")

    # Checks if a card is the same as the current card
    def check_if_cards_are_equal(self, card):
        """A card matches on face value, on color, or by being wild."""
        are_equal = False
        if card[0] == self.current_card[0]:
            are_equal = True
        elif card[1] in self.current_card[1]:
            are_equal = True
        elif card[0] == "W" or card[0] == "+4":
            are_equal = True
        return(are_equal)

    # Outputs the user's cards to the terminal
    def display_user_cards(self):
        print("You have the following cards:")
        counter = 1
        for card in self.user_cards:
            output = "(" + str(counter) + ") " + str(card[1]) + "\t" + str(card[0])
            print(output)
            counter += 1

    # Allows a user to draw a card and displays a corresponding message
    def draw_user_card(self):
        new_card = list(self.draw_a_card())
        self.user_cards.append(new_card)
        print(f'You got a {new_card[1]} {new_card[0]} from the deck')

    # Defines the actions that occur when a user plays a card
    def place_card(self, card):
        """Make `card` the current card and remove it from its owner's hand."""
        self.current_card = card
        self.discard_pile.append(card)
        if self.user_turn:
            self.user_cards.remove(card)
        else:
            self.comp_cards.remove(card)

    # Represents the functionality for a reverse and skip card
    # In a 2 player game, reverse just keeps the same turn
    def play_reverse_or_skip(self, card):
        is_reverse_or_skip = False
        if card[0] == "R" or card[0] == "S":
            if self.user_turn and card[0] == "R":
                print("You have put a reverse, so it's your turn again!")
            elif not self.user_turn and card[0] == "R":
                print("The computer has put a reverse, so it's the computer's turn again!")
            elif self.user_turn and card[0] == "S":
                print("You have put a skip, so it's your turn again!")
            else:
                print("The computer has put a skip, so it's the computer's turn again!")
            is_reverse_or_skip = True
        return(is_reverse_or_skip)

    # Defines the functionality for a +2 card
    def play_plus_two(self, card):
        is_plus_two = False
        if card[0] == "+2":
            if self.user_turn:
                print("You have put a +2, so the computer takes two cards!\nDrawing cards...")
                # Fix: draw_a_card() returns a tuple; every other hand entry
                # is a list, and later in-place color assignment on the
                # current card would raise TypeError on a tuple.
                self.comp_cards.append(list(self.draw_a_card()))
                self.comp_cards.append(list(self.draw_a_card()))
            else:
                print("The computer has put a +2, so you take two cards!")
                self.draw_user_card()
                self.draw_user_card()
                self.display_user_cards()
            is_plus_two = True
        return(is_plus_two)

    # Represents the functionality for a WILD card
    def play_wild_card(self, card):
        is_wild = False
        if card[0] == "W":
            if self.user_turn:
                print("You have put a wild card, so you can change the needed color!")
                color = input("Which color would you like to choose? (red/blue/green/yellow) ")
                print(f'The computer must put a {color} card...')
                self.current_card[1] = color
            else:
                print("The computer has put a wild card, so it can change the needed color!")
                color = self.choose_random_color()
                print(f'You must put a {color} card...')
                self.current_card[1] = color
            is_wild = True
        return(is_wild)

    # Defines the functionality for a +4 card
    def play_plus_four(self, card):
        is_plus_four = False
        if card[0] == "+4":
            if self.user_turn:
                print("You have put a +4 card, so the computer draws four cards!\nDrawing cards...")
                for i in range(4):
                    self.comp_cards.append(list(self.draw_a_card()))
                color = input("Now, choose a color! (red/blue/green/yellow) ")
                print("Because you placed a +4, it's your turn once again!")
                self.current_card[1] = color
            else:
                print("The computer has put a +4 card, so you have to draw four cards!")
                for i in range(4):
                    self.draw_user_card()
                color = self.choose_random_color()
                self.current_card[1] = color
                print(f'Now, the computer would like to change the color to {color}')
                print("Because it's a +4 card, your turn is skipped :(")
                self.display_user_cards()
            is_plus_four = True
        return(is_plus_four)

    # Evaluates the effects of the special cards on the game's turns
    # Based on the computer's functionality
    def check_power_cards(self):
        """Apply power-card effects; returns False when the turn is kept."""
        reverse_skip = self.play_reverse_or_skip(self.current_card)
        plus_two = self.play_plus_two(self.current_card)
        wild_card = self.play_wild_card(self.current_card)
        plus_four = self.play_plus_four(self.current_card)
        if reverse_skip or plus_two:
            return(False)
        elif wild_card:
            return(True)
        elif plus_four:
            return(False)
        else:
            return(True)

    # Defines the set of actions performed during the computer's turn
    def play_turn_computer(self):
        self.turns += 1
        equal = False
        for card in self.comp_cards:
            equal = self.check_if_cards_are_equal(card)
            if equal:
                print(f'The computer has put a {card[1]} {card[0]}')
                self.place_card(card)
                self.user_turn = self.check_power_cards()
                if len(self.comp_cards) == 0:
                    self.comp_won = True
                return(self.user_turn)
        if not equal:
            print("The computer decided to pass its turn.")
            self.comp_cards.append(list(self.draw_a_card()))
        if len(self.comp_cards) == 0:
            self.comp_won = True
        self.user_turn = True
        return(self.user_turn)

    # Defines the set of actions performed during the user's turn
    def play_turn_user(self):
        self.turns += 1
        self.display_user_cards()
        choice = input("Would you like to (1) play a card or (2) draw another card? Answer (1/2): ")
        if choice == "1":
            which_card = input("Using the numbers from 1 to " + str(len(self.user_cards)) + " choose which card you would like to play: ")
            chosen_card = self.user_cards[int(which_card) - 1]
            drew_instead = False
            if self.check_if_cards_are_equal(chosen_card) == False:
                b = True
                while (b):
                    which_card = input("It wasn't a match! Please choose a different card, or type 0 to draw another card: ")
                    if which_card == "0":
                        self.draw_user_card()
                        b = False
                        # Fix: the original fell through after drawing and
                        # still *placed* the last non-matching card.
                        drew_instead = True
                    else:
                        chosen_card = self.user_cards[int(which_card) - 1]
                        if self.check_if_cards_are_equal(chosen_card):
                            b = False
            if drew_instead:
                self.user_turn = False
                return(self.user_turn)
            print(f'You have put a {chosen_card[1]} {chosen_card[0]}')
            self.place_card(chosen_card)
            self.user_turn = not self.check_power_cards()
            if len(self.user_cards) == 0:
                self.user_won = True
            return(self.user_turn)
        else:
            self.draw_user_card()
            if len(self.user_cards) == 0:
                self.user_won = True
            self.user_turn = False
            return(self.user_turn)

    # Documents the process of playing the game
    def play_game(self):
        b = True
        while b:
            if self.turns == 0:
                self.begin_game()
            if self.user_won or self.comp_won:
                break
            if self.current_card[0] not in ["W", "+4"]:
                print(f'The current card is a {self.current_card[1]} {self.current_card[0]}')
            else:
                print(f'The current color is {self.current_card[1]}')
            if self.user_turn:
                a = self.play_turn_user()
            else:
                print("Now, it's the computer's turn!")
                a = self.play_turn_computer()
        print("However, the game is actually over!")
        print(f'This fierce battle consisted of {self.turns} turn(s), causing {len(self.discard_pile)} card(s) to enter the discard pile')
        if self.user_won:
            print("But, at the end of the day...\nYou won! Congrats!")
        else:
            print("But, at the end of the day...\nThe computer won! Better luck next time :)")
        print("Thank you for playing!")
def main():
    """Set up and run an interactive UNO session."""
    # Creates an UNO game with 7 cards, this can be changed!
    uno = Uno(7)
    print(uno)
    uno.generate_cards()
    uno.play_game()
if __name__ == "__main__":
    main()
|
woogyumi/flaskvue | server.py | from flask import Flask, request, jsonify
from flask_cors import CORS
import pandas as pd
# set the project root directory as the static folder, you can set others.
app = Flask(__name__, static_url_path='')

# In-memory demo table: column names plus one dict per row.
data = {
    "fields": ['host', 'cpu', 'mem', 'hdd'],
    "data": [
        {'host':'wgkim', 'cpu':'30', 'mem':'1800', 'hdd':'20'},
        {'host':'bwlee', 'cpu':'40', 'mem':'1500', 'hdd':'30'},
        {'host':'mgkim', 'cpu':'50', 'mem':'2000', 'hdd':'40'},
    ]
}

# Second demo table, loaded from CSV once at import time.
df = pd.read_csv('sample_data/bike_rental_status.csv')
data2 = {
    "fields": df.columns.tolist(),
    "data": df.to_dict('records')
}
# CORS(app)
@app.route('/')
def root():
    # Serve the SPA entry point from the static folder.
    return app.send_static_file('index.html')
@app.route('/rest/get')
def rest_get():
    # Return the in-memory demo table as JSON.
    return jsonify(data)
@app.route('/rest/set', methods = ['POST'])
def rest_set():
    """Replace the row whose host equals req["key"] with req["row"]."""
    req = request.get_json()
    print(req)
    for idx, row in enumerate(data["data"]):
        if row['host'] == req["key"]:
            data["data"][idx] = req["row"]
            break
    return jsonify(req)
@app.route('/rest/table', methods=['GET', 'POST'])
def table():
    """GET returns the CSV-backed table; POST just echoes the update request."""
    if request.method == 'POST':
        req = request.get_json()
        print(req)
        # TODO: convert to a DataFrame, then merge the updated row
        #for idx in range(len(data2["data"])):
        #    if(data2["data"][idx]['host'] == req["key"]):
        #        data2["data"][idx] = req["row"]
        #        break
        return jsonify(req)
    else:
        return jsonify(data2)
if __name__ == "__main__":
    # Debug mode: auto-reload and verbose errors -- development use only.
    app.debug = True
    app.run(host="0.0.0.0", port=8080)
qkrdmsghk/ssl | ssl_graphmodels/utils/AnchorPool.py | import torch
from torch.nn import Parameter
from torch_scatter import scatter_add, scatter_max
from torch_geometric.utils import softmax
from torch_geometric.nn.inits import uniform
from torch_geometric.utils.num_nodes import maybe_num_nodes
def topk(x, ratio, batch, min_score=None, tol=1e-7):
    """Select the indices of the top-scoring nodes per graph.

    Args:
        x: 1-D tensor of node scores.
        ratio: fraction of each graph's nodes to keep (used when
            min_score is None).
        batch: graph-membership vector assigning each node to a graph.
        min_score: if given, keep nodes scoring above
            min(per-graph max - tol, min_score) instead of using `ratio`.
        tol: tolerance subtracted from each graph's max score.

    Returns:
        1-D LongTensor of kept node indices into `x`.
    """
    if min_score is not None:
        # Make sure that we do not drop all nodes in a graph.
        scores_max = scatter_max(x, batch)[0][batch] - tol
        scores_min = scores_max.clamp(max=min_score)
        perm = torch.nonzero(x > scores_min).view(-1)
    else:
        num_nodes = scatter_add(batch.new_ones(x.size(0)), batch, dim=0)
        batch_size, max_num_nodes = num_nodes.size(0), num_nodes.max().item()
        # Offsets of each graph's first node in the flat node ordering.
        cum_num_nodes = torch.cat(
            [num_nodes.new_zeros(1),
             num_nodes.cumsum(dim=0)[:-1]], dim=0)
        # Scatter scores into a dense (batch_size, max_num_nodes) grid,
        # padding empty slots with -2 so they sort last.
        index = torch.arange(batch.size(0), dtype=torch.long, device=x.device)
        index = (index - cum_num_nodes[batch]) + (batch * max_num_nodes)
        dense_x = x.new_full((batch_size * max_num_nodes, ), -2)
        dense_x[index] = x
        dense_x = dense_x.view(batch_size, max_num_nodes)
        # Rank nodes within each graph, then map ranks back to global ids.
        _, perm = dense_x.sort(dim=-1, descending=True)
        perm = perm + cum_num_nodes.view(-1, 1)
        perm = perm.view(-1)
        # Keep ceil(ratio * num_nodes) nodes per graph.
        k = (ratio * num_nodes.to(torch.float)).ceil().to(torch.long)
        mask = [
            torch.arange(k[i], dtype=torch.long, device=x.device) +
            i * max_num_nodes for i in range(batch_size)
        ]
        mask = torch.cat(mask, dim=0)
        perm = perm[mask]
        # Free the large intermediates eagerly.
        del k, mask, num_nodes,batch_size,cum_num_nodes,index,dense_x,_,
    return perm
def filter_adj(edge_index, edge_attr, perm, num_nodes=None):
    """Restrict a graph's edges to the nodes kept in `perm`.

    Edges with a dropped endpoint are removed; surviving endpoints are
    renumbered to their positions within `perm`.
    """
    num_nodes = maybe_num_nodes(edge_index, num_nodes)

    # old-index -> new-index lookup table; dropped nodes map to -1
    new_ids = perm.new_full((num_nodes, ), -1)
    new_ids[perm] = torch.arange(perm.size(0), dtype=torch.long,
                                 device=perm.device)

    src, dst = new_ids[edge_index[0]], new_ids[edge_index[1]]
    keep = (src >= 0) & (dst >= 0)
    src, dst = src[keep], dst[keep]

    if edge_attr is not None:
        edge_attr = edge_attr[keep]
    return torch.stack([src, dst], dim=0), edge_attr
class AnchorPool(torch.nn.Module):
    """Score-based graph pooling layer.

    Learns a scoring vector `weight`; keeps the top `ratio` fraction of
    nodes per graph (or, when `min_score` is set, nodes above that
    threshold) and filters the edge set accordingly.
    """

    def __init__(self, in_channels, ratio=0.5, min_score=None, multiplier=1, nonlinearity=torch.tanh):
        super(AnchorPool, self).__init__()
        self.in_channels = in_channels  # node feature dimensionality
        self.ratio = ratio              # fraction of nodes to keep per graph
        self.min_score = min_score      # optional absolute score threshold
        self.multiplier = multiplier    # scaling applied to pooled features
        self.nonlinearity = nonlinearity  # squashing for normalized scores
        self.weight = Parameter(torch.Tensor(1, in_channels))
        self.reset_parameters()

    def reset_parameters(self):
        # PyG init helper; presumably uniform in +-1/sqrt(size) -- confirm.
        size = self.in_channels
        uniform(size, self.weight)

    def forward(self, x, edge_index, edge_attr=None, batch=None, batch_=None, attn=None, batch_n=None, pos_n=None):
        """Pool the graph down to its top-scoring nodes.

        Returns:
            (x, edge_index, batch, batch_, batch_n, pos_n, perm, score[perm])
            where `perm` indexes the kept nodes; the auxiliary vectors
            batch_/batch_n/pos_n are filtered alongside when supplied,
            else passed through as None.
        """
        if batch is None:
            batch = edge_index.new_zeros(x.size(0))
        # Score each node by projecting its (attention) features on `weight`.
        attn = x if attn is None else attn
        attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn
        score = (attn * self.weight).sum(dim=-1)
        if self.min_score is None:
            # Ratio mode: length-normalize then squash.
            score = self.nonlinearity(score / self.weight.norm(p=2, dim=-1))
        else:
            # Threshold mode: per-graph softmax scores.
            score = softmax(score, batch)
        perm = topk(score, self.ratio, batch, self.min_score)
        # Gate the kept node features by their scores.
        x = x[perm] * score[perm].view(-1, 1)
        x = self.multiplier * x if self.multiplier != 1 else x
        batch = batch[perm]
        # NOTE(review): `!= None` on tensors works here but `is not None`
        # would be the safer idiom.
        if pos_n != None:
            pos_n = pos_n[perm]
        else:
            pos_n = None
        if batch_ != None:
            batch_ = batch_[perm]
        else:
            batch_ = None
        if batch_n != None:
            batch_n = batch_n[perm]
        else:
            batch_n = None
        edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm,
                                           num_nodes=score.size(0))
        return x, edge_index, batch, batch_, batch_n, pos_n, perm, score[perm]

    def __repr__(self):
        return '{}({}, {}={}, multiplier={})'.format(
            self.__class__.__name__, self.in_channels,
            'ratio' if self.min_score is None else 'min_score',
            self.ratio if self.min_score is None else self.min_score,
            self.multiplier)
|
qkrdmsghk/ssl | ssl_graphmodels/config/parameters.py | import numpy as np
import random
import pandas as pd
# Random-search space: log-spaced learning rates, linear lr-decay grid, etc.
param_grid = {
    'patience': list(range(20, 21)),
    'lr': list(np.logspace(np.log10(0.0005), np.log10(0.1), base=10, num=100)),
    'lr_decay': list(np.linspace(0.6, 1, num=8)),
    'weight_decay': [5e-6, 5e-5, 1e-5, 5e-4, 1e-4, 5e-3, 1e-3],
    'drop_out': [0.5, 0.6, 0.7, 0.8, 0.9],
    'batch_size': [64],
    'hidden_dimension': [128]
}

# Per-dataset grid-search spaces below.
# NOTE(review): patience == -1 presumably disables early stopping --
# confirm against the training loop.
params_20ng = {
    'patience': [-1],
    'lr': [0.0005, 0.0001],
    'lr_decay': [1],
    'weight_decay': [0],
    'drop_out': [0.5],
    'hidden_dimension': [256, 512],
    'batch_size': [128, 256]
}
params_aclImdb = {
    'patience': [-1],
    'lr': [0.0001, 0.0005],
    'lr_decay': [1],
    'weight_decay': [0],
    'drop_out': [0.5],
    'hidden_dimension': [256, 512],
    'batch_size': [128, 256]
}
params_ohsumed = {
    'patience': [-1],
    'lr': [0.001],
    'lr_decay': [1],
    'weight_decay': [0],
    'drop_out': [0.5],
    'hidden_dimension': [256, 512],
    'batch_size': [128, 256]
}
params_R52 = {
    'patience': [-1],
    'lr': [0.001, 0.0005],
    'lr_decay': [1],
    'weight_decay': [0],
    'drop_out': [0.5],
    'hidden_dimension': [256, 512],
    'batch_size': [128, 256]
}
params_R8 = {
    'patience': [-1],
    'lr': [0.001, 0.0005],
    'lr_decay': [1],
    'weight_decay': [0],
    'drop_out': [0.5],
    'hidden_dimension': [96, 128],
    'batch_size': [64, 128]
}
params_mr = {
    'patience': [-1],
    'lr': [0.001, 0.0005],
    'lr_decay': [1],
    'weight_decay': [0],
    'drop_out': [0.5],
    'hidden_dimension': [96, 128],
    'batch_size': [64, 128]
}
# def save_parameters():
# '''
# random search
# :return:
# '''
# MAX_EVALS = 10
# dfs = []
# for tune_id in range(MAX_EVALS):
# np.random.seed(tune_id)
# hps = {k: random.sample(v, 1) for k, v in param_grid_for_docs.items()}
# dfs.append(pd.DataFrame.from_dict(hps))
# dfs = pd.concat(dfs).reset_index(drop=True)
# dfs.to_csv('parameters_for_tuning_docs_new', sep='\t', index=False)
# print(dfs)
from sklearn.model_selection import ParameterGrid


def save_parameters(dataset='ohsumed', params=None):
    """Expand a hyper-parameter grid and save it as a TSV file.

    Generalized: the original hard-coded `params_ohsumed` even though it
    carried a `dataset` variable; defaults preserve the old behavior.

    Args:
        dataset: name used for the output file 'params_<dataset>'.
        params: dict-of-lists grid for sklearn's ParameterGrid; defaults
            to the module-level params_ohsumed.
    """
    if params is None:
        params = params_ohsumed
    rows = []
    for grid in ParameterGrid(params):
        row = pd.DataFrame.from_dict(grid, orient='index').T
        print(row)
        rows.append(row)
    table = pd.concat(rows).reset_index(drop=True)
    table.to_csv('params_{}'.format(dataset), sep='\t', index=False)
    print(table)


save_parameters()
# hps_list = pd.read_csv('parameters_for_tuning_docs', sep='\t', header=0)
# hps_list = hps_list[(hps_list['drop_out']==0.8) & (hps_list['weight_decay']==0.00001)]
# hps_list = hps_list.to_dict('records')
# print(hps_list)
# import os
# df = pd.read_csv(os.path.join('../../mimic3benchmark/evaluation', 'MLP', 'aggr_sum'), sep='\t').sort_values(by='AUC of PRC_value', ascending=False)[:10]
#
# hps_list = hps_list[hps_list['drop_out'].isin(df['dropout'])]
# hps_list = hps_list[hps_list['weight_decay'].isin(df['weight_decay'])]
# hps_list['lr_decay_2'] = hps_list['lr_decay'].apply(lambda x: round(x, 2))
# hps_list['lr_4'] = hps_list['lr'].apply(lambda x: round(x, 4))
#
# hps_list = hps_list[hps_list['lr_decay_2'].isin(df['lr_decay'])]
# hps_list = hps_list[hps_list['lr_4'].isin(df['lr'])]
|
qkrdmsghk/ssl | ssl_graphmodels/config/conf.py | <gh_stars>10-100
import argparse
def arg_config():
    """Build the TextSSL argument parser and parse sys.argv."""
    parser = argparse.ArgumentParser('TextSSL Parameters')
    parser.add_argument('--name', type=str, default='R8')
    parser.add_argument('--pre_trained', type=str, default='')
    parser.add_argument('--type', type=str, default='inter_all', help='')
    parser.add_argument('--tr_split', type=float, default=1.0, help='0.025, 0.05, 0.1, 0.2, 0.25, 0.5, 0.75, 1.0')
    parser.add_argument('--hidden_dim', type=int, default=96, help='96, 256, 512')
    parser.add_argument('--dropout', type=float, default=0, help='0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0')
    parser.add_argument('--lr', type=float, default=1e-3, help='1e-4, 5e-4, 1e-3')
    parser.add_argument('--weight_decay', type=float, default='0', help='0, 0.001, 0.0001')
    parser.add_argument('--batch_size', type=int, default='16', help='16, 64, 128, 256')
    parser.add_argument('--methods', type=str, default='gnn_note_attn_gumbel',
                        help='gnn(WordCooc), gnn_note(Disjoint), gnn_note_attn_soft(Complete), '
                             'gnn_note_attn_gumbel(ours), gnn_note_attn_gumbel_reg(ours w/reg)')
    parser.add_argument('--num_layer', type=int, default='2', help='1,2,3,4')
    parser.add_argument('--aggregate', type=str, default='sum', help='sum, max, mean, attn')
    parser.add_argument('--threshold', type=float, default=0.5, help='0,0.5,0.7,1')
    parser.add_argument('--temperature', type=float, default=0.01, help='0, 0.1, 0.2, 0.5, 1')
    parser.add_argument('--gpu', type=str, default='0')
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--patience', type=int, default=-1)
    parser.add_argument('--seed', type=int, default=123)
    return parser.parse_args()
qkrdmsghk/ssl | ssl_make_graphs/PairData.py | <reponame>qkrdmsghk/ssl
from torch_geometric.data import Data
class PairData(Data):
    """PyG Data object holding two coupled graphs per example: a
    note-level graph (suffix `_n`) and a second graph (suffix `_p`).

    Overriding __inc__ makes PyG mini-batching offset each edge_index by
    the node count of its own graph instead of a single shared count.
    """

    def __init__(self, x_n=None, edge_index_n=None, y_n=None, batch_n=None, pos_n=None,
                 x_p=None, edge_index_p=None, y_p=None, edge_attr_p=None):
        super(PairData, self).__init__()
        # `_n` graph attributes
        self.edge_index_n = edge_index_n
        self.x_n = x_n
        self.y_n = y_n
        self.batch_n = batch_n
        self.pos_n = pos_n
        # `_p` graph attributes
        self.edge_index_p = edge_index_p
        self.x_p = x_p
        self.y_p = y_p
        self.edge_attr_p = edge_attr_p

    def __inc__(self, key, value):
        # Offset each edge_index by the node count of the graph it indexes.
        if key == 'edge_index_n':
            return self.x_n.size(0)
        elif key == 'edge_index_p':
            return self.x_p.size(0)
        # NOTE(review): x_pr / x_gr are never set in __init__; presumably
        # assigned dynamically elsewhere -- confirm before relying on these.
        elif 'edge_index_pr' in key:
            return self.x_pr.size(0)
        elif 'edge_index_gr' in key:
            return self.x_gr.size(0)
        else:
            return super(PairData, self).__inc__(key, value)
|
qkrdmsghk/ssl | re-extract_data/mk_R8_R52.py | <filename>re-extract_data/mk_R8_R52.py
import pandas as pd
pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 1000)
import os
import shutil
import argparse
from tqdm import tqdm
'''
make R8 and R52 dataset!
using filtering methods in https://ana.cachopo.org/datasets-for-single-label-text-categorization;
by eliminating document with less than one or with more than one topic in file "Reuters21578-Apte-115Cat".
'''
def filter_samples(path):
    """Return filenames that appear under exactly one topic directory.

    Documents filed under more than one topic (i.e. multi-label samples)
    are dropped, as required for the single-label R8/R52 datasets.
    """
    freq = {}
    for split in ['train', 'test']:
        for cla in os.listdir(os.path.join(path, split)):
            for file in os.listdir(os.path.join(path, split, cla)):
                freq[file] = freq.get(file, 0) + 1
    return [name for name, count in freq.items() if count == 1]
def move_samples(samples, source_path, target_path, split, labels):
    """Copy the single-label documents of one split into the target tree.

    Args:
        samples: filenames that passed the single-label filter
        source_path: root of the original Reuters directory tree
        target_path: root of the directory tree to create/populate
        split: 'train' or 'test'
        labels: class (topic) directory names to process

    Returns:
        (per-class copy counts dict, total number of files copied)
    """
    dic = {}
    all = 0  # NOTE(review): shadows the builtin `all`
    source_path = source_path  # no-op self-assignment; kept as-is
    for cla in tqdm(labels):
        if not os.path.exists(os.path.join(target_path, split, cla)):
            os.makedirs(os.path.join(target_path, split, cla))
        dic[cla] = 0
        for file in os.listdir(os.path.join(source_path, split, cla)):
            if file in samples:
                shutil.copyfile(os.path.join(source_path, split, cla, file), os.path.join(target_path, split, cla, file))
                dic[cla] += 1
                all+=1
    return dic, all
def cleaned_vocab(name, textgcn_path):
    """Collect the vocabulary of the cleaned corpus and write it to
    DATA_RAW/<name>/<name>_vocab.txt, one word per line.

    NOTE: relies on the module-level `args` for the output directory.
    """
    clean_vocab = set()
    path = os.path.join(textgcn_path, 'corpus', name + '.clean.txt')
    # Fix: both file handles used to leak; `with` closes them reliably.
    with open(path) as docs:
        for line in docs:
            clean_vocab.update(line.split())
    out_path = os.path.join(args.raw_path, name, '{}_vocab.txt'.format(name))
    with open(out_path, 'w') as f:
        f.write('\n'.join(clean_vocab))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Create data for decompensation prediction task.")
    parser.add_argument('--source_path', type=str, default='Reuters21578-Apte-115Cat')
    parser.add_argument('--raw_path', type=str, default='DATA_RAW')
    parser.add_argument('--textgcn_path', type=str, default='textgcn_data')
    parser.add_argument('--name', type=str, default='R8')
    args, _ = parser.parse_known_args()

    # The class list comes from TextGCN's <name>.txt index (tab-separated
    # "index  split  class" lines, one per document).
    labels = os.path.join(args.textgcn_path, args.name + '.txt')
    labels = open(labels).readlines()
    labels = [x.strip().split('\t') for x in labels]
    labels = pd.DataFrame(labels, columns=['index', 'split', 'class'])
    labels = labels['class'].unique().tolist()
    if args.name == 'R52':
        assert len(labels) == 52
    else:
        assert len(labels) == 8

    samples = filter_samples(args.source_path)
    # Fix: the progress messages were swapped relative to the split
    # actually being copied ('training' was printed before split='test').
    print('moving test samples...')
    move_samples(samples, source_path=args.source_path, target_path=os.path.join(args.raw_path, args.name), split='test', labels=labels)
    print('moving training samples...')
    move_samples(samples, source_path=args.source_path, target_path=os.path.join(args.raw_path, args.name), split='train', labels=labels)
    cleaned_vocab(name=args.name, textgcn_path=args.textgcn_path)
    print('----done----, please check your `DATA_RAW/{}` directory'.format(args.name))
|
qkrdmsghk/ssl | ssl_graphmodels/utils/LoadData.py | import sys, os
Your_path = '/data/project/yinhuapark/ssl/'
sys.path.append(Your_path+'ssl_make_graphs')
from PygDocsGraphDataset import PygDocsGraphDataset as PDGD
from torch_geometric.data import DataLoader
import torch
import numpy as np
import random
import argparse
from gensim.models import Word2Vec
import pandas as pd
from tqdm import tqdm
def show_statisctic(train_set, test_set):
    """Print corpus statistics for a train/test pair of document datasets:
    vocabulary sizes and overlap, average sentence counts, average joint
    (x_p) and disjoint (x_n) word-node counts, and class-imbalance ratios.

    NOTE(review): assumes each sample exposes `batch_n`, `x_p`, `x_n`,
    `x_n_id` and each dataset exposes `.data.y_p` — confirm against
    PygDocsGraphDataset.
    """
    # min_len = 10000
    # aver_len = 0
    # max_len = 0
    training_sent_num = []
    training_vocab = set()
    training_words = []
    training_jisjoint_words = []  # (sic: 'disjoint') x_n node counts per doc
    for data in tqdm(train_set):
        # Last sentence-batch id + 1 == number of sentences in the doc.
        training_sent_num.append(data.batch_n[-1].item() + 1)
        training_words.append(data.x_p.size(0))
        training_jisjoint_words.append(data.x_n.size(0))
        for word in data.x_n_id.data.numpy().tolist():
            training_vocab.add(word)
    # value_counts is sorted descending, so last/first = minority/majority.
    train_cs = pd.DataFrame(train_set.data.y_p.tolist())[0].value_counts().values.tolist()
    train_p = train_cs[-1] / train_cs[0]
    test_vocab = set()
    test_sent_num = []
    intersected_vocab = set()
    test_words = []
    test_disjoint_words = []
    for data in tqdm(test_set):
        test_sent_num.append(data.batch_n[-1].item()+1)
        test_words.append(data.x_p.size(0))
        test_disjoint_words.append(data.x_n.size(0))
        for word in data.x_n_id.data.numpy().tolist():
            test_vocab.add(word)
            if word in training_vocab:
                intersected_vocab.add(word)
    test_cs = pd.DataFrame(test_set.data.y_p.tolist())[0].value_counts().values.tolist()
    test_p = test_cs[-1] / test_cs[0]
    avg_trianing_sent_num = np.array(training_sent_num).mean()
    avg_test_sent_num= np.array(test_sent_num).mean()
    avg_sent_num = np.array(training_sent_num+test_sent_num).mean()
    avg_training_words = np.array(training_words).mean()
    avg_training_disjoint_words = np.array(training_jisjoint_words).mean()
    avg_test_words = np.array(test_words).mean()
    avg_test_disjoint_words = np.array(test_disjoint_words).mean()
    avg_words = np.array(training_words+test_words).mean()
    avg_disjoint_words = np.array(training_jisjoint_words+test_disjoint_words).mean()
    print('\n statistic on datasets ... \n')
    print('training_vocab {}, test_vocab {}, intersected_vocab {}, new word porportion {:.4f}'.format(len(training_vocab), len(test_vocab), len(intersected_vocab), 1-(len(intersected_vocab)/len(test_vocab))))
    print('training_sent_num {:.4f}, test_sent_num {:.4f}, all_sent_num {:.4f}'.format(avg_trianing_sent_num, avg_test_sent_num, avg_sent_num))
    print('training_joint_words {:.4f}, test_joint_words {:.4f}, all_joint_words {:.4f}'.format(avg_training_words, avg_test_words, avg_words))
    print('training_disjoint_words {:.4f}, test_disjoint_words {:.4f}, all_disjoint_words {:.4f}'.format(avg_training_disjoint_words, avg_test_disjoint_words, avg_disjoint_words))
    # NOTE(review): 'all_imbalanced_rate' prints the SUM of the two ratios,
    # not their average — confirm intended.
    print('training_imbalanced_rate {:.4f}, test_imbalanced_rate {:.4f}, all_imbalanced_rate {:.4f}'.format(train_p, test_p, (train_p+test_p)) )
class LoadDocsData():
    """Builds train/val/test DataLoaders for a document-graph dataset.

    Wraps PygDocsGraphDataset, applies the Handle_data transform, performs a
    class-stratified train/val split, and returns torch_geometric DataLoaders.
    """

    def __init__(self, name, type, pretrained=''):
        # name: dataset name (e.g. 'R8', 'R52'); type: edge scheme
        # ('inter_all'); pretrained: '' selects GloVe features.
        self.name = name
        self.type = type
        self.pretrained = pretrained
        super(LoadDocsData, self).__init__()
        self.train_set = []
        self.val_set = []
        self.test_set = []
        self.dic_path = '{}_vocab.txt'.format(name)
        # Vocabulary list; a word's id is its line position in this file.
        self.dictionary = open(os.path.join(Your_path+'re-extract_data/DATA_RAW', name, self.dic_path)).read().split()

    class Handle_data(object):
        """Per-sample transform: splits the word-id column off the feature
        columns and (for type 'inter_all') adds inter-sentence edges between
        words of 1-hop-adjacent sentences."""

        def __init__(self, type, pretrained):
            self.type = type
            self.pretrained = pretrained

        def __call__(self, data):
            # pad = self.max_len - data.x_p.shape[0]
            # data.mask_p = torch.from_numpy(np.zeros((self.max_len, 1)))
            # data.mask_p[:data.x_p.shape[0], :] = 1.
            # data.x_p = torch.from_numpy(np.pad(data.x_p, ((0, pad), (0, 0)), mode='constant'))
            # Column 0 carries the vocab id; the remaining columns are the
            # embedding features.
            data.x_n_id = data.x_n[:, 0].to(torch.long)
            data.x_p_id = data.x_p[:, 0].to(torch.long)
            data.x_n = data.x_n[:, 1:].to(torch.float32)
            data.x_p = data.x_p[:, 1:].to(torch.float32)
            if self.type == 'inter_all':
                '''
                connect inter edge!
                connect all the words in 1-hop neighbor sentence!
                edge_mask == -1 --> intra_edge
                edge_mask == 0 --> inter_edge
                '''
                row, col = data.edge_index_n
                edge_mask = torch.full((row.size(0),), -1).to(torch.long)
                # All node pairs; keep those whose sentence ids differ by 1.
                edges = torch.combinations(torch.arange(data.x_n_id.size(0)), with_replacement=False).T
                row_, col_ = edges
                edges_attr = data.batch_n[row_] - data.batch_n[col_]
                row_ = row_[abs(edges_attr) == 1]
                col_ = col_[abs(edges_attr) == 1]
                edge_mask_ = torch.full((row_.size(0), ), 0).to(torch.long)
                row = torch.cat([row_, row])
                col = torch.cat([col_, col])
                data.edge_mask = torch.cat([edge_mask_, edge_mask])
                data.edge_index_n = torch.stack([row, col])
            else:
                print('NO special data type!!')
            return data

    def split_train_val_data(self, seed, tr_split):
        """Keep a `tr_split` fraction of each class for training, then carve
        a random 10% of that off as the validation set."""
        np.random.seed(seed)
        cs = pd.DataFrame(self.train_set.data.y_p.tolist())[0].value_counts().to_dict()
        train_id = []
        cs_num = {}
        for c in cs:
            cs_num[c] = 0
        for i, data in enumerate(self.train_set):
            c = data.y_p.item()
            if cs_num[c] < round(cs[c]*tr_split):
                cs_num[c] += 1
                train_id.append(i)
        print('training perception->{}'.format(len(train_id)/len(self.train_set)))
        # NOTE(review): uses the stdlib `random` module, which is NOT seeded
        # by the np.random.seed call above — confirm seeding elsewhere.
        val_id = random.sample(train_id, int(len(train_id) * 0.1))
        train_id = [x for x in train_id if x not in val_id]
        self.val_set = self.train_set[val_id]
        self.train_set = self.train_set[train_id]
        return self.train_set, self.val_set

    def get_train_test(self, batch_size, seed, tr_split):
        """Build the three DataLoaders; returns
        (train_loader, val_loader, test_loader, num_class)."""
        print('\nload test data...')
        self.test_set = PDGD(name=self.name, split='test', dic=self.dictionary, pt=self.pretrained, transform=self.Handle_data(self.type, self.pretrained))
        print(len(self.test_set))
        print('load train data...')
        self.train_set = PDGD(name=self.name, split='train', dic=self.dictionary, pt=self.pretrained, transform=self.Handle_data(self.type, self.pretrained))
        print(len(self.train_set))
        # show_statisctic(self.train_set, self.test_set)
        self.train_set, self.val_set = self.split_train_val_data(seed, tr_split)
        # All three splits must contain every class.
        assert self.val_set.data.y_p.unique().size(0) == \
               self.train_set.data.y_p.unique().size(0) == \
               self.test_set.data.y_p.unique().size(0)
        num_class = self.val_set.data.y_p.unique().size(0)
        follow_batch = ['x_n', 'x_p']
        if self.type != '':
            follow_batch.append('x_'+self.type)
        train_loader = DataLoader(self.train_set[:], batch_size=batch_size, follow_batch=follow_batch, shuffle=True)
        val_loader = DataLoader(self.val_set[:], batch_size=batch_size, follow_batch=follow_batch, shuffle=True)
        test_loader = DataLoader(self.test_set[:], batch_size=1, follow_batch=follow_batch, shuffle=False)
        return train_loader, val_loader, test_loader, num_class
def label_distribution(data_set):
    """Print the binary label counts of `data_set` and return the total
    number of samples. Each sample must expose a 1-element tensor `y_p`."""
    positives = 0
    negatives = 0
    target = torch.tensor([1])
    for sample in data_set:
        if sample.y_p == target:
            positives += 1
        else:
            negatives += 1
    print('#y_1: ', str(positives), ';#y_0: ', str(negatives))
    return positives + negatives
# Smoke-test entry point: load one dataset and build the loaders once.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Load data")
    parser.add_argument('--name', type=str, default='R52')
    parser.add_argument('--type', type=str, default='inter_all')
    parser.add_argument('--pretrained', type=str, default='')
    args, _ = parser.parse_known_args()
    # NOTE(review): args.pretrained is parsed but a hard-coded '' is passed.
    loader = LoadDocsData(name=args.name, type=args.type, pretrained='')
    train_loader, val_loader, test_loader, n_class = loader.get_train_test(batch_size=1, seed=2, tr_split=1.0)
|
qkrdmsghk/ssl | ssl_graphmodels/config/Tokenizer.py | <gh_stars>10-100
import numpy as np
from tqdm import tqdm
import os
class GloveTokenizer:
    """Word <-> index tokenizer backed by a GloVe embedding text file.

    Each line of `filename` is `<word> <dim floats>`. Optional `unk`/`pad`
    tokens are appended after the file vocabulary: `unk` gets a random
    vector, `pad` a zero vector.
    """

    def __init__(self, filename, unk='<unk>', pad='<pad>'):
        self.filename = filename
        self.unk = unk
        self.pad = pad
        self.stoi = dict()   # word -> index
        self.itos = dict()   # index -> word
        self.embedding_matrix = list()
        with open(filename, 'r', encoding='utf8') as f:  # Read tokenizer file
            # Iterate the handle directly instead of readlines() so the raw
            # text is not held in memory alongside the parsed vectors.
            for i, line in enumerate(tqdm(f)):
                values = line.split()
                self.stoi[values[0]] = i
                self.itos[i] = values[0]
                self.embedding_matrix.append([float(v) for v in values[1:]])
        if self.unk is not None:  # Add unk token into the tokenizer
            i += 1
            self.stoi[self.unk] = i
            self.itos[i] = self.unk
            self.embedding_matrix.append(np.random.rand(len(self.embedding_matrix[0])))
        if self.pad is not None:  # Add pad token into the tokenizer
            i += 1
            self.stoi[self.pad] = i
            self.itos[i] = self.pad
            self.embedding_matrix.append(np.zeros(len(self.embedding_matrix[0])))
        # Convert from double to float32 once, for memory and speed.
        self.embedding_matrix = np.array(self.embedding_matrix).astype(np.float32)

    def encode(self, sentence):
        """Map a sentence (str, or non-empty iterable of words) to a list of
        indices; out-of-vocabulary words map to the `unk` index."""
        if isinstance(sentence, str):  # idiomatic check (was type(...) == str)
            sentence = sentence.split(' ')
        elif len(sentence):  # Convertible to list
            sentence = list(sentence)
        else:
            raise TypeError('sentence should be either a str or a list of str!')
        return [self.stoi.get(word, self.stoi[self.unk]) for word in sentence]

    def decode(self, encoded_sentence):
        """Inverse of encode(): map a sequence of indices back to words."""
        try:
            encoded_sentence = list(encoded_sentence)
        except Exception as e:
            print(e)
            raise TypeError('encoded_sentence should be either a str or a data type that is convertible to list type!')
        return [self.itos[encoded_word] for encoded_word in encoded_sentence]

    def embedding(self, encoded_sentence):
        """Return the (len, dim) float32 embedding rows for the given indices."""
        return self.embedding_matrix[np.array(encoded_sentence)]
class Pretrained_embedding_for_dataset:  # This class is used to achieve parameters sharing among datasets
    """Re-index a dataset vocabulary against a pretrained tokenizer so that
    several datasets can share one embedding-matrix layout.

    Index 0 is reserved for '<unk>' and 1 for '<pad>'; every training word
    found in the tokenizer vocabulary follows, in order of first appearance.
    """

    def __init__(self, tokenizer, train_vocab):
        self.tokenizer = tokenizer
        self.train_vocab = train_vocab
        self.stoi = {'<unk>': 0, '<pad>': 1}  # Re-index
        self.itos = {0: '<unk>', 1: '<pad>'}  # Re-index
        self.vocab_count = len(self.stoi)
        self.embedding_matrix = None
        self.build_vocab()

    def build_vocab(self):
        """Keep only in-tokenizer words and build the matching embedding matrix."""
        for vocab in self.train_vocab:
            # Skip duplicates: the original re-indexed a repeated word,
            # leaving stale itos entries pointing at the same string.
            if vocab in self.stoi:
                continue
            # Membership directly on the dict (was `in ....keys()`).
            if vocab in self.tokenizer.stoi:
                self.stoi[vocab] = self.vocab_count
                self.itos[self.vocab_count] = vocab
                self.vocab_count += 1
        self.embedding_matrix = self.tokenizer.embedding(self.tokenizer.encode(list(self.stoi.keys())))
# Script section: build and cache the GloVe-initialised embedding matrix and
# the filtered vocabulary for one dataset.
dataset = 'ohsumed'
path = os.path.join('/data/project/yinhuapark/DATA_PRE', dataset)
tokenizer = GloveTokenizer('glove.6B.300d.txt')
train_vocab = open(os.path.join(path, 'train_vocab.txt'), 'r').read().split()
# NOTE(review): `dataset` is rebound here from a str to the embedding object.
dataset = Pretrained_embedding_for_dataset(tokenizer=tokenizer, train_vocab=train_vocab)
np.save(os.path.join(path, 'train_glove_embedding'), dataset.embedding_matrix)
f = open(os.path.join(path, 'train_glove_vocab.txt'), 'w')
f.write('\n'.join(list(dataset.stoi.keys())))
f.close()
qkrdmsghk/ssl | re-extract_data/remove_words.py | import os
import re
from nltk.tokenize import sent_tokenize
from tqdm import tqdm
import argparse
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py

    Keeps alphanumerics and (),!?'` ; splits common English contractions into
    their own tokens, pads punctuation with spaces, collapses whitespace and
    lower-cases the result.
    """
    string = re.sub(r"[^A-Za-z0-9(),!?'`]", " ", string)
    # Split contractions so e.g. "it's" tokenizes as "it 's".
    string = re.sub(r"'s", " 's", string)
    string = re.sub(r"'ve", " 've", string)
    string = re.sub(r"n't", " n't", string)
    string = re.sub(r"'re", " 're", string)
    string = re.sub(r"'d", " 'd", string)
    string = re.sub(r"'ll", " 'll", string)
    # Pad punctuation with spaces so it tokenizes separately.
    # BUG FIX: the replacements used to be " \( ", " \) ", " \? ", which are
    # invalid escape sequences in a replacement template (deprecated, becoming
    # errors in newer Python) and injected a stray backslash into the output.
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " ( ", string)
    string = re.sub(r"\)", " ) ", string)
    string = re.sub(r"\?", " ? ", string)
    # Collapse any run of whitespace into a single space.
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
def filtering(line, vocab):
    """Clean `line` with clean_str() and drop every token not in `vocab`;
    returns the surviving tokens joined by single spaces."""
    cleaned = clean_str(line)
    kept = [token for token in cleaned.split() if token in vocab]
    return ' '.join(kept)
def processing_dataset(dataset, raw_path):
    '''
    Sentence-split and vocabulary-filter every raw document of `dataset`
    (20ng, ohsumed, R8, R52), writing the result next to the original file
    with an `_s` suffix, one cleaned sentence per line.
    :return: None
    '''
    # Use a set for O(1) membership tests in filtering(); with the original
    # list the per-word lookup made the whole pass quadratic in vocab size.
    vocab_path = os.path.join(raw_path, dataset, '{}_vocab.txt'.format(dataset))
    with open(vocab_path) as vf:
        vocab = set(vf.read().split('\n'))
    for split in ['train', 'test']:
        for y in os.listdir(os.path.join(raw_path, dataset, split)):
            sent_len = []
            for name in tqdm(os.listdir(os.path.join(raw_path, dataset, split, y)), desc='processing split {}: category {}...'.format(split, y)):
                if '_s' in name:
                    continue  # skip already-processed output files
                path = os.path.join(raw_path, dataset, split, y, name)
                # Context managers close both handles (the original leaked
                # the read handle by reusing `f` for the output file).
                with open(path, 'r', encoding='utf8', errors='ignore') as f:
                    sentences = sent_tokenize(f.read())
                sent_len.append(len(sentences))
                doc_content_list = [filtering(sent, vocab) for sent in sentences]
                with open(path + "_s", 'w') as out:
                    out.write('\n'.join(doc_content_list))
# CLI entry: filter out-of-vocabulary words from every document of a dataset.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="remove the infrequent words")
    parser.add_argument('--name', type=str, default='R52')
    parser.add_argument('--raw_path', type=str, default='DATA_RAW')
    args, _ = parser.parse_known_args()
    processing_dataset(dataset=args.name, raw_path=args.raw_path)
    print('----done----, please check your `DATA_RAW/{}/train or test` directory; the processed file is *** with \'s\''.format(args.name))
qkrdmsghk/ssl | ssl_make_graphs/create_cooc_document.py |
from __future__ import absolute_import
from __future__ import print_function
import os, sys
import argparse
from tqdm import tqdm
sys.path.append('../ssl_make_graphs')
from document_utils import *
def process_partition(partition, window_size, max_len):
    """Build a co-occurrence DataFrame for every document of one partition
    ('train' or 'test') and save it as TSV under
    `<args.pre_path>/<args.name>/<partition>_cooc/<class>/`.

    Relies on the module-level `args` namespace (raw_path, pre_path, name).
    """
    output_dir = os.path.join(args.pre_path, args.name, partition+'_cooc')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    for y in os.listdir(os.path.join(args.raw_path, args.name, partition))[:]:
        if not os.path.exists(os.path.join(output_dir, y)):
            os.mkdir(os.path.join(output_dir, y))
        # Keep only the sentence-split files produced by remove_words.py.
        # NOTE(review): this matches ANY filename containing the letter 's',
        # not just the intended '_s' suffix — confirm raw names are numeric.
        docs = list(filter(lambda x: x.find('s') != -1, os.listdir(os.path.join(args.raw_path, args.name, partition, y))))
        for name in tqdm(docs[:], desc='Iterating over docs in {}_{}_{}'.format(args.name, partition, y)):
            doc = open(os.path.join(args.raw_path, args.name, partition, y, name), 'r')
            doc_cooc = document_cooc(doc, window_size=window_size, MAX_TRUNC_LEN=max_len)
            # document_cooc returns '' when no pair was found; skip those.
            if len(doc_cooc) > 0:
                doc_cooc.to_csv(os.path.join(output_dir, y, name), sep='\t', index=False)
            else:
                print(y+'_'+name)
# CLI entry: build train and test co-occurrence graphs for one dataset.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="create document cooc graph")
    parser.add_argument('--raw_path', type=str, default='../re-extract_data/DATA_RAW')
    parser.add_argument('--pre_path', type=str, default='../re-extract_data/DATA_PRE')
    parser.add_argument('--name', type=str, default='R52')
    parser.add_argument('--window_size', type=str, default='3')
    parser.add_argument('--max_len', type=str, default='350')
    args, _ = parser.parse_known_args()
    if not os.path.exists(args.pre_path):
        os.makedirs(args.pre_path)
    if not os.path.exists(os.path.join(args.pre_path, args.name)):
        os.makedirs(os.path.join(args.pre_path, args.name))
    # window_size/max_len are declared as str; cast once here.
    process_partition('train', window_size=int(args.window_size), max_len=int(args.max_len))
    process_partition('test', window_size=int(args.window_size), max_len=int(args.max_len))
    print('----done----,\n'
          ' please check your `DATA_PRE/{}/train_cooc or test_cooc` directory;\n'
          ' the processed file is *** with \'s\' in dataframe format'.format(args.name))
qkrdmsghk/ssl | ssl_make_graphs/ConstructDatasetByDocs.py | import pandas as pd
import networkx as nx
import numpy as np
from scipy import sparse
import torch, sys
Your_path = '/data/project/yinhuapark/ssl/'
sys.path.append(Your_path+'ssl_make_graphs')
sys.path.append(Your_path+'ssl_graphmodels')
from PairData import PairData
pd.set_option('display.max_columns', None)
import os.path as osp, os
from tqdm import tqdm
def combine_same_word_pair(df, col_name):
    """Average the `freq` of duplicate (word1, word2) rows.

    Returns a DataFrame with columns ['word1', 'word2', col_name] where
    col_name holds each pair's mean frequency (sum / count — exactly what
    the original hand-rolled nested groupby computed).
    """
    # One vectorized groupby replaces two nested Python loops; groupby sorts
    # its keys by default, matching the original (word1, word2) row order.
    out = df.groupby(['word1', 'word2'], as_index=False)['freq'].mean()
    return out.rename(columns={'freq': col_name})
def graph_to_torch_sparse_tensor(G_true, edge_attr=None, node_attr=None):
    """Convert a networkx graph into PyG-style tensors.

    Returns (edge_index, edge_attrs, x, batch_n, pos_n):
    - edge_index: (2, E) long tensor in adjacency-matrix COO order;
    - edge_attrs: per-edge values of `edge_attr` (empty if edge_attr is None);
    - x: (N, D) float32 node features from each node's 'node_emb' attribute;
    - batch_n / pos_n: per-node 'paragraph_id' / 'node_pos' long tensors
      (empty unless requested via node_attr).
    """
    G = nx.convert_node_labels_to_integers(G_true, label_attribute='word_name')
    # NOTE(review): densifying just to re-sparsify is wasteful for large
    # graphs, but is kept to preserve the exact COO edge ordering.
    A_G = np.array(nx.adjacency_matrix(G).todense())
    sparse_mx = sparse.csr_matrix(A_G).tocoo()
    # Convert the scipy sparse matrix into torch tensors.
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    edge_attrs = []
    if edge_attr is not None:  # identity check (was `!= None`)
        for i in range(sparse_mx.row.shape[0]):
            edge_attrs.append(G.edges[sparse_mx.row[i], sparse_mx.col[i]][edge_attr])
    # BUG FIX: np.long was removed in NumPy 1.24; np.int64 is the dtype
    # torch.from_numpy maps to torch.long.
    edge_index = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    edge_attrs = torch.from_numpy(np.array(edge_attrs)).to(torch.float32)
    x = []
    batch_n = []
    pos_n = []
    for node in range(len(G)):
        # word_name = G.nodes[node]['word_name']
        x.append(G.nodes[node]['node_emb'])
        if node_attr is not None:
            for attr in node_attr:
                if attr == 'paragraph_id':
                    batch_n.append(G.nodes[node][attr])
                elif attr == 'node_pos':
                    pos_n.append(G.nodes[node][attr])
                else:
                    print('sth wrong with edge attribute')
    x = np.array(x)
    if len(x.shape) != 2:
        # Debugging aid: ragged node embeddings would collapse to dtype object.
        print(x.shape)
    x = torch.from_numpy(np.array(x)).to(torch.float32)
    batch_n = torch.from_numpy(np.array(batch_n)).to(torch.long)
    pos_n = torch.from_numpy(np.array(pos_n)).to(torch.long)
    return edge_index, edge_attrs, x, batch_n, pos_n
def set_word_id_to_node(G, dictionary, node_emb, word_embeddings):
    """Attach a 301-dim feature (vocab index followed by a 300-dim embedding)
    to every node of `G` under the attribute name `node_emb`.

    Words missing from `word_embeddings` get a small uniform random vector;
    a word missing from `dictionary` is a hard error.
    """
    for token in G:
        if token not in dictionary:
            print('no!!')
            assert token in dictionary
        index_part = np.array([dictionary.index(token)])
        if token in word_embeddings:
            vector = np.array(word_embeddings[token])
        else:
            vector = np.random.uniform(-0.01, 0.01, 300)
        G.nodes[token][node_emb] = np.concatenate([index_part, vector]).reshape(301)
    return G
class ConstructDatasetByDocs():
    """Builds a list of PairData graph samples (one per document) from the
    per-document co-occurrence DataFrames written by create_cooc_document.py.

    Each sample pairs a document-level word graph (x_p / edge_index_p) with
    the disjoint union of its per-sentence graphs (x_n / edge_index_n).
    """

    def __init__(self, pre_path, split, dictionary, pt):
        # pre_path: <DATA_PRE>/<dataset>; split: 'train' | 'test';
        # dictionary: vocab list (position = word id); pt: '' => GloVe.
        self.pre_path = pre_path
        self.split = split
        self.dictionary = dictionary
        self.pt = pt
        super(ConstructDatasetByDocs).__init__()
        self.all_cats = []
        self.word_embeddings = {}
        if pt == "":
            print('importing glove.6B.300d pretrained word representation...')
            with open(Your_path+'ssl_graphmodels/config/glove.6B.300d.txt', 'r') as f:
                for line in f.readlines():
                    data = line.split()
                    self.word_embeddings[str(data[0])] = list(map(float, data[1:]))
        else:
            print('pre trained is not available!')

    def generate_doc_graph(self, df):
        """Collapse duplicate word pairs and return the document-level
        undirected word graph with unit edge attributes."""
        # print('\nraw---df: ', len(df))
        result_df = combine_same_word_pair(df, col_name='global_freq')
        result_df['edge_attr'] = 1
        result_graph = nx.from_pandas_edgelist(result_df, 'word1', 'word2', 'edge_attr')
        return result_graph

    def construct_datalist(self):
        """Iterate `<pre_path>/<split>_cooc/<class>/*` and build one PairData
        per document; returns the list of samples (class index = folder order)."""
        Data_list = []
        cooc_path = osp.join(self.pre_path, self.split+'_cooc')
        for y_id, y in enumerate(os.listdir(cooc_path)):
            patients = os.listdir(os.path.join(cooc_path, y))
            for patient in tqdm(patients, desc='Iterating over patients in {}_{}_cooc'.format(y, self.split)):
                p_df = pd.read_csv(osp.join(cooc_path, y, patient), sep='\t', header=0)
                # Document-level graph over all word pairs.
                G_p = self.generate_doc_graph(p_df)
                G_p = set_word_id_to_node(G_p, self.dictionary, node_emb='node_emb', word_embeddings=self.word_embeddings)
                edge_index_p, edge_attrs_p, x_p, _, _ = graph_to_torch_sparse_tensor(G_p, edge_attr='edge_attr')
                y_p = torch.from_numpy(np.array([y_id])).to(torch.long)
                # Sentence-level graphs, one per paragraph_id, merged disjointly.
                G_n_list = []
                y_n_list = []
                for n_id, n_df in p_df.groupby(by='paragraph_id'):
                    n_df = n_df.dropna(axis=0)
                    G_n = nx.from_pandas_edgelist(n_df, 'word1', 'word2', ['freq'])
                    attrs = {}
                    for node in G_n:
                        # node_pos links a sentence node back to its row in G_p.
                        attrs[node] = {'node_pos': list(G_p.nodes).index(node), 'paragraph_id': n_id}
                    nx.set_node_attributes(G_n, attrs)
                    G_n = set_word_id_to_node(G_n, self.dictionary, 'node_emb', self.word_embeddings)
                    G_n_list.append(G_n)
                    y_n_list.append([n_id])
                G_n = nx.disjoint_union_all(G_n_list)
                edge_index_n, _, x_n, batch_n, pos_n = graph_to_torch_sparse_tensor(G_n, node_attr=['paragraph_id', 'node_pos'])
                y_n = torch.from_numpy(np.array(y_n_list)).to(torch.long)
                # print(x_n, edge_index_n, y_n, batch_n)
                # print(x_p, edge_index_p, y_p, edge_attrs_p)
                data = PairData(x_n, edge_index_n, y_n, batch_n, pos_n, x_p, edge_index_p, y_p, edge_attrs_p)
                assert data.x_p.squeeze().max().item() < len(self.dictionary)
                Data_list.append(data)
        return Data_list
# Library module: construction is driven by PygDocsGraphDataset.process().
if __name__ == '__main__':
    pass
|
qkrdmsghk/ssl | ssl_graphmodels/pyg_models/train_docs.py | import sys, os
Your_path = '/data/project/yinhuapark/ssl/'
sys.path.append(Your_path+'ssl_graphmodels')
import random
from config.conf import arg_config
from utils.LoadData import LoadDocsData
from models_docs import DocNet
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm
use_gpu = torch.cuda.is_available()
def train(loader, training=True):
    """Run one epoch over `loader` and return the mean per-batch loss.

    With training=True performs optimizer updates; otherwise only evaluates.
    Relies on module-level `model`, `optimizer`, `multi_crit`, and `args`.
    """
    total_loss = 0
    if training:
        model.train()
    else:
        model.eval()
    for index, data in enumerate(loader):
        if training:
            optimizer.zero_grad()
        data = data.to(args.device)
        out_p, kl_term = model(data)
        y_p = data.y_p
        loss_p = multi_crit(out_p, y_p)
        # NOTE(review): kl_term is added to the REPORTED loss but only loss_p
        # is backpropagated — confirm this is intended.
        total_loss += loss_p.item() + (kl_term.item())
        if training:
            loss_p.backward()
            optimizer.step()
    total_loss = total_loss / len(loader)
    torch.cuda.empty_cache()
    return total_loss
def test(loader):
    """Evaluate the module-level `model` on `loader`.

    Returns (accuracy, labels, preds) where labels/preds are concatenated
    numpy arrays of ground-truth ids and raw output scores.
    """
    model.eval()
    nb_correct, nb_total = 0, 0
    labels = []
    preds = []
    for data in loader:
        data = data.to(args.device)
        with torch.no_grad():
            pred, _ = model(data)
        y = data.y_p.data
        nb_correct += (pred.data.argmax(dim=-1) == y).sum().item()
        nb_total += len(y)
        labels.append(y)
        preds.append(pred.data)
    labels = torch.cat(labels).detach().cpu().numpy()
    preds = torch.cat(preds).detach().cpu().numpy()
    return nb_correct / nb_total, labels, preds
def train_main(train_loader, val_loader, test_loader, patience):
    """Train with early stopping on validation accuracy.

    Snapshots the model state whose validation accuracy is best and evaluates
    it on the test set. Stops after `patience` consecutive non-improving
    epochs (-1 disables early stopping).

    Returns (best_model_state, best_epoch, best_test_acc, per-epoch results,
    best test predictions, test labels). Relies on module-level `model`,
    `args`, and the sibling train()/test() helpers.
    """
    # (removed an unused `min_val_loss` local)
    max_val_acc = 0
    step = 0            # epochs since the last validation improvement
    best_epoch = 0
    best_model = ''
    best_test_results = 0
    all_results = []
    with tqdm(total=args.epoch, bar_format='{desc}{n_fmt}/{total_fmt} |{bar}|{postfix}', ncols=80) as t:
        for epoch in range(args.epoch):
            t.set_description(desc='train and validate')
            train_loss = train(train_loader, training=True)
            val_loss = train(val_loader, training=False)
            val_results, _, _ = test(val_loader)
            if val_results >= max_val_acc:
                # Validation improved: snapshot the model and test metrics.
                step = 0
                test_results, labels, preds = test(test_loader)
                max_val_acc = val_results
                best_model = model.state_dict()
                best_epoch = epoch
                best_test_results = test_results
                best_preds = preds
            elif val_results < max_val_acc:
                step += 1
                if step > patience and patience != -1:
                    break
            all_results.append([epoch, train_loss, val_loss, val_results, test_results])
            t.set_postfix_str('val_loss={:^7.3f};val_acc={:^7.3f};test_acc={:^7.3f}'.format(val_loss, val_results, test_results))
            t.update()
    del train_loader, test_loader, val_loader, val_loss, train_loss
    return best_model, best_epoch, best_test_results, all_results, best_preds, labels
def setup_seed(seed):
    """Seed every RNG in use (stdlib random, NumPy, torch CPU+CUDA) and force
    deterministic cuDNN kernels so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
# CLI entry: configure device + seed, load data, build DocNet and train it.
if __name__ == '__main__':
    args = arg_config()
    SEED = args.seed
    torch.set_num_threads(1)
    if torch.cuda.is_available() and int(args.gpu) >= 0:
        args.device = torch.device('cuda:'+ args.gpu)
        # NOTE(review): benchmark=True conflicts with deterministic=True;
        # setup_seed() re-asserts deterministic right after.
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = True
        setup_seed(SEED)
    else:
        # NOTE(review): the CPU path never calls setup_seed — confirm intent.
        args.device = torch.device('cpu')
    loader = LoadDocsData(name=args.name, type=args.type)
    result_dfs = []
    methods = args.methods
    train_loader, val_loader, test_loader, num_class = loader.get_train_test(batch_size=args.batch_size, seed=SEED, tr_split=args.tr_split)
    # Collect all hyper-parameters in one dict for logging and model build.
    params = vars(args)
    params['input_dim'] = 300
    params['output_dim'] = num_class
    params['seed'] = SEED
    print(pd.DataFrame.from_dict(params, orient='index'))
    model = DocNet(params=params).to(params['device'])
    optimizer = torch.optim.Adam(model.parameters(), lr=params['lr'], weight_decay=params['weight_decay'])
    multi_crit = torch.nn.CrossEntropyLoss()
    best_model, best_epoch, test_results, all_results, best_preds, labels = train_main(train_loader, val_loader, test_loader, params['patience'])
    # del best_model, best_epoch, test_results, model
qkrdmsghk/ssl | ssl_make_graphs/document_utils.py | <reponame>qkrdmsghk/ssl
import pandas as pd
import torch
import numpy as np
class COOC():
    """Sliding-window word co-occurrence counter.

    `sliding_find_cooc` accumulates (across calls) a dict mapping ordered
    word pairs to their co-occurrence count plus the number of windows seen;
    `dict2df` normalizes the counts by the window count into a DataFrame.
    """

    def __init__(self, window_size):
        # self.item_lists = item_lists
        self.window_size = window_size
        super(COOC, self).__init__()
        self.item_pair_set = {}  # (w1, w2) -> count; both orders are stored
        self.windows = 0         # number of windows processed so far

    def make_item_pair(self, item_set):
        """Return every ordered pair of distinct items within one window
        (each unordered pair contributes both directions)."""
        item_pair_set = []
        for i in range(1, len(item_set)):
            for j in range(0, i):
                if item_set[i] != item_set[j]:
                    # Store both directions so the resulting graph is symmetric.
                    item_pair_set.append((item_set[i], item_set[j]))
                    item_pair_set.append((item_set[j], item_set[i]))
        return item_pair_set

    def sliding_find_cooc(self, item_list):
        """Slide a window of self.window_size over item_list, counting the
        co-occurring pairs; returns (pair dict, total window count)."""
        if len(item_list) <= self.window_size:
            for pair in self.make_item_pair(item_list):
                self.item_pair_set[pair] = self.item_pair_set.get(pair, 0) + 1
            # BUG FIX: a short line is one window; the original never counted
            # it, leaving self.windows == 0 so dict2df divided by zero.
            self.windows += 1
        else:
            for i in range(len(item_list) - self.window_size + 1):
                sub_list = item_list[i: i + self.window_size]
                for pair in self.make_item_pair(sub_list):
                    self.item_pair_set[pair] = self.item_pair_set.get(pair, 0) + 1
            self.windows += len(item_list) - self.window_size + 1
        return self.item_pair_set, self.windows

    def dict2df(self, all_cooc_pairs, windows):
        """Turn the pair-count dict into a DataFrame with columns
        ['freq', 'word1', 'word2'], freq normalized by `windows` and sorted
        descending. Returns '' when there are no pairs (callers test len)."""
        if len(all_cooc_pairs) > 0:
            df = pd.DataFrame.from_dict(all_cooc_pairs, orient='index').reset_index()
            df = df.sort_values(by=0, ascending=False).reset_index(drop=True)
            df['word1'] = df['index'].apply(lambda x: x[0])
            df['word2'] = df['index'].apply(lambda x: x[1])
            df = df.iloc[:, 1:]
            df.columns = ['freq', 'word1', 'word2']
            df['freq'] = df['freq'] / windows  # normalize counts per window
            return df
        else:
            # print('no pairs')
            return ''
def document_cooc(doc, window_size, MAX_TRUNC_LEN=350):
    """Build one co-occurrence DataFrame per line (paragraph) of the open
    file `doc`, truncating the document at MAX_TRUNC_LEN tokens.

    Returns a concatenated DataFrame tagged with a `paragraph_id` column, or
    '' when no line produced any pair (callers test len(...) > 0).
    """
    par_id = 0
    dfs = []
    doc_len = 0
    # (removed an unused `flag` local)
    for line in doc.readlines():
        line = line.split()
        # Stop before the truncation budget would be exceeded.
        if MAX_TRUNC_LEN != None and doc_len+len(line) >= MAX_TRUNC_LEN:
            break
        doc_len += len(line)
        cooc = COOC(window_size=window_size)
        item_pair_set, windows = cooc.sliding_find_cooc(line)
        if len(set(item_pair_set)) > 1:
            ddf = cooc.dict2df(item_pair_set, windows)
            ddf['paragraph_id'] = par_id
            par_id += 1
            dfs.append(ddf)
    # BUG FIX: the original asserted unconditionally and raised a TypeError
    # (int <= None) whenever truncation was disabled.
    if MAX_TRUNC_LEN is not None:
        assert doc_len <= MAX_TRUNC_LEN
    if len(dfs)>0:
        dfs = pd.concat(dfs)
    else:
        dfs = ''
    return dfs
def document_cooc_bert(doc, tokenizer, model, window_size, MAX_TRUNC_LEN=400):
    """BERT variant of document_cooc(): per-line co-occurrence DataFrames
    over wordpiece tokens PLUS the matching contextual embeddings.

    Returns (dfs, embs): concatenated pair-frequency rows and per-token
    embedding rows, both tagged with `paragraph_id`, or ('', '') when no
    line produced a pair. NOTE(review): `model` is assumed to return
    (encoded_layers, _) like the old pytorch-pretrained-bert API — confirm.
    """
    par_id = 0
    dfs = []
    embs = []
    doc_len = 0
    flag = False  # NOTE(review): never used
    for line in doc.readlines():
        # line = line.split()
        # raw_len = len(line.split())
        tokens = tokenizer.tokenize(line)
        # print(len(line)-raw_len)
        # Truncate the document at MAX_TRUNC_LEN wordpieces.
        if MAX_TRUNC_LEN != None and doc_len+len(tokens) >= MAX_TRUNC_LEN:
            break
        doc_len += len(tokens)
        cooc = COOC(window_size=window_size)
        item_pair_set, windows = cooc.sliding_find_cooc(tokens)
        if len(set(item_pair_set)) > 1:
            ddf = cooc.dict2df(item_pair_set, windows)
            ddf['paragraph_id'] = par_id
            dfs.append(ddf)
            indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
            encoded_layers, _ = model(torch.tensor([indexed_tokens]))
            # Last hidden layer: one row per wordpiece.
            emb = pd.DataFrame(np.array(encoded_layers[-1].squeeze(0).data))
            emb['tokens'] = tokens
            emb_ = []
            # Merge duplicate tokens by SUMMING their contextual vectors so
            # there is exactly one embedding row per distinct token.
            for t, tdf in emb.groupby('tokens'):
                if len(tdf) > 1:
                    tdf = tdf.iloc[:, :-1].sum()
                    # todo!! one word!!
                    tdf['tokens'] = t
                    tdf = pd.DataFrame(tdf).transpose()
                assert len(tdf) ==1
                emb_.append(tdf)
            emb_ = pd.concat(emb_)
            # print('concat #{} same words'.format(emb.shape[0]-emb_.shape[0]))
            # Pair table and embedding table must cover the same token set.
            assert ddf['word1'].unique().shape[0] == emb_.shape[0]
            emb_['paragraph_id'] = par_id
            embs.append(emb_)
            par_id += 1
    # NOTE(review): raises TypeError if MAX_TRUNC_LEN is None (int <= None).
    assert doc_len <= MAX_TRUNC_LEN
    if len(dfs)>0:
        dfs = pd.concat(dfs)
        embs = pd.concat(embs)
    else:
        dfs = ''
        embs = ''
    return dfs, embs
# Library module; nothing to run directly.
if __name__ == '__main__':
    pass
|
qkrdmsghk/ssl | ssl_make_graphs/PygDocsGraphDataset.py | from torch_geometric.data import InMemoryDataset
import os, sys
import os.path as osp
import torch
Your_path = '/data/project/yinhuapark/ssl/'
sys.path.append(Your_path+'ssl_make_graphs')
from ConstructDatasetByDocs import *
import argparse
class PygDocsGraphDataset(InMemoryDataset):
    """In-memory PyG dataset of document/sentence graph pairs.

    Processed tensors are cached as `<IMDB>/<name>[_<pt>]/<split>.pt`; when
    the cache is missing, process() rebuilds it via ConstructDatasetByDocs.
    """

    def __init__(self, name, split, dic, pt, transform=None, pre_transform=None):
        # name: dataset; split: 'train' | 'test'; dic: vocab list;
        # pt: '' for GloVe features, otherwise a pretrained-model tag.
        imdb_path = Your_path+'re-extract_data/IMDB'
        pre_path = Your_path+'re-extract_data/DATA_PRE'
        if pt == '':
            self.imdb_path = osp.join(imdb_path, name)
        else:
            self.imdb_path = osp.join(imdb_path, name+'_'+pt)
        self.split = split
        self.dic = dic
        self.pt = pt
        self.pre_path = osp.join(pre_path, name)
        super(PygDocsGraphDataset, self).__init__(self.imdb_path, transform, pre_transform)
        self.data, self.slices = torch.load(osp.join(self.processed_dir, f'{self.split}.pt'))

    @property
    def raw_file_names(self):
        # No raw files tracked by PyG; preprocessing happens outside.
        return []

    @property
    def processed_dir(self):
        return osp.join(self.imdb_path)

    @property
    def processed_file_names(self):
        return [f'{self.split}.pt']

    def process(self):
        # construct graph by note list.
        if self.pt == "":
            cdbn = ConstructDatasetByDocs(pre_path=self.pre_path, split=self.split, dictionary=self.dic, pt=self.pt)
            data_list = cdbn.construct_datalist()
        else:
            # NOTE(review): construct_datalist_bert is not defined in the
            # visible ConstructDatasetByDocs — confirm it exists for pt != ''.
            cdbn = ConstructDatasetByDocs(pre_path=self.pre_path, split=self.split, dictionary=self.dic, pt=self.pt)
            data_list = cdbn.construct_datalist_bert()
        data, slices = self.collate(data_list)
        torch.save((data, slices), osp.join(self.processed_dir, f'{self.split}.pt'))
        print('Saving...')

    def get_max_len(self, split):
        """Return the max x_p row count over samples of `split`, read from
        the cached slice offsets.

        NOTE(review): torch.load returns the (data, slices) TUPLE saved in
        process(); indexing it with 'x_p' will raise — this likely needs
        `_, slices = torch.load(...)`. Confirm before use.
        """
        slices = torch.load(osp.join(self.processed_dir, f'{split}.pt'))
        return max([(slices['x_p'][i + 1] - slices['x_p'][i]).item() for i in range(len(slices['x_p']) - 1)])
# CLI entry: build (or load from cache) the test split of one dataset.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Construct In-Memory Datasets")
    parser.add_argument('--name', type=str, default='R52')
    parser.add_argument('--raw_path', type=str, default=Your_path+'re-extract_data/DATA_RAW')
    parser.add_argument('--imdb_path', type=str, default=Your_path+'re-extract_data/IMDB')
    args, _ = parser.parse_known_args()
    if not os.path.exists(args.imdb_path):
        os.makedirs(args.imdb_path)
    # Vocabulary list: word id = position in the vocab file.
    vocab = open(os.path.join(args.raw_path, args.name, args.name+'_vocab.txt')).read().split()
    test_set = PygDocsGraphDataset(name=args.name, split='test', dic=vocab, pt='')
qkrdmsghk/ssl | ssl_graphmodels/pyg_models/layers_docs.py | <filename>ssl_graphmodels/pyg_models/layers_docs.py
import torch
from torch_geometric.nn.inits import glorot, zeros, reset
from torch.nn import Parameter
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool
import sys
sys.path.append('../ssl_graphmodels')
from utils.SSL_GCN import SSL_GCN
from torch_geometric.utils import softmax, add_remaining_self_loops
class StructureLearinng(torch.nn.Module):
    """(sic: 'StructureLearning') Learns which inter-sentence edges to keep.

    Scores candidate edges with an attention vector; with sparse='soft' the
    scores soft-weight the edges, with sparse='hard' a relaxed Bernoulli
    samples a differentiable 0/1 mask (straight-through estimator).
    Intra-sentence edges (edge_mask == -1) are always kept.
    """

    def __init__(self, input_dim, sparse, emb_type='1_hop', threshold=0.5, temperature=0.5):
        super(StructureLearinng, self).__init__()
        self.att = Parameter(torch.Tensor(1, input_dim * 2))
        glorot(self.att.data)
        # Number of extra GCN hops used to embed nodes before scoring
        # ('1_hop' => 0 extra layers).
        self.emb_layer = int(emb_type.split('_')[0]) - 1
        if self.emb_layer > 0:
            self.gnns = torch.nn.ModuleList()
            for i in range(self.emb_layer):
                self.gnns.append(SSL_GCN(input_dim, input_dim))
        self.threshold = threshold      # hard-sampling cut-off
        self.temperature = temperature  # relaxed-Bernoulli temperature
        self.sparse = sparse

    def forward(self, x, edge_index, edge_weight, edge_mask, layer):
        """Re-score edges at structure-learning step `layer`.

        Returns (edge_index, edge_weight, y_soft, edge_mask, intra_soft_edge).
        edge_mask convention: -1 intra-sentence, 0 candidate inter edge,
        layer+1 for inter edges activated at this step.
        """
        raw_edges = edge_weight[edge_weight==1].shape[0]  # NOTE(review): unused
        _, edge_mask = add_remaining_self_loops(edge_index, edge_mask, -1, x.size(0))
        edge_index, edge_weight = add_remaining_self_loops(edge_index, edge_weight, 0, x.size(0))
        '''
        embedding node using emb_type
        '''
        if self.emb_layer > 0:
            for i in range(self.emb_layer):
                x = torch.nn.functional.relu(self.gnns[i](x, edge_index[:, edge_weight!=0], edge_weight[edge_weight!=0]))
        # Attention score per edge from the concatenated endpoint embeddings.
        weights = (torch.cat([x[edge_index[0]], x[edge_index[1]]], 1) * self.att).sum(-1)
        weights = torch.nn.functional.leaky_relu(weights) + edge_weight
        # Sort edges by target node so softmax normalizes per target column.
        row, col = edge_index
        col, col_id = col.sort()
        weights = weights[col_id]
        row = row[col_id]
        edge_index = torch.stack([row, col])
        edge_weight = edge_weight[col_id]
        edge_mask = edge_mask[col_id]
        if self.sparse == 'soft':
            edge_weight = softmax(weights, col)
        elif self.sparse == 'hard':
            weights = softmax(weights, col)
            # Differentiable hard sampling (straight-through estimator).
            sample_prob = torch.distributions.relaxed_bernoulli.RelaxedBernoulli(self.temperature, probs=weights)
            y = sample_prob.rsample()
            y_soft = y
            y_hard = (y>self.threshold).to(y.dtype)
            y = (y_hard - y).detach() + y
            intra_edges = y[edge_weight == 1]  # NOTE(review): unused
            inter_edges = y[edge_weight == 0]
            edge_weight[edge_weight==0] = inter_edges
            # Tag inter edges switched on at this layer for later bookkeeping.
            edge_mask[(edge_mask==0) & (y==1)] = layer+1
            intra_soft_edge = y_soft[edge_mask==-1]
        else:
            print('sparse operation is not found...')
        assert edge_index.size(1) == edge_weight.size(0)
        torch.cuda.empty_cache()
        # NOTE(review): with sparse='soft' (or an unknown mode) y_soft and
        # intra_soft_edge are never assigned, so this return raises
        # UnboundLocalError — confirm only 'hard' is used in practice.
        return edge_index, edge_weight, y_soft, edge_mask, intra_soft_edge
class GRAPHLayer(torch.nn.Module):
    """Stacked GNN encoder: a linear input projection followed by
    ``num_layer`` SSL_GCN convolutions, optionally interleaved with
    structure-learning layers when 'attn' appears in the method string.

    Sub-strings of ``func`` act as feature flags:
      * 'lin'           -- passed through to SSL_GCN as ``Lin=True``
                           (presumably enables an extra linear transform -- TODO confirm)
      * 'attn'          -- enables StructureLearinng layers between convolutions
      * 'soft'/'gumbel' -- sparsification mode handed to StructureLearinng
      * 'reg'           -- collects KL regularisation terms in ``self.kl_terms``
    """

    def __init__(self, input_dim, output_dim, func, dropout=0, act=torch.nn.ReLU(), bias=True, num_layer=2, threshold=0.5, temperature=0.5):
        super(GRAPHLayer, self).__init__()
        self.dropout = dropout
        self.act = act
        self.num_layer = num_layer
        self.func = func
        self.threshold = threshold
        self.temperature = temperature
        # input projection applied once before the convolution stack
        self.weight = Parameter(torch.Tensor(input_dim, output_dim))
        if bias:
            self.bias = Parameter(torch.Tensor(output_dim))
        else:
            self.register_parameter('bias', None)
        self.convs = torch.nn.ModuleList()
        # structure-learning layers, only populated in 'attn' mode
        self.sls = torch.nn.ModuleList()
        for _ in range(self.num_layer):
            if 'lin' in self.func:
                self.Lin = True
            else:
                self.Lin = False
            self.convs.append(SSL_GCN(output_dim, output_dim, Lin=self.Lin))
            if 'attn' in self.func:
                sparse = ''
                if 'soft' in self.func:
                    sparse = 'soft'
                elif 'gumbel' in self.func:
                    sparse = 'hard'
                self.sls.append(StructureLearinng(output_dim, sparse, threshold=self.threshold, temperature=self.temperature))
        torch.cuda.empty_cache()
        self.reset_parameters()

    def reset_parameters(self):
        """Glorot-init the projection weight; zero the bias (may be None)."""
        glorot(self.weight)
        zeros(self.bias)

    def forward(self, input, **kwargs):
        """Encode node features.

        Required kwargs: ``batch``, ``edge_index``; plus ``edge_mask`` in
        'attn' mode. Returns the node embeddings, or ``(x, exp_dict)`` when
        an ``explain`` kwarg is present in 'attn' mode.
        """
        x = input
        # NOTE(review): ``batch`` is read but never used in this method.
        batch = kwargs['batch']
        edge_index = kwargs['edge_index']
        # dropout -> linear projection -> activation -> dropout
        x = torch.nn.functional.dropout(x, self.dropout, training=self.training)
        x = self.act(torch.matmul(x, self.weight) + self.bias)
        x = torch.nn.functional.dropout(x, self.dropout, training=self.training)
        if 'attn' in self.func:
            edge_mask = kwargs['edge_mask']
            # regard self as intra nodes! --> -1
            # edges with mask == -1 (intra edges) start with weight 1, the rest 0
            edge_weight = torch.ones((edge_mask.size(0), ), dtype=torch.float, device=edge_mask.device)
            edge_weight[edge_mask!=-1] = 0
            # self-loops count as intra edges (mask fill -1, weight fill 0)
            _, edge_mask = add_remaining_self_loops(edge_index, edge_mask, -1, x.size(0))
            edge_index, edge_weight = add_remaining_self_loops(edge_index, edge_weight, 0, x.size(0))
            # reference distribution over intra edges, normalised per source node
            raw_edge_intra_weight = torch.ones((edge_weight[edge_mask==-1].size(0), ), dtype=edge_weight.dtype, device=edge_mask.device)
            raw_edge_intra_weight = softmax(raw_edge_intra_weight, edge_index[0][edge_mask==-1])
            # NOTE(review): ``raw_size`` is computed but never used.
            raw_size = edge_weight[edge_weight!=0].size(0)
            self.kl_terms = []
            soft_weights = []
            inter_edge_indexs = []
            edge_masks = []
            for i in range(self.num_layer):
                # convolve only over edges with non-zero weight
                x = self.act(self.convs[i](x, edge_index[:, edge_weight!=0], edge_weight=edge_weight[edge_weight!=0]))
                if i != self.num_layer-1:
                    # learn a new edge structure between convolution layers;
                    # edges activated at layer i are tagged with mask == i+1
                    edge_index, edge_weight, soft_weight, edge_mask, intra_soft_edge = self.sls[i](x, edge_index, edge_weight, edge_mask, layer=i)
                    soft_weights.append(soft_weight[edge_mask==i+1])
                    inter_edge_indexs.append(edge_index[:, edge_mask==i+1])
                    edge_masks.append(edge_mask[edge_mask==i+1])
                    if 'reg' in self.func:
                        # KL(raw || learned) over the intra-edge distributions
                        assert intra_soft_edge.size() == raw_edge_intra_weight.size()
                        log_p = torch.log(raw_edge_intra_weight + 1e-12)
                        log_q = torch.log(intra_soft_edge+ 1e-12)
                        self.kl_terms.append(torch.mean(raw_edge_intra_weight * (log_p - log_q)))
            if 'explain' in kwargs:
                # expose the learned inter-layer edges for explanation
                soft_weights = torch.cat(soft_weights)
                inter_edge_indexs = torch.cat(inter_edge_indexs, 1)
                edge_masks = torch.cat(edge_masks)
                assert inter_edge_indexs.shape[1] == soft_weights.shape[0] == edge_masks.shape[0]
                exp_dict = {'edge_index': inter_edge_indexs, 'edge_weight': soft_weights, 'edge_mask': edge_masks}
                return x, exp_dict
            else:
                return x
        else:
            # gnn
            # plain stack of convolutions over the fixed input graph
            for i in range(self.num_layer):
                x = self.act(self.convs[i](x, edge_index))
            return x
class DENSELayer(torch.nn.Module):
    """Single dense (fully connected) encoding layer with dropout.

    Parameters
    ----------
    input_dim, output_dim : sizes of the linear transform.
    dropout : dropout rate applied to the input features.
    act : activation applied after the linear transform.
    bias : whether to learn an additive bias term.
    """

    def __init__(self, input_dim, output_dim, dropout=0, act=torch.nn.ReLU(), bias=False):
        super(DENSELayer, self).__init__()
        self.dropout = dropout
        self.act = act
        self.weight = Parameter(torch.Tensor(input_dim, output_dim))
        if bias:
            self.bias = Parameter(torch.Tensor(output_dim))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Glorot-init the weight; zero the bias (no-op when bias is None)."""
        glorot(self.weight)
        zeros(self.bias)

    def forward(self, input, **kwargs):
        x = input
        # Bug fix: dropout rate was hard-coded to 0.7, silently ignoring the
        # ``dropout`` constructor argument stored in ``self.dropout``.
        x = torch.nn.functional.dropout(x, self.dropout, training=self.training)
        # dense encode
        x = torch.matmul(x, self.weight)
        # Bug fix: with the default ``bias=False`` the original added ``None``
        # to a tensor and raised TypeError; only add the bias when present.
        if self.bias is not None:
            x = x + self.bias
        x = self.act(x)
        return x
class READOUTLayer(torch.nn.Module):
    """Readout head: embed node features, pool them per graph, then classify.

    Parameters
    ----------
    input_dim, output_dim : node embedding size and number of output logits.
    func : method string (stored for interface consistency; unused here).
    aggr : global pooling operator, one of 'sum', 'mean' or 'max'.
    dropout : dropout rate applied before the final linear layer.
    act : activation used in the node embedding transform.
    """

    def __init__(self, input_dim, output_dim, func, aggr, dropout=0, act=torch.nn.ReLU()):
        super(READOUTLayer, self).__init__()
        self.func = func
        self.dropout = dropout
        self.act = act
        self.aggr = aggr
        # node-level embedding transform
        self.emb_weight = Parameter(torch.Tensor(input_dim, input_dim))
        self.emb_bias = Parameter(torch.Tensor(input_dim))
        # graph-level classifier
        self.mlp_weight = Parameter(torch.Tensor(input_dim, output_dim))
        self.mlp_bias = Parameter(torch.Tensor(output_dim))
        self.reset_parameters()

    def reset_parameters(self):
        """Glorot-init the weights, zero the biases."""
        glorot(self.emb_weight)
        glorot(self.mlp_weight)
        zeros(self.emb_bias)
        zeros(self.mlp_bias)

    def global_pooling(self, x, batch):
        """Transform node features, then pool to one vector per graph.

        Raises ValueError when ``self.aggr`` is not a known operator.
        """
        x_emb = self.act(torch.matmul(x, self.emb_weight) + self.emb_bias)
        # global pooling
        if self.aggr == 'sum':
            return global_add_pool(x_emb, batch)
        elif self.aggr == 'mean':
            return global_mean_pool(x_emb, batch)
        elif self.aggr == 'max':
            return global_max_pool(x_emb, batch)
        # Bug fix: an unknown aggregator used to fall through and silently
        # return the *unpooled* input features; fail loudly instead.
        raise ValueError(
            "unknown aggregation '{}', expected 'sum', 'mean' or 'max'".format(self.aggr))

    def forward(self, input=None, **kwargs):
        x = input
        batch = kwargs['batch']
        x = self.global_pooling(x, batch)
        x = torch.nn.functional.dropout(x, self.dropout, training=self.training)
        x = torch.matmul(x, self.mlp_weight) + self.mlp_bias
        return x
|
qkrdmsghk/ssl | ssl_graphmodels/pyg_models/models_docs.py | import sys
Your_path = '/data/project/yinhuapark/ssl/'
sys.path.append(Your_path+'ssl_graphmodels')
import torch
from layers_docs import READOUTLayer, DENSELayer, GRAPHLayer
use_gpu = torch.cuda.is_available()
class DocNet(torch.nn.Module):
    """Document classifier assembled from a GRAPHLayer encoder and a
    READOUTLayer head.

    ``params['methods']`` selects which graph view of the input batch is
    consumed in ``forward`` ('gnn_note', 'gnn_note_attn...', or the default
    branch using the ``x_p`` view); ``params['type']`` further gates the
    attention variant.
    """

    def __init__(self, params):
        super(DocNet, self).__init__()
        self.layers = torch.nn.ModuleList()
        self.params = params
        self.func = params['methods']
        print('building {} model...'.format(self.func))
        self.type = params['type']
        # NOTE(review): the GNN variant is always built; ``mlp_build`` exists
        # below but is never dispatched -- confirm whether that is intended.
        getattr(self, 'gnn_build')()

    def gnn_build(self):
        """Stack a GRAPHLayer encoder and a READOUTLayer classification head."""
        self.layers.append(GRAPHLayer(self.params['input_dim'],
                                      self.params['hidden_dim'],
                                      self.func,
                                      dropout=self.params['dropout'],
                                      act=torch.nn.ReLU(),
                                      bias=True,
                                      num_layer=self.params['num_layer'],
                                      temperature=self.params['temperature'],
                                      threshold=self.params['threshold']
                                      ))
        self.layers.append(READOUTLayer(self.params['hidden_dim'],
                                        self.params['output_dim'],
                                        func=self.func,
                                        dropout=self.params['dropout'],
                                        act=torch.nn.ReLU(),
                                        aggr=self.params['aggregate']))

    def mlp_build(self):
        """Alternative dense encoder + readout stack (currently never called)."""
        self.layers.append(DENSELayer(self.params['input_dim'],
                                      self.params['hidden_dim'],
                                      dropout=self.params['dropout'],
                                      act=torch.nn.ReLU(),
                                      bias=True
                                      ))
        self.layers.append(READOUTLayer(self.params['hidden_dim'],
                                        self.params['output_dim'],
                                        func=self.func,
                                        dropout=self.params['dropout'],
                                        act=torch.nn.ReLU(),
                                        aggr=self.params['aggregate']
                                        ))

    def forward(self, data, explain=False):
        """Run the model on a batch.

        Returns ``(logits, kl_term)`` normally, or
        ``(logits, hidden_emb, exp_dict)`` when ``explain`` is True.
        """
        if explain:
            exp_dict = None
            hidden_emb = None
        if self.func == 'gnn_note':
            # note-view graph: features x_n over edge_index_n
            output = data.x_n
            if explain:
                # run only the encoder to expose the hidden node embeddings
                hidden_emb = self.layers[0](data.x_n, edge_index=data.edge_index_n, batch=data.x_n_batch)
            for layer in self.layers:
                output = layer(output, edge_index=data.edge_index_n, batch=data.x_n_batch, batch_n=data.batch_n)
        elif 'gnn_note_attn' in self.func:
            if 'inter' in self.type:
                if explain:
                    # the attn encoder also returns the learned-edge dict
                    hidden_emb, exp_dict = self.layers[0](data.x_n, edge_index=data.edge_index_n, batch=data.x_n_batch, edge_mask=data.edge_mask, explain=explain)
                output = data.x_n
                for layer in self.layers:
                    output = layer(output, edge_index=data.edge_index_n, batch=data.x_n_batch, edge_mask=data.edge_mask)
            else:
                # NOTE(review): unsupported type only prints; ``output`` stays
                # unbound and the return below would raise NameError.
                print('wrong type, please use type=inter_all')
        else:
            # fallback: the alternative graph view (x_p / edge_index_p)
            output = data.x_p
            if explain:
                hidden_emb = self.layers[0](data.x_p, edge_index=data.edge_index_p, batch=data.x_p_batch)
            for layer in self.layers:
                output = layer(output, edge_index=data.edge_index_p, batch=data.x_p_batch)
        if 'reg' in self.func:
            # average the KL terms collected by the GRAPHLayer during forward
            kl_term = torch.mean(torch.Tensor(self.layers[0].kl_terms))
        else:
            kl_term = torch.zeros(1)
        if explain:
            return output, hidden_emb, exp_dict
        else:
            return output, kl_term
|
def duplicate(iterat, word):
    """Return *word* with every non-punctuation character repeated *iterat* times.

    Characters in the set " ,!?();:[]{}-" are kept as-is; all other characters
    are repeated ``iterat`` times. ``iterat == 0`` yields the empty string
    (punctuation included), matching the original early-return behaviour.
    """
    if iterat == 0:
        return ''
    # Idiom fix: replaced a 13-way chain of ``!=`` comparisons with a single
    # membership test against the punctuation set.
    kept = ' ,!?();:[]{}-'
    return ''.join(ch if ch in kept else ch * iterat for ch in word)
# Read the repetition factor and the text from stdin, then print the
# string produced by ``duplicate``.
a = int(input())  # number of times each non-punctuation character is repeated
b = input()       # the text to transform
print(duplicate(a, b))
CristianContrera95/strict_df | src/strictdf/utils/dtypes.py | <gh_stars>0
"""
Author: <NAME> <<EMAIL>>
Date: 10/12/2020
License: MIT
"""
import re
# Pre-compiled validators for scalar values encoded as strings.
# check boolean in this forms: [0, 1, .0, .1, 0., 1., true, false, t, f]
_BOOL_PATTERN = re.compile('^([.]?0|[.]?1|true|false|f|t){1}[.]?[0]*$')
# check int in this forms: [11, -1, 1., -2., .000]
_INT_PATTERN = re.compile('^[-]?([0-9]+[.]?0*|[.]0+)$')
# check floats in this forms: [-11.12, -1.2, .1, -.2]
_FLOAT_PATTERN = re.compile('^[-]?([0-9]+[.][0-9]*|[0-9]*[.][0-9]+)$')


def str_check_bool(string: str = ''):
    """Match *string* (case-insensitively) against the boolean pattern.

    Returns an ``re.Match`` when it encodes a boolean, else ``None``.
    """
    return _BOOL_PATTERN.match(string.lower())


def str_check_int(string: str = ''):
    """Return an ``re.Match`` when *string* encodes an integer, else ``None``."""
    return _INT_PATTERN.match(string)


def str_check_float(string: str = ''):
    """Return an ``re.Match`` when *string* encodes a float, else ``None``."""
    return _FLOAT_PATTERN.match(string)
|
CristianContrera95/strict_df | src/strictdf/utils/dataset.py | <gh_stars>0
"""
Author: <NAME> <<EMAIL>>
Date: 10/12/2020
License: MIT
"""
import requests
import pandas as pd
from pathlib import Path
import os
def load_credit_data():
    """
    Download credit_data file from S3 and load it with pandas in DataFrame.

    The file is cached at ``~/.strict_df/datasets/credit_date.csv``;
    subsequent calls read the local copy instead of re-downloading.

    :return: pd.DataFrame
    """
    local_file = Path.home() / '.strict_df' / 'datasets' / 'credit_date.csv'
    if not local_file.exists():
        # Bug fix: ``os.makedirs(local_file.parent)`` raised FileExistsError
        # when the directory already existed (e.g. the cached file was deleted
        # but the folder kept); ``mkdir(..., exist_ok=True)`` is idempotent.
        local_file.parent.mkdir(parents=True, exist_ok=True)
        url = 'https://s3-us-west-2.amazonaws.com/fligoo.data-science/TechInterviews/StrictDF/data/credit-data.csv'
        # Stream the download in chunks so the file never sits fully in memory.
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(local_file, 'wb') as fp:
                for chunk in r.iter_content(chunk_size=8192):
                    fp.write(chunk)
    return pd.read_csv(str(local_file))
|
CristianContrera95/strict_df | src/strictdf/__init__.py | """
Author: <NAME> <<EMAIL>>
Date: 10/12/2020
License: MIT
"""
from .StrictDataFrame import StrictDataFrame

# pyspark support is optional: try to bootstrap it via findspark, but a
# missing findspark install or a broken SPARK_HOME must not break importing
# this package.
try:
    import findspark
    findspark.init()
except Exception:
    # Bug fix: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; catching Exception keeps the best-effort behaviour
    # without hijacking interpreter-control exceptions.
    pass
|
CristianContrera95/strict_df | src/strictdf/StrictDataFrame.py | """
StrictDataFrame
---------------
A pd.DataFrame wrapper that provides utilities to handle in a "strict" DataFrames way with respect to the data schema
"""
import pandas as pd
import numpy as np
from copy import copy
from typing import Union, List
from .utils.dtypes import str_check_bool, str_check_int, str_check_float
from pyspark.sql import SparkSession
class StrictDataFrame():
    """
    StrictDataFrame is a pd.DataFrame wrapper that provides utilities to handle in a "strict" DataFrames
    way with respect to the data schema, so it imposes a suitable data type for each of its columns

    Parameters
    ----------
    df : pandas DataFrame
        Contains data stored in DataFrame.

    min_percentage : int or float optional (default=90)
        Values must be between 0 and 100 (int) or 0. and 1. (float).
        Used to determine the type of a column: requires 'min_percentage' of rows to match one expected type.

    impute : bool optional (default=False)
        Impute non-conforming values instead of dropping their rows.

    binary2bool : bool optional (default=False)
        Convert all columns with only 2 distinct values to boolean.

    skip_col : list or array-like optional (default=None)
        Column names to be omitted from type inference.

    Attributes
    ----------
    old_df : pd.DataFrame
        Original pd.DataFrame given
    new_df : pd.DataFrame
        Modified pd.DataFrame with "strict" types for each column
    impute : bool (default=False)
        Impute values that are being removed to not meet expected data types.
        For columns float 'mean' is used, for integer 'mode' is used
    binary2bool : bool (default=False)
        Convert all columns with only 2 values to boolean
    skip_col : list or array-like
        Column names to be omitted

    Examples
    --------
    from strictdf import StrictDataFrame
    import pandas as pd
    df = pd.read_csv("data/credit-data.csv")
    sdf = StrictDataFrame(df)

    Notes
    -----
    - NaNs values are dropped before data types inference.
    """

    def __init__(self, df, min_percentage: Union[int, float] = 90, impute: bool = False,
                 binary2bool: bool = False, skip_col: Union[List, np.array] = None):
        assert isinstance(df, pd.DataFrame), '"df" param must be a pd.DataFrame'
        assert isinstance(min_percentage, float) or \
               isinstance(min_percentage, int), '"min_percentage" param must be float or int'
        self.impute = impute
        self.binary2bool = binary2bool
        self.skip_col = [] if skip_col is None else skip_col
        self.min_percentage = min_percentage
        # a float threshold is interpreted as a fraction and scaled to percent
        if isinstance(self.min_percentage, float):
            self.min_percentage *= 100
        self.old_df = copy(df)
        if not df.empty:
            self.new_df = self.__strict_df(copy(self.old_df))
        else:
            self.new_df = copy(df)

    @property
    def dtypes(self) -> dict:
        """Mapping of column name -> dtype name, reporting 'object' as 'str'."""
        # pandas labels string columns as 'object'; report them as 'str'
        # for a friendlier schema description
        return {a: (b.name if b.name != 'object' else 'str')
                for a, b in self.new_df.dtypes.items()}

    def __strict_df(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Infer a strict dtype for each column and clean/cast ``df`` accordingly.

        Rows whose values do not parse as bool/int/float (in columns that are
        mostly parseable) are dropped or imputed, then each column is cast to
        the most specific type that covers at least ``min_percentage`` of it.
        """
        # NaNs are removed before any type inference (see class Notes)
        df.dropna(inplace=True)
        if self.binary2bool:
            def __binary2bool(col):
                # NOTE(review): subtracting the column max maps the larger of
                # the two values to 0 (False) and the other to True -- confirm
                # this inversion of 0/1 semantics is intended.
                return (col - col.unique().max()).astype(bool) if len(col.unique()) == 2 else col
            df = df.apply(__binary2bool)
        cols_mask = df.columns.difference(self.skip_col)
        # boolean columns are already strict; only inspect the rest
        columns = df[cols_mask].select_dtypes(exclude=['bool']).columns
        # Drop all rows with unexpected value for given column
        unexpected_mask = np.ones(len(df), dtype=bool)
        for col in columns:
            # True where the stringified value parses as bool, int or float
            _mask = df[col].map(lambda x: bool(str_check_bool(str(x))) or
                                bool(str_check_int(str(x))) or bool(str_check_float(str(x)))
                                ).values
            if sum(~_mask) / len(_mask) * 100 < (100 - self.min_percentage):
                # column is "mostly numeric": mark its unparseable rows for removal
                unexpected_mask &= _mask
        df.drop(df[~unexpected_mask].index, inplace=True)
        # over each expected column infer dtype and cast it
        for col in columns:
            type_mask = df[col].map(lambda x: bool(str_check_bool(str(x))))
            if all(type_mask):
                # all values are booleans
                df[col] = df[col].astype(bool)
            else:
                type_mask = df[col].map(lambda x: bool(str_check_int(str(x))))
                if sum(type_mask) / len(type_mask) * 100 >= self.min_percentage:
                    # min_percentage values are int
                    df = self.__handle_unexpected_values(df, type_mask, col, 'int')
                    # cast through float first so strings like '1.' convert cleanly
                    df.loc[:, col] = df[col].astype(float).astype(np.int64)
                else:
                    type_mask = df[col].map(lambda x: bool(str_check_float(str(x))))
                    if sum(type_mask) / len(type_mask) * 100 >= self.min_percentage:
                        # min_percentage values are float
                        df = self.__handle_unexpected_values(df, type_mask, col, 'float')
                        df.loc[:, col] = df[col].astype(float)
        return df

    def __handle_unexpected_values(self, df, type_mask, col, dtype):
        # either impute the non-conforming rows or drop them entirely
        if self.impute:
            return self.__impute_values(df, type_mask, col, dtype)
        return df.drop(df[~type_mask].index)

    def __impute_values(self, df, type_mask, col, dtype):
        # NOTE(review): ``mode()`` returns a Series; assigning it to .loc rows
        # aligns on index, which may fill NaN rather than the modal value --
        # verify against a column whose non-conforming rows have index > 0.
        if dtype == 'int':
            df.loc[df[~type_mask].index, col] = df[col].mode()
        if dtype == 'float':
            df.loc[df[~type_mask].index, col] = df[col].mean()
        return df

    def report(self) -> str:
        """
        Returns shape of new and old DataFrames
        """
        rows_diff = self.old_df.shape[0] - self.new_df.shape[0]
        text = f"DataFrame having shape '{self.new_df.shape}' ({rows_diff} rows removed from original)"
        return text

    def to_spark(self):
        """Convert ``new_df`` to a Spark DataFrame; returns None on failure."""
        try:
            spark = SparkSession.builder.getOrCreate()
            return spark.createDataFrame(self.new_df)
        except:
            # NOTE(review): bare except plus a no-op string expression -- the
            # message below is never shown and the method silently returns
            # None; consider logging or re-raising instead.
            'Spark not installed.'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.