repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
dongmengshi/easylearn | eslearn/GUI/lc_ui2py_for_main.py | import os
# Regenerate the main-window GUI module from its Qt Designer .ui file.
# NOTE(review): hard-coded absolute Windows path — this only works on the
# original author's machine; parameterize before reuse.
cmd_str = r'pyuic5 -o easylearn_main_gui.py D:\My_Codes\easylearn-fmri\eslearn\GUI\easylearn_main_gui.ui'
os.system(cmd_str)
|
dongmengshi/easylearn | eslearn/utils/lc_transform1D.py | <reponame>dongmengshi/easylearn<filename>eslearn/utils/lc_transform1D.py
# utf-8
"""
read .1D file
"""
import os
import pandas as pd
import numpy as np
from lc_read_write_Mat import write_mat
def read_1D(file_path):
    """Read a .1D (tab-separated text) file into a pandas DataFrame.

    Each line of the file is split on tabs and converted to floats, so the
    result has one row per input line and one column per tab field.
    TODO: support other output file types.

    Args:
        file_path: path of the .1D file.

    Returns:
        pandas.DataFrame of float values (rows = time points, columns = ROIs).
    """
    raw = pd.read_csv(file_path)
    # read_csv splits on commas, so each tab-separated line arrives as a
    # single string in column 0; split it on tabs ourselves.
    rows = [list(d)[0].split('\t') for d in raw.values]
    # np.float was removed in NumPy 1.24; the builtin float is the replacement.
    rows = [np.array(d, dtype=float).T for d in rows]
    # The original wrapped the result in pd.DataFrame twice; once suffices.
    return pd.DataFrame(rows)
if __name__ == '__main__':
    # Batch-convert every .1D file in `dir` to a .mat file in `save_path`.
    # NOTE(review): hard-coded Windows paths; `dir` also shadows the builtin.
    dir = r'F:\Data\ASD\Outputs\dparsf\filt_global\rois_dosenbach160'
    save_path = r'F:\Data\ASD\Outputs\dparsf\filt_global\rois_dosenbach160_m'
    file_name = os.listdir(dir)
    file_path = [os.path.join(dir, fn) for fn in file_name]
    nfile = len(file_path)
    for i, file in enumerate(zip(file_path, file_name)):
        # Progress indicator: "current/total"
        print(f'{i+1}/{nfile}')
        data = read_1D(file[0])
        # Keep the base name, swap the extension for .mat
        fn = os.path.join(save_path, file[1].split('.')[0] + '.mat')
        write_mat(fileName=fn, dataset_name='timeseries', dataset=data.values)
|
dongmengshi/easylearn | eslearn/GUI/easylearn_logger.py | <gh_stars>0
import logging
from logging import handlers
def easylearn_logger(out_name=None, debug_message="", info_message="", warning_message="", error_message="", critical_message=""):
    """Log one message per severity level, to a file or to the console.

    Args:
        out_name: path of the log file. If None, messages go to a console
            StreamHandler (the original passed None straight to
            logging.FileHandler, which raises).
        debug_message, info_message, warning_message, error_message,
        critical_message: text logged at the matching level. The logger
            level is INFO, so debug_message is filtered out.
    """
    # Root logger, so the configuration applies process-wide.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # fmt formats each record; datefmt controls the %(asctime)s field.
    formatter = logging.Formatter(fmt='[%(asctime)s]%(levelname)s:%(message)s',
                                  datefmt='%m/%d/%Y %I:%M:%S %p')
    if out_name is not None:
        handler = logging.FileHandler(out_name)
    else:
        handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    try:
        logger.debug(debug_message)
        logger.info(info_message)
        logger.warning(warning_message)
        logger.error(error_message)
        logger.critical(critical_message)
    finally:
        # Detach and close the handler so repeated calls do not pile up
        # handlers on the root logger and duplicate every later message.
        logger.removeHandler(handler)
        handler.close()
if __name__ == '__main__':
    # NOTE(review): called without out_name, so logging.FileHandler receives
    # None and fails — pass a log-file path when calling.
    easylearn_logger() |
dongmengshi/easylearn | eslearn/visualization/lc_boxplot.py | # -*- coding: utf-8 -*-
"""
箱图
当我们的数据是num_subj*num_var,且有几个诊断组时,我们一般希望把var name作为x,把var value作为y,把诊断组作为hue
来做箱图,以便于观察每个var的组间差异。
此时,用于sns的特殊性,我们要将数据变换未长列的形式。
行数目为:num_subj*num_var。列数目=3,分别是hue,x以及y
input:
data_path=r'D:\others\彦鸽姐\final_data.xlsx'
x_location=np.arange(5,13,1)#筛选数据的列位置
未来改进:封装为类,增加可移植性
@author: lenovo
"""
#==========================================================
# 载入绘图模块
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
#==========================================================
class BoxPlot():
    """Grouped box plot built from a wide-format Excel sheet.

    The sheet is num_subj x num_var with a diagnosis-group column. The data
    are reshaped to long format — one (hue, x, y) row per subject/variable
    pair — so seaborn can plot variable name on x, value on y, and the
    diagnosis group as hue, making between-group differences visible per
    variable.
    """

    # initial parameters
    def __init__(self,
                 data_path=r'D:\others\彦鸽姐\final_data.xlsx',
                 x_location=np.arange(5,13,1),
                 x_name='脑区',
                 y_name='reho',
                 hue_name='分组',
                 hue_order=[2,1],
                 if_save_figure=0,
                 figure_name='violin.tiff'):
        # data_path: Excel file holding the wide-format data
        # x_location: column positions of the variables to plot
        # x_name/y_name/hue_name: output column names (variable, value, group)
        # hue_order: order of the groups in the legend
        # if_save_figure: truthy -> save the figure to figure_name
        self.data_path=data_path
        self.x_location=x_location
        self.x_name=x_name
        self.y_name=y_name
        self.hue_name=hue_name
        self.hue_order=hue_order
        self.if_save_figure=if_save_figure
        self.figure_name=figure_name

    def data_preparation(self):
        """Reshape the selected columns into long format (hue, x, y).

        Result is stored in self.data with num_subj*num_var rows.
        """
        # load data
        # NOTE(review): `index` is not a pandas.read_excel keyword in current
        # pandas — this call may raise TypeError after an upgrade; confirm.
        df = pd.read_excel(self.data_path,index=False)
        # Select the columns of interest by position
        df_selected=df.iloc[:,self.x_location]
        # Drop rows whose cells hold the literal string '[]'
        # NOTE(review): np.str is a removed NumPy alias (>= 1.24); would need
        # the builtin str on modern NumPy.
        df_selected = pd.DataFrame(df_selected, dtype=np.str)
        df_selected=df_selected.mask(df_selected =='[]', None, inplace=False)
        df_selected=df_selected.dropna()
        col_to_float=list(set(list(df_selected.columns))-set([self.hue_name]))
        df_selected[col_to_float] = pd.DataFrame(df_selected[col_to_float], dtype=np.float32)
        # a=pd.Series(df_selected['HAMD']).str.contains('\d',regex=False)
        # Stack all value columns into one long column
        n_subj,n_col=df_selected.shape
        df_long=pd.DataFrame([])
        for nc in range(n_col):
            df_long=pd.concat([df_long,df_selected.iloc[:,nc]])
        # Matching variable-name column (each name repeated n_subj times)
        col_name=list(df_selected.columns)
        col_name_long=[pd.DataFrame([name]*n_subj) for name in col_name]
        col_name_long=pd.concat(col_name_long)
        # Matching group (hue) column, repeated once per variable
        group=pd.DataFrame([])
        for i in range(n_col):
            group=pd.concat([group,df[self.hue_name].loc[df_selected.index]])
        # Combine group, variable-name and value columns side by side
        col_name_long.index=df_long.index # align the indexes before concat
        self.data=pd.concat([group,col_name_long,df_long],axis=1)
        # Attach the output column names
        self.data.columns=[self.hue_name, self.x_name, self.y_name]
        return self

    def plot(self):
        """Draw the grouped box plot; optionally save it to disk."""
        # plot
        self.f,self.ax= plt.subplots()
        # Box-plot framework (data_preparation is re-run here)
        self.data=self.data_preparation().data
        self.ax=sns.boxplot(x=self.x_name,
                            y=self.y_name,
                            hue=self.hue_name,
                            order=None,
                            hue_order=self.hue_order,
                            data=self.data,
                            palette="Set1",
                            saturation=0.7,
                            width=0.5,
                            fliersize=2,
                            whis=None,
                            notch=False,
                            dodge=True)
        # Grid (disabled)
        # plt.grid(axis="y",ls='--',c='g')
        # Tick-label size and orientation
        label_x = self.ax.get_xticklabels()
        label_y = self.ax.get_yticklabels()
        plt.setp(label_x, size=15,rotation=0, horizontalalignment='right')
        plt.setp(label_y, size=15,rotation=0, horizontalalignment='right')
        # save figure
        if self.if_save_figure:
            self.f.savefig(self.figure_name, dpi=300, bbox_inches='tight')
        return self
if __name__ == '__main__':
    # Example run on a dynamic-FC temporal-properties workbook.
    sel = BoxPlot(data_path=r'D:\WorkStation_2018\WorkStation_dynamicFC_V3\Data\results_cluster\results_of_individual\temploral_properties.xlsx',
                  x_location=np.arange(1, 3, 1),
                  hue_name='group',
                  hue_order=[1, 3, 2, 4])
    df =sel.data_preparation()
    sel.plot()
    # Save the current figure at 600 dpi.
    plt.savefig('MDT.tif',dpi=600)
|
dongmengshi/easylearn | eslearn/utils/lc_pydicom.py | # -*- coding: utf-8 -*-
"""
Created on Sun May 26 19:02:42 2019
@author: lenovo
"""
import pydicom
import nibabel
import SimpleITK as sitk
def read_nii(filename):
    """Load a NIfTI-1 image from *filename* and return its voxel data."""
    img = nibabel.nifti1.load(filename)
    # NOTE: get_data() is deprecated in newer nibabel (prefer get_fdata()
    # or np.asanyarray(img.dataobj) when upgrading); kept here so the
    # returned dtype matches the original behavior exactly.
    return img.get_data()
def nii2dcm(dataset, filename):
    """Write `dataset` to `filename` as a DICOM file (e.g. 'test.dcm')."""
    # TODO: transfer array to dicom directly
    # NOTE(review): pydicom.write_file is deprecated in recent pydicom
    # (use pydicom.dcmwrite), and it expects a pydicom Dataset — presumably
    # not the raw array returned by read_nii; verify what callers pass.
    pydicom.write_file(filename, dataset)
if __name__ == '__main__':
    # Demo: read one DICOM slice with pydicom and with SimpleITK.
    filename = r"D:\dms\13567701_CHENSHUANG_R03509555\13567701_CHENSHUANG_R03509555_S1\1_2_392_200036_9116_2_5_1_37_2420762357_1456450872_373480_S1_I1.dcm"
    # force=True parses files even without the standard DICOM preamble.
    dataset = pydicom.dcmread(filename, force=True)
    val = dataset.data_element('Columns').value
    # SimpleITK route: load pixel data as an array.
    image = sitk.ReadImage(filename)
    image_array = sitk.GetArrayFromImage(image)
    # Header-only route: read metadata tags without loading pixels.
    reader = sitk.ImageFileReader()
    reader.SetFileName(filename)
    reader.LoadPrivateTagsOn()
    reader.ReadImageInformation()
    reader.GetMetaData('0008|0008')
    reader.GetMetaDataKeys()
    # NOTE(review): read_nii loads NIfTI files but receives a .dcm path
    # here — this line looks like it will fail; confirm intent.
    nii = read_nii(filename)
|
dongmengshi/easylearn | eslearn/SSD_classification/Data_Inspection/lc_anova_scip.py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 28 20:06:03 2019
@author: <NAME>
Email: <EMAIL>
"""
from scipy import stats
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from statsmodels.stats.multicomp import pairwise_tukeyhsd
import warnings
import pandas as pd
warnings.filterwarnings("ignore")
import itertools
# NOTE(review): this fragment references a `data` DataFrame that is never
# defined or imported in this file — as written it raises NameError. It
# appears to be a notebook paste; confirm where `data` (with columns
# 'A', 'B', 'C', '1', '2', '3') is supposed to come from.
df2=pd.DataFrame()
# Three groups of nine observations coded -1, 0, 1.
df2['group']=list(itertools.repeat(-1.,9))+ list(itertools.repeat(0.,9))+list(itertools.repeat(1.,9))
df2['noise_A']=0.0
for i in data['A'].unique():
    df2.loc[df2['group']==i,'noise_A']=data.loc[data['A']==i,['1','2','3']].values.flatten()
df2['noise_B']=0.0
for i in data['B'].unique():
    df2.loc[df2['group']==i,'noise_B']=data.loc[data['B']==i,['1','2','3']].values.flatten()
df2['noise_C']=0.0
for i in data['C'].unique():
    df2.loc[df2['group']==i,'noise_C']=data.loc[data['C']==i,['1','2','3']].values.flatten()
df2 |
dongmengshi/easylearn | eslearn/utils/lc_scaler.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 4 14:33:00 2018
@author: lenovo
"""
# import
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
# def
def scaler(X, method):
    """Scale features with the requested method and return the fitted model.

    Args:
        X: array-like of shape (n_samples, n_features).
        method: 'StandardScaler' (zero mean, unit variance) or
            'MinMaxScaler' (rescale each feature to [0, 1]).

    Returns:
        (X_scaled, model): the transformed data and the fitted scaler; the
        model can transform new data later or undo the scaling via
        model.inverse_transform.

    Raises:
        ValueError: if `method` names no known scaler. (The original printed
        a message and implicitly returned None, which made callers that
        unpack two values fail with an obscure TypeError instead.)
    """
    if method == 'StandardScaler':
        model = StandardScaler()
    elif method == 'MinMaxScaler':
        model = MinMaxScaler()
    else:
        raise ValueError("Unknown scale method: {!r}; expected "
                         "'StandardScaler' or 'MinMaxScaler'".format(method))
    return model.fit_transform(X), model
def scaler_apply(train_x, test_x, scale_method):
    """Fit a scaler on the training data, then apply it to the test data.

    Fitting only on the training fold avoids leaking test statistics.
    Returns the scaled (train_x, test_x) pair.
    """
    scaled_train, fitted_model = scaler(train_x, scale_method)
    scaled_test = fitted_model.transform(test_x)
    return scaled_train, scaled_test
|
dongmengshi/easylearn | eslearn/utils/lc_delect_sensitive_info.py | <filename>eslearn/utils/lc_delect_sensitive_info.py
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 10:03:49 2018
删除scale中敏感信息
@author: lenovo
"""
import pandas as pd
import numpy as np
# Load the scale workbook that still contains sensitive columns.
d = pd.read_excel(
    r'D:\WorkStation_2018\WorkStation_2018_08_Doctor_DynamicFC_Psychosis\Scales\修改表.xlsx')
col = list(d.columns)
# Column positions of the sensitive fields to drop.
del_ind = set(np.array([1, 2, 3, 25, 26, 36, 37, 50, 87, 88]))
all_ind = set(np.arange(0, len(col), 1))
rest_ind = all_ind - del_ind
# NOTE(review): rest_ind is a set, so list(rest_ind) has no guaranteed
# ordering — the surviving columns may be written out in arbitrary order;
# confirm this is acceptable (or sort the list first).
dd = d.iloc[:, list(rest_ind)]
col_rest = list(dd.columns)
# Write the de-identified sheet next to the working directory.
dd.to_excel('no_sensitive_scale.xlsx', index=False)
|
dongmengshi/easylearn | eslearn/GUI/easylearn_main_gui.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\My_Codes\easylearn-fmri\eslearn\GUI\easylearn_main_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated main-window layout for the easylearn GUI.

    NOTE(review): generated by pyuic5 from easylearn_main_gui.ui — per the
    file header, manual edits here will be lost; change the .ui file and
    regenerate instead.
    """

    def setupUi(self, MainWindow):
        """Build all widgets, layouts, menus and actions on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(439, 703)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setMinimumSize(QtCore.QSize(300, 400))
        MainWindow.setMaximumSize(QtCore.QSize(100000, 100000))
        MainWindow.setMouseTracking(False)
        # Central widget with a single-column grid of buttons.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(100)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        self.centralwidget.setAcceptDrops(False)
        self.centralwidget.setAutoFillBackground(False)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.logo = QtWidgets.QLabel(self.centralwidget)
        self.logo.setMinimumSize(QtCore.QSize(0, 100))
        self.logo.setText("")
        self.logo.setObjectName("logo")
        self.gridLayout.addWidget(self.logo, 0, 0, 1, 1)
        # One push button per pipeline stage, stacked vertically.
        self.data_loading = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.data_loading.sizePolicy().hasHeightForWidth())
        self.data_loading.setSizePolicy(sizePolicy)
        self.data_loading.setStyleSheet("")
        self.data_loading.setIconSize(QtCore.QSize(30, 30))
        self.data_loading.setObjectName("data_loading")
        self.gridLayout.addWidget(self.data_loading, 1, 0, 1, 1)
        self.feature_engineering = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.feature_engineering.sizePolicy().hasHeightForWidth())
        self.feature_engineering.setSizePolicy(sizePolicy)
        self.feature_engineering.setIconSize(QtCore.QSize(30, 30))
        self.feature_engineering.setObjectName("feature_engineering")
        self.gridLayout.addWidget(self.feature_engineering, 2, 0, 1, 1)
        self.machine_learning = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.machine_learning.sizePolicy().hasHeightForWidth())
        self.machine_learning.setSizePolicy(sizePolicy)
        self.machine_learning.setIconSize(QtCore.QSize(30, 30))
        self.machine_learning.setObjectName("machine_learning")
        self.gridLayout.addWidget(self.machine_learning, 3, 0, 1, 1)
        self.model_evaluation = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.model_evaluation.sizePolicy().hasHeightForWidth())
        self.model_evaluation.setSizePolicy(sizePolicy)
        self.model_evaluation.setObjectName("model_evaluation")
        self.gridLayout.addWidget(self.model_evaluation, 4, 0, 1, 1)
        self.statistical_analysis = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.statistical_analysis.sizePolicy().hasHeightForWidth())
        self.statistical_analysis.setSizePolicy(sizePolicy)
        self.statistical_analysis.setIconSize(QtCore.QSize(30, 30))
        self.statistical_analysis.setObjectName("statistical_analysis")
        self.gridLayout.addWidget(self.statistical_analysis, 5, 0, 1, 1)
        # Text area for status output.
        self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
        self.textBrowser.setMinimumSize(QtCore.QSize(0, 20))
        self.textBrowser.setMaximumSize(QtCore.QSize(10000000, 100))
        self.textBrowser.setMidLineWidth(30)
        self.textBrowser.setObjectName("textBrowser")
        self.gridLayout.addWidget(self.textBrowser, 6, 0, 1, 1)
        # Bottom row: Quit button, stretch spacer, Run button.
        self.save_load = QtWidgets.QHBoxLayout()
        self.save_load.setObjectName("save_load")
        self.quit = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.quit.sizePolicy().hasHeightForWidth())
        self.quit.setSizePolicy(sizePolicy)
        self.quit.setObjectName("quit")
        self.save_load.addWidget(self.quit)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.save_load.addItem(spacerItem)
        self.run = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.run.sizePolicy().hasHeightForWidth())
        self.run.setSizePolicy(sizePolicy)
        self.run.setIconSize(QtCore.QSize(30, 30))
        self.run.setObjectName("run")
        self.save_load.addWidget(self.run)
        self.gridLayout.addLayout(self.save_load, 7, 0, 1, 1)
        self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
        self.progressBar.setProperty("value", 0)
        self.progressBar.setObjectName("progressBar")
        self.gridLayout.addWidget(self.progressBar, 8, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Menu bar: project initialization, help, and skin menus.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 439, 26))
        self.menubar.setObjectName("menubar")
        self.menueasylearn = QtWidgets.QMenu(self.menubar)
        self.menueasylearn.setObjectName("menueasylearn")
        self.menuHelp_H = QtWidgets.QMenu(self.menubar)
        self.menuHelp_H.setObjectName("menuHelp_H")
        self.menuSkin = QtWidgets.QMenu(self.menubar)
        self.menuSkin.setObjectName("menuSkin")
        MainWindow.setMenuBar(self.menubar)
        self.current_working_directory = QtWidgets.QAction(MainWindow)
        self.current_working_directory.setObjectName("current_working_directory")
        self.select_working_directory = QtWidgets.QAction(MainWindow)
        self.select_working_directory.setObjectName("select_working_directory")
        self.create_configuration_file = QtWidgets.QAction(MainWindow)
        self.create_configuration_file.setObjectName("create_configuration_file")
        self.choose_configuration_file = QtWidgets.QAction(MainWindow)
        self.choose_configuration_file.setObjectName("choose_configuration_file")
        self.actionDark = QtWidgets.QAction(MainWindow)
        self.actionDark.setObjectName("actionDark")
        self.actionBlack = QtWidgets.QAction(MainWindow)
        self.actionBlack.setObjectName("actionBlack")
        self.actionDarkOrange = QtWidgets.QAction(MainWindow)
        self.actionDarkOrange.setObjectName("actionDarkOrange")
        self.actionGray = QtWidgets.QAction(MainWindow)
        self.actionGray.setObjectName("actionGray")
        self.actionBlue = QtWidgets.QAction(MainWindow)
        self.actionBlue.setObjectName("actionBlue")
        self.actionNavy = QtWidgets.QAction(MainWindow)
        self.actionNavy.setObjectName("actionNavy")
        self.actionClassic = QtWidgets.QAction(MainWindow)
        self.actionClassic.setObjectName("actionClassic")
        self.actionLight = QtWidgets.QAction(MainWindow)
        self.actionLight.setObjectName("actionLight")
        self.menueasylearn.addSeparator()
        self.menueasylearn.addAction(self.select_working_directory)
        self.menueasylearn.addAction(self.create_configuration_file)
        self.menueasylearn.addAction(self.choose_configuration_file)
        self.menuSkin.addAction(self.actionDark)
        self.menuSkin.addAction(self.actionBlack)
        self.menuSkin.addAction(self.actionDarkOrange)
        self.menuSkin.addAction(self.actionGray)
        self.menuSkin.addAction(self.actionBlue)
        self.menuSkin.addAction(self.actionNavy)
        self.menuSkin.addAction(self.actionClassic)
        self.menuSkin.addAction(self.actionLight)
        self.menubar.addAction(self.menueasylearn.menuAction())
        self.menubar.addAction(self.menuHelp_H.menuAction())
        self.menubar.addAction(self.menuSkin.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set every user-visible string (hook point for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.data_loading.setText(_translate("MainWindow", "Data Loading"))
        self.feature_engineering.setText(_translate("MainWindow", "Feature Engineering"))
        self.machine_learning.setText(_translate("MainWindow", "Machine Learning"))
        self.model_evaluation.setText(_translate("MainWindow", "Model Evaluation"))
        self.statistical_analysis.setText(_translate("MainWindow", "Statistical Analysis"))
        self.quit.setText(_translate("MainWindow", "Quit"))
        self.run.setText(_translate("MainWindow", "Run"))
        self.menueasylearn.setTitle(_translate("MainWindow", "Project initialization(&I)"))
        self.menuHelp_H.setTitle(_translate("MainWindow", "Help(&H)"))
        self.menuSkin.setTitle(_translate("MainWindow", "Skin"))
        self.current_working_directory.setText(_translate("MainWindow", "Current working directory"))
        self.select_working_directory.setText(_translate("MainWindow", "Select working directory"))
        self.create_configuration_file.setText(_translate("MainWindow", "Create configuration file"))
        self.choose_configuration_file.setText(_translate("MainWindow", "Load configuration file"))
        self.actionDark.setText(_translate("MainWindow", "Dark"))
        self.actionBlack.setText(_translate("MainWindow", "Black"))
        self.actionDarkOrange.setText(_translate("MainWindow", "DarkOrange"))
        self.actionGray.setText(_translate("MainWindow", "Gray"))
        self.actionBlue.setText(_translate("MainWindow", "Blue"))
        self.actionNavy.setText(_translate("MainWindow", "Navy"))
        self.actionClassic.setText(_translate("MainWindow", "Classic"))
        self.actionLight.setText(_translate("MainWindow", "Light"))
|
dongmengshi/easylearn | eslearn/feature_engineering/feature_selection/el_relieff.py | # -*- coding: utf-8 -*-
"""
Created on 2020/03/14
Feature selection: Relief-based feature selection algorithm.
feature_train, label_train, feature_test, n_features_to_select
------
@author: <NAME>
"""
import numpy as np
from skrebate import ReliefF
def relief(feature_train, label_train, feature_test, n_features_to_select=None):
    """Select features with the ReliefF feature-selection algorithm.

    Parameters
    ----------
    feature_train: numpy.array, shape (n_samples, n_features)
        Features of the training set.
    label_train: numpy.array
        Labels of the training set.
    feature_test: numpy.array
        Features of the test set (transformed with the selector fitted
        on the training set, so no test-set leakage).
    n_features_to_select: int, float or None
        Number of features to keep. None keeps 1/10 of the features;
        a float is treated as a fraction of the feature count.

    Returns
    -------
    feature_train, feature_test : numpy.array
        The selected-feature versions of the inputs.
    mask_selected : numpy.array
        Indices of the selected features (highest-ranked first).
    n_features : int
        The original number of features.
    """
    n_features = np.shape(feature_train)[1]
    # np.int / np.float were removed in NumPy 1.24; the builtins are the
    # documented replacements and behave identically here.
    if n_features_to_select is None:
        n_features_to_select = int(np.round(n_features / 10))
    if isinstance(n_features_to_select, float):
        n_features_to_select = int(np.round(n_features * n_features_to_select))
    fs = ReliefF(n_features_to_select=n_features_to_select,
                 n_neighbors=100, discrete_threshold=10, verbose=True, n_jobs=-1)
    fs.fit(feature_train, label_train)
    feature_train = fs.transform(feature_train)
    feature_test = fs.transform(feature_test)
    mask_selected = fs.top_features_[:n_features_to_select]
    return feature_train, feature_test, mask_selected, n_features
|
dongmengshi/easylearn | setup.py | #-*- coding:utf-8 -*-
"""
Created on 2020/03/03
------
@author: <NAME>; <NAME>; <NAME>; <NAME>; <NAME>; <NAME>; <NAME>
Email: <EMAIL>; <EMAIL>; <EMAIL>;
<EMAIL>; <EMAIL>; <EMAIL>; <EMAIL>.
"""
from setuptools import setup, find_packages
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name='easylearn',
    version='0.1.12.alpha',
    description=(
        'This project is designed for machine learning in resting-state fMRI field'
    ),
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='<NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>; <NAME>; <NAME>; <NAME>; <NAME>; <NAME>',
    maintainer_email='<EMAIL>',
    license='MIT License',
    packages=find_packages(),
    platforms=["all"],
    url='https://github.com/easylearn-fmri/',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Education',
        'Natural Language :: English',
        'Natural Language :: Chinese (Simplified)',
        'Programming Language :: Python :: 3',
        'Operating System :: OS Independent',
    ],
    python_requires='>=3.5',
    # Runtime dependencies. NOTE(review): the GUI modules import PyQt5,
    # which is not listed here — confirm whether that is intentional.
    install_requires=[
        'joblib',
        'numpy',
        'pandas',
        'python-dateutil',
        'pytz',
        'scikit-learn',
        'scipy',
        'six',
        'nibabel',
        'imbalanced-learn',
        'skrebate',
        'matplotlib',
    ],
) |
dongmengshi/easylearn | eslearn/utils/lc_add_dcminfo_to_foldername_radiomics.py | <reponame>dongmengshi/easylearn<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 22:30:29 2019
读取dicom文件的信息,并在文件夹名字中添加dicom信息【ID_name_oldname】
@author: <NAME>
"""
import os
import pydicom
class ChangeFolderName():
    """Rename subject folders to "<PatientID>_<PatientName>_<old name>".

    The ID and name are read from the first DICOM file found inside each
    subject folder. Folders whose name already contains the patient ID are
    left untouched.
    """

    def __init__(self):
        # Root directory containing one sub-folder per subject.
        self.path = r'J:\lymph nodes\356_patients_data'

    def fetch_all_subj_folder_path(self):
        """Collect the full path of every subject folder under self.path."""
        all_subj_folder_path = os.listdir(self.path)
        self.all_subj_folder_path = [
            os.path.join(
                self.path,
                path) for path in all_subj_folder_path]
        print("### 读取了所有被试文件夹路径 ###\n")
        return self

    def fetch_all_subj_dicom_info(self):
        """
        Get all subject's dicom information
        """
        # The first file in each folder is used as a representative DICOM.
        all_sample_dicom = [os.listdir(path)[0]
                            for path in self.all_subj_folder_path]
        all_sample_dicom = [
            os.path.join(
                path, dicom) for path, dicom in zip(
                self.all_subj_folder_path, all_sample_dicom)]
        # read dicom info
        print("### Reading dicom info... ###\n")
        self.dicom_info = [
            pydicom.read_file(
                sample_dicom,
                force=True) for sample_dicom in all_sample_dicom]
        self.patient_ID = [info.PatientID for info in self.dicom_info]
        self.patient_name = [
            info.PatientName.components for info in self.dicom_info]
        print("### dicom info read completed! ###\n")
        return self

    def re_name(self):
        """Rename each folder in place, skipping those already tagged with the ID."""
        my_count = 1
        num_subj = len(self.patient_ID)
        for ID, name, folder_path in zip(
                self.patient_ID, self.patient_name, self.all_subj_folder_path):
            print("changing the subject: {}/{}\n".format(my_count, num_subj))
            my_count = my_count + 1
            old_name = folder_path
            new_name = "".join(
                [ID, "_", name[0], "_", os.path.basename(folder_path)])
            new_name = os.path.join(self.path, new_name)
            # Skip folders whose name already embeds the patient ID.
            if ID in old_name:
                print("{}中以及存在ID号,不需修改".format(old_name))
                continue
            else:
                os.rename(old_name, new_name)
        print("### 修改了所有被试文件夹名字 ###\n")
if __name__ == "__main__":
    # Each step mutates `read` in place and returns it (fluent style).
    read = ChangeFolderName()
    results = read.fetch_all_subj_folder_path()
    results = read.fetch_all_subj_dicom_info()
    read.re_name()
|
dongmengshi/easylearn | eslearn/utils/multiprocessing_test.py | <filename>eslearn/utils/multiprocessing_test.py
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 4 22:41:05 2018
@author: lenovo
"""
from concurrent.futures import ThreadPoolExecutor
import time
# 参数times用来模拟网络请求的时间
def get_html(times):
    """Pretend to fetch a page: block for *times* seconds, then return it.

    The sleep stands in for network latency in this thread-pool demo.
    """
    time.sleep(times)
    print("get page {}s finished\n".format(times))
    return times
with ThreadPoolExecutor(2) as executor:
    #executor = ThreadPoolExecutor(max_workers=2)
    # submit() queues the callable on the pool and returns a Future
    # immediately without blocking. Note (0.5) is a parenthesized float,
    # not a one-element tuple — get_html receives 0.5 directly.
    task1 = executor.submit(get_html, (0.5))
    task2 = executor.submit(get_html, (0.5))
|
dongmengshi/easylearn | eslearn/SSD_classification/Data_Inspection/lc_preprocess_for_206.py | """
This script is used to transform the 206 dataset into .npy format.
1.Transform the .mat files to one .npy file
2. Give labels to each subject, concatenate at the first column
"""
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import numpy as np
import pandas as pd
import os
from eslearn.utils.lc_read_write_Mat import read_mat
# Inputs
matroot = r'D:\WorkStation_2018\SZ_classification\Data\SelectedFC_206' # all mat files directory
scale = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100.xlsx' # whole scale path
n_node = 246 # number of nodes in the mat network
# Transform the .mat files to one .npy file
allmatpath = os.listdir(matroot)
allmatpath = [os.path.join(matroot, matpath) for matpath in allmatpath]
# Boolean mask of the strictly upper triangle (k=1 excludes the diagonal),
# used to vectorize each symmetric connectivity matrix.
mask = np.triu(np.ones(n_node),1)==1
allmat = [read_mat(matpath)[mask].T for matpath in allmatpath]
allmat = pd.DataFrame(np.float32(allmat))
# Give uid and labels to each subject, concatenate at the first column
uid = [os.path.basename(matpath) for matpath in allmatpath]
uid = pd.Series(uid)
# Pull subject IDs of the form NC.../SZ... out of the file names,
# then strip underscores so they match the scale's ID column.
uid = uid.str.findall('(NC.*[0-9]\d*|SZ.*[0-9]\d*)')
uid = [str(id[0]) for id in uid]
uid = pd.DataFrame([''.join(id.split('_')) for id in uid])
scale = pd.read_excel(scale)
selected_diagnosis = pd.merge(uid, scale, left_on=0, right_on='ID', how='inner')[['ID','group']]
# Recode group 2 as 0.
# NOTE(review): chained assignment — may hit a copy and raise
# SettingWithCopyWarning on modern pandas; prefer .loc indexing.
selected_diagnosis['group'][selected_diagnosis['group']==2] = 0
allmat_plus_label = pd.concat([selected_diagnosis, allmat],axis=1)
# Map the ID prefixes to numeric codes (NC -> 10..., SZ -> 20...).
allmat_plus_label['ID'] = allmat_plus_label['ID'].str.replace('NC','10');
allmat_plus_label['ID'] = allmat_plus_label['ID'].str.replace('SZ','20');
allmat_plus_label['ID'] = np.int32(allmat_plus_label['ID'])
np.save(r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\dataset_206.npy',allmat_plus_label)
#%% Extract covariances: age and sex
cov = pd.merge(uid, scale, left_on=0, right_on='ID', how='inner')[['ID','group', 'age', 'sex']]
cov[['ID', 'group']] = allmat_plus_label[['ID', 'group']]
cov.columns = ['folder', 'diagnosis', 'age', 'sex']
cov.to_csv(r'D:\WorkStation_2018\SZ_classification\Scale\cov_206.txt', index=False)
|
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_svc_oneVsRest.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 14:56:39 2018
multi-class classfication using one vs rest
In practice, one-vs-rest classification is usually preferred,
since the results are mostly similar,
but the runtime is significantly less.
@author: lenovo
"""
from sklearn import datasets
from sklearn.svm import LinearSVC
import numpy as np
#
# Synthetic 3-class data set: 1000 samples, 200 features (2 informative),
# with 1% label noise. random_state=None, so results vary between runs.
X,y=datasets.make_classification(n_samples=1000, n_features=200, n_informative=2, n_redundant=2,
                                 n_repeated=0, n_classes=3, n_clusters_per_class=1, weights=None,
                                 flip_y=0.01, class_sep=1.0, hypercube=True,shift=0.0, scale=1.0,
                                 shuffle=True, random_state=None)
#
def oneVsRest(X,y):
    """Fit a one-vs-rest linear SVM on (X, y).

    LinearSVC trains one binary classifier per class (one-vs-rest by
    default). Returns the in-sample predictions and the per-class
    decision-function scores.
    """
    classifier = LinearSVC()
    classifier.fit(X, y)
    return classifier.predict(X), classifier.decision_function(X)
if __name__=='__main__':
    # Fit on the synthetic data above; keep predictions + decision scores.
    predict,dec=oneVsRest(X,y) |
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_svc_rfe_cv.py | <filename>eslearn/machine_learning/classfication/lc_svc_rfe_cv.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 10:52:26 2018
rfe-svm-CV
input:
k=3:k-fold
step=0.1:rfe step
num_jobs=1: parallel
scale_method='StandardScaler':standardization method
pca_n_component=0.9
permutation=0
@author: lenovo
"""
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Utils')
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\Machine_learning (Python)\Machine_learning\classfication')
from lc_featureSelection_rfe import rfeCV
import lc_dimreduction as dimreduction
import lc_scaler as scl
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
class svc_rfe_cv():
# initial parameters
def __init__(self,
k=3,
step=0.1,
num_jobs=1,
scale_method='StandardScaler',
pca_n_component=0.8,
permutation=0,
show_results=1,
show_roc=0):
self.k=k
self.step=step
self.num_jobs=num_jobs
self.scale_method=scale_method
self.pca_n_component=pca_n_component
self.permutation=permutation
self.show_results=show_results
self.show_roc=show_roc
#
def main_svc_rfe_cv(self,x,y):
#
print('\n'+'#'*10+' Running... '+'#'*10+'\n')
index_train,index_test=self.fetch_kFold_Index_for_allLabel(x,y,self.k)
predict=pd.DataFrame([])
dec=pd.DataFrame([])
y_real_sorted=pd.DataFrame([])
y=np.reshape(y,[len(y),])
for i in range(self.k):
print('{}/{}\n'.format(i+1,self.k))
# split
x_train,y_train=x[index_train[i]],y[index_train[i]]
X_test,y_test=x[index_test[i]],y[index_test[i]]
y_real_sorted=pd.concat([y_real_sorted,pd.DataFrame(y_test)])
# scale
x_train,X_test=self.scaler(x_train,X_test,self.scale_method)
# pca
x_train,X_test,trained_pca=self.dimReduction(x_train,X_test,self.pca_n_component)
# train
model,weight=self.training(x_train,y_train,\
step=self.step, cv=self.k,n_jobs=self.num_jobs,\
permutation=self.permutation)
# fetch orignal weight
weight=trained_pca.inverse_transform(weight)
# test
prd,de=self.testing(model,X_test)
prd=pd.DataFrame(prd)
de=pd.DataFrame(de)
predict=pd.concat([predict,prd])
dec=pd.concat([dec,de])
# 打印并显示模型性能
if self.show_results:
self.evalPrformance(dec,predict,y_real_sorted)
return predict,dec,y_real_sorted,weight
def splitData_kFold_oneLabel(self,x,y,k):
kf = KFold(n_splits=k)
x_train, X_test=[],[]
y_test=[]
for train_index, test_index in kf.split(x):
x_train.append(x[train_index]), X_test.append( x[test_index])
y_test.append(y[test_index])
return x_train, X_test,y_test
def fetch_kFold_Index_for_allLabel(self,x,y,k):
#分别从每个label对应的数据中,进行kFole选择,
#然后把某个fold的数据组合成一个大的fold数据
uni_y=np.unique(y)
loc_uni_y=[np.argwhere(y==uni) for uni in uni_y]
#
train_index,test_index=[],[]
for y_ in loc_uni_y:
tr_index,te_index=self.fetch_kFold_Index_for_oneLabel(y_,k)
train_index.append(tr_index)
test_index.append(te_index)
#
indexTr_fold=[]
indexTe_fold=[]
for k_ in range(k):
indTr_fold=np.array([])
indTe_fold=np.array([])
for y_ in range(len(uni_y)):
indTr_fold=np.append(indTr_fold,train_index[y_][k_])
indTe_fold=np.append(indTe_fold,test_index[y_][k_])
indexTr_fold.append(indTr_fold)
indexTe_fold.append(indTe_fold)
index_train,index_test=[],[]
for I in indexTr_fold:
index_train.append([int(i) for i in I ])
for I in indexTe_fold:
index_test.append([int(i) for i in I])
return index_train,index_test
#
def fetch_kFold_Index_for_oneLabel(self, originLable, k):
    """Return k-fold train/test index arrays for one label's sample indices.

    ``originLable`` holds the original positions of this label's samples;
    KFold splits positions 0..len-1 and we map them back through it.
    """
    train_index, test_index = [], []
    for tr, te in KFold(n_splits=k).split(originLable):
        train_index.append(originLable[tr])
        test_index.append(originLable[te])
    return train_index, test_index
def scaler(self, train_X, test_X, scale_method):
    """Fit a scaler on the training set and apply it to both sets.

    The scaler is fitted on train_X only, so no information leaks from
    the test set into the scaling parameters.
    """
    scaled_train, fitted_scaler = scl.scaler(train_X, scale_method)
    scaled_test = fitted_scaler.transform(test_X)
    return scaled_train, scaled_test
def dimReduction(self, train_X, test_X, pca_n_component):
    """Reduce dimensionality with PCA fitted on the training set.

    Returns the transformed train/test matrices and the fitted PCA model
    (needed later to project feature weights back to the original space).
    """
    reduced_train, fitted_pca = dimreduction.pca(train_X, pca_n_component)
    reduced_test = fitted_pca.transform(test_X)
    return reduced_train, reduced_test, fitted_pca
def training(self, x, y, step, cv, n_jobs, permutation):
    """Train an SVC with cross-validated recursive feature elimination.

    Returns the fitted model and the feature weights in the (reduced)
    input space.
    """
    trained_model, feature_weight = rfeCV(x, y, step, cv, n_jobs, permutation)
    return trained_model, feature_weight
def testing(self,model,test_X):
predict=model.predict(test_X)
dec=model.decision_function(test_X)
return predict,dec
def evalPrformance(self, dec, predict, y_real_sorted):
    """Evaluate classification performance on the concatenated test folds.

    Parameters:
        dec: pd.DataFrame, decision-function values for all test samples.
        predict: pd.DataFrame, predicted labels aligned with dec.
        y_real_sorted: pd.DataFrame, true labels in the same fold order.

    Returns:
        (accuracy, sensitivity, specificity, auc, confusion)

    NOTE(review): assumes a binary problem with labels {0, 1} where 0 is
    the negative class -- confirm against the callers.
    """
    y_true = y_real_sorted.values
    y_pred = predict.values
    # accuracy
    accuracy = accuracy_score(y_true, y_pred)
    # Derive sensitivity/specificity directly from the confusion matrix
    # instead of string-parsing classification_report(): the report's text
    # layout is not a stable API (it differs across sklearn versions and
    # label sets), and parsing it also truncated both metrics to the two
    # decimals printed in the report.
    confusion = confusion_matrix(y_true, y_pred)
    tn, fp, fn, tp = confusion.ravel()
    specificity = tn / (tn + fp) if (tn + fp) > 0 else 0.0  # recall of negative class
    sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0.0  # recall of positive class
    # ROC curve and AUC from the continuous decision values
    fpr, tpr, thresh = roc_curve(y_true, dec.values)
    auc = roc_auc_score(y_true, dec.values)
    # print performances
    print('\naccuracy={:.2f}\n'.format(accuracy))
    print('sensitivity={:.2f}\n'.format(sensitivity))
    print('specificity={:.2f}\n'.format(specificity))
    print('AUC={:.2f}\n'.format(auc))
    if self.show_roc:
        plt.figure(figsize=(5, 5))
        plt.title('ROC Curve')
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.grid(True)
        plt.plot(fpr, tpr, '-')
        # plt.savefig('roc.png')
    return accuracy, sensitivity, specificity, auc, confusion
#
if __name__ == '__main__':
    # Smoke test on synthetic data: 500 samples x 500 features.
    from sklearn import datasets
    import lc_svc_rfe_cv as lsvc
    x, y = datasets.make_classification(n_samples=500, n_features=500, random_state=1)
    selector = lsvc.svc_rfe_cv(k=5)
    predict, dec, y_real_sorted, weight = selector.main_svc_rfe_cv(x, y)
dongmengshi/easylearn | eslearn/GUI/easylearn_feature_engineering_gui.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:/My_Codes/easylearn-fmri/eslearn/GUI/easylearn_feature_engineering_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(892, 727)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(500, 0))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_32 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_32.setObjectName("gridLayout_32")
self.tabWidget_items = QtWidgets.QTabWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget_items.sizePolicy().hasHeightForWidth())
self.tabWidget_items.setSizePolicy(sizePolicy)
self.tabWidget_items.setMinimumSize(QtCore.QSize(100, 100))
self.tabWidget_items.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget_items.setTabShape(QtWidgets.QTabWidget.Triangular)
self.tabWidget_items.setObjectName("tabWidget_items")
self.tabWidget_itemsPage1 = QtWidgets.QWidget()
self.tabWidget_itemsPage1.setObjectName("tabWidget_itemsPage1")
self.gridLayout = QtWidgets.QGridLayout(self.tabWidget_itemsPage1)
self.gridLayout.setObjectName("gridLayout")
self.groupBox_preprocessing_setting = QtWidgets.QGroupBox(self.tabWidget_itemsPage1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_preprocessing_setting.sizePolicy().hasHeightForWidth())
self.groupBox_preprocessing_setting.setSizePolicy(sizePolicy)
self.groupBox_preprocessing_setting.setMinimumSize(QtCore.QSize(300, 80))
self.groupBox_preprocessing_setting.setMaximumSize(QtCore.QSize(10000, 16777215))
self.groupBox_preprocessing_setting.setObjectName("groupBox_preprocessing_setting")
self.gridLayout_26 = QtWidgets.QGridLayout(self.groupBox_preprocessing_setting)
self.gridLayout_26.setObjectName("gridLayout_26")
self.stackedWidget_preprocessing_methods = QtWidgets.QStackedWidget(self.groupBox_preprocessing_setting)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.stackedWidget_preprocessing_methods.sizePolicy().hasHeightForWidth())
self.stackedWidget_preprocessing_methods.setSizePolicy(sizePolicy)
self.stackedWidget_preprocessing_methods.setObjectName("stackedWidget_preprocessing_methods")
self.to_zscore = QtWidgets.QWidget()
self.to_zscore.setObjectName("to_zscore")
self.gridLayout_17 = QtWidgets.QGridLayout(self.to_zscore)
self.gridLayout_17.setObjectName("gridLayout_17")
self.label_zscore = QtWidgets.QLabel(self.to_zscore)
self.label_zscore.setText("")
self.label_zscore.setObjectName("label_zscore")
self.gridLayout_17.addWidget(self.label_zscore, 0, 0, 1, 1)
self.stackedWidget_preprocessing_methods.addWidget(self.to_zscore)
self.to_scaling = QtWidgets.QWidget()
self.to_scaling.setObjectName("to_scaling")
self.gridLayout_10 = QtWidgets.QGridLayout(self.to_scaling)
self.gridLayout_10.setObjectName("gridLayout_10")
self.label_scaling_max = QtWidgets.QLabel(self.to_scaling)
self.label_scaling_max.setObjectName("label_scaling_max")
self.gridLayout_10.addWidget(self.label_scaling_max, 0, 0, 1, 1)
self.lineEdit_scaling_min = QtWidgets.QLineEdit(self.to_scaling)
self.lineEdit_scaling_min.setObjectName("lineEdit_scaling_min")
self.gridLayout_10.addWidget(self.lineEdit_scaling_min, 1, 1, 1, 1)
self.label_scaling_min = QtWidgets.QLabel(self.to_scaling)
self.label_scaling_min.setObjectName("label_scaling_min")
self.gridLayout_10.addWidget(self.label_scaling_min, 1, 0, 1, 1)
self.lineEdit_scaling_max = QtWidgets.QLineEdit(self.to_scaling)
self.lineEdit_scaling_max.setObjectName("lineEdit_scaling_max")
self.gridLayout_10.addWidget(self.lineEdit_scaling_max, 0, 1, 1, 1)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_10.addItem(spacerItem, 0, 2, 1, 1)
self.stackedWidget_preprocessing_methods.addWidget(self.to_scaling)
self.to_demean = QtWidgets.QWidget()
self.to_demean.setObjectName("to_demean")
self.stackedWidget_preprocessing_methods.addWidget(self.to_demean)
self.to_none = QtWidgets.QWidget()
self.to_none.setObjectName("to_none")
self.gridLayout_19 = QtWidgets.QGridLayout(self.to_none)
self.gridLayout_19.setObjectName("gridLayout_19")
self.label_none = QtWidgets.QLabel(self.to_none)
self.label_none.setText("")
self.label_none.setObjectName("label_none")
self.gridLayout_19.addWidget(self.label_none, 0, 0, 1, 1)
self.stackedWidget_preprocessing_methods.addWidget(self.to_none)
self.gridLayout_26.addWidget(self.stackedWidget_preprocessing_methods, 0, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_preprocessing_setting, 0, 2, 1, 1)
self.groupBox_methods = QtWidgets.QGroupBox(self.tabWidget_itemsPage1)
self.groupBox_methods.setObjectName("groupBox_methods")
self.gridLayout_8 = QtWidgets.QGridLayout(self.groupBox_methods)
self.gridLayout_8.setObjectName("gridLayout_8")
self.radioButton_none_methods = QtWidgets.QRadioButton(self.groupBox_methods)
self.radioButton_none_methods.setObjectName("radioButton_none_methods")
self.gridLayout_8.addWidget(self.radioButton_none_methods, 3, 0, 1, 1)
self.radioButton_demean = QtWidgets.QRadioButton(self.groupBox_methods)
self.radioButton_demean.setObjectName("radioButton_demean")
self.gridLayout_8.addWidget(self.radioButton_demean, 2, 0, 1, 1)
self.radioButton_zscore = QtWidgets.QRadioButton(self.groupBox_methods)
self.radioButton_zscore.setObjectName("radioButton_zscore")
self.gridLayout_8.addWidget(self.radioButton_zscore, 0, 0, 1, 1)
self.radioButton_scaling = QtWidgets.QRadioButton(self.groupBox_methods)
self.radioButton_scaling.setObjectName("radioButton_scaling")
self.gridLayout_8.addWidget(self.radioButton_scaling, 1, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_methods, 0, 0, 1, 1)
self.groupBox_level = QtWidgets.QGroupBox(self.tabWidget_itemsPage1)
self.groupBox_level.setObjectName("groupBox_level")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_level)
self.verticalLayout.setObjectName("verticalLayout")
self.radioButton_grouplevel = QtWidgets.QRadioButton(self.groupBox_level)
self.radioButton_grouplevel.setObjectName("radioButton_grouplevel")
self.verticalLayout.addWidget(self.radioButton_grouplevel)
self.radioButton_subjectlevel = QtWidgets.QRadioButton(self.groupBox_level)
self.radioButton_subjectlevel.setObjectName("radioButton_subjectlevel")
self.verticalLayout.addWidget(self.radioButton_subjectlevel)
self.gridLayout.addWidget(self.groupBox_level, 1, 0, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 0, 1, 1, 1)
self.tabWidget_items.addTab(self.tabWidget_itemsPage1, "")
self.tabWidget_itemsPage2 = QtWidgets.QWidget()
self.tabWidget_itemsPage2.setObjectName("tabWidget_itemsPage2")
self.gridLayout_29 = QtWidgets.QGridLayout(self.tabWidget_itemsPage2)
self.gridLayout_29.setObjectName("gridLayout_29")
self.groupBox = QtWidgets.QGroupBox(self.tabWidget_itemsPage2)
self.groupBox.setObjectName("groupBox")
self.gridLayout_28 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_28.setObjectName("gridLayout_28")
self.radioButton_lda = QtWidgets.QRadioButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radioButton_lda.sizePolicy().hasHeightForWidth())
self.radioButton_lda.setSizePolicy(sizePolicy)
self.radioButton_lda.setObjectName("radioButton_lda")
self.gridLayout_28.addWidget(self.radioButton_lda, 3, 0, 1, 1)
self.radioButton_ica = QtWidgets.QRadioButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radioButton_ica.sizePolicy().hasHeightForWidth())
self.radioButton_ica.setSizePolicy(sizePolicy)
self.radioButton_ica.setObjectName("radioButton_ica")
self.gridLayout_28.addWidget(self.radioButton_ica, 1, 0, 1, 1)
self.radioButton_nmf = QtWidgets.QRadioButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radioButton_nmf.sizePolicy().hasHeightForWidth())
self.radioButton_nmf.setSizePolicy(sizePolicy)
self.radioButton_nmf.setObjectName("radioButton_nmf")
self.gridLayout_28.addWidget(self.radioButton_nmf, 2, 0, 1, 1)
self.radioButton_pca = QtWidgets.QRadioButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radioButton_pca.sizePolicy().hasHeightForWidth())
self.radioButton_pca.setSizePolicy(sizePolicy)
self.radioButton_pca.setObjectName("radioButton_pca")
self.gridLayout_28.addWidget(self.radioButton_pca, 0, 0, 1, 1)
self.radioButton_none = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_none.setObjectName("radioButton_none")
self.gridLayout_28.addWidget(self.radioButton_none, 4, 0, 1, 1)
self.gridLayout_29.addWidget(self.groupBox, 0, 0, 2, 1)
self.label = QtWidgets.QLabel(self.tabWidget_itemsPage2)
self.label.setText("")
self.label.setObjectName("label")
self.gridLayout_29.addWidget(self.label, 0, 2, 1, 1)
self.groupBox_2 = QtWidgets.QGroupBox(self.tabWidget_itemsPage2)
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout_3.setObjectName("gridLayout_3")
self.stackedWidget_dimreduction = QtWidgets.QStackedWidget(self.groupBox_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.stackedWidget_dimreduction.sizePolicy().hasHeightForWidth())
self.stackedWidget_dimreduction.setSizePolicy(sizePolicy)
self.stackedWidget_dimreduction.setObjectName("stackedWidget_dimreduction")
self.to_pca = QtWidgets.QWidget()
self.to_pca.setObjectName("to_pca")
self.gridLayout_11 = QtWidgets.QGridLayout(self.to_pca)
self.gridLayout_11.setObjectName("gridLayout_11")
self.label_n_components = QtWidgets.QLabel(self.to_pca)
self.label_n_components.setObjectName("label_n_components")
self.gridLayout_11.addWidget(self.label_n_components, 0, 0, 1, 1)
self.doubleSpinBox_pca_maxcomponents = QtWidgets.QDoubleSpinBox(self.to_pca)
self.doubleSpinBox_pca_maxcomponents.setMaximum(1.0)
self.doubleSpinBox_pca_maxcomponents.setSingleStep(0.1)
self.doubleSpinBox_pca_maxcomponents.setProperty("value", 0.99)
self.doubleSpinBox_pca_maxcomponents.setObjectName("doubleSpinBox_pca_maxcomponents")
self.gridLayout_11.addWidget(self.doubleSpinBox_pca_maxcomponents, 0, 1, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_11.addItem(spacerItem2, 0, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.to_pca)
self.label_2.setObjectName("label_2")
self.gridLayout_11.addWidget(self.label_2, 1, 0, 1, 1)
self.doubleSpinBox_pca_mincomponents = QtWidgets.QDoubleSpinBox(self.to_pca)
self.doubleSpinBox_pca_mincomponents.setMaximum(1.0)
self.doubleSpinBox_pca_mincomponents.setSingleStep(0.1)
self.doubleSpinBox_pca_mincomponents.setProperty("value", 0.01)
self.doubleSpinBox_pca_mincomponents.setObjectName("doubleSpinBox_pca_mincomponents")
self.gridLayout_11.addWidget(self.doubleSpinBox_pca_mincomponents, 1, 1, 1, 1)
self.label_36 = QtWidgets.QLabel(self.to_pca)
self.label_36.setObjectName("label_36")
self.gridLayout_11.addWidget(self.label_36, 2, 0, 1, 1)
self.spinBox_pcanum = QtWidgets.QSpinBox(self.to_pca)
self.spinBox_pcanum.setMaximum(10000)
self.spinBox_pcanum.setSingleStep(5)
self.spinBox_pcanum.setProperty("value", 10)
self.spinBox_pcanum.setObjectName("spinBox_pcanum")
self.gridLayout_11.addWidget(self.spinBox_pcanum, 2, 1, 1, 1)
self.stackedWidget_dimreduction.addWidget(self.to_pca)
self.to_ica = QtWidgets.QWidget()
self.to_ica.setObjectName("to_ica")
self.gridLayout_6 = QtWidgets.QGridLayout(self.to_ica)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_n_ic = QtWidgets.QLabel(self.to_ica)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_n_ic.sizePolicy().hasHeightForWidth())
self.label_n_ic.setSizePolicy(sizePolicy)
self.label_n_ic.setObjectName("label_n_ic")
self.gridLayout_6.addWidget(self.label_n_ic, 0, 0, 1, 1)
self.label_37 = QtWidgets.QLabel(self.to_ica)
self.label_37.setObjectName("label_37")
self.gridLayout_6.addWidget(self.label_37, 2, 0, 1, 1)
self.spinBox_icnum = QtWidgets.QSpinBox(self.to_ica)
self.spinBox_icnum.setMaximum(10000)
self.spinBox_icnum.setSingleStep(5)
self.spinBox_icnum.setProperty("value", 10)
self.spinBox_icnum.setObjectName("spinBox_icnum")
self.gridLayout_6.addWidget(self.spinBox_icnum, 2, 1, 1, 1)
self.label_9 = QtWidgets.QLabel(self.to_ica)
self.label_9.setObjectName("label_9")
self.gridLayout_6.addWidget(self.label_9, 1, 0, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_6.addItem(spacerItem3, 0, 2, 1, 1)
self.doubleSpinBox_ica_maxics = QtWidgets.QDoubleSpinBox(self.to_ica)
self.doubleSpinBox_ica_maxics.setMaximum(1.0)
self.doubleSpinBox_ica_maxics.setSingleStep(0.1)
self.doubleSpinBox_ica_maxics.setProperty("value", 0.99)
self.doubleSpinBox_ica_maxics.setObjectName("doubleSpinBox_ica_maxics")
self.gridLayout_6.addWidget(self.doubleSpinBox_ica_maxics, 0, 1, 1, 1)
self.doubleSpinBox_ica_minics = QtWidgets.QDoubleSpinBox(self.to_ica)
self.doubleSpinBox_ica_minics.setMaximum(1.0)
self.doubleSpinBox_ica_minics.setSingleStep(0.1)
self.doubleSpinBox_ica_minics.setProperty("value", 0.01)
self.doubleSpinBox_ica_minics.setObjectName("doubleSpinBox_ica_minics")
self.gridLayout_6.addWidget(self.doubleSpinBox_ica_minics, 1, 1, 1, 1)
self.stackedWidget_dimreduction.addWidget(self.to_ica)
self.to_lda = QtWidgets.QWidget()
self.to_lda.setObjectName("to_lda")
self.gridLayout_13 = QtWidgets.QGridLayout(self.to_lda)
self.gridLayout_13.setObjectName("gridLayout_13")
self.label_ida = QtWidgets.QLabel(self.to_lda)
self.label_ida.setObjectName("label_ida")
self.gridLayout_13.addWidget(self.label_ida, 0, 0, 1, 1)
self.lineEdit_ida = QtWidgets.QLineEdit(self.to_lda)
self.lineEdit_ida.setObjectName("lineEdit_ida")
self.gridLayout_13.addWidget(self.lineEdit_ida, 0, 1, 1, 1)
self.stackedWidget_dimreduction.addWidget(self.to_lda)
self.to_nmf = QtWidgets.QWidget()
self.to_nmf.setObjectName("to_nmf")
self.gridLayout_21 = QtWidgets.QGridLayout(self.to_nmf)
self.gridLayout_21.setObjectName("gridLayout_21")
self.label_label_nmf_mincompnents = QtWidgets.QLabel(self.to_nmf)
self.label_label_nmf_mincompnents.setObjectName("label_label_nmf_mincompnents")
self.gridLayout_21.addWidget(self.label_label_nmf_mincompnents, 1, 0, 1, 2)
self.spinBox_nmfnum = QtWidgets.QSpinBox(self.to_nmf)
self.spinBox_nmfnum.setMaximum(10000)
self.spinBox_nmfnum.setSingleStep(5)
self.spinBox_nmfnum.setProperty("value", 10)
self.spinBox_nmfnum.setObjectName("spinBox_nmfnum")
self.gridLayout_21.addWidget(self.spinBox_nmfnum, 3, 2, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_21.addItem(spacerItem4, 0, 3, 1, 1)
self.label_nmf_maxcompnents = QtWidgets.QLabel(self.to_nmf)
self.label_nmf_maxcompnents.setObjectName("label_nmf_maxcompnents")
self.gridLayout_21.addWidget(self.label_nmf_maxcompnents, 0, 0, 1, 2)
self.label_38 = QtWidgets.QLabel(self.to_nmf)
self.label_38.setObjectName("label_38")
self.gridLayout_21.addWidget(self.label_38, 3, 0, 1, 1)
self.label_nmf_init = QtWidgets.QLabel(self.to_nmf)
self.label_nmf_init.setObjectName("label_nmf_init")
self.gridLayout_21.addWidget(self.label_nmf_init, 4, 0, 1, 1)
self.comboBox_2 = QtWidgets.QComboBox(self.to_nmf)
self.comboBox_2.setObjectName("comboBox_2")
self.comboBox_2.addItem("")
self.gridLayout_21.addWidget(self.comboBox_2, 4, 2, 1, 1)
self.doubleSpinBox_nmf_maxcomponents = QtWidgets.QDoubleSpinBox(self.to_nmf)
self.doubleSpinBox_nmf_maxcomponents.setMaximum(1.0)
self.doubleSpinBox_nmf_maxcomponents.setSingleStep(0.1)
self.doubleSpinBox_nmf_maxcomponents.setProperty("value", 1.0)
self.doubleSpinBox_nmf_maxcomponents.setObjectName("doubleSpinBox_nmf_maxcomponents")
self.gridLayout_21.addWidget(self.doubleSpinBox_nmf_maxcomponents, 0, 2, 1, 1)
self.doubleSpinBox_nmf_mincompnents = QtWidgets.QDoubleSpinBox(self.to_nmf)
self.doubleSpinBox_nmf_mincompnents.setMaximum(1.0)
self.doubleSpinBox_nmf_mincompnents.setSingleStep(0.1)
self.doubleSpinBox_nmf_mincompnents.setProperty("value", 0.01)
self.doubleSpinBox_nmf_mincompnents.setObjectName("doubleSpinBox_nmf_mincompnents")
self.gridLayout_21.addWidget(self.doubleSpinBox_nmf_mincompnents, 1, 2, 1, 1)
self.stackedWidget_dimreduction.addWidget(self.to_nmf)
self.to_none_dimreduction = QtWidgets.QWidget()
self.to_none_dimreduction.setObjectName("to_none_dimreduction")
self.gridLayout_5 = QtWidgets.QGridLayout(self.to_none_dimreduction)
self.gridLayout_5.setObjectName("gridLayout_5")
self.stackedWidget_dimreduction.addWidget(self.to_none_dimreduction)
self.gridLayout_3.addWidget(self.stackedWidget_dimreduction, 0, 0, 1, 1)
self.gridLayout_29.addWidget(self.groupBox_2, 0, 1, 2, 1)
self.tabWidget_items.addTab(self.tabWidget_itemsPage2, "")
self.tabWidget_itemsPage3 = QtWidgets.QWidget()
self.tabWidget_itemsPage3.setObjectName("tabWidget_itemsPage3")
self.gridLayout_30 = QtWidgets.QGridLayout(self.tabWidget_itemsPage3)
self.gridLayout_30.setObjectName("gridLayout_30")
self.groupBox_3 = QtWidgets.QGroupBox(self.tabWidget_itemsPage3)
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout_4 = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_filter = QtWidgets.QLabel(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_filter.sizePolicy().hasHeightForWidth())
self.label_filter.setSizePolicy(sizePolicy)
self.label_filter.setObjectName("label_filter")
self.gridLayout_4.addWidget(self.label_filter, 0, 0, 1, 1)
self.radioButton_variance_threshold = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_variance_threshold.setObjectName("radioButton_variance_threshold")
self.gridLayout_4.addWidget(self.radioButton_variance_threshold, 1, 0, 1, 1)
self.radioButton_correlation = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_correlation.setObjectName("radioButton_correlation")
self.gridLayout_4.addWidget(self.radioButton_correlation, 2, 0, 1, 1)
self.radioButton_distancecorrelation = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_distancecorrelation.setObjectName("radioButton_distancecorrelation")
self.gridLayout_4.addWidget(self.radioButton_distancecorrelation, 3, 0, 1, 1)
self.radioButton_fscore = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_fscore.setObjectName("radioButton_fscore")
self.gridLayout_4.addWidget(self.radioButton_fscore, 4, 0, 1, 1)
self.radioButton_mutualinfo_cls = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_mutualinfo_cls.setObjectName("radioButton_mutualinfo_cls")
self.gridLayout_4.addWidget(self.radioButton_mutualinfo_cls, 5, 0, 1, 1)
self.radioButton_mutualinfo_regression = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_mutualinfo_regression.setObjectName("radioButton_mutualinfo_regression")
self.gridLayout_4.addWidget(self.radioButton_mutualinfo_regression, 6, 0, 1, 1)
self.radioButton_relieff = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_relieff.setObjectName("radioButton_relieff")
self.gridLayout_4.addWidget(self.radioButton_relieff, 7, 0, 1, 1)
self.radioButton_anova = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_anova.setObjectName("radioButton_anova")
self.gridLayout_4.addWidget(self.radioButton_anova, 8, 0, 1, 1)
self.line = QtWidgets.QFrame(self.groupBox_3)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout_4.addWidget(self.line, 9, 0, 1, 1)
self.label_wrapper = QtWidgets.QLabel(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_wrapper.sizePolicy().hasHeightForWidth())
self.label_wrapper.setSizePolicy(sizePolicy)
self.label_wrapper.setObjectName("label_wrapper")
self.gridLayout_4.addWidget(self.label_wrapper, 10, 0, 1, 1)
self.radioButton_rfe = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_rfe.setObjectName("radioButton_rfe")
self.gridLayout_4.addWidget(self.radioButton_rfe, 11, 0, 1, 1)
self.line_2 = QtWidgets.QFrame(self.groupBox_3)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.gridLayout_4.addWidget(self.line_2, 12, 0, 1, 1)
self.label_embedded = QtWidgets.QLabel(self.groupBox_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_embedded.sizePolicy().hasHeightForWidth())
self.label_embedded.setSizePolicy(sizePolicy)
self.label_embedded.setObjectName("label_embedded")
self.gridLayout_4.addWidget(self.label_embedded, 13, 0, 1, 1)
self.radioButton_l1 = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_l1.setObjectName("radioButton_l1")
self.gridLayout_4.addWidget(self.radioButton_l1, 14, 0, 1, 1)
self.radioButton_elasticnet = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_elasticnet.setObjectName("radioButton_elasticnet")
self.gridLayout_4.addWidget(self.radioButton_elasticnet, 15, 0, 1, 1)
self.line_4 = QtWidgets.QFrame(self.groupBox_3)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.gridLayout_4.addWidget(self.line_4, 16, 0, 1, 1)
self.radioButton_featureselection_none = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_featureselection_none.setObjectName("radioButton_featureselection_none")
self.gridLayout_4.addWidget(self.radioButton_featureselection_none, 17, 0, 1, 1)
self.line_5 = QtWidgets.QFrame(self.groupBox_3)
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.gridLayout_4.addWidget(self.line_5, 18, 0, 1, 1)
self.gridLayout_30.addWidget(self.groupBox_3, 0, 0, 2, 2)
self.groupBox_feature_selection_input = QtWidgets.QGroupBox(self.tabWidget_itemsPage3)
self.groupBox_feature_selection_input.setObjectName("groupBox_feature_selection_input")
self.gridLayout_9 = QtWidgets.QGridLayout(self.groupBox_feature_selection_input)
self.gridLayout_9.setObjectName("gridLayout_9")
self.stackedWidget_feature_selection = QtWidgets.QStackedWidget(self.groupBox_feature_selection_input)
self.stackedWidget_feature_selection.setObjectName("stackedWidget_feature_selection")
self.to_variance_threshold = QtWidgets.QWidget()
self.to_variance_threshold.setObjectName("to_variance_threshold")
self.gridLayout_15 = QtWidgets.QGridLayout(self.to_variance_threshold)
self.gridLayout_15.setObjectName("gridLayout_15")
self.label_variancesthreshold = QtWidgets.QLabel(self.to_variance_threshold)
self.label_variancesthreshold.setObjectName("label_variancesthreshold")
self.gridLayout_15.addWidget(self.label_variancesthreshold, 0, 0, 1, 1)
self.doubleSpinBox_variancethreshold_min = QtWidgets.QDoubleSpinBox(self.to_variance_threshold)
self.doubleSpinBox_variancethreshold_min.setMaximum(1.0)
self.doubleSpinBox_variancethreshold_min.setSingleStep(0.1)
self.doubleSpinBox_variancethreshold_min.setProperty("value", 0.1)
self.doubleSpinBox_variancethreshold_min.setObjectName("doubleSpinBox_variancethreshold_min")
self.gridLayout_15.addWidget(self.doubleSpinBox_variancethreshold_min, 1, 1, 1, 1)
self.doubleSpinBox_variancethreshold_max = QtWidgets.QDoubleSpinBox(self.to_variance_threshold)
self.doubleSpinBox_variancethreshold_max.setMaximum(1.0)
self.doubleSpinBox_variancethreshold_max.setSingleStep(0.1)
self.doubleSpinBox_variancethreshold_max.setProperty("value", 0.8)
self.doubleSpinBox_variancethreshold_max.setObjectName("doubleSpinBox_variancethreshold_max")
self.gridLayout_15.addWidget(self.doubleSpinBox_variancethreshold_max, 0, 1, 1, 1)
self.label_18 = QtWidgets.QLabel(self.to_variance_threshold)
self.label_18.setObjectName("label_18")
self.gridLayout_15.addWidget(self.label_18, 1, 0, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_15.addItem(spacerItem5, 0, 2, 1, 1)
self.label_20 = QtWidgets.QLabel(self.to_variance_threshold)
self.label_20.setObjectName("label_20")
self.gridLayout_15.addWidget(self.label_20, 2, 0, 1, 1)
self.spinBox_variancethreshold_num = QtWidgets.QSpinBox(self.to_variance_threshold)
self.spinBox_variancethreshold_num.setMaximum(10000)
self.spinBox_variancethreshold_num.setSingleStep(5)
self.spinBox_variancethreshold_num.setProperty("value", 10)
self.spinBox_variancethreshold_num.setObjectName("spinBox_variancethreshold_num")
self.gridLayout_15.addWidget(self.spinBox_variancethreshold_num, 2, 1, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_variance_threshold)
self.correlation = QtWidgets.QWidget()
self.correlation.setObjectName("correlation")
self.gridLayout_16 = QtWidgets.QGridLayout(self.correlation)
self.gridLayout_16.setObjectName("gridLayout_16")
self.doubleSpinBox_correlation_minabscoef = QtWidgets.QDoubleSpinBox(self.correlation)
self.doubleSpinBox_correlation_minabscoef.setMaximum(1.0)
self.doubleSpinBox_correlation_minabscoef.setSingleStep(0.1)
self.doubleSpinBox_correlation_minabscoef.setProperty("value", 0.01)
self.doubleSpinBox_correlation_minabscoef.setObjectName("doubleSpinBox_correlation_minabscoef")
self.gridLayout_16.addWidget(self.doubleSpinBox_correlation_minabscoef, 1, 2, 1, 1)
self.label_7 = QtWidgets.QLabel(self.correlation)
self.label_7.setObjectName("label_7")
self.gridLayout_16.addWidget(self.label_7, 0, 0, 1, 1)
self.label_21 = QtWidgets.QLabel(self.correlation)
self.label_21.setObjectName("label_21")
self.gridLayout_16.addWidget(self.label_21, 2, 0, 1, 1)
self.doubleSpinBox_correlation_maxabscoef = QtWidgets.QDoubleSpinBox(self.correlation)
self.doubleSpinBox_correlation_maxabscoef.setMaximum(1.0)
self.doubleSpinBox_correlation_maxabscoef.setSingleStep(0.1)
self.doubleSpinBox_correlation_maxabscoef.setProperty("value", 1.0)
self.doubleSpinBox_correlation_maxabscoef.setObjectName("doubleSpinBox_correlation_maxabscoef")
self.gridLayout_16.addWidget(self.doubleSpinBox_correlation_maxabscoef, 0, 2, 1, 1)
self.spinBox_correlation_num = QtWidgets.QSpinBox(self.correlation)
self.spinBox_correlation_num.setMaximum(10000)
self.spinBox_correlation_num.setSingleStep(5)
self.spinBox_correlation_num.setProperty("value", 10)
self.spinBox_correlation_num.setObjectName("spinBox_correlation_num")
self.gridLayout_16.addWidget(self.spinBox_correlation_num, 2, 2, 1, 1)
self.label_8 = QtWidgets.QLabel(self.correlation)
self.label_8.setObjectName("label_8")
self.gridLayout_16.addWidget(self.label_8, 1, 0, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.correlation)
self.to_distancecorrelation = QtWidgets.QWidget()
self.to_distancecorrelation.setObjectName("to_distancecorrelation")
self.gridLayout_2 = QtWidgets.QGridLayout(self.to_distancecorrelation)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_22 = QtWidgets.QLabel(self.to_distancecorrelation)
self.label_22.setObjectName("label_22")
self.gridLayout_2.addWidget(self.label_22, 2, 0, 1, 1)
self.spinBox_distancecorrelation_num = QtWidgets.QSpinBox(self.to_distancecorrelation)
self.spinBox_distancecorrelation_num.setMaximum(10000)
self.spinBox_distancecorrelation_num.setSingleStep(5)
self.spinBox_distancecorrelation_num.setProperty("value", 10)
self.spinBox_distancecorrelation_num.setObjectName("spinBox_distancecorrelation_num")
self.gridLayout_2.addWidget(self.spinBox_distancecorrelation_num, 2, 1, 1, 1)
self.doubleSpinBox_distancecorrelation_maxabscoef = QtWidgets.QDoubleSpinBox(self.to_distancecorrelation)
self.doubleSpinBox_distancecorrelation_maxabscoef.setMaximum(1.0)
self.doubleSpinBox_distancecorrelation_maxabscoef.setSingleStep(0.1)
self.doubleSpinBox_distancecorrelation_maxabscoef.setProperty("value", 1.0)
self.doubleSpinBox_distancecorrelation_maxabscoef.setObjectName("doubleSpinBox_distancecorrelation_maxabscoef")
self.gridLayout_2.addWidget(self.doubleSpinBox_distancecorrelation_maxabscoef, 0, 1, 1, 1)
self.label_23 = QtWidgets.QLabel(self.to_distancecorrelation)
self.label_23.setObjectName("label_23")
self.gridLayout_2.addWidget(self.label_23, 0, 0, 1, 1)
self.doubleSpinBox_distancecorrelation_minabscoef = QtWidgets.QDoubleSpinBox(self.to_distancecorrelation)
self.doubleSpinBox_distancecorrelation_minabscoef.setMaximum(1.0)
self.doubleSpinBox_distancecorrelation_minabscoef.setSingleStep(0.1)
self.doubleSpinBox_distancecorrelation_minabscoef.setProperty("value", 0.0)
self.doubleSpinBox_distancecorrelation_minabscoef.setObjectName("doubleSpinBox_distancecorrelation_minabscoef")
self.gridLayout_2.addWidget(self.doubleSpinBox_distancecorrelation_minabscoef, 1, 1, 1, 1)
self.label_24 = QtWidgets.QLabel(self.to_distancecorrelation)
self.label_24.setObjectName("label_24")
self.gridLayout_2.addWidget(self.label_24, 1, 0, 1, 1)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem6, 0, 2, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_distancecorrelation)
self.to_fscore = QtWidgets.QWidget()
self.to_fscore.setObjectName("to_fscore")
self.gridLayout_14 = QtWidgets.QGridLayout(self.to_fscore)
self.gridLayout_14.setObjectName("gridLayout_14")
self.doubleSpinBox_fscore_minnum = QtWidgets.QDoubleSpinBox(self.to_fscore)
self.doubleSpinBox_fscore_minnum.setMaximum(1.0)
self.doubleSpinBox_fscore_minnum.setSingleStep(0.1)
self.doubleSpinBox_fscore_minnum.setProperty("value", 0.01)
self.doubleSpinBox_fscore_minnum.setObjectName("doubleSpinBox_fscore_minnum")
self.gridLayout_14.addWidget(self.doubleSpinBox_fscore_minnum, 1, 1, 1, 1)
self.label_27 = QtWidgets.QLabel(self.to_fscore)
self.label_27.setObjectName("label_27")
self.gridLayout_14.addWidget(self.label_27, 1, 0, 1, 1)
self.label_26 = QtWidgets.QLabel(self.to_fscore)
self.label_26.setObjectName("label_26")
self.gridLayout_14.addWidget(self.label_26, 0, 0, 1, 1)
self.label_25 = QtWidgets.QLabel(self.to_fscore)
self.label_25.setObjectName("label_25")
self.gridLayout_14.addWidget(self.label_25, 2, 0, 1, 1)
self.spinBox_fscore_num = QtWidgets.QSpinBox(self.to_fscore)
self.spinBox_fscore_num.setMaximum(10000)
self.spinBox_fscore_num.setSingleStep(5)
self.spinBox_fscore_num.setProperty("value", 10)
self.spinBox_fscore_num.setObjectName("spinBox_fscore_num")
self.gridLayout_14.addWidget(self.spinBox_fscore_num, 2, 1, 1, 1)
self.doubleSpinBox_fscore_maxnum = QtWidgets.QDoubleSpinBox(self.to_fscore)
self.doubleSpinBox_fscore_maxnum.setMaximum(1.0)
self.doubleSpinBox_fscore_maxnum.setSingleStep(0.1)
self.doubleSpinBox_fscore_maxnum.setProperty("value", 1.0)
self.doubleSpinBox_fscore_maxnum.setObjectName("doubleSpinBox_fscore_maxnum")
self.gridLayout_14.addWidget(self.doubleSpinBox_fscore_maxnum, 0, 1, 1, 1)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_14.addItem(spacerItem7, 0, 2, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_fscore)
self.to_mutualinfo_cls = QtWidgets.QWidget()
self.to_mutualinfo_cls.setObjectName("to_mutualinfo_cls")
self.gridLayout_22 = QtWidgets.QGridLayout(self.to_mutualinfo_cls)
self.gridLayout_22.setObjectName("gridLayout_22")
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_22.addItem(spacerItem8, 4, 2, 1, 1)
self.label_mutualinfo_cls = QtWidgets.QLabel(self.to_mutualinfo_cls)
self.label_mutualinfo_cls.setObjectName("label_mutualinfo_cls")
self.gridLayout_22.addWidget(self.label_mutualinfo_cls, 5, 0, 1, 1)
self.label_28 = QtWidgets.QLabel(self.to_mutualinfo_cls)
self.label_28.setObjectName("label_28")
self.gridLayout_22.addWidget(self.label_28, 4, 0, 1, 1)
self.spinBox_mutualinfocls_num = QtWidgets.QSpinBox(self.to_mutualinfo_cls)
self.spinBox_mutualinfocls_num.setMaximum(10000)
self.spinBox_mutualinfocls_num.setSingleStep(5)
self.spinBox_mutualinfocls_num.setProperty("value", 10)
self.spinBox_mutualinfocls_num.setObjectName("spinBox_mutualinfocls_num")
self.gridLayout_22.addWidget(self.spinBox_mutualinfocls_num, 4, 1, 1, 1)
self.doubleSpinBox_mutualinfocls_maxnum = QtWidgets.QDoubleSpinBox(self.to_mutualinfo_cls)
self.doubleSpinBox_mutualinfocls_maxnum.setMaximum(1.0)
self.doubleSpinBox_mutualinfocls_maxnum.setSingleStep(0.1)
self.doubleSpinBox_mutualinfocls_maxnum.setProperty("value", 1.0)
self.doubleSpinBox_mutualinfocls_maxnum.setObjectName("doubleSpinBox_mutualinfocls_maxnum")
self.gridLayout_22.addWidget(self.doubleSpinBox_mutualinfocls_maxnum, 2, 1, 1, 1)
self.label_30 = QtWidgets.QLabel(self.to_mutualinfo_cls)
self.label_30.setObjectName("label_30")
self.gridLayout_22.addWidget(self.label_30, 2, 0, 1, 1)
self.spinBox_mutualinfocls_neighbors = QtWidgets.QSpinBox(self.to_mutualinfo_cls)
self.spinBox_mutualinfocls_neighbors.setProperty("value", 3)
self.spinBox_mutualinfocls_neighbors.setObjectName("spinBox_mutualinfocls_neighbors")
self.gridLayout_22.addWidget(self.spinBox_mutualinfocls_neighbors, 5, 1, 1, 1)
self.label_29 = QtWidgets.QLabel(self.to_mutualinfo_cls)
self.label_29.setObjectName("label_29")
self.gridLayout_22.addWidget(self.label_29, 3, 0, 1, 1)
self.doubleSpinBox_mutualinfocls_minnum = QtWidgets.QDoubleSpinBox(self.to_mutualinfo_cls)
self.doubleSpinBox_mutualinfocls_minnum.setMaximum(1.0)
self.doubleSpinBox_mutualinfocls_minnum.setSingleStep(0.1)
self.doubleSpinBox_mutualinfocls_minnum.setProperty("value", 0.0)
self.doubleSpinBox_mutualinfocls_minnum.setObjectName("doubleSpinBox_mutualinfocls_minnum")
self.gridLayout_22.addWidget(self.doubleSpinBox_mutualinfocls_minnum, 3, 1, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_mutualinfo_cls)
self.to_mutualinfo_regression = QtWidgets.QWidget()
self.to_mutualinfo_regression.setObjectName("to_mutualinfo_regression")
self.gridLayout_12 = QtWidgets.QGridLayout(self.to_mutualinfo_regression)
self.gridLayout_12.setObjectName("gridLayout_12")
self.doubleSpinBox_mutualinforeg_minnum = QtWidgets.QDoubleSpinBox(self.to_mutualinfo_regression)
self.doubleSpinBox_mutualinforeg_minnum.setMaximum(1.0)
self.doubleSpinBox_mutualinforeg_minnum.setSingleStep(0.1)
self.doubleSpinBox_mutualinforeg_minnum.setProperty("value", 0.0)
self.doubleSpinBox_mutualinforeg_minnum.setObjectName("doubleSpinBox_mutualinforeg_minnum")
self.gridLayout_12.addWidget(self.doubleSpinBox_mutualinforeg_minnum, 1, 1, 1, 1)
self.label_32 = QtWidgets.QLabel(self.to_mutualinfo_regression)
self.label_32.setObjectName("label_32")
self.gridLayout_12.addWidget(self.label_32, 1, 0, 1, 1)
self.label_mutualinforeg = QtWidgets.QLabel(self.to_mutualinfo_regression)
self.label_mutualinforeg.setObjectName("label_mutualinforeg")
self.gridLayout_12.addWidget(self.label_mutualinforeg, 3, 0, 1, 1)
self.label_31 = QtWidgets.QLabel(self.to_mutualinfo_regression)
self.label_31.setObjectName("label_31")
self.gridLayout_12.addWidget(self.label_31, 0, 0, 1, 1)
self.label_33 = QtWidgets.QLabel(self.to_mutualinfo_regression)
self.label_33.setObjectName("label_33")
self.gridLayout_12.addWidget(self.label_33, 2, 0, 1, 1)
self.spinBox_mutualinforeg_num = QtWidgets.QSpinBox(self.to_mutualinfo_regression)
self.spinBox_mutualinforeg_num.setMaximum(10000)
self.spinBox_mutualinforeg_num.setSingleStep(5)
self.spinBox_mutualinforeg_num.setProperty("value", 10)
self.spinBox_mutualinforeg_num.setObjectName("spinBox_mutualinforeg_num")
self.gridLayout_12.addWidget(self.spinBox_mutualinforeg_num, 2, 1, 1, 1)
self.doubleSpinBox_mutualinforeg_maxnum = QtWidgets.QDoubleSpinBox(self.to_mutualinfo_regression)
self.doubleSpinBox_mutualinforeg_maxnum.setMaximum(1.0)
self.doubleSpinBox_mutualinforeg_maxnum.setSingleStep(0.1)
self.doubleSpinBox_mutualinforeg_maxnum.setProperty("value", 1.0)
self.doubleSpinBox_mutualinforeg_maxnum.setObjectName("doubleSpinBox_mutualinforeg_maxnum")
self.gridLayout_12.addWidget(self.doubleSpinBox_mutualinforeg_maxnum, 0, 1, 1, 1)
self.spinBox_mutualinforeg_neighbors = QtWidgets.QSpinBox(self.to_mutualinfo_regression)
self.spinBox_mutualinforeg_neighbors.setProperty("value", 3)
self.spinBox_mutualinforeg_neighbors.setObjectName("spinBox_mutualinforeg_neighbors")
self.gridLayout_12.addWidget(self.spinBox_mutualinforeg_neighbors, 3, 1, 1, 1)
spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_12.addItem(spacerItem9, 0, 2, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_mutualinfo_regression)
self.to_reliff = QtWidgets.QWidget()
self.to_reliff.setObjectName("to_reliff")
self.gridLayout_18 = QtWidgets.QGridLayout(self.to_reliff)
self.gridLayout_18.setObjectName("gridLayout_18")
self.label_relieffmax = QtWidgets.QLabel(self.to_reliff)
self.label_relieffmax.setObjectName("label_relieffmax")
self.gridLayout_18.addWidget(self.label_relieffmax, 0, 0, 1, 1)
self.doubleSpinBox_relieff_max = QtWidgets.QDoubleSpinBox(self.to_reliff)
self.doubleSpinBox_relieff_max.setMaximum(1.0)
self.doubleSpinBox_relieff_max.setSingleStep(0.1)
self.doubleSpinBox_relieff_max.setProperty("value", 1.0)
self.doubleSpinBox_relieff_max.setObjectName("doubleSpinBox_relieff_max")
self.gridLayout_18.addWidget(self.doubleSpinBox_relieff_max, 0, 1, 1, 1)
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_18.addItem(spacerItem10, 0, 2, 1, 1)
self.label_reliffmin = QtWidgets.QLabel(self.to_reliff)
self.label_reliffmin.setObjectName("label_reliffmin")
self.gridLayout_18.addWidget(self.label_reliffmin, 1, 0, 1, 1)
self.doubleSpinBox_relieff_min = QtWidgets.QDoubleSpinBox(self.to_reliff)
self.doubleSpinBox_relieff_min.setMaximum(1.0)
self.doubleSpinBox_relieff_min.setSingleStep(0.1)
self.doubleSpinBox_relieff_min.setObjectName("doubleSpinBox_relieff_min")
self.gridLayout_18.addWidget(self.doubleSpinBox_relieff_min, 1, 1, 1, 1)
self.label_34 = QtWidgets.QLabel(self.to_reliff)
self.label_34.setObjectName("label_34")
self.gridLayout_18.addWidget(self.label_34, 2, 0, 1, 1)
self.spinBox_relief_num = QtWidgets.QSpinBox(self.to_reliff)
self.spinBox_relief_num.setMaximum(10000)
self.spinBox_relief_num.setSingleStep(5)
self.spinBox_relief_num.setProperty("value", 10)
self.spinBox_relief_num.setObjectName("spinBox_relief_num")
self.gridLayout_18.addWidget(self.spinBox_relief_num, 2, 1, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_reliff)
self.to_anova = QtWidgets.QWidget()
self.to_anova.setObjectName("to_anova")
self.gridLayout_23 = QtWidgets.QGridLayout(self.to_anova)
self.gridLayout_23.setObjectName("gridLayout_23")
self.label_10 = QtWidgets.QLabel(self.to_anova)
self.label_10.setObjectName("label_10")
self.gridLayout_23.addWidget(self.label_10, 0, 0, 1, 1)
self.doubleSpinBox_anova_alpha_max = QtWidgets.QDoubleSpinBox(self.to_anova)
self.doubleSpinBox_anova_alpha_max.setMaximum(1.0)
self.doubleSpinBox_anova_alpha_max.setSingleStep(0.01)
self.doubleSpinBox_anova_alpha_max.setProperty("value", 1.0)
self.doubleSpinBox_anova_alpha_max.setObjectName("doubleSpinBox_anova_alpha_max")
self.gridLayout_23.addWidget(self.doubleSpinBox_anova_alpha_max, 0, 1, 1, 1)
self.label_17 = QtWidgets.QLabel(self.to_anova)
self.label_17.setObjectName("label_17")
self.gridLayout_23.addWidget(self.label_17, 1, 0, 1, 1)
self.doubleSpinBox_anova_alpha_min = QtWidgets.QDoubleSpinBox(self.to_anova)
self.doubleSpinBox_anova_alpha_min.setMaximum(1.0)
self.doubleSpinBox_anova_alpha_min.setSingleStep(0.01)
self.doubleSpinBox_anova_alpha_min.setProperty("value", 0.0)
self.doubleSpinBox_anova_alpha_min.setObjectName("doubleSpinBox_anova_alpha_min")
self.gridLayout_23.addWidget(self.doubleSpinBox_anova_alpha_min, 1, 1, 1, 1)
self.label_11 = QtWidgets.QLabel(self.to_anova)
self.label_11.setObjectName("label_11")
self.gridLayout_23.addWidget(self.label_11, 2, 0, 1, 1)
self.comboBox_anova_multicorrect = QtWidgets.QComboBox(self.to_anova)
self.comboBox_anova_multicorrect.setObjectName("comboBox_anova_multicorrect")
self.comboBox_anova_multicorrect.addItem("")
self.comboBox_anova_multicorrect.addItem("")
self.comboBox_anova_multicorrect.addItem("")
self.gridLayout_23.addWidget(self.comboBox_anova_multicorrect, 2, 1, 1, 1)
self.label_35 = QtWidgets.QLabel(self.to_anova)
self.label_35.setObjectName("label_35")
self.gridLayout_23.addWidget(self.label_35, 3, 0, 1, 1)
self.spinBox_anova_num = QtWidgets.QSpinBox(self.to_anova)
self.spinBox_anova_num.setMaximum(10000)
self.spinBox_anova_num.setSingleStep(5)
self.spinBox_anova_num.setProperty("value", 10)
self.spinBox_anova_num.setObjectName("spinBox_anova_num")
self.gridLayout_23.addWidget(self.spinBox_anova_num, 3, 1, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_anova)
self.to_rfe = QtWidgets.QWidget()
self.to_rfe.setObjectName("to_rfe")
self.gridLayout_20 = QtWidgets.QGridLayout(self.to_rfe)
self.gridLayout_20.setObjectName("gridLayout_20")
self.label_15 = QtWidgets.QLabel(self.to_rfe)
self.label_15.setObjectName("label_15")
self.gridLayout_20.addWidget(self.label_15, 1, 0, 1, 1)
self.label_14 = QtWidgets.QLabel(self.to_rfe)
self.label_14.setObjectName("label_14")
self.gridLayout_20.addWidget(self.label_14, 0, 0, 1, 1)
self.label_16 = QtWidgets.QLabel(self.to_rfe)
self.label_16.setObjectName("label_16")
self.gridLayout_20.addWidget(self.label_16, 2, 0, 1, 1)
self.comboBox_rfe_estimator = QtWidgets.QComboBox(self.to_rfe)
self.comboBox_rfe_estimator.setObjectName("comboBox_rfe_estimator")
self.comboBox_rfe_estimator.addItem("")
self.comboBox_rfe_estimator.addItem("")
self.comboBox_rfe_estimator.addItem("")
self.comboBox_rfe_estimator.addItem("")
self.comboBox_rfe_estimator.addItem("")
self.comboBox_rfe_estimator.addItem("")
self.comboBox_rfe_estimator.addItem("")
self.gridLayout_20.addWidget(self.comboBox_rfe_estimator, 2, 2, 1, 1)
self.label_19 = QtWidgets.QLabel(self.to_rfe)
self.label_19.setObjectName("label_19")
self.gridLayout_20.addWidget(self.label_19, 3, 0, 1, 1)
self.spinBox_rfe_nfold = QtWidgets.QSpinBox(self.to_rfe)
self.spinBox_rfe_nfold.setMinimum(2)
self.spinBox_rfe_nfold.setMaximum(1000)
self.spinBox_rfe_nfold.setProperty("value", 5)
self.spinBox_rfe_nfold.setObjectName("spinBox_rfe_nfold")
self.gridLayout_20.addWidget(self.spinBox_rfe_nfold, 1, 2, 1, 1)
self.doubleSpinBox_rfe_step = QtWidgets.QDoubleSpinBox(self.to_rfe)
self.doubleSpinBox_rfe_step.setMaximum(10000.0)
self.doubleSpinBox_rfe_step.setSingleStep(0.1)
self.doubleSpinBox_rfe_step.setProperty("value", 0.1)
self.doubleSpinBox_rfe_step.setObjectName("doubleSpinBox_rfe_step")
self.gridLayout_20.addWidget(self.doubleSpinBox_rfe_step, 0, 2, 1, 1)
self.spinBox_rfe_njobs = QtWidgets.QSpinBox(self.to_rfe)
self.spinBox_rfe_njobs.setMinimum(-1)
self.spinBox_rfe_njobs.setProperty("value", -1)
self.spinBox_rfe_njobs.setObjectName("spinBox_rfe_njobs")
self.gridLayout_20.addWidget(self.spinBox_rfe_njobs, 3, 2, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_rfe)
self.to_l1 = QtWidgets.QWidget()
self.to_l1.setObjectName("to_l1")
self.gridLayout_24 = QtWidgets.QGridLayout(self.to_l1)
self.gridLayout_24.setObjectName("gridLayout_24")
self.label_39 = QtWidgets.QLabel(self.to_l1)
self.label_39.setObjectName("label_39")
self.gridLayout_24.addWidget(self.label_39, 0, 0, 1, 1)
self.doubleSpinBox_l1_alpha_min = QtWidgets.QDoubleSpinBox(self.to_l1)
self.doubleSpinBox_l1_alpha_min.setMaximum(100000.0)
self.doubleSpinBox_l1_alpha_min.setSingleStep(100.0)
self.doubleSpinBox_l1_alpha_min.setProperty("value", 0.0)
self.doubleSpinBox_l1_alpha_min.setObjectName("doubleSpinBox_l1_alpha_min")
self.gridLayout_24.addWidget(self.doubleSpinBox_l1_alpha_min, 1, 1, 1, 1)
self.spinBox_l1_num = QtWidgets.QSpinBox(self.to_l1)
self.spinBox_l1_num.setMaximum(10000)
self.spinBox_l1_num.setSingleStep(5)
self.spinBox_l1_num.setProperty("value", 10)
self.spinBox_l1_num.setObjectName("spinBox_l1_num")
self.gridLayout_24.addWidget(self.spinBox_l1_num, 2, 1, 1, 1)
self.doubleSpinBox_l1_alpha_max = QtWidgets.QDoubleSpinBox(self.to_l1)
self.doubleSpinBox_l1_alpha_max.setMaximum(100000.0)
self.doubleSpinBox_l1_alpha_max.setSingleStep(100.0)
self.doubleSpinBox_l1_alpha_max.setProperty("value", 100.0)
self.doubleSpinBox_l1_alpha_max.setObjectName("doubleSpinBox_l1_alpha_max")
self.gridLayout_24.addWidget(self.doubleSpinBox_l1_alpha_max, 0, 1, 1, 1)
self.label_40 = QtWidgets.QLabel(self.to_l1)
self.label_40.setObjectName("label_40")
self.gridLayout_24.addWidget(self.label_40, 1, 0, 1, 1)
self.label_41 = QtWidgets.QLabel(self.to_l1)
self.label_41.setObjectName("label_41")
self.gridLayout_24.addWidget(self.label_41, 2, 0, 1, 1)
spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_24.addItem(spacerItem11, 0, 2, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_l1)
self.to_elasticnet = QtWidgets.QWidget()
self.to_elasticnet.setObjectName("to_elasticnet")
self.gridLayout_25 = QtWidgets.QGridLayout(self.to_elasticnet)
self.gridLayout_25.setObjectName("gridLayout_25")
self.label_43 = QtWidgets.QLabel(self.to_elasticnet)
self.label_43.setObjectName("label_43")
self.gridLayout_25.addWidget(self.label_43, 0, 0, 1, 1)
self.doubleSpinBox_elasticnet_alpha_max = QtWidgets.QDoubleSpinBox(self.to_elasticnet)
self.doubleSpinBox_elasticnet_alpha_max.setMaximum(100000.0)
self.doubleSpinBox_elasticnet_alpha_max.setSingleStep(100.0)
self.doubleSpinBox_elasticnet_alpha_max.setProperty("value", 100.0)
self.doubleSpinBox_elasticnet_alpha_max.setObjectName("doubleSpinBox_elasticnet_alpha_max")
self.gridLayout_25.addWidget(self.doubleSpinBox_elasticnet_alpha_max, 0, 1, 1, 1)
spacerItem12 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_25.addItem(spacerItem12, 0, 2, 1, 1)
self.label_44 = QtWidgets.QLabel(self.to_elasticnet)
self.label_44.setObjectName("label_44")
self.gridLayout_25.addWidget(self.label_44, 1, 0, 1, 1)
self.doubleSpinBox_elasticnet_alpha_min = QtWidgets.QDoubleSpinBox(self.to_elasticnet)
self.doubleSpinBox_elasticnet_alpha_min.setMaximum(100000.0)
self.doubleSpinBox_elasticnet_alpha_min.setSingleStep(100.0)
self.doubleSpinBox_elasticnet_alpha_min.setProperty("value", 0.0)
self.doubleSpinBox_elasticnet_alpha_min.setObjectName("doubleSpinBox_elasticnet_alpha_min")
self.gridLayout_25.addWidget(self.doubleSpinBox_elasticnet_alpha_min, 1, 1, 1, 1)
self.label_42 = QtWidgets.QLabel(self.to_elasticnet)
self.label_42.setObjectName("label_42")
self.gridLayout_25.addWidget(self.label_42, 2, 0, 1, 1)
self.spinBox_elasticnet_num = QtWidgets.QSpinBox(self.to_elasticnet)
self.spinBox_elasticnet_num.setMaximum(10000)
self.spinBox_elasticnet_num.setSingleStep(5)
self.spinBox_elasticnet_num.setProperty("value", 10)
self.spinBox_elasticnet_num.setObjectName("spinBox_elasticnet_num")
self.gridLayout_25.addWidget(self.spinBox_elasticnet_num, 2, 1, 1, 1)
self.label_45 = QtWidgets.QLabel(self.to_elasticnet)
self.label_45.setObjectName("label_45")
self.gridLayout_25.addWidget(self.label_45, 3, 0, 1, 1)
self.doubleSpinBox_elasticnet_l1ratio_max = QtWidgets.QDoubleSpinBox(self.to_elasticnet)
self.doubleSpinBox_elasticnet_l1ratio_max.setMaximum(1.0)
self.doubleSpinBox_elasticnet_l1ratio_max.setSingleStep(0.1)
self.doubleSpinBox_elasticnet_l1ratio_max.setProperty("value", 1.0)
self.doubleSpinBox_elasticnet_l1ratio_max.setObjectName("doubleSpinBox_elasticnet_l1ratio_max")
self.gridLayout_25.addWidget(self.doubleSpinBox_elasticnet_l1ratio_max, 3, 1, 1, 1)
self.label_46 = QtWidgets.QLabel(self.to_elasticnet)
self.label_46.setObjectName("label_46")
self.gridLayout_25.addWidget(self.label_46, 4, 0, 1, 1)
self.doubleSpinBox_elasticnet_l1ratio_min = QtWidgets.QDoubleSpinBox(self.to_elasticnet)
self.doubleSpinBox_elasticnet_l1ratio_min.setMaximum(1.0)
self.doubleSpinBox_elasticnet_l1ratio_min.setSingleStep(0.1)
self.doubleSpinBox_elasticnet_l1ratio_min.setProperty("value", 0.0)
self.doubleSpinBox_elasticnet_l1ratio_min.setObjectName("doubleSpinBox_elasticnet_l1ratio_min")
self.gridLayout_25.addWidget(self.doubleSpinBox_elasticnet_l1ratio_min, 4, 1, 1, 1)
self.label_47 = QtWidgets.QLabel(self.to_elasticnet)
self.label_47.setObjectName("label_47")
self.gridLayout_25.addWidget(self.label_47, 5, 0, 1, 1)
self.spinBox_l1ratio_num = QtWidgets.QSpinBox(self.to_elasticnet)
self.spinBox_l1ratio_num.setMaximum(10000)
self.spinBox_l1ratio_num.setSingleStep(5)
self.spinBox_l1ratio_num.setProperty("value", 10)
self.spinBox_l1ratio_num.setObjectName("spinBox_l1ratio_num")
self.gridLayout_25.addWidget(self.spinBox_l1ratio_num, 5, 1, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_elasticnet)
self.to_none_featureselection = QtWidgets.QWidget()
self.to_none_featureselection.setObjectName("to_none_featureselection")
self.gridLayout_27 = QtWidgets.QGridLayout(self.to_none_featureselection)
self.gridLayout_27.setObjectName("gridLayout_27")
self.label_none_featureselection = QtWidgets.QLabel(self.to_none_featureselection)
self.label_none_featureselection.setText("")
self.label_none_featureselection.setObjectName("label_none_featureselection")
self.gridLayout_27.addWidget(self.label_none_featureselection, 0, 0, 1, 1)
self.stackedWidget_feature_selection.addWidget(self.to_none_featureselection)
self.gridLayout_9.addWidget(self.stackedWidget_feature_selection, 0, 1, 1, 1)
self.gridLayout_30.addWidget(self.groupBox_feature_selection_input, 0, 2, 2, 1)
spacerItem13 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_30.addItem(spacerItem13, 1, 1, 1, 1)
self.tabWidget_items.addTab(self.tabWidget_itemsPage3, "")
self.tabWidget_itemsPage4 = QtWidgets.QWidget()
self.tabWidget_itemsPage4.setObjectName("tabWidget_itemsPage4")
self.gridLayout_31 = QtWidgets.QGridLayout(self.tabWidget_itemsPage4)
self.gridLayout_31.setObjectName("gridLayout_31")
self.groupBox_5 = QtWidgets.QGroupBox(self.tabWidget_itemsPage4)
self.groupBox_5.setObjectName("groupBox_5")
self.gridLayout_7 = QtWidgets.QGridLayout(self.groupBox_5)
self.gridLayout_7.setObjectName("gridLayout_7")
self.radioButton_extractionunder = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButton_extractionunder.setObjectName("radioButton_extractionunder")
self.gridLayout_7.addWidget(self.radioButton_extractionunder, 8, 0, 1, 1)
self.line_3 = QtWidgets.QFrame(self.groupBox_5)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.gridLayout_7.addWidget(self.line_3, 5, 0, 1, 1)
self.radioButton_smotencover = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButton_smotencover.setObjectName("radioButton_smotencover")
self.gridLayout_7.addWidget(self.radioButton_smotencover, 3, 0, 1, 1)
self.radioButton_nearmissunder = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButton_nearmissunder.setObjectName("radioButton_nearmissunder")
self.gridLayout_7.addWidget(self.radioButton_nearmissunder, 10, 0, 1, 1)
self.label_12 = QtWidgets.QLabel(self.groupBox_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_12.sizePolicy().hasHeightForWidth())
self.label_12.setSizePolicy(sizePolicy)
self.label_12.setObjectName("label_12")
self.gridLayout_7.addWidget(self.label_12, 0, 0, 1, 1)
self.radioButton_randunder = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButton_randunder.setObjectName("radioButton_randunder")
self.gridLayout_7.addWidget(self.radioButton_randunder, 7, 0, 1, 1)
self.radioButton_bsmoteover = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButton_bsmoteover.setObjectName("radioButton_bsmoteover")
self.gridLayout_7.addWidget(self.radioButton_bsmoteover, 4, 0, 1, 1)
self.radioButton_randover = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButton_randover.setCheckable(True)
self.radioButton_randover.setChecked(False)
self.radioButton_randover.setObjectName("radioButton_randover")
self.gridLayout_7.addWidget(self.radioButton_randover, 1, 0, 1, 1)
self.label_13 = QtWidgets.QLabel(self.groupBox_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_13.sizePolicy().hasHeightForWidth())
self.label_13.setSizePolicy(sizePolicy)
self.label_13.setObjectName("label_13")
self.gridLayout_7.addWidget(self.label_13, 6, 0, 1, 1)
self.radioButton_smoteover = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButton_smoteover.setObjectName("radioButton_smoteover")
self.gridLayout_7.addWidget(self.radioButton_smoteover, 2, 0, 1, 1)
self.radioButton_cludterunder = QtWidgets.QRadioButton(self.groupBox_5)
self.radioButton_cludterunder.setObjectName("radioButton_cludterunder")
self.gridLayout_7.addWidget(self.radioButton_cludterunder, 9, 0, 1, 1)
self.gridLayout_31.addWidget(self.groupBox_5, 0, 0, 1, 1)
spacerItem14 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_31.addItem(spacerItem14, 0, 1, 1, 1)
self.groupBox_4 = QtWidgets.QGroupBox(self.tabWidget_itemsPage4)
self.groupBox_4.setObjectName("groupBox_4")
self.gridLayout_31.addWidget(self.groupBox_4, 0, 2, 1, 1)
self.tabWidget_items.addTab(self.tabWidget_itemsPage4, "")
self.gridLayout_32.addWidget(self.tabWidget_items, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 892, 26))
self.menubar.setObjectName("menubar")
self.menuConfiguration_file_f = QtWidgets.QMenu(self.menubar)
self.menuConfiguration_file_f.setObjectName("menuConfiguration_file_f")
self.menuHelp_G = QtWidgets.QMenu(self.menubar)
self.menuHelp_G.setObjectName("menuHelp_G")
self.menuSkin = QtWidgets.QMenu(self.menubar)
self.menuSkin.setObjectName("menuSkin")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionLoad_configuration = QtWidgets.QAction(MainWindow)
self.actionLoad_configuration.setObjectName("actionLoad_configuration")
self.actionSave_configuration = QtWidgets.QAction(MainWindow)
self.actionSave_configuration.setObjectName("actionSave_configuration")
self.actionDark = QtWidgets.QAction(MainWindow)
self.actionDark.setObjectName("actionDark")
self.actionBlack = QtWidgets.QAction(MainWindow)
self.actionBlack.setObjectName("actionBlack")
self.actionDarkOrange = QtWidgets.QAction(MainWindow)
self.actionDarkOrange.setObjectName("actionDarkOrange")
self.actionGray = QtWidgets.QAction(MainWindow)
self.actionGray.setObjectName("actionGray")
self.actionBlue = QtWidgets.QAction(MainWindow)
self.actionBlue.setObjectName("actionBlue")
self.actionNavy = QtWidgets.QAction(MainWindow)
self.actionNavy.setObjectName("actionNavy")
self.actionClassic = QtWidgets.QAction(MainWindow)
self.actionClassic.setObjectName("actionClassic")
self.menuConfiguration_file_f.addAction(self.actionLoad_configuration)
self.menuConfiguration_file_f.addAction(self.actionSave_configuration)
self.menuSkin.addAction(self.actionDark)
self.menuSkin.addAction(self.actionBlack)
self.menuSkin.addAction(self.actionDarkOrange)
self.menuSkin.addAction(self.actionGray)
self.menuSkin.addAction(self.actionBlue)
self.menuSkin.addAction(self.actionNavy)
self.menuSkin.addAction(self.actionClassic)
self.menubar.addAction(self.menuConfiguration_file_f.menuAction())
self.menubar.addAction(self.menuHelp_G.menuAction())
self.menubar.addAction(self.menuSkin.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget_items.setCurrentIndex(1)
self.stackedWidget_preprocessing_methods.setCurrentIndex(3)
self.stackedWidget_dimreduction.setCurrentIndex(4)
self.stackedWidget_feature_selection.setCurrentIndex(11)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install the (locale-aware) display strings on every widget.

    NOTE(review): this method belongs to a pyuic5-generated UI class;
    regeneration from the .ui file overwrites manual edits, so prefer
    changing the .ui source.  All strings are routed through
    ``QCoreApplication.translate`` so Qt's translation machinery can
    substitute localized text at runtime.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
    # --- "Preprocessing" tab ---
    self.groupBox_preprocessing_setting.setTitle(_translate("MainWindow", "Setting"))
    self.label_scaling_max.setText(_translate("MainWindow", "Max"))
    self.lineEdit_scaling_min.setText(_translate("MainWindow", "0"))
    self.label_scaling_min.setText(_translate("MainWindow", "Min"))
    self.lineEdit_scaling_max.setText(_translate("MainWindow", "1"))
    self.groupBox_methods.setTitle(_translate("MainWindow", "Methods"))
    self.radioButton_none_methods.setText(_translate("MainWindow", "None"))
    self.radioButton_demean.setText(_translate("MainWindow", "De-mean"))
    self.radioButton_zscore.setText(_translate("MainWindow", "Z-score normalization"))
    self.radioButton_scaling.setText(_translate("MainWindow", "Scaling"))
    self.groupBox_level.setTitle(_translate("MainWindow", "Level"))
    self.radioButton_grouplevel.setText(_translate("MainWindow", "Group"))
    self.radioButton_subjectlevel.setText(_translate("MainWindow", "Subject"))
    self.tabWidget_items.setTabText(self.tabWidget_items.indexOf(self.tabWidget_itemsPage1), _translate("MainWindow", " Preprocessing "))
    # --- "Dimension reduction" tab ---
    self.groupBox.setTitle(_translate("MainWindow", "Methods"))
    self.radioButton_lda.setText(_translate("MainWindow", "Latent Dirichlet Allocation"))
    self.radioButton_ica.setText(_translate("MainWindow", "Independent component analysis"))
    self.radioButton_nmf.setText(_translate("MainWindow", "Non-negative matrix factorization"))
    self.radioButton_pca.setText(_translate("MainWindow", "Principal component analysis"))
    self.radioButton_none.setText(_translate("MainWindow", "None"))
    self.groupBox_2.setTitle(_translate("MainWindow", "Setting"))
    self.label_n_components.setText(_translate("MainWindow", "Max_components"))
    self.label_2.setText(_translate("MainWindow", "Min_components"))
    self.label_36.setText(_translate("MainWindow", "Number"))
    self.label_n_ic.setText(_translate("MainWindow", "Max_ics"))
    self.label_37.setText(_translate("MainWindow", "Number"))
    self.label_9.setText(_translate("MainWindow", "Min_ics"))
    self.label_ida.setText(_translate("MainWindow", "N/A"))
    self.label_label_nmf_mincompnents.setText(_translate("MainWindow", "Min_components"))
    self.label_nmf_maxcompnents.setText(_translate("MainWindow", "Max_components"))
    self.label_38.setText(_translate("MainWindow", "Number"))
    self.label_nmf_init.setText(_translate("MainWindow", "Init"))
    self.comboBox_2.setItemText(0, _translate("MainWindow", "random"))
    self.tabWidget_items.setTabText(self.tabWidget_items.indexOf(self.tabWidget_itemsPage2), _translate("MainWindow", " Dimension reduction "))
    # --- "Feature selection" tab ---
    self.groupBox_3.setTitle(_translate("MainWindow", "Methods"))
    self.label_filter.setText(_translate("MainWindow", "Filter"))
    self.radioButton_variance_threshold.setText(_translate("MainWindow", "Variance threshold"))
    self.radioButton_correlation.setText(_translate("MainWindow", "Correlation"))
    self.radioButton_distancecorrelation.setText(_translate("MainWindow", "Distance correlation"))
    self.radioButton_fscore.setText(_translate("MainWindow", "F-Score (classification)"))
    self.radioButton_mutualinfo_cls.setText(_translate("MainWindow", "Mutual information (classification)"))
    self.radioButton_mutualinfo_regression.setText(_translate("MainWindow", "Mutual information (regression)"))
    self.radioButton_relieff.setText(_translate("MainWindow", "ReliefF"))
    self.radioButton_anova.setText(_translate("MainWindow", "ANOVA/Ttest2 (classification)"))
    self.label_wrapper.setText(_translate("MainWindow", "Wrapper"))
    self.radioButton_rfe.setText(_translate("MainWindow", "RFE"))
    self.label_embedded.setText(_translate("MainWindow", "Embedded"))
    self.radioButton_l1.setText(_translate("MainWindow", "L1 regularization (Lasso)"))
    self.radioButton_elasticnet.setText(_translate("MainWindow", "L1 + L2 regularization (Elastic net regression)"))
    self.radioButton_featureselection_none.setText(_translate("MainWindow", "None"))
    self.groupBox_feature_selection_input.setTitle(_translate("MainWindow", "Setting"))
    self.label_variancesthreshold.setText(_translate("MainWindow", "Max_threshold"))
    self.label_18.setText(_translate("MainWindow", "Min_threshold"))
    self.label_20.setText(_translate("MainWindow", "Number"))
    self.label_7.setText(_translate("MainWindow", "Max_abs_coef"))
    self.label_21.setText(_translate("MainWindow", "Number"))
    self.label_8.setText(_translate("MainWindow", "Min_abs_coef"))
    self.label_22.setText(_translate("MainWindow", "Number"))
    self.label_23.setText(_translate("MainWindow", "Max_abs_coef"))
    self.label_24.setText(_translate("MainWindow", "Min_abs_coef"))
    self.label_27.setText(_translate("MainWindow", "Min_num"))
    self.label_26.setText(_translate("MainWindow", "Max_num"))
    self.label_25.setText(_translate("MainWindow", "Number"))
    self.label_mutualinfo_cls.setText(_translate("MainWindow", "N_neighbors"))
    self.label_28.setText(_translate("MainWindow", "Number"))
    self.label_30.setText(_translate("MainWindow", "Max_num"))
    self.label_29.setText(_translate("MainWindow", "Min_num"))
    self.label_32.setText(_translate("MainWindow", "Min_num"))
    self.label_mutualinforeg.setText(_translate("MainWindow", "N_neighbors"))
    self.label_31.setText(_translate("MainWindow", "Max_num"))
    self.label_33.setText(_translate("MainWindow", "Number"))
    self.label_relieffmax.setText(_translate("MainWindow", "Max_features"))
    self.label_reliffmin.setText(_translate("MainWindow", "Min_feature"))
    self.label_34.setText(_translate("MainWindow", "Number"))
    self.label_10.setText(_translate("MainWindow", "Max_alpha"))
    self.label_17.setText(_translate("MainWindow", "Min_alpha"))
    self.label_11.setText(_translate("MainWindow", "Multi_correct"))
    self.comboBox_anova_multicorrect.setItemText(0, _translate("MainWindow", "None"))
    self.comboBox_anova_multicorrect.setItemText(1, _translate("MainWindow", "FDR"))
    self.comboBox_anova_multicorrect.setItemText(2, _translate("MainWindow", "FWE"))
    self.label_35.setText(_translate("MainWindow", "Number"))
    self.label_15.setText(_translate("MainWindow", "N-fold"))
    self.label_14.setText(_translate("MainWindow", "Step"))
    self.label_16.setText(_translate("MainWindow", "Estimator"))
    self.comboBox_rfe_estimator.setItemText(0, _translate("MainWindow", "SVM"))
    self.comboBox_rfe_estimator.setItemText(1, _translate("MainWindow", "Linear regression"))
    self.comboBox_rfe_estimator.setItemText(2, _translate("MainWindow", "Logistic regression"))
    self.comboBox_rfe_estimator.setItemText(3, _translate("MainWindow", "Ridge regression"))
    self.comboBox_rfe_estimator.setItemText(4, _translate("MainWindow", "Lasso regression"))
    self.comboBox_rfe_estimator.setItemText(5, _translate("MainWindow", "Bayesian ridge regression"))
    self.comboBox_rfe_estimator.setItemText(6, _translate("MainWindow", "Gaussian processes"))
    self.label_19.setText(_translate("MainWindow", "N_jobs"))
    self.label_39.setText(_translate("MainWindow", "Max_alpha"))
    self.label_40.setText(_translate("MainWindow", "Min_alpha"))
    self.label_41.setText(_translate("MainWindow", "Number"))
    self.label_43.setText(_translate("MainWindow", "Max_alpha"))
    self.label_44.setText(_translate("MainWindow", "Min_alpha"))
    self.label_42.setText(_translate("MainWindow", "Number_alpha"))
    self.label_45.setText(_translate("MainWindow", "Max_l1_ratio"))
    self.label_46.setText(_translate("MainWindow", "Min_l1_ratio"))
    self.label_47.setText(_translate("MainWindow", "Number_l1_ratio"))
    self.tabWidget_items.setTabText(self.tabWidget_items.indexOf(self.tabWidget_itemsPage3), _translate("MainWindow", " Feature selection "))
    # --- "Unbalance treatment" tab ---
    self.groupBox_5.setTitle(_translate("MainWindow", "Methods"))
    self.radioButton_extractionunder.setText(_translate("MainWindow", "Extraction of majority-minority Tomek links"))
    self.radioButton_smotencover.setText(_translate("MainWindow", "SMOTENC - SMOTE for Nominal Continuous "))
    self.radioButton_nearmissunder.setText(_translate("MainWindow", "NearMiss-(1 and 2 and 3) "))
    self.label_12.setText(_translate("MainWindow", "Over-sampling the minority class"))
    self.radioButton_randunder.setText(_translate("MainWindow", "Random majority under-sampling with replacement"))
    self.radioButton_bsmoteover.setText(_translate("MainWindow", "bSMOTE(1 and 2) - Borderline SMOTE of types 1 and 2"))
    self.radioButton_randover.setText(_translate("MainWindow", "Random minority over-sampling with replacement"))
    self.label_13.setText(_translate("MainWindow", "Under-sampling the majority class(es)"))
    self.radioButton_smoteover.setText(_translate("MainWindow", "SMOTE - Synthetic Minority Over-sampling Technique"))
    self.radioButton_cludterunder.setText(_translate("MainWindow", "Under-sampling with Cluster Centroids"))
    self.groupBox_4.setTitle(_translate("MainWindow", "Setting"))
    self.tabWidget_items.setTabText(self.tabWidget_items.indexOf(self.tabWidget_itemsPage4), _translate("MainWindow", " Unbalance treatment "))
    # --- Menus and actions ---
    self.menuConfiguration_file_f.setTitle(_translate("MainWindow", "Configuration file (&F)"))
    self.menuHelp_G.setTitle(_translate("MainWindow", "Help (&H)"))
    self.menuSkin.setTitle(_translate("MainWindow", "Skin"))
    self.actionLoad_configuration.setText(_translate("MainWindow", "Load configuration"))
    self.actionSave_configuration.setText(_translate("MainWindow", "Save configuration"))
    self.actionDark.setText(_translate("MainWindow", "Dark"))
    self.actionBlack.setText(_translate("MainWindow", "Black"))
    self.actionDarkOrange.setText(_translate("MainWindow", "DarkOrange"))
    self.actionGray.setText(_translate("MainWindow", "Gray"))
    self.actionBlue.setText(_translate("MainWindow", "Blue"))
    self.actionNavy.setText(_translate("MainWindow", "Navy"))
    self.actionClassic.setText(_translate("MainWindow", "Classic"))
|
dongmengshi/easylearn | eslearn/visualization/lc_barplot.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 19:12:26 2018
bar plot
当我们的数据是num_subj*num_var,且有几个诊断组时,
我们一般希望把var name作为x,把var value作为y,把诊断组作为hue
来做bar,以便于观察每个var的组间差异。
此时,用于sns的特殊性,我们要将数据变换未长列的形式。
行数目为:num_subj*num_var。列数目=3,分别是hue,x以及y
input:
data_path=r'D:\others\彦鸽姐\final_data.xlsx'
x_location=np.arange(5,13,1)#筛选数据的列位置
@author: lenovo
"""
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
class BarPlot():
    """Grouped bar plot (with hue) of several variables from an Excel sheet.

    The input sheet holds one row per subject.  ``x_location`` selects the
    variable columns (labels or integer positions) and ``hue_name`` names
    the grouping (diagnosis) column.  The wide table is reshaped to long
    format -- (group, variable name, value) -- so seaborn can draw one bar
    per variable per group.
    """

    def __init__(self,
                 data_path=r'D:\others\彦鸽姐\total-122.xlsx',
                 x_location=np.arange(5, 13, 1),
                 hue_name='分组',
                 hue_order=None,
                 if_save_axure=0,
                 savename='violin.tiff'):
        """Store the plot configuration.

        Parameters
        ----------
        data_path : str
            Excel file with one row per subject.
        x_location : sequence of column labels or integer positions
            Which columns hold the variables to plot.
        hue_name : str
            Name of the grouping column.
        hue_order : list or None
            Order of the hue levels in the plot.
        if_save_axure : int or bool
            Whether to save the figure (name kept for backward
            compatibility; "figure" is meant).
        savename : str
            File name used when saving the figure.
        """
        self.data_path = data_path
        self.x_location = x_location
        self.hue_name = hue_name
        self.hue_order = hue_order
        self.if_save_axure = if_save_axure
        self.savename = savename
        self.x_name = 'var_name'  # long-format column holding variable names
        self.y_name = 'value'     # long-format column holding the values

    def load(self):
        """Read the Excel sheet into a DataFrame."""
        # BUG FIX: ``index`` is not a valid ``pd.read_excel`` keyword and
        # raises TypeError on current pandas versions; simply read the sheet.
        return pd.read_excel(self.data_path)

    def data_preparation(self, df):
        """Reshape the selected wide columns into a 3-column long table.

        Returns a DataFrame with columns [hue_name, 'var_name', 'value'] and
        n_subjects * n_variables rows (all subjects of the first variable,
        then all subjects of the second, ...).
        """
        # Select the variable columns: try labels first, fall back to
        # integer positions.
        # TODO: make self.x_location handling smarter.
        try:
            df_selected = df.loc[:, self.x_location]
        except Exception:
            df_selected = df.iloc[:, self.x_location]
        n_subj, n_col = df_selected.shape
        # BUG FIX: the previous implementation concatenated differently
        # named Series (and duplicate indexes), which fails on modern
        # pandas as soon as more than one column is selected.  Build the
        # three long columns explicitly instead.
        values = pd.concat(
            [df_selected.iloc[:, nc] for nc in range(n_col)],
            ignore_index=True)
        var_names = pd.Series(
            [name for name in df_selected.columns for _ in range(n_subj)])
        groups = pd.concat([df[self.hue_name]] * n_col, ignore_index=True)
        data = pd.DataFrame({
            self.hue_name: groups,
            self.x_name: var_names,
            self.y_name: values,
        })
        return data

    def plot(self, data):
        """Draw the grouped bar plot; optionally save it to ``savename``."""
        plt.figure(figsize=(12, 6))
        ax = sns.barplot(x=self.x_name,
                         y=self.y_name,
                         hue=self.hue_name,
                         data=data,
                         hue_order=self.hue_order,
                         ci='sd',
                         orient="v",
                         palette='Set2',
                         n_boot=1000,
                         saturation=0.65,
                         capsize=0.01,
                         errwidth=4,
                         units=None,
                         linewidth=5,
                         errcolor=".2",
                         edgecolor=".2")
        ax1 = plt.gca()
        ax1.patch.set_facecolor("w")
        plt.legend('')
        # Style the tick labels.
        xticklabel = ax.get_xticklabels()
        yticklabel = ax.get_yticklabels()
        plt.setp(xticklabel, size=15, rotation=0, horizontalalignment='right')
        plt.setp(yticklabel, size=15, rotation=0, horizontalalignment='right')
        sns.despine()  # remove the top/right spines
        # save figure
        if self.if_save_axure:
            # BUG FIX: matplotlib Axes has no ``get_axure``/``saveax``
            # methods; use ``get_figure``/``savefig``.
            fig = ax.get_figure()
            fig.savefig(self.savename, dpi=1200, bbox_inches='tight')
if __name__ == "__main__":
    # Demo: load the sheet, reshape columns at positions 2 and 3 to long
    # format and draw the grouped bar plot (figure not saved: if_save_axure=0).
    sel = BarPlot(data_path=r'D:\WorkStation_2018\WorkStation_dynamicFC_V3\Data\results\results_dfc\results_of_individual\temploral_properties.xlsx',
                  x_location=[2,3],
                  hue_name='group',
                  hue_order=[1,3,2,4],
                  if_save_axure=0,
                  savename='violin.tif')
    df =sel.load()
    data = sel.data_preparation(df)
    sel.plot(data)
    # plt.savefig('NT1.tif',dpi=1200)
|
dongmengshi/easylearn | eslearn/feature_engineering/pipeline/grid_search_cv/plot_compare_reduction.py | <reponame>dongmengshi/easylearn<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=================================================================
Selecting dimensionality reduction with Pipeline and GridSearchCV
=================================================================
This example constructs a pipeline that does dimensionality
reduction followed by prediction with a support vector
classifier. It demonstrates the use of ``GridSearchCV`` and
``Pipeline`` to optimize over different classes of estimators in a
single CV run -- unsupervised ``PCA`` and ``NMF`` dimensionality
reductions are compared to univariate feature selection during
the grid search.
Additionally, ``Pipeline`` can be instantiated with the ``memory``
argument to memoize the transformers within the pipeline, avoiding to fit
again the same transformers over and over.
Note that the use of ``memory`` to enable caching becomes interesting when the
fitting of a transformer is costly.
###############################################################################
Illustration of ``Pipeline`` and ``GridSearchCV``
###############################################################################
This section illustrates the use of a ``Pipeline`` with ``GridSearchCV``
"""
# Authors: <NAME>, <NAME>, <NAME>
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
from joblib import Memory
from shutil import rmtree
print(__doc__)
# On-disk cache: transformer fits inside the pipeline are memoized so the
# grid search does not refit identical transformers over and over.
location = 'cachedir'
memory = Memory(location=location, verbose=10)
pipe = Pipeline([
    # the reduce_dim stage is populated by the param_grid
    ('reduce_dim', NMF()),
    ('feature_selection', SelectKBest(chi2)),
    ('classify', LogisticRegression(solver='saga', penalty='l1'))
],
    memory=memory
)
N_FEATUREDIM_OPTIONS = [2, 4, 8]  # candidates for reduce_dim__n_components
N_FEATURES_OPTIONS = [2, 4, 8]    # candidates for SelectKBest's k
C_OPTIONS = [1]
# NOTE(review): LogisticRegression only honours l1_ratio when
# penalty='elasticnet'; with penalty='l1' these candidates look inert (and
# newer sklearn versions may warn/raise) -- confirm intent.
l1_ratio = [0, 0.1, 0.5, 1]
param_grid = [
    {
        'reduce_dim__n_components': N_FEATUREDIM_OPTIONS,
        'feature_selection__k': N_FEATURES_OPTIONS,
        'classify__C': C_OPTIONS,
        'classify__l1_ratio': l1_ratio,
    },
]
# NOTE(review): labels inherited from the upstream scikit-learn example;
# this pipeline actually reduces with NMF followed by SelectKBest, so the
# legend entries below do not describe the grid anymore -- verify before
# interpreting the plot.
reducer_labels = ['PCA', 'KBest(chi2)']
grid = GridSearchCV(pipe, n_jobs=1, param_grid=param_grid)
X, y = load_digits(return_X_y=True)
grid.fit(X, y)
# Delete the temporary cache before exiting
memory.clear(warn=False)
rmtree(location)
mean_scores = np.array(grid.cv_results_['mean_test_score'])
# scores are in the order of param_grid iteration, which is alphabetical
# NOTE(review): this reshape still assumes the upstream example's grid
# layout; with the extra l1_ratio axis the middle dimension mixes
# n_components and l1_ratio, so zipping two reducer_labels over its rows
# below selects arbitrary slices -- confirm before trusting the figure.
mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))
# select score for best C
mean_scores = mean_scores.max(axis=0)
bar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *
               (len(reducer_labels) + 1) + .5)
plt.figure()
COLORS = 'bgrcmyk'
for i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):
    plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])
plt.title("Comparing feature reduction techniques")
plt.xlabel('Reduced number of features')
plt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)
plt.ylabel('Digit classification accuracy')
plt.ylim((0, 1))
plt.legend(loc='upper left')
plt.show()
dongmengshi/easylearn | eslearn/visualization/plot_brainmap_nilearn_.py | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 22:49:33 2019
@author: <NAME>
Email: <EMAIL>
"""
"""
Making a surface plot of a 3D statistical map
=============================================
project a 3D statistical map onto a cortical mesh using
:func:`nilearn.surface.vol_to_surf`. Display a surface plot of the projected
map using :func:`nilearn.plotting.plot_surf_stat_map`.
"""
##############################################################################
# Get a statistical map
# ---------------------
from nilearn import datasets
from nilearn import plotting
import matplotlib.pyplot as plt
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import Utils.lc_niiProcessor as NiiProc
# Atlas volume to display (3 mm brainnetome parcellation).
img = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Data\Atalas\sorted_brainnetome_atalas_3mm.nii'
niiproc = NiiProc.NiiProcessor()
# read_sigle_nii appears to return (voxel data, nibabel image) --
# TODO(review): confirm against Utils.lc_niiProcessor.
stat_img_data, stat_img = niiproc.read_sigle_nii(img)
# Overlay the ROI atlas on the default background with a colorbar.
plotting.plot_roi(stat_img, cmap=plotting.cm.bwr, colorbar=True)
plt.show()
# motor_images = datasets.fetch_neurovault_motor_task()
# stat_img = motor_images.images[0]
##############################################################################
# Get a cortical mesh
# -------------------
# fsaverage = datasets.fetch_surf_fsaverage()
# ##############################################################################
# # Sample the 3D data around each node of the mesh
# # -----------------------------------------------
# from nilearn import surface
# texture = surface.vol_to_surf(stat_img, fsaverage.pial_right)
# ##############################################################################
# # Plot the result
# # ---------------
# plotting.plot_surf_stat_map(fsaverage.infl_right, texture, hemi='right',
# title='Surface right hemisphere', colorbar=True,
# threshold=3., bg_map=fsaverage.sulc_right)
# ##############################################################################
# # Plot 3D image for comparison
# # ----------------------------
# plotting.plot_glass_brain(stat_img, display_mode='r', plot_abs=False,
# title='Glass brain', threshold=3.)
# plotting.plot_stat_map(stat_img, display_mode='z', threshold=3.,
# cut_coords=range(-30, -10, 3), title='Slices')
# plotting.plot_stat_map(stat_img, display_mode='z', threshold=3.,
# cut_coords=range(-10, 10, 3), title='Slices')
# plotting.plot_stat_map(stat_img, display_mode='z', threshold=3.,
# cut_coords=range(10, 30, 3), title='Slices')
# plotting.plot_stat_map(stat_img, display_mode='z', threshold=3.,
# cut_coords=range(30, 50, 3), title='Slices')
# ##############################################################################
# # Plot with higher-resolution mesh
# # --------------------------------
# #
# # `fetch_surf_fsaverage` takes a "mesh" argument which specifies
# # wether to fetch the low-resolution fsaverage5 mesh, or the high-resolution
# # fsaverage mesh. using mesh="fsaverage" will result in more memory usage and
# # computation time, but finer visualizations.
# big_fsaverage = datasets.fetch_surf_fsaverage('fsaverage')
# big_texture = surface.vol_to_surf(stat_img, big_fsaverage.pial_right)
# plotting.plot_surf_stat_map(big_fsaverage.infl_right,
# big_texture, hemi='right', colorbar=True,
# title='Surface right hemisphere: fine mesh',
# threshold=1., bg_map=big_fsaverage.sulc_right)
# plotting.show()
# ##############################################################################
# # 3D visualization in a web browser
# # ---------------------------------
# # An alternative to :func:`nilearn.plotting.plot_surf_stat_map` is to use
# # :func:`nilearn.plotting.view_surf` or
# # :func:`nilearn.plotting.view_img_on_surf` that give more interactive
# # visualizations in a web browser. See :ref:`interactive-surface-plotting` for
# # more details.
# view = plotting.view_surf(fsaverage.infl_right, texture, threshold='90%',
# bg_map=fsaverage.sulc_right)
# # In a Jupyter notebook, if ``view`` is the output of a cell, it will
# # be displayed below the cell
# view
# ##############################################################################
# # uncomment this to open the plot in a web browser:
# # view.open_in_browser()
# ##############################################################################
# # We don't need to do the projection ourselves, we can use view_img_on_surf:
# view = plotting.view_img_on_surf(stat_img, threshold='90%')
# # view.open_in_browser()
# view
|
dongmengshi/easylearn | eslearn/examples/demo_pca_rfecv_svc.py | """
This script is a demo script, showing how to use eslearn to training and testing a SVC model.
Classifier: linear SVC
Dimension reduction: PCA
Feature selection: RFE
"""
import numpy as np
import eslearn.machine_learning.classfication.pca_rfe_svc_cv as pca_rfe_svc
# =============================================================================
# All inputs
path_patients = r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018-05_MVPA_insomnia_FCS\Degree\degree_gray_matter\Zdegree\Z_degree_patient\Weighted' # All patients' image files, .nii format
path_HC = r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018-05_MVPA_insomnia_FCS\Degree\degree_gray_matter\Zdegree\Z_degree_control\Weighted' # All HCs' image files, .nii format
path_mask = r'G:\Softer_DataProcessing\spm12\spm12\tpm\Reslice3_TPM_greaterThan0.2.nii' # Mask file for filter image
path_out = r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018-05_MVPA_insomnia_FCS\Degree\degree_gray_matter\Zdegree' # Directory for saving results
data_preprocess_method='StandardScaler'
data_preprocess_level='group' # In which level to preprocess data 'subject' or 'group'
num_of_fold_outer = 5 # How many folds to perform cross validation
is_dim_reduction = 1 # Whether to perform dimension reduction, default is using PCA to reduce the dimension.
components = 0.95 # How many percentages of the cumulatively explained variance to be retained. This is used to select the top principal components.
step = 0.1 # RFE parameter: percentages or number of features removed each iteration.
num_fold_of_inner_rfeCV = 5 # RFE parameter: how many folds to perform inner RFE loop.
n_jobs = -1 # RFE parameter: how many jobs (parallel works) to perform inner RFE loop.
is_showfig_finally = True # Whether show results figure finally.
is_showfig_in_each_fold = False # Whether show results in each fold.
# =============================================================================
clf = pca_rfe_svc.PcaRfeSvcCV(
path_patients=path_patients,
path_HC=path_HC,
path_mask=path_mask,
path_out=path_out,
data_preprocess_method=data_preprocess_method,
data_preprocess_level=data_preprocess_level,
num_of_fold_outer=num_of_fold_outer,
is_dim_reduction=is_dim_reduction,
components=components,
step=step,
num_fold_of_inner_rfeCV=num_fold_of_inner_rfeCV,
n_jobs=n_jobs,
is_showfig_finally=is_showfig_finally,
is_showfig_in_each_fold=is_showfig_in_each_fold
)
results = clf.main_function()
results = results.__dict__
print(f"mean accuracy = {np.mean(results['accuracy'])}")
print(f"std of accuracy = {np.std(results['accuracy'])}")
print(f"mean sensitivity = {np.mean(results['sensitivity'])}")
print(f"std of sensitivity = {np.std(results['sensitivity'])}")
print(f"mean specificity = {np.mean(results['specificity'])}")
print(f"std of specificity = {np.std(results['specificity'])}")
print(f"mean AUC = {np.mean(results['AUC'])}")
print(f"std of AUC = {np.std(results['AUC'])}") |
dongmengshi/easylearn | eslearn/examples/demo_pca_relieff_svc.py | """
This script is a demo script, showing how to use eslearn to training and testing a SVC model.
Classifier: linear SVC
Dimension reduction: PCA
Feature selection: ReliefF
"""
import numpy as np
import eslearn.machine_learning.classfication.pca_relieff_svc_cv as pca_relieff_svc
# =============================================================================
# All inputs
path_patients = r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018-05_MVPA_insomnia_FCS\Degree\degree_gray_matter\Zdegree\Z_degree_patient\Weighted' # .nii format
path_HC = r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018-05_MVPA_insomnia_FCS\Degree\degree_gray_matter\Zdegree\Z_degree_control\Weighted' # .nii format
path_mask = r'G:\Softer_DataProcessing\spm12\spm12\tpm\Reslice3_TPM_greaterThan0.2.nii' # mask file for filter image
path_out = r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018-05_MVPA_insomnia_FCS\Degree\degree_gray_matter\Zdegree' # directory for saving results
# =============================================================================
clf = pca_relieff_svc.PcaReliffSvcCV(
path_patients=path_patients,
path_HC=path_HC,
path_mask=path_mask,
path_out=path_out,
data_preprocess_method='MinMaxScaler',
data_preprocess_level='group',
num_of_kfold=5, # How many folds to perform cross validation (Default: 5-fold cross validation)
is_dim_reduction=1, # Default is using PCA to reduce the dimension.
components=0.75,
is_feature_selection=False, # Whether perform feature selection( Default is using relief-based feature selection algorithms).
n_features_to_select=0.99, # How many features to be selected.
is_showfig_finally=True, # Whether show results figure finally.
is_showfig_in_each_fold=False # Whether show results in each fold.
)
results = clf.main_function()
results = results.__dict__
print(f"mean accuracy = {np.mean(results['accuracy'])}")
print(f"std of accuracy = {np.std(results['accuracy'])}")
print(f"mean sensitivity = {np.mean(results['sensitivity'])}")
print(f"std of sensitivity = {np.std(results['sensitivity'])}")
print(f"mean specificity = {np.mean(results['specificity'])}")
print(f"std of specificity = {np.std(results['specificity'])}")
print(f"mean AUC = {np.mean(results['AUC'])}")
print(f"std of AUC = {np.std(results['AUC'])}") |
dongmengshi/easylearn | eslearn/machine_learning/datasets/h5py_test.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 09:12:14 2018
@author: lenovo
"""
# h5py basics: creating files, datasets and groups.
# (Comments translated to English.)

# --- Create a file with an empty integer dataset -------------------------
# Open with "r" instead of "w" to read an existing file.
import h5py

# BUG FIX: every section used to leave its file handle open; use context
# managers so each file is closed before it is re-opened below.
with h5py.File("myh5py.hdf5", "w") as f:
    d1 = f.create_dataset("dset1", (20,), 'i')
    for key in f.keys():
        print(key)
        print(f[key].name)
        print(f[key].shape)
        # BUG FIX: ``Dataset.value`` was deprecated and removed in
        # h5py >= 3.0; ``[()]`` reads the whole dataset instead.
        print(f[key][()])

# --- Assign values -------------------------------------------------------
import h5py
import numpy as np

with h5py.File("myh5py.hdf5", "w") as f:
    d1 = f.create_dataset("dset1", (20,), 'i')
    d1[...] = np.arange(20)        # fill the existing dataset in place
    f["dset2"] = np.arange(15)     # or create-and-assign in one step
    for key in f.keys():
        print(f[key].name)
        print(f[key][()])

# --- Create a dataset directly from an existing numpy array --------------
import h5py
import numpy as np

with h5py.File("myh5py.hdf5", "w") as f:
    a = np.arange(20)
    d1 = f.create_dataset("dset3", data=a)
    for key in f.keys():
        print(f[key].name)
        print(f[key][()])

# --- Groups --------------------------------------------------------------
import h5py
import numpy as np

with h5py.File("myh5py.hdf5", "w") as f:
    # Create a group named "bar" and two datasets inside it.
    g1 = f.create_group("bar")
    g1["dset1"] = np.arange(10)
    g1["dset2"] = np.arange(12).reshape((3, 4))
    for key in g1.keys():
        print(g1[key].name)
        print(g1[key][()])

# --- Groups and datasets mixed -------------------------------------------
import h5py
import numpy as np

with h5py.File("myh5py.hdf5", "w") as f:
    # Groups bar1 and bar2 plus a root-level dataset.
    g1 = f.create_group("bar1")
    g2 = f.create_group("bar2")
    d = f.create_dataset("dset", data=np.arange(10))
    # A subgroup and a dataset inside bar1 ...
    c1 = g1.create_group("car1")
    d1 = g1.create_dataset("dset1", data=np.arange(10))
    # ... and inside bar2.
    c2 = g2.create_group("car2")
    d2 = g2.create_dataset("dset2", data=np.arange(10))
    # Members of the root group.
    print(".............")
    for key in f.keys():
        print(f[key].name)
    # Members of bar1.
    print(".............")
    for key in g1.keys():
        print(g1[key].name)
    # Members of bar2.
    print(".............")
    for key in g2.keys():
        print(g2[key].name)
    # car1 and car2 are empty, as expected.
    print(".............")
    print(c1.keys())
    print(c2.keys())
dongmengshi/easylearn | eslearn/utils/regression/multi_test2.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 20:25:12 2018
@author: lenovo
"""
# BUG FIX: ``multiprocessing`` was used below but never imported, so both
# ``func`` and the pool setup raised NameError.
import multiprocessing


def func(msg):
    """Print the current worker process name followed by ``msg``."""
    print(multiprocessing.current_process().name + '-' + msg)


if __name__ == "__main__":
    pool = multiprocessing.Pool(processes=4)  # pool of 4 worker processes
    for i in range(10):
        msg = "hello %d" % (i)
        pool.apply_async(func, (msg, ))
    pool.close()  # no further tasks may be submitted to the pool
    pool.join()   # wait for all workers to finish; must follow close()
    print("Sub-process(es) done.")
dongmengshi/easylearn | eslearn/utils/lc_rfe_svc_deterministic_train_and_test_dataset.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 12 11:15:59 2019
@author: lenovo
"""
from lc_featureSelection_rfe import rfeCV
from lc_read_nii import read_sigleNii_LC
from lc_read_nii import main
import numpy as np
from sklearn import datasets
import sys
sys.path.append(r'F:\黎超\dynamicFC\Code\lc_rsfmri_tools_python-master\Utils')
sys.path.append(
r'F:\黎超\dynamicFC\Code\lc_rsfmri_tools_python-master\Machine_learning\classfication')
class SVCDeterministicTrAndTe():
    """RFE + SVC workflow on fixed train/validation NIfTI folders.

    NOTE(review): this block looks unfinished -- the whole workflow lives
    inside ``init`` (not ``__init__``), which is never invoked in this
    file; ``load_nii_and_gen_label``/``tr_te`` resolve several names
    (``validation_path``, ``kfold``, ``if_training``, ``data_tr``,
    ``label_tr``) through the enclosing function scope; and the final
    ``selector.predict(x)`` references an undefined ``x``.
    """
    def init(self):
        # ==============================================================================
        # input begin
        patients_path = r'K:\XiaoweiJiang\KETI\3_新建文件夹(20190308)\DATA\REST\ALFF\ALFF_BD'
        hc_path = r'K:\XiaoweiJiang\KETI\3_新建文件夹(20190308)\DATA\REST\ALFF\ALFF_HC1'
        validation_path = r'K:\XiaoweiJiang\KETI\3_新建文件夹(20190308)\DATA\REST\ALFF\ALFF_HC1'
        mask = r'G:\Softer_DataProcessing\spm12\spm12\tpm\Reslice3_TPM_greaterThan0.2.nii'
        kfold = 5
        # ==============================================================================
        # mask: keep voxels whose tissue-probability value is >= 0.2,
        # flattened to a 1-D boolean vector.
        mask = read_sigleNii_LC(mask) >= 0.2
        mask = np.array(mask).reshape(-1,)
        # is_train
        if_training = 1
        # input end
        def load_nii_and_gen_label(patients_path, hc_path, mask):
            # Load each subject's volume and flatten it to one row.
            # train data
            data1 = main(patients_path)
            data1 = np.squeeze(
                np.array([np.array(data1).reshape(1, -1) for data1 in data1]))
            data2 = main(hc_path)
            data2 = np.squeeze(
                np.array([np.array(data2).reshape(1, -1) for data2 in data2]))
            data = np.vstack([data1, data2])
            # validation data (``validation_path`` comes from the enclosing scope)
            data_val = main(validation_path)
            data_val = np.squeeze(
                np.array([np.array(data_val).reshape(1, -1) for data_val in data_val]))
            # data in mask
            data_tr = data[:, mask]
            data_val = data_val[:, mask]
            # label_tr: patients -> 0 (ones minus 1), controls -> 1
            label_tr = np.hstack(
                [np.ones([len(data1), ]) - 1, np.ones([len(data2), ])])
            return label_tr, data_tr, data_val
        # training and test
        def tr_te():
            import lc_svc_rfe_cv_V2 as lsvc
            svc = lsvc.SVCRefCv(
                pca_n_component=0.9,
                show_results=1,
                show_roc=0,
                k=kfold)
            # NOTE(review): if ``if_training`` is falsy, ``results`` is
            # unbound here and the return raises UnboundLocalError.
            if if_training:
                results = svc.svc_rfe_cv(data_tr, label_tr)
            return results
        # run
        # NOTE(review): this synthetic dataset is immediately overwritten
        # by the real data on the next statement -- presumably leftover
        # debug scaffolding.
        data_tr, label_tr = datasets.make_classification(n_samples=200, n_classes=2,
                                                         n_informative=50, n_redundant=3,
                                                         n_features=100, random_state=1)
        label_tr, data_tr, data_val = load_nii_and_gen_label(
            patients_path, hc_path, mask)
        selector, weight = rfeCV(data_tr, label_tr, step=0.1, cv=kfold, n_jobs=1,
                                 permutation=0)
        results = tr_te()
        results = results.__dict__
        # NOTE(review): ``x`` is undefined -- NameError if this line is reached.
        y_pred = selector.predict(x)
dongmengshi/easylearn | eslearn/statistical analysis/lc_anova_test.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 15:33:53 2018
@author: lenovo
"""
# NOTE(review): despite the file name, no ANOVA is performed yet -- only
# the example datasets are fetched; ``np``, ``smf`` and ``ols`` are
# imported but unused.
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.formula.api import ols
# get_rdataset downloads from the R datasets mirror (requires network);
# cache=True stores "Moore" locally for subsequent runs.
dat = sm.datasets.get_rdataset("Guerry", "HistData").data
moore = sm.datasets.get_rdataset("Moore", "car",cache=True)
dongmengshi/easylearn | eslearn/GUI/easylearn_machine_learning_gui.py | <filename>eslearn/GUI/easylearn_machine_learning_gui.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:/My_Codes/easylearn-fmri/eslearn/GUI/easylearn_machine_learning_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated UI class for the machine-learning settings window.

    Generated by pyuic5 from easylearn_machine_learning_gui.ui.  Do not edit
    by hand: change the .ui file in Qt Designer and regenerate instead.
    Tabs: Classification, Regression, Clustering, Deep learning; each
    classifier's hyper-parameter panel lives in a page of
    ``stackedWidget_setting``.
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets, menus and actions on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout_17 = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout_17.setObjectName("gridLayout_17")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.tabWidget.setTabShape(QtWidgets.QTabWidget.Triangular)
        self.tabWidget.setObjectName("tabWidget")
        # --- Classification tab: method radio buttons -------------------
        self.classification = QtWidgets.QWidget()
        self.classification.setObjectName("classification")
        self.gridLayout_9 = QtWidgets.QGridLayout(self.classification)
        self.gridLayout_9.setObjectName("gridLayout_9")
        self.groupBox_method = QtWidgets.QGroupBox(self.classification)
        self.groupBox_method.setObjectName("groupBox_method")
        self.gridLayout_5 = QtWidgets.QGridLayout(self.groupBox_method)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.radioButton_classification_gaussianprocess = QtWidgets.QRadioButton(self.groupBox_method)
        self.radioButton_classification_gaussianprocess.setObjectName("radioButton_classification_gaussianprocess")
        self.gridLayout_5.addWidget(self.radioButton_classification_gaussianprocess, 3, 0, 1, 1)
        self.radioButton_classification_randomforest = QtWidgets.QRadioButton(self.groupBox_method)
        self.radioButton_classification_randomforest.setObjectName("radioButton_classification_randomforest")
        self.gridLayout_5.addWidget(self.radioButton_classification_randomforest, 5, 0, 1, 1)
        self.radioButton_classification_svm = QtWidgets.QRadioButton(self.groupBox_method)
        self.radioButton_classification_svm.setObjectName("radioButton_classification_svm")
        self.gridLayout_5.addWidget(self.radioButton_classification_svm, 1, 0, 1, 1)
        self.radioButton_classificaton_lr = QtWidgets.QRadioButton(self.groupBox_method)
        self.radioButton_classificaton_lr.setObjectName("radioButton_classificaton_lr")
        self.gridLayout_5.addWidget(self.radioButton_classificaton_lr, 0, 0, 1, 1)
        self.radioButton_classification_adaboost = QtWidgets.QRadioButton(self.groupBox_method)
        self.radioButton_classification_adaboost.setObjectName("radioButton_classification_adaboost")
        self.gridLayout_5.addWidget(self.radioButton_classification_adaboost, 8, 0, 1, 1)
        self.radioButton_classification_ridge = QtWidgets.QRadioButton(self.groupBox_method)
        self.radioButton_classification_ridge.setObjectName("radioButton_classification_ridge")
        self.gridLayout_5.addWidget(self.radioButton_classification_ridge, 2, 0, 1, 1)
        self.gridLayout_9.addWidget(self.groupBox_method, 0, 0, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_9.addItem(spacerItem, 0, 1, 1, 1)
        # --- Classification tab: per-method settings (stacked pages) ----
        self.groupBox_setting = QtWidgets.QGroupBox(self.classification)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox_setting.sizePolicy().hasHeightForWidth())
        self.groupBox_setting.setSizePolicy(sizePolicy)
        self.groupBox_setting.setMinimumSize(QtCore.QSize(100, 0))
        self.groupBox_setting.setObjectName("groupBox_setting")
        self.gridLayout = QtWidgets.QGridLayout(self.groupBox_setting)
        self.gridLayout.setObjectName("gridLayout")
        self.stackedWidget_setting = QtWidgets.QStackedWidget(self.groupBox_setting)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.stackedWidget_setting.sizePolicy().hasHeightForWidth())
        self.stackedWidget_setting.setSizePolicy(sizePolicy)
        self.stackedWidget_setting.setMinimumSize(QtCore.QSize(200, 0))
        self.stackedWidget_setting.setObjectName("stackedWidget_setting")
        # Page: logistic regression (elastic-net l1_ratio grid)
        self.to_lr = QtWidgets.QWidget()
        self.to_lr.setObjectName("to_lr")
        self.gridLayout_11 = QtWidgets.QGridLayout(self.to_lr)
        self.gridLayout_11.setObjectName("gridLayout_11")
        self.label_7 = QtWidgets.QLabel(self.to_lr)
        self.label_7.setObjectName("label_7")
        self.gridLayout_11.addWidget(self.label_7, 2, 0, 1, 1)
        self.spinBox__clf_lr_numl1ratio = QtWidgets.QSpinBox(self.to_lr)
        self.spinBox__clf_lr_numl1ratio.setObjectName("spinBox__clf_lr_numl1ratio")
        self.gridLayout_11.addWidget(self.spinBox__clf_lr_numl1ratio, 2, 1, 1, 1)
        self.doubleSpinBox_clf_lr_maxl1ratio = QtWidgets.QDoubleSpinBox(self.to_lr)
        self.doubleSpinBox_clf_lr_maxl1ratio.setMaximum(1.0)
        self.doubleSpinBox_clf_lr_maxl1ratio.setSingleStep(0.1)
        self.doubleSpinBox_clf_lr_maxl1ratio.setProperty("value", 1.0)
        self.doubleSpinBox_clf_lr_maxl1ratio.setObjectName("doubleSpinBox_clf_lr_maxl1ratio")
        self.gridLayout_11.addWidget(self.doubleSpinBox_clf_lr_maxl1ratio, 0, 1, 1, 1)
        self.doubleSpinBox_clf_lr_minl1ratio = QtWidgets.QDoubleSpinBox(self.to_lr)
        self.doubleSpinBox_clf_lr_minl1ratio.setMaximum(1.0)
        self.doubleSpinBox_clf_lr_minl1ratio.setSingleStep(0.1)
        self.doubleSpinBox_clf_lr_minl1ratio.setObjectName("doubleSpinBox_clf_lr_minl1ratio")
        self.gridLayout_11.addWidget(self.doubleSpinBox_clf_lr_minl1ratio, 1, 1, 1, 1)
        self.label_5 = QtWidgets.QLabel(self.to_lr)
        self.label_5.setObjectName("label_5")
        self.gridLayout_11.addWidget(self.label_5, 0, 0, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.to_lr)
        self.label_2.setObjectName("label_2")
        self.gridLayout_11.addWidget(self.label_2, 1, 0, 1, 1)
        self.stackedWidget_setting.addWidget(self.to_lr)
        # Page: SVM (kernel, C range, gamma range)
        self.to_svm = QtWidgets.QWidget()
        self.to_svm.setObjectName("to_svm")
        self.gridLayout_10 = QtWidgets.QGridLayout(self.to_svm)
        self.gridLayout_10.setObjectName("gridLayout_10")
        self.comboBox_clf_svm_kernel = QtWidgets.QComboBox(self.to_svm)
        self.comboBox_clf_svm_kernel.setObjectName("comboBox_clf_svm_kernel")
        self.comboBox_clf_svm_kernel.addItem("")
        self.comboBox_clf_svm_kernel.addItem("")
        self.comboBox_clf_svm_kernel.addItem("")
        self.comboBox_clf_svm_kernel.addItem("")
        self.gridLayout_10.addWidget(self.comboBox_clf_svm_kernel, 0, 1, 1, 2)
        self.doubleSpinBox_clf_svm_maxc = QtWidgets.QDoubleSpinBox(self.to_svm)
        self.doubleSpinBox_clf_svm_maxc.setMaximum(1000000.0)
        self.doubleSpinBox_clf_svm_maxc.setSingleStep(0.1)
        self.doubleSpinBox_clf_svm_maxc.setProperty("value", 1.0)
        self.doubleSpinBox_clf_svm_maxc.setObjectName("doubleSpinBox_clf_svm_maxc")
        self.gridLayout_10.addWidget(self.doubleSpinBox_clf_svm_maxc, 1, 2, 1, 1)
        self.label_clf_svm_kernel = QtWidgets.QLabel(self.to_svm)
        self.label_clf_svm_kernel.setObjectName("label_clf_svm_kernel")
        self.gridLayout_10.addWidget(self.label_clf_svm_kernel, 0, 0, 1, 1)
        self.label_6 = QtWidgets.QLabel(self.to_svm)
        self.label_6.setObjectName("label_6")
        self.gridLayout_10.addWidget(self.label_6, 1, 0, 1, 2)
        self.label_4 = QtWidgets.QLabel(self.to_svm)
        self.label_4.setObjectName("label_4")
        self.gridLayout_10.addWidget(self.label_4, 2, 0, 1, 2)
        self.label_10 = QtWidgets.QLabel(self.to_svm)
        self.label_10.setObjectName("label_10")
        self.gridLayout_10.addWidget(self.label_10, 4, 0, 1, 1)
        self.label_9 = QtWidgets.QLabel(self.to_svm)
        self.label_9.setObjectName("label_9")
        self.gridLayout_10.addWidget(self.label_9, 5, 0, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.to_svm)
        self.label_8.setObjectName("label_8")
        self.gridLayout_10.addWidget(self.label_8, 6, 0, 1, 1)
        self.doubleSpinBox_clf_svm_minc = QtWidgets.QDoubleSpinBox(self.to_svm)
        self.doubleSpinBox_clf_svm_minc.setMinimum(0.0)
        self.doubleSpinBox_clf_svm_minc.setMaximum(100000.0)
        self.doubleSpinBox_clf_svm_minc.setSingleStep(0.1)
        self.doubleSpinBox_clf_svm_minc.setProperty("value", 1.0)
        self.doubleSpinBox_clf_svm_minc.setObjectName("doubleSpinBox_clf_svm_minc")
        self.gridLayout_10.addWidget(self.doubleSpinBox_clf_svm_minc, 2, 2, 1, 1)
        self.spinBox_clf_svm_numgamma = QtWidgets.QSpinBox(self.to_svm)
        self.spinBox_clf_svm_numgamma.setObjectName("spinBox_clf_svm_numgamma")
        self.gridLayout_10.addWidget(self.spinBox_clf_svm_numgamma, 6, 2, 1, 1)
        self.label = QtWidgets.QLabel(self.to_svm)
        self.label.setObjectName("label")
        self.gridLayout_10.addWidget(self.label, 3, 0, 1, 1)
        self.spinBox_clf_svm_numc = QtWidgets.QSpinBox(self.to_svm)
        self.spinBox_clf_svm_numc.setObjectName("spinBox_clf_svm_numc")
        self.gridLayout_10.addWidget(self.spinBox_clf_svm_numc, 3, 2, 1, 1)
        self.lineEdit_clf_svm_maxgamma = QtWidgets.QLineEdit(self.to_svm)
        self.lineEdit_clf_svm_maxgamma.setObjectName("lineEdit_clf_svm_maxgamma")
        self.gridLayout_10.addWidget(self.lineEdit_clf_svm_maxgamma, 4, 2, 1, 1)
        self.lineEdit_clf_svm_mingamma = QtWidgets.QLineEdit(self.to_svm)
        self.lineEdit_clf_svm_mingamma.setObjectName("lineEdit_clf_svm_mingamma")
        self.gridLayout_10.addWidget(self.lineEdit_clf_svm_mingamma, 5, 2, 1, 1)
        self.stackedWidget_setting.addWidget(self.to_svm)
        # Page: ridge classifier (alpha grid)
        self.to_ridgeclf = QtWidgets.QWidget()
        self.to_ridgeclf.setObjectName("to_ridgeclf")
        self.gridLayout_14 = QtWidgets.QGridLayout(self.to_ridgeclf)
        self.gridLayout_14.setObjectName("gridLayout_14")
        self.label_21 = QtWidgets.QLabel(self.to_ridgeclf)
        self.label_21.setObjectName("label_21")
        self.gridLayout_14.addWidget(self.label_21, 0, 0, 1, 1)
        self.doubleSpinBox_clf_ridgeclf_maxalpha = QtWidgets.QDoubleSpinBox(self.to_ridgeclf)
        self.doubleSpinBox_clf_ridgeclf_maxalpha.setMaximum(1000000.0)
        self.doubleSpinBox_clf_ridgeclf_maxalpha.setSingleStep(0.1)
        self.doubleSpinBox_clf_ridgeclf_maxalpha.setProperty("value", 1.0)
        self.doubleSpinBox_clf_ridgeclf_maxalpha.setObjectName("doubleSpinBox_clf_ridgeclf_maxalpha")
        self.gridLayout_14.addWidget(self.doubleSpinBox_clf_ridgeclf_maxalpha, 0, 1, 1, 1)
        self.label_20 = QtWidgets.QLabel(self.to_ridgeclf)
        self.label_20.setObjectName("label_20")
        self.gridLayout_14.addWidget(self.label_20, 1, 0, 1, 1)
        self.doubleSpinBox_clf_ridgeclf_minalpha = QtWidgets.QDoubleSpinBox(self.to_ridgeclf)
        self.doubleSpinBox_clf_ridgeclf_minalpha.setMinimum(0.0)
        self.doubleSpinBox_clf_ridgeclf_minalpha.setMaximum(100000.0)
        self.doubleSpinBox_clf_ridgeclf_minalpha.setSingleStep(0.1)
        self.doubleSpinBox_clf_ridgeclf_minalpha.setProperty("value", 1.0)
        self.doubleSpinBox_clf_ridgeclf_minalpha.setObjectName("doubleSpinBox_clf_ridgeclf_minalpha")
        self.gridLayout_14.addWidget(self.doubleSpinBox_clf_ridgeclf_minalpha, 1, 1, 1, 1)
        self.label_19 = QtWidgets.QLabel(self.to_ridgeclf)
        self.label_19.setObjectName("label_19")
        self.gridLayout_14.addWidget(self.label_19, 2, 0, 1, 1)
        self.spinBox_clf_ridgeclf_numalpha = QtWidgets.QSpinBox(self.to_ridgeclf)
        self.spinBox_clf_ridgeclf_numalpha.setObjectName("spinBox_clf_ridgeclf_numalpha")
        self.gridLayout_14.addWidget(self.spinBox_clf_ridgeclf_numalpha, 2, 1, 1, 1)
        self.stackedWidget_setting.addWidget(self.to_ridgeclf)
        # Page: Gaussian process (no tunable settings)
        self.to_guassianprocess = QtWidgets.QWidget()
        self.to_guassianprocess.setObjectName("to_guassianprocess")
        self.stackedWidget_setting.addWidget(self.to_guassianprocess)
        # Page: random forest (criterion, estimators range, max depth)
        self.to_randomforest = QtWidgets.QWidget()
        self.to_randomforest.setObjectName("to_randomforest")
        self.gridLayout_15 = QtWidgets.QGridLayout(self.to_randomforest)
        self.gridLayout_15.setObjectName("gridLayout_15")
        self.label_22 = QtWidgets.QLabel(self.to_randomforest)
        self.label_22.setObjectName("label_22")
        self.gridLayout_15.addWidget(self.label_22, 2, 0, 1, 1)
        self.spinBox_clf_randomforest_maxdepth = QtWidgets.QSpinBox(self.to_randomforest)
        self.spinBox_clf_randomforest_maxdepth.setMaximum(10000)
        self.spinBox_clf_randomforest_maxdepth.setProperty("value", 10)
        self.spinBox_clf_randomforest_maxdepth.setObjectName("spinBox_clf_randomforest_maxdepth")
        self.gridLayout_15.addWidget(self.spinBox_clf_randomforest_maxdepth, 5, 1, 1, 1)
        self.label_23 = QtWidgets.QLabel(self.to_randomforest)
        self.label_23.setObjectName("label_23")
        self.gridLayout_15.addWidget(self.label_23, 1, 0, 1, 1)
        self.spinBox_clf_randomforest_minestimators = QtWidgets.QSpinBox(self.to_randomforest)
        self.spinBox_clf_randomforest_minestimators.setProperty("value", 10)
        self.spinBox_clf_randomforest_minestimators.setObjectName("spinBox_clf_randomforest_minestimators")
        self.gridLayout_15.addWidget(self.spinBox_clf_randomforest_minestimators, 3, 1, 1, 1)
        self.label_24 = QtWidgets.QLabel(self.to_randomforest)
        self.label_24.setObjectName("label_24")
        self.gridLayout_15.addWidget(self.label_24, 3, 0, 1, 1)
        self.label_26 = QtWidgets.QLabel(self.to_randomforest)
        self.label_26.setObjectName("label_26")
        self.gridLayout_15.addWidget(self.label_26, 5, 0, 1, 1)
        self.comboBox_clf_randomforest_criterion = QtWidgets.QComboBox(self.to_randomforest)
        self.comboBox_clf_randomforest_criterion.setObjectName("comboBox_clf_randomforest_criterion")
        self.comboBox_clf_randomforest_criterion.addItem("")
        self.comboBox_clf_randomforest_criterion.addItem("")
        self.gridLayout_15.addWidget(self.comboBox_clf_randomforest_criterion, 1, 1, 1, 1)
        self.spinBox_clf_randomforest_maxestimators = QtWidgets.QSpinBox(self.to_randomforest)
        self.spinBox_clf_randomforest_maxestimators.setProperty("value", 10)
        self.spinBox_clf_randomforest_maxestimators.setObjectName("spinBox_clf_randomforest_maxestimators")
        self.gridLayout_15.addWidget(self.spinBox_clf_randomforest_maxestimators, 2, 1, 1, 1)
        self.stackedWidget_setting.addWidget(self.to_randomforest)
        # Page: AdaBoost (base estimator, estimators range)
        self.to_adaboost = QtWidgets.QWidget()
        self.to_adaboost.setObjectName("to_adaboost")
        self.gridLayout_16 = QtWidgets.QGridLayout(self.to_adaboost)
        self.gridLayout_16.setObjectName("gridLayout_16")
        self.comboBox_clf_adaboost_baseesitmator = QtWidgets.QComboBox(self.to_adaboost)
        self.comboBox_clf_adaboost_baseesitmator.setObjectName("comboBox_clf_adaboost_baseesitmator")
        self.comboBox_clf_adaboost_baseesitmator.addItem("")
        self.gridLayout_16.addWidget(self.comboBox_clf_adaboost_baseesitmator, 1, 1, 1, 1)
        self.label_31 = QtWidgets.QLabel(self.to_adaboost)
        self.label_31.setObjectName("label_31")
        self.gridLayout_16.addWidget(self.label_31, 1, 0, 1, 1)
        self.label_29 = QtWidgets.QLabel(self.to_adaboost)
        self.label_29.setObjectName("label_29")
        self.gridLayout_16.addWidget(self.label_29, 3, 0, 1, 1)
        self.spinBox_clf_adaboost_maxestimators = QtWidgets.QSpinBox(self.to_adaboost)
        self.spinBox_clf_adaboost_maxestimators.setProperty("value", 2)
        self.spinBox_clf_adaboost_maxestimators.setObjectName("spinBox_clf_adaboost_maxestimators")
        self.gridLayout_16.addWidget(self.spinBox_clf_adaboost_maxestimators, 2, 1, 1, 1)
        self.label_27 = QtWidgets.QLabel(self.to_adaboost)
        self.label_27.setObjectName("label_27")
        self.gridLayout_16.addWidget(self.label_27, 2, 0, 1, 1)
        self.spinBox_clf_adaboost_minestimators = QtWidgets.QSpinBox(self.to_adaboost)
        self.spinBox_clf_adaboost_minestimators.setProperty("value", 2)
        self.spinBox_clf_adaboost_minestimators.setObjectName("spinBox_clf_adaboost_minestimators")
        self.gridLayout_16.addWidget(self.spinBox_clf_adaboost_minestimators, 3, 1, 1, 1)
        self.stackedWidget_setting.addWidget(self.to_adaboost)
        self.gridLayout.addWidget(self.stackedWidget_setting, 0, 0, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem1, 0, 1, 1, 1)
        self.gridLayout_9.addWidget(self.groupBox_setting, 0, 2, 1, 1)
        self.tabWidget.addTab(self.classification, "")
        # --- Regression tab ----------------------------------------------
        self.regression = QtWidgets.QWidget()
        self.regression.setObjectName("regression")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.regression)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.groupBox_5 = QtWidgets.QGroupBox(self.regression)
        self.groupBox_5.setObjectName("groupBox_5")
        self.gridLayout_2.addWidget(self.groupBox_5, 0, 1, 1, 1)
        self.groupBox_2 = QtWidgets.QGroupBox(self.regression)
        self.groupBox_2.setObjectName("groupBox_2")
        self.gridLayout_6 = QtWidgets.QGridLayout(self.groupBox_2)
        self.gridLayout_6.setObjectName("gridLayout_6")
        self.radioButton_7 = QtWidgets.QRadioButton(self.groupBox_2)
        self.radioButton_7.setObjectName("radioButton_7")
        self.gridLayout_6.addWidget(self.radioButton_7, 0, 0, 1, 1)
        self.radioButton_6 = QtWidgets.QRadioButton(self.groupBox_2)
        self.radioButton_6.setObjectName("radioButton_6")
        self.gridLayout_6.addWidget(self.radioButton_6, 3, 0, 1, 1)
        self.radioButton_10 = QtWidgets.QRadioButton(self.groupBox_2)
        self.radioButton_10.setObjectName("radioButton_10")
        self.gridLayout_6.addWidget(self.radioButton_10, 1, 0, 1, 1)
        self.gridLayout_2.addWidget(self.groupBox_2, 0, 0, 1, 1)
        self.tabWidget.addTab(self.regression, "")
        # --- Clustering tab ----------------------------------------------
        self.clustering = QtWidgets.QWidget()
        self.clustering.setObjectName("clustering")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.clustering)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.groupBox_6 = QtWidgets.QGroupBox(self.clustering)
        self.groupBox_6.setObjectName("groupBox_6")
        self.gridLayout_3.addWidget(self.groupBox_6, 0, 2, 1, 1)
        self.groupBox_3 = QtWidgets.QGroupBox(self.clustering)
        self.groupBox_3.setObjectName("groupBox_3")
        self.gridLayout_7 = QtWidgets.QGridLayout(self.groupBox_3)
        self.gridLayout_7.setObjectName("gridLayout_7")
        self.gridLayout_3.addWidget(self.groupBox_3, 0, 1, 1, 1)
        self.tabWidget.addTab(self.clustering, "")
        # --- Deep learning tab -------------------------------------------
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.tab)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.groupBox_7 = QtWidgets.QGroupBox(self.tab)
        self.groupBox_7.setObjectName("groupBox_7")
        self.gridLayout_8 = QtWidgets.QGridLayout(self.groupBox_7)
        self.gridLayout_8.setObjectName("gridLayout_8")
        self.radioButton_16 = QtWidgets.QRadioButton(self.groupBox_7)
        self.radioButton_16.setObjectName("radioButton_16")
        self.gridLayout_8.addWidget(self.radioButton_16, 4, 0, 1, 1)
        self.radioButton_17 = QtWidgets.QRadioButton(self.groupBox_7)
        self.radioButton_17.setObjectName("radioButton_17")
        self.gridLayout_8.addWidget(self.radioButton_17, 1, 0, 1, 1)
        self.radioButton_18 = QtWidgets.QRadioButton(self.groupBox_7)
        self.radioButton_18.setObjectName("radioButton_18")
        self.gridLayout_8.addWidget(self.radioButton_18, 0, 0, 1, 1)
        self.radioButton_19 = QtWidgets.QRadioButton(self.groupBox_7)
        self.radioButton_19.setObjectName("radioButton_19")
        self.gridLayout_8.addWidget(self.radioButton_19, 5, 0, 1, 1)
        self.radioButton_20 = QtWidgets.QRadioButton(self.groupBox_7)
        self.radioButton_20.setObjectName("radioButton_20")
        self.gridLayout_8.addWidget(self.radioButton_20, 2, 0, 1, 1)
        self.gridLayout_4.addWidget(self.groupBox_7, 0, 0, 1, 1)
        self.groupBox_8 = QtWidgets.QGroupBox(self.tab)
        self.groupBox_8.setObjectName("groupBox_8")
        self.gridLayout_4.addWidget(self.groupBox_8, 0, 1, 1, 1)
        self.tabWidget.addTab(self.tab, "")
        self.gridLayout_17.addWidget(self.tabWidget, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- Menu bar, status bar and actions ----------------------------
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        self.menuConfiguration_file_F = QtWidgets.QMenu(self.menubar)
        self.menuConfiguration_file_F.setObjectName("menuConfiguration_file_F")
        self.menuHelp_H = QtWidgets.QMenu(self.menubar)
        self.menuHelp_H.setObjectName("menuHelp_H")
        self.menuSkin = QtWidgets.QMenu(self.menubar)
        self.menuSkin.setObjectName("menuSkin")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actionLoad_configuration = QtWidgets.QAction(MainWindow)
        self.actionLoad_configuration.setObjectName("actionLoad_configuration")
        self.actionSave_configuration = QtWidgets.QAction(MainWindow)
        self.actionSave_configuration.setObjectName("actionSave_configuration")
        self.actionWeb = QtWidgets.QAction(MainWindow)
        self.actionWeb.setObjectName("actionWeb")
        self.actionPDF = QtWidgets.QAction(MainWindow)
        self.actionPDF.setObjectName("actionPDF")
        self.actionDark = QtWidgets.QAction(MainWindow)
        self.actionDark.setObjectName("actionDark")
        self.actionBlack = QtWidgets.QAction(MainWindow)
        self.actionBlack.setObjectName("actionBlack")
        self.actionDarkOrange = QtWidgets.QAction(MainWindow)
        self.actionDarkOrange.setObjectName("actionDarkOrange")
        self.actionGray = QtWidgets.QAction(MainWindow)
        self.actionGray.setObjectName("actionGray")
        self.actionBlue = QtWidgets.QAction(MainWindow)
        self.actionBlue.setObjectName("actionBlue")
        self.actionNavy = QtWidgets.QAction(MainWindow)
        self.actionNavy.setObjectName("actionNavy")
        self.actionClassic = QtWidgets.QAction(MainWindow)
        self.actionClassic.setObjectName("actionClassic")
        self.menuConfiguration_file_F.addAction(self.actionLoad_configuration)
        self.menuConfiguration_file_F.addAction(self.actionSave_configuration)
        self.menuHelp_H.addAction(self.actionWeb)
        self.menuHelp_H.addAction(self.actionPDF)
        self.menuSkin.addAction(self.actionDark)
        self.menuSkin.addAction(self.actionBlack)
        self.menuSkin.addAction(self.actionDarkOrange)
        self.menuSkin.addAction(self.actionGray)
        self.menuSkin.addAction(self.actionBlue)
        self.menuSkin.addAction(self.actionNavy)
        self.menuSkin.addAction(self.actionClassic)
        self.menubar.addAction(self.menuConfiguration_file_F.menuAction())
        self.menubar.addAction(self.menuHelp_H.menuAction())
        self.menubar.addAction(self.menuSkin.menuAction())
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        self.stackedWidget_setting.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible (translatable) strings on the widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.groupBox_method.setTitle(_translate("MainWindow", "Methods"))
        self.radioButton_classification_gaussianprocess.setText(_translate("MainWindow", "Gaussian process"))
        self.radioButton_classification_randomforest.setText(_translate("MainWindow", "Random forest"))
        self.radioButton_classification_svm.setText(_translate("MainWindow", "Support vector machine"))
        self.radioButton_classificaton_lr.setText(_translate("MainWindow", "Logistic regression"))
        self.radioButton_classification_adaboost.setText(_translate("MainWindow", "AdaBoost"))
        self.radioButton_classification_ridge.setText(_translate("MainWindow", "Ridge classification"))
        self.groupBox_setting.setTitle(_translate("MainWindow", "Setting"))
        self.label_7.setText(_translate("MainWindow", "Number"))
        self.label_5.setText(_translate("MainWindow", "Max_l1_ratio"))
        self.label_2.setText(_translate("MainWindow", "Min_l1_ratio"))
        self.comboBox_clf_svm_kernel.setItemText(0, _translate("MainWindow", "linear"))
        self.comboBox_clf_svm_kernel.setItemText(1, _translate("MainWindow", "poly"))
        self.comboBox_clf_svm_kernel.setItemText(2, _translate("MainWindow", "rbf"))
        self.comboBox_clf_svm_kernel.setItemText(3, _translate("MainWindow", "sigmoid"))
        self.label_clf_svm_kernel.setText(_translate("MainWindow", "Kernel"))
        self.label_6.setText(_translate("MainWindow", "Max_C"))
        self.label_4.setText(_translate("MainWindow", "Min_C"))
        self.label_10.setText(_translate("MainWindow", "Max_gamma"))
        self.label_9.setText(_translate("MainWindow", "Min_gamma"))
        self.label_8.setText(_translate("MainWindow", "Number_gamma"))
        self.label.setText(_translate("MainWindow", "Numer_C"))
        self.label_21.setText(_translate("MainWindow", "Max_alpha"))
        self.label_20.setText(_translate("MainWindow", "Min_alpha"))
        self.label_19.setText(_translate("MainWindow", "Numer"))
        self.label_22.setText(_translate("MainWindow", "Max_estimators"))
        self.label_23.setText(_translate("MainWindow", "Criterion"))
        self.label_24.setText(_translate("MainWindow", "Min_estimators"))
        self.label_26.setText(_translate("MainWindow", "Max_depth"))
        self.comboBox_clf_randomforest_criterion.setItemText(0, _translate("MainWindow", "gini"))
        self.comboBox_clf_randomforest_criterion.setItemText(1, _translate("MainWindow", "entropy"))
        self.comboBox_clf_adaboost_baseesitmator.setItemText(0, _translate("MainWindow", "DecisionTreeClassifier(max_depth=1)"))
        self.label_31.setText(_translate("MainWindow", "Base_estimator"))
        self.label_29.setText(_translate("MainWindow", "Min_estimators"))
        self.label_27.setText(_translate("MainWindow", "Max_estimators"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.classification), _translate("MainWindow", "Classification"))
        self.groupBox_5.setTitle(_translate("MainWindow", "Setting"))
        self.groupBox_2.setTitle(_translate("MainWindow", "Methods"))
        self.radioButton_7.setText(_translate("MainWindow", "Support vector machines"))
        self.radioButton_6.setText(_translate("MainWindow", "Random forest"))
        self.radioButton_10.setText(_translate("MainWindow", "Gaussian process"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.regression), _translate("MainWindow", "Regression"))
        self.groupBox_6.setTitle(_translate("MainWindow", "Setting"))
        self.groupBox_3.setTitle(_translate("MainWindow", "Methods"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.clustering), _translate("MainWindow", "Clustering"))
        self.groupBox_7.setTitle(_translate("MainWindow", "Methods"))
        self.radioButton_16.setText(_translate("MainWindow", "Random forest classification"))
        self.radioButton_17.setText(_translate("MainWindow", "Support vector machines"))
        self.radioButton_18.setText(_translate("MainWindow", "Logistic regressio"))
        self.radioButton_19.setText(_translate("MainWindow", "RadioButton"))
        self.radioButton_20.setText(_translate("MainWindow", "Gaussian process classification"))
        self.groupBox_8.setTitle(_translate("MainWindow", "Setting"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Deep learning"))
        self.menuConfiguration_file_F.setTitle(_translate("MainWindow", "Configuration file(&F)"))
        self.menuHelp_H.setTitle(_translate("MainWindow", "Help(&H)"))
        self.menuSkin.setTitle(_translate("MainWindow", "Skin"))
        self.actionLoad_configuration.setText(_translate("MainWindow", "Load configuration"))
        self.actionSave_configuration.setText(_translate("MainWindow", "Save configuration"))
        self.actionWeb.setText(_translate("MainWindow", "Web"))
        self.actionPDF.setText(_translate("MainWindow", "PDF"))
        self.actionDark.setText(_translate("MainWindow", "Dark"))
        self.actionBlack.setText(_translate("MainWindow", "Black"))
        self.actionDarkOrange.setText(_translate("MainWindow", "DarkOrange"))
        self.actionGray.setText(_translate("MainWindow", "Gray"))
        self.actionBlue.setText(_translate("MainWindow", "Blue"))
        self.actionNavy.setText(_translate("MainWindow", "Navy"))
        self.actionClassic.setText(_translate("MainWindow", "Classic"))
|
dongmengshi/easylearn | eslearn/GUI/easylearn_feature_engineering.py | <filename>eslearn/GUI/easylearn_feature_engineering.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:/My_Codes/easylearn-fmri/eslearn/GUI/Feature_engineering.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated UI class for the feature-engineering window.

    Generated by pyuic5 from Feature_engineering.ui.  Do not edit by hand:
    change the .ui file in Qt Designer and regenerate instead.
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets, menu bar and status bar."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(863, 583)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Stacked pages switched by the buttons below.
        self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
        self.stackedWidget.setGeometry(QtCore.QRect(79, 260, 721, 111))
        self.stackedWidget.setObjectName("stackedWidget")
        self.page = QtWidgets.QWidget()
        self.page.setObjectName("page")
        self.stackedWidget.addWidget(self.page)
        self.page_2 = QtWidgets.QWidget()
        self.page_2.setObjectName("page_2")
        self.widget = QtWidgets.QWidget(self.page_2)
        self.widget.setGeometry(QtCore.QRect(10, 10, 183, 23))
        self.widget.setObjectName("widget")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.comboBox = QtWidgets.QComboBox(self.widget)
        self.comboBox.setObjectName("comboBox")
        self.horizontalLayout_2.addWidget(self.comboBox)
        self.comboBox_2 = QtWidgets.QComboBox(self.widget)
        self.comboBox_2.setObjectName("comboBox_2")
        self.horizontalLayout_2.addWidget(self.comboBox_2)
        self.stackedWidget.addWidget(self.page_2)
        # Row of step buttons: preprocessing / dim reduction / selection / unbalance.
        self.widget1 = QtWidgets.QWidget(self.centralwidget)
        self.widget1.setGeometry(QtCore.QRect(81, 111, 635, 30))
        self.widget1.setObjectName("widget1")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget1)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton = QtWidgets.QPushButton(self.widget1)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
        self.pushButton.setSizePolicy(sizePolicy)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        self.pushButton_2 = QtWidgets.QPushButton(self.widget1)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
        self.pushButton_2.setSizePolicy(sizePolicy)
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        self.pushButton_3 = QtWidgets.QPushButton(self.widget1)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_3.sizePolicy().hasHeightForWidth())
        self.pushButton_3.setSizePolicy(sizePolicy)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout.addWidget(self.pushButton_3)
        self.pushButton_4 = QtWidgets.QPushButton(self.widget1)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_4.sizePolicy().hasHeightForWidth())
        self.pushButton_4.setSizePolicy(sizePolicy)
        self.pushButton_4.setObjectName("pushButton_4")
        self.horizontalLayout.addWidget(self.pushButton_4)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 863, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.stackedWidget.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible (translatable) strings on the widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton.setText(_translate("MainWindow", "Preprocessing"))
        self.pushButton_2.setText(_translate("MainWindow", "Dimension reduction"))
        self.pushButton_3.setText(_translate("MainWindow", "Selection"))
        self.pushButton_4.setText(_translate("MainWindow", "Dealing with unbalance"))
|
dongmengshi/easylearn | eslearn/machine_learning/regression/lc_cca.py | <reponame>dongmengshi/easylearn<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 3 14:17:50 2018
典型相关分析
please refer to {Linked dimensions of psychopathology and
connectivity in functional brain networks}
@author: <NAME>
"""
from sklearn.cross_decomposition import CCA
import numpy as np
class LcCCA():
    """Canonical correlation analysis (CCA) helper.

    Thin wrapper around sklearn.cross_decomposition.CCA that also returns
    loadings, weights and a per-component explained-covariance estimate.
    Reference: "Linked dimensions of psychopathology and connectivity in
    functional brain networks".
    """
    # initial parameters
    def __init__(self,n_comp=4,permutation=0):
        # n_comp: number of canonical components to extract.
        # permutation: number of permutations; currently stored but unused.
        self.n_comp=n_comp
        self.permutation=permutation
    ##
    def cca(self,X,y):
        """Fit CCA on (X, y) and return the model plus derived quantities.

        Returns the tuple (cca_model, X_c, y_c, loading_x, loading_y,
        weight_x, weight_y, explain_x, explain_y, coef, y_predict).
        """
        cca_model = CCA(n_components=self.n_comp,scale=False)
        cca_model.fit(X, y)
        X_c, y_c = cca_model.transform(X, y)
        y_predict=cca_model.predict(X,copy=True)
        # R2=cca_model.score(X, y, sample_weight=None)
        # loadings: correlation of each original variable with its canonical variate
        loading_x=cca_model.x_loadings_
        loading_y=cca_model.y_loadings_
        # weights: coefficients of the linear combinations; may be used to
        # project the reduced variables back into the original space.
        # NOTE: if the scale parameter were set to True, the weights would be
        # those obtained from the standardized data.
        weight_x=cca_model.x_weights_
        weight_y=cca_model.y_weights_
        # weight_orig=np.dot(y_c[0,:],weight_y.T)
        # coef: coefficients mapping X to y; can be used to predict y
        # (np.dot, i.e. matrix multiplication).
        coef=cca_model.coef_
        # In this algorithm rotations == weights.
        # rotation_y=cca_model.y_rotations_
        # rotation_x=cca_model.x_rotations_
        # score(X,y) returns R squared
        # Covariance explained by each canonical variate within its own set.
        # NOTE(review): squared eigenvalues of the covariance of the canonical
        # scores are used as the explained-covariance proxy -- confirm this is
        # the intended definition.
        cov_x= np.cov(X_c.T)
        cov_y= np.cov(y_c.T)
        # np.diag(cov_x)
        eigvals_x,_ = np.linalg.eig(cov_x)
        eigvals_y,_ = np.linalg.eig(cov_y)
        explain_x=pow(eigvals_x,2)/np.sum(pow(eigvals_x,2))
        explain_y=pow(eigvals_y,2)/np.sum(pow(eigvals_y,2))
        # np.sort(explain)
        return (cca_model,\
                X_c,y_c,\
                loading_x,loading_y,\
                weight_x,weight_y,\
                explain_x,explain_y,\
                coef,y_predict)
#==================================
if __name__=="__main__":
    # Demo: build two noisy 4-D views of two shared latent variables, run CCA,
    # and compute the canonical correlations by hand.
    # from sklearn.datasets import make_multilabel_classification
    import lc_cca as lcca
    n = 1000
    # 2 latents vars:
    l1 = np.random.normal(size=n)
    l2 = np.random.normal(size=n)
    latents = np.array([l1, l1, l2, l2]).T
    X = latents + np.random.normal(size=4 * n).reshape((n, 4))
    y = latents + np.random.normal(size=4 * n).reshape((n, 4))
    # y=np.random.permutation(y)
    # n_sample,n_features_x,n_features_y =500,4,2
    # np.random.seed(0)
    # coef = np.random.randn(n_features_x,n_features_y)
    # #coef = np.array([[.8],[.1],[0.2]])
    # X = np.random.random([n_sample, n_features_x])
    # y = np.dot(X, coef)
    #
    myCCA=lcca.LcCCA()
    (cca_model,X_c,y_c,\
     loading_x,loading_y,\
     weight_x,weight_y,\
     explain_x,explain_y,\
     coef,y_predict)=myCCA.cca(X,y)
    # np.corrcoef(y_predict[:,0],y[:,0])
    # Canonical correlations: correlation between paired canonical scores.
    r=[]
    for i in range(X_c.shape[1]):
        r.append(np.corrcoef(X_c[:,i],y_c[:,i])[0,1])
    #
    # Project the raw data onto the canonical weights manually.
    bb=np.dot(y,weight_y)
    aa=np.dot(X,weight_x)
dongmengshi/easylearn | eslearn/GUI/__init__.py | name = "easylearn" |
dongmengshi/easylearn | eslearn/machine_learning/test/gcn_test_.py | import torch
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree
from torch_geometric.nn import GCNConv
class GCNConv(MessagePassing):
    """Graph convolution layer (Kipf & Welling style GCN) built on
    PyTorch-Geometric's MessagePassing base class, using symmetric
    degree normalization D^{-1/2} A D^{-1/2}."""
    def __init__(self, in_channels, out_channels):
        super(GCNConv, self).__init__(aggr='add')  # "Add" aggregation.
        self.lin = torch.nn.Linear(in_channels, out_channels)
    def forward(self, x, edge_index):
        # x has shape [N, in_channels]
        # edge_index has shape [2, E]
        # Step 1: Add self-loops to the adjacency matrix.
        edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        # Step 2: Linearly transform node feature matrix.
        x = self.lin(x)
        # Step 3-5: Start propagating messages.
        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)
    def message(self, x_j, edge_index, size):
        # x_j has shape [E, out_channels]
        # edge_index has shape [2, E]
        # Step 3: Normalize node features by source/target degrees.
        row, col = edge_index
        deg = degree(row, size[0], dtype=x_j.dtype)  # [N, ]
        deg_inv_sqrt = deg.pow(-0.5)  # [N, ]
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
        return norm.view(-1, 1) * x_j
    def update(self, aggr_out):
        # aggr_out has shape [N, out_channels]
        # Step 5: Return new node embeddings.
        return aggr_out
# Smoke test: tiny 3-node chain graph (edges 0-1 and 1-2 in both directions).
conv = GCNConv(1, 32)
edge_index = torch.tensor([[0, 1, 1, 2],[1, 0, 2, 1]], dtype=torch.long)
x = torch.tensor([[-1], [0], [1]], dtype=torch.float)
y = torch.tensor([[-1], [1], [1]], dtype=torch.float)  # y is unused below
# data = Data(x=x, edge_index=edge_index, y=y)
x = conv(x, edge_index)
print(x)
dongmengshi/easylearn | eslearn/utils/lc_read_dicominfo_grouplevel.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 15:30:50 2019
@author: lenovo
"""
import sys
import os
homedir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(homedir)
from os import listdir
from os.path import join
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
from numpy import column_stack, row_stack, savetxt
from Utils.lc_read_dicominfo_base import readdcmseries
def run(subjpath, get_seriesinfo):
    """Collect DICOM series information for one subject.

    Args:
        subjpath: one subject's folder path (containing several series
            subfolders), each read with `readdcmseries`.
        get_seriesinfo: passed through to `readdcmseries`.

    Returns:
        Tuple of five parallel lists: spacing, machine, seriesname
        (series folder paths), shape, errorseries.
    """
    print(f'running {subjpath}...')
    series_dirs = [join(subjpath, entry) for entry in listdir(subjpath)]
    spacing = []
    machine = []
    seriesname = []
    shape = []
    errorseries = []
    for series_dir in series_dirs:
        _, sp, mach, _, shp, err = readdcmseries(series_dir, get_seriesinfo)
        spacing.append(sp)
        machine.append(mach)
        shape.append(shp)
        errorseries.append(err)
        seriesname.append(series_dir)
    return spacing, machine, seriesname, shape, errorseries
def run_all(rootdir, get_seriesinfo, n_process):
    """Collect DICOM info for every subject folder under `rootdir` concurrently.

    Args:
        rootdir: directory containing one subfolder per subject.
        get_seriesinfo: passed through to `run` / `readdcmseries`.
        n_process: requested number of worker threads (capped below cpu_count).

    Returns:
        List with one `run(...)` result tuple per subject, in input order.
    """
    allsubj = listdir(rootdir)
    allsubjpath = [join(rootdir, subj) for subj in allsubj]
    cores = multiprocessing.cpu_count()
    if n_process > cores:
        n_process = cores - 1
    with ThreadPoolExecutor(n_process) as executor:
        # BUGFIX: the original called .result() immediately after each
        # submit(), which blocked on every job in turn and serialized the
        # whole pool. Submit all jobs first, then collect results in order.
        futures = [executor.submit(run, subjpath, get_seriesinfo)
                   for subjpath in allsubjpath]
        dcminfo = [future.result() for future in futures]
    return dcminfo
def main():
    """CLI entry point: gather DICOM info for all subjects and save to text.

    Example:
        python lc_readcminfo_forradiomics.py -rd D:\\dms-lymph-nodes\\test -gs True -np 3 -op D:/dms-lymph-nodes/dcminfo_finish1
    """
    # input
    import argparse

    def str2bool(value):
        # BUGFIX: the original used type=str for -gs, so any non-empty
        # string — including "False" — behaved as True downstream.
        return str(value).strip().lower() in ('true', '1', 'y', 'yes')

    parser = argparse.ArgumentParser()
    parser.add_argument('-rd', '--rootdir', type=str, help='所有被试根目录')
    parser.add_argument('-gs', '--get_seriesinfo', type=str2bool, help='是否提取dcm series信息')
    parser.add_argument('-np', '--n_process', type=int, help='多核运行数')
    parser.add_argument('-op', '--outpath', type=str, help='保存结果的路径')
    args = parser.parse_args()
    # run
    dcminfo = run_all(rootdir = args.rootdir, get_seriesinfo=args.get_seriesinfo, n_process=args.n_process)
    dcminfo_list = [list(di) for di in dcminfo]
    # BUGFIX: `run` returns (spacing, machine, seriesname, shape, errorseries),
    # so the error-series list is at index 4; index 3 picked the shapes.
    errorseries = [dl[4] for dl in dcminfo_list]  # dimension error series
    dcminfo_df = [column_stack((dl[2], dl[1], dl[0])) for dl in dcminfo_list]
    dcminfo_alldf = row_stack(dcminfo_df)
    # save
    savetxt(join(args.outpath, 'errorseries.txt'), errorseries, fmt='%s', delimiter=' ')
    savetxt(join(args.outpath, 'dcminfo.txt'),
            dcminfo_alldf, fmt='%s', delimiter=',',
            header='series_name, machine, spacing_x, spacing_y, slice_thickness',
            comments='')
if __name__ == '__main__':
    # main()
    # for debug: call the collector directly instead of the CLI entry point
    rootdir = r'D:\dms'
    dcminfo = run_all(rootdir, True, 4)
#    dcminfo = run_all(r'D:\dms-lymph-nodes\test', True, 2)
#    outpath = r'D:\dms-lymph-nodes'
#
#    dcminfo_list = [list(di) for di in dcminfo]
#    errorseries = [dl[3] for dl in dcminfo_list]  # dimension error series
#    dcminfo_df = [column_stack((dl[2], dl[1], dl[0]))
#                  for dl in dcminfo_list]
#    dcminfo_alldf = row_stack(dcminfo_df)
#
#    # save
#    savetxt(join(outpath, 'errorseries.txt'), errorseries, fmt='%s', delimiter=' ')
#    savetxt(join(outpath, 'dcminfo.txt'),
#            dcminfo_alldf, fmt='%s', delimiter=',',
#            header='series_name, machine, spacing_x, spacing_y, slice_thickness',
#            comments='')
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_elasticNet.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 18 18:27:38 2018
ElasticNet
Minimizes the objective function:
1 / (2 * n_samples) * ||y_train - Xw||^2_2+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
l1_ratio = 1 is the lasso penalty
a * L1 + b * L2
where:
alpha = a + b and l1_ratio = a / (a + b)
@author: lenovo
"""
# =============================================================================
from sklearn.linear_model import ElasticNet
from sklearn.datasets import make_regression
from sklearn import preprocessing
import numpy as np
class ElasticNet():
    """Elastic-net regression helper around sklearn's estimator.

    sklearn objective:
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    """
    def __init__(sel,
                 x_train=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic\x_train342.npy',
                 y_train=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic\y_train342.npy',
                 x_val=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic\x_test38.npy',
                 y_val=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic\y_test38.npy',
                 x_test=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic\x_test206.npy',
                 y_test=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic\y_test206.npy',
                 l1_ratio=0.5,
                 alpha=0.1):
        # Default .npy file locations plus the elastic-net hyperparameters.
        sel.l1_ratio=l1_ratio
        sel.alpha=alpha
        sel.x_train=x_train
        sel.y_train=y_train
        sel.x_val=x_val
        sel.y_val=y_val
        sel.x_test=x_test
        sel.y_test=y_test
    def load_data_and_label(sel,x,y,label_col):
        """Load features from .npy file `x` and one label column from `y`."""
        x=np.load(x)
        y=np.load(y)[:,label_col]
        return x,y
    def normalization(sel,data):
        """Z-score per subject (row); because the normalization level is the
        subject, the matrix is transposed before and after StandardScaler."""
        scaler = preprocessing.StandardScaler().fit(data.T)
        z_data = scaler.transform(data.T).T
        return z_data
    def train(sel,x_train,y_train):
        """Fit the elastic-net model; stores regr, coef and intersept on sel."""
        # BUGFIX: this class shadows the `ElasticNet` imported from
        # sklearn.linear_model at module level, so the original call
        # `ElasticNet(random_state=0, ...)` resolved to this wrapper class
        # itself and raised TypeError. Re-import the sklearn estimator
        # locally under an alias.
        from sklearn.linear_model import ElasticNet as _SkElasticNet
        sel.regr = _SkElasticNet(random_state=0,l1_ratio=sel.l1_ratio,alpha=sel.alpha)
        sel.regr.fit(x_train,y_train)
        sel.coef=sel.regr.coef_
        sel.intersept=sel.regr.intercept_
        return sel
    def test(sel,x_test):
        """Predict with the fitted model; stores predictions on sel.pred."""
        sel.pred=sel.regr.predict(x_test)
        return sel
if __name__=='__main__':
    # Smoke test: fit on a tiny toy problem, then predict on the train set.
    x_train,y_train=[[0,0], [1, 1], [2, 2]], [0, 1, 2]
    import lc_elasticNet as EN
    sel=EN.ElasticNet()
    sel.train(x_train,y_train)
    sel.test(x_train)
|
dongmengshi/easylearn | test/matlab_engine.py | import matlab.engine
# Start a MATLAB session and call the project's atlas-mapping MATLAB
# function, requesting its three outputs (nargout=3).
eng = matlab.engine.start_matlab()
[uni_label_of_from_atalas, max_prop, matching_idx] = eng.lc_mapping_brain_atalas_highlevel(nargout=3)
print(matching_idx)
dongmengshi/easylearn | eslearn/visualization/lc_polarplot.py | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 18:39:53 2018
polar bar
@author: lenovo
"""
from pyecharts import Polar
# Angular category labels for the series below.
# NOTE(review): radius2 has 8 labels matching the 8 data values per series,
# but radius1 has only 4 — confirm the MDD/BD/SZ label lists are intended.
radius1 =['Left orbital frontal cortex',
          'Right orbital frontal cortex',
          'Left dorsolateral prefrontal cortex',
          'Left angular gyrus']
radius2 =['Right primary somatosensory cortex',
          'Left primary somatosensory cortex' ,
          'Right supplementary motor area',
          'Left primary auditory cortex',
          'Left thalamus',
          'Right visual association cortex',
          'Right primary association visual cortex',
          'Left visual association cortex'
         ]
# Polar angular-bar chart, one (non-stacked) series per diagnostic group.
polar =Polar("极坐标系-堆叠柱状图示例", width=1200, height=600)
polar.add("HC", [0.9803,0.9691, 0.9579,0.8857, 0.7264,0.9935,1.1036,0.9253],
          radius_data=radius2, type='barAngle', is_stack=False)
polar.add("MDD", [0.9589, 0.9398, 0.9183, 0.8551, 0.7138, 0.9678, 1.0596, 0.9105],
          radius_data=radius1, type='barAngle', is_stack=False)
polar.add("BD", [0.9414, 0.9218, 0.9307, 0.8490, 0.6764 , 0.9652 , 1.0598 , 0.8775],
          radius_data=radius1, type='barAngle', is_stack=False)
polar.add("SZ", [ 0.9028, 0.8883, 0.8804 , 0.8272 , 0.6668 , 0.9072 , 1.0102, 0.8547],
          radius_data=radius1, type='barAngle', is_stack=False)
polar.show_config()
polar.render()  # writes render.html in the working directory
dongmengshi/easylearn | eslearn/feature_engineering/feature_selection/el_rfe.py | <filename>eslearn/feature_engineering/feature_selection/el_rfe.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 16:06:48 2018
Recursive feature elimination (RFE) and RFE-Cross-validation(nested)
@author: <NAME>
"""
from sklearn.feature_selection import RFE
#from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import RFECV
from sklearn.svm import SVC
import numpy as np
def rfe(x, y, step=0.1, n_features_to_select=10,
        permutation=0):
    """Single (non-cross-validated) recursive feature elimination.

    Fits an RFE selector with a linear SVC and returns the feature
    ranking array (1 = selected; larger = eliminated earlier).
    `permutation` is accepted for interface compatibility but unused.
    """
    selector = RFE(SVC(kernel="linear"),
                   step=step,
                   n_features_to_select=n_features_to_select)
    return selector.fit(x, y).ranking_
def rfeCV(x, y, step, cv, n_jobs,
          permutation=0):
    """Cross-validated recursive feature elimination ("nested" RFE).

    Fits RFECV with a linear SVC, then scatters the fitted estimator's
    coefficients back into the full feature space (zeros for features
    that were eliminated).

    Returns:
        (selector, weight): the fitted RFECV object and an array of
        shape [n_coef_rows, n_features] with the SVC weights.
    """
    _, n_features = x.shape
    svc = SVC(kernel="linear")  # TODO: support other linear classifiers
    selector = RFECV(svc, step=step, cv=cv, n_jobs=n_jobs).fit(x, y)
    support = selector.support_
    # coef_ has one row per class pair, so it is 2-D for multi-class problems
    coef = selector.estimator_.coef_
    weight = np.zeros([coef.shape[0], n_features])
    weight[:, support] = coef
    return selector, weight
##
if __name__ == '__main__':
    """example"""
    from sklearn import datasets
    # Synthetic binary classification problem: 100 features, 50 informative.
    x, y = datasets.make_classification(n_samples=200, n_classes=2,
                                        n_informative=50, n_redundant=3,
                                        n_features=100, random_state=1)
    selector, weight = rfeCV(x, y, step=0.1, cv=3, n_jobs=1,
                             permutation=0)
    # in-sample prediction with the fitted selector
    y_pred = selector.predict(x)
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_anova_svm.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 16:43:03 2018
anova for feature selection and svm for classfication
@author: <NAME> (0.7279411764705882, 0.66, 0.79, 0.8419117647058824)
(0.7132352941176471, 0.66, 0.76, 0.8380190311418685)
"""
import sys
import os
root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(root)
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.metrics import classification_report
class AnovaSvm():
    """ANOVA-style univariate feature selection (SelectKBest) followed by
    a linear SVM classifier, evaluated on a held-out split."""
    def __init__(sel):
        # number of features kept by SelectKBest and SVC hyperparameters
        sel.n_selectedFeatures = 6
        sel.kernel = 'linear'
        sel.class_weight = 'balanced'
        sel.random_state = 888
    def main_anova_svm(sel, x, y):
        """Split (x, y), select k best features, fit SVC, predict, report.

        Stores y_pred, decision values, SVM weights and the selected-feature
        mask as attributes of `sel` and returns `sel`.
        """
        x_train, x_test, y_train, y_test = train_test_split(
            x, y, random_state=sel.random_state)
        # ANOVA SVM-C
        # 1) anova filter, take k best ranked features
        # NOTE(review): f_regression is a regression score; for a
        # classification target f_classif is the usual choice — confirm.
        anova_filter = SelectKBest(f_regression, k=sel.n_selectedFeatures)
        # 2) built svm
        clf = svm.SVC(kernel=sel.kernel, class_weight=sel.class_weight)
        anova_svm = make_pipeline(anova_filter, clf)
        anova_svm.fit(x_train, y_train)
        # 3) predict
        sel.y_pred = anova_svm.predict(x_test)
        sel.decision = anova_svm.decision_function(x_test)
        sel.weight = clf.coef_
        sel.selected_features = anova_filter.get_support()
        # eval: accuracy = fraction of zero residuals (labels are numeric)
        ac = y_test - sel.y_pred
        print('Accuracy={:.2f}\n'.format(sum(ac == 0) / len(ac)))
        print(classification_report(y_test, sel.y_pred))
        return sel
if __name__ == '__main__':
    from Utils.lc_read_write_Mat import read_mat, write_mat
    import pandas as pd
    import numpy as np
    sel = AnovaSvm()
# =============================================================================
#     # 生成数据
#     x, y = samples_generator.make_classification(
#         n_samples=200,n_features=20, n_informative=3, n_redundant=0, n_classes=3,
#         n_clusters_per_class=2)
# =============================================================================
    # Load static-FC features and diagnosis labels from local files.
    x = read_mat(
        r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic\staticFC.mat', dataset_name=None)
    y = pd.read_excel(
        r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic\folder_label.xlsx')
    y = y['诊断'].values
    # Keep only the two diagnostic groups of interest (labels 1 and 4),
    # stacking their samples in group order.
    order = [1, 4]
    y1 = np.hstack([y[y == order[0]], y[y == order[1]]])
    x = np.vstack([x[y == order[0], :], x[y == order[1], :]])
    y = y1
    print(sum(y == order[0]), sum(y == order[1]))
#    from sklearn.preprocessing import OneHotEncoder
#    #哑编码,对IRIS数据集的目标值,返回值为哑编码后的数据
#    enc=OneHotEncoder()
#    y=enc.fit(y1.reshape(-1,1))
    results = sel.main_anova_svm(x, y)
    results = results.__dict__
|
dongmengshi/easylearn | eslearn/visualization/lc_circleBarPlot.py | <filename>eslearn/visualization/lc_circleBarPlot.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 3 22:17:07 2018
@author: lenovo
"""
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_file
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
结核分枝杆菌, 800, 5, 2, negative
沙门氏菌, 10, 0.8, 0.09, negative
变形杆菌, 3, 0.1, 0.1, negative
肺炎克雷伯氏菌, 850, 1.2, 1, negative
布鲁氏菌, 1, 2, 0.02, negative
铜绿假单胞菌, 850, 2, 0.4, negative
大肠杆菌, 100, 0.4, 0.1, negative
产气杆菌, 870, 1, 1.6, negative
白色葡萄球菌, 0.007, 0.1, 0.001, positive
溶血性链球菌, 0.001, 14, 10, positive
草绿色链球菌, 0.005, 10, 40, positive
肺炎双球菌, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([# 配置中间标签名称与颜色
("盘尼西林", "#0d3362"),
("链霉素", "#c64737"),
("新霉素", "black"),
])
gram_color = {
"positive": "#aeaeb8",
"negative": "#e69584",
}
# 读取数据
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
# 整体配置
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=(-420, 420), y_range=(-420, 420),
min_border=90, outline_line_color="black",
background_fill_color="#f0e1d2")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
# annular wedges
angles = np.pi / 2 - big_angle / 2 - df.index.to_series() * big_angle #计算角度
colors = [gram_color[gram] for gram in df.gram] # 配置颜色
p.annular_wedge(
0, 0, inner_radius, outer_radius, -big_angle + angles, angles, color=colors,
)
# small wedges
p.annular_wedge(0, 0, inner_radius, rad(df.penicillin),
-big_angle + angles + 5 * small_angle, -big_angle + angles + 6 * small_angle,
color=drug_color['盘尼西林'])
p.annular_wedge(0, 0, inner_radius, rad(df.streptomycin),
-big_angle + angles + 3 * small_angle, -big_angle + angles + 4 * small_angle,
color=drug_color['链霉素'])
p.annular_wedge(0, 0, inner_radius, rad(df.neomycin),
-big_angle + angles + 1 * small_angle, -big_angle + angles + 2 * small_angle,
color=drug_color['新霉素'])
# 绘制大圆和标签
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(0, 0, radius=radii, fill_color=None, line_color="white")
p.text(0, radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# 半径
p.annular_wedge(0, 0, inner_radius - 10, outer_radius + 10,
-big_angle + angles, -big_angle + angles, color="black")
# 细菌标签
xr = radii[0] * np.cos(np.array(-big_angle / 2 + angles))
yr = radii[0] * np.sin(np.array(-big_angle / 2 + angles))
label_angle = np.array(-big_angle / 2 + angles)
label_angle[label_angle < -np.pi / 2] += np.pi # easier to read labels on the left side
# 绘制各个细菌的名字
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# 绘制圆形,其中数字分别为 x 轴与 y 轴标签
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
# 绘制文字
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
# 绘制矩形,中间标签部分。其中 -40,-40,-40 为三个矩形的 x 轴坐标。18,0,-18 为三个矩形的 y 轴坐标
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
# 配置中间标签文字、文字大小、文字对齐方式
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color),
text_font_size="9pt", text_align="left", text_baseline="middle")
# show
output_file("burtin.html", title="burtin.py example")
show(p) |
dongmengshi/easylearn | eslearn/GUI/lc_ui2py_for_data_loading.py | import os
# Convert the Qt Designer .ui file into a Python module with pyuic5.
# NOTE(review): hard-coded absolute path — only works on the author's machine.
cmd_str = r'pyuic5 -o easylearn_data_loading_gui.py D:\My_Codes\easylearn-fmri\eslearn\gui_test\easylearn_data_loading_gui.ui'
os.system(cmd_str)
|
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_permutation_svc_multiprocessing.py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 14:53:28 2018
@author: lenovo
"""
import multiprocessing
import time
#from scipy import io
import sys
sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\utils')
# import module
from lc_read_write_Mat import write_mat
import time,os
import numpy as np
import lc_svc_rfe_cv as lsvc
##
class Perm_mvpa():
    """Permutation test for SVC-RFE-CV: run the model N_perm times with
    shuffled labels, writing each permutation's results to a .mat file."""
    # # initial parameters
    def __init__(self,\
                 model=lsvc.svc_rfe_cv(permutation=1,num_jobs=1),\
                 N_perm=20,\
                 n_processess=10,\
                 fileName=r'D:\myCodes\LC_MVPA\Python\MVPA_Python\perm',\
                 k=5):
        # NOTE(review): the default `model` is constructed once at class
        # definition time and shared by every instance — confirm intended.
        self.model=model
        self.N_perm=N_perm
        self.n_processess=n_processess
        self.fileName=fileName  # output directory for per-permutation .mat files
        self.k=k # k fold CV of model
    ##
    def perm_mvpa(self,X,y):
        """Dispatch N_perm label-shuffled runs to a process pool and wait."""
        s=time.time()
        pool = multiprocessing.Pool(processes=self.n_processess)
        for n_perm in range(self.N_perm):
            # NOTE(review): the AsyncResult objects are discarded, so any
            # exception raised in a worker is silently lost.
            pool.apply_async(self.run_svc,\
                             (X,y,n_perm))
        #
        print ('Waiting...')
        pool.close()
        pool.join()
        e=time.time()
        print('Done!\n running time is {:.1f}'.format(e-s))
    #
    def run_svc(self,X,y,n_perm):
        """One permutation: shuffle labels, fit/predict, save results to .mat."""
#        print('we have processing {} permutation'.format(n_perm))
        y_rand=np.random.permutation(y)
        predict,dec,y_sorted,weight=\
        self.model.main_svc_rfe_cv(X,y_rand,self.k)
        # write mat
        write_mat(os.path.join(self.fileName,str(n_perm)),\
                  dataset_name=['predict','dec','y_sorted','weight'],\
                  dataset=[predict,dec,y_sorted,weight])
###
if __name__=='__main__':
    import lc_permutation_svc_multiprocessing as Perm
    perm=Perm.Perm_mvpa()
    # NOTE(review): X and y are not defined anywhere in this module — this
    # demo only runs in an interactive session where they already exist.
    perm.perm_mvpa(X,y)
#    perm.run_svc(X,y,1)
dongmengshi/easylearn | eslearn/statistical analysis/lc_binomialtest.py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 10 12:39:48 2019
This script is used to perform binomial test for classification performances, e.g., accuracy, sensitivity, specificity.
@author: lenovo
"""
from scipy.special import comb
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def lc_binomialtest(n, k, p1, p2):
    """Binomial test for a classification performance score.

    Args:
        n (int): number of test samples.
        k (int): observed number of correct predictions.
        p1 (float): per-trial success probability under the null (e.g. 0.5).
        p2 (float): per-trial failure probability under the null (e.g. 0.5).

    Returns:
        p (float): one-sided p value, P(X >= k) under the null.
        sum_prob (float): cumulative probability P(X < k).
        prob (list of float): full null PMF over 0..n correct predictions.
        randk (int): smallest count at which the CDF reaches 0.95
            (chance-level threshold), or 0 if not reached within 0..k-1.
    """
    sum_prob = 0
    prob = []
    randk = 0
    for i in range(k):
        sum_prob += comb(n, i) * pow(p1, i) * pow(p2, (n - i))
        if (sum_prob >= 0.95) and (not randk):
            randk = i + 1
            print(f'sum_prob in {randk} is {sum_prob}')
    # guard against floating-point overshoot above 1
    p = 1 - sum_prob if sum_prob <= 1 else 0
    # BUGFIX: the PMF has n + 1 outcomes (0..n correct answers); the
    # original range(n) dropped the i == n term, so the probabilities did
    # not sum to 1 and prob[k] raised IndexError when k == n.
    for i in range(n + 1):
        prob.append(comb(n, i) * pow(p1, i) * pow(p2, (n - i)))
    return p, sum_prob, prob, randk
def lc_plot(prob, k, p, titlename):
    """Plot the null binomial PMF and mark the observed count k.

    Args:
        prob: PMF values over possible numbers of correct predictions.
        k: observed number of correct predictions (used as index into prob).
        p: p value to show in the title (printed as '< 0.001' if tiny).
        titlename: base title text.
    """
    plt.plot(prob)
    if p < 0.001:
        plt.title(titlename + f'\np < 0.001',fontsize=10)
    else:
        plt.title(titlename + '\n' + 'p = ' + '%.3f' %p, fontsize=10)
    # dot at the observed count plus a dashed vertical guide line
    plt.plot([k,k],[prob[k],prob[k]],'.', markersize=10)
    plt.plot([k,k],[0,0.06],'--', markersize=15)
#    plt.title(titlename,fontsize=10)
    plt.xlabel('Number of correct predictions',fontsize=8)
    plt.ylabel('Probability', fontsize=8)
    plt.show()
if __name__ == "__main__":
n = 85
acc = 0.78
k = np.int32(n * acc)
print(k)
p, sum_prob, prob, randk = lc_binomialtest(n, k, 0.5, 0.5)
print(p)
lc_plot(prob, k, p, titlename = f'Testing data\n (Sample size = {n}, Accuracy = {acc})')
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
# wspace=0.5, hspace=0.5)
# p1, p2 = 0.5, 0.5
# n, k = 149, 119
# p, sum_prob, prob, randk = lc_binomialtest(140, 119, p1, p2)
# plt.subplot(2, 5, 1)
# lc_plot(prob, k, p, titlename = f'Training set (n = 149)')
# n, k = 61, 49
# p, sum_prob, prob, randk = lc_binomialtest(n, k, p1, p2)
# plt.subplot(2, 5, 2)
# lc_plot(prob, k, p, titlename = f'Test set (n = 61)')
# n, k = 6, 4
# p, sum_prob, prob, randk = lc_binomialtest(n, k, p1, p2)
# plt.subplot(2, 5, 3)
# lc_plot(prob, k, p, titlename = f'GE (n = 6)')
# n, k = 24, 18
# p, sum_prob, prob, randk = lc_binomialtest(n, k, p1, p2)
# plt.subplot(2, 5, 4)
# lc_plot(prob, k, p, titlename = f'Philips (n = 24)')
# n, k = 11, 10
# p, sum_prob, prob, randk = lc_binomialtest(n, k, p1, p2)
# plt.subplot(2, 5, 5)
# lc_plot(prob, k, p, titlename = f'Siemens (n = 11)')
# n, k = 20,16
# p, sum_prob, prob, randk = lc_binomialtest(n, k, p1, p2)
# plt.subplot(2, 5, 6)
# lc_plot(prob, k, p, titlename = f'Toshiba (n = 20)')
# n, k = 4, 3
# p, sum_prob, prob, randk = lc_binomialtest(n, k, p1, p2)
# plt.subplot(2, 5, 7)
# lc_plot(prob, k, p, titlename = f'1 mm (n = 4)')
# n, k = 3, 2
# p, sum_prob, prob, randk = lc_binomialtest(n, k, p1, p2)
# plt.subplot(2, 5, 8)
# lc_plot(prob, k, p, titlename = f'2 mm (n = 3)')
# n, k = 21, 15
# p, sum_prob, prob, randk = lc_binomialtest(n, k, p1, p2)
# plt.subplot(2, 5, 9)
# lc_plot(prob, k, p, titlename = f'5 mm (n = 21)')
# n, k = 33, 28
# p, sum_prob, prob, randk = lc_binomialtest(n, k, p1, p2)
# plt.subplot(2, 5, 10)
# lc_plot(prob, k, p, titlename = f'8 mm (n = 33)')
# plt.savefig(r'D:\workstation_b\Fundation\stat_test.tif', dpi=1200, bbox_inches='tight')
# plt.show()
|
dongmengshi/easylearn | eslearn/visualization/tmp.py | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 2 20:39:06 2019
@author: lenovo
"""
from scipy import io
# Load one Fisher-z correlation matrix and render it as a heatmap.
x = io.loadmat(r'D:\WorkStation_2018\WorkStation_dimensionPLS\Data\ROISignals\ROICorrelation_FisherZ_ROISignal_00003.mat')
x = x['ROICorrelation_FisherZ']
# NOTE(review): plt (matplotlib.pyplot) and sns (seaborn) are used below
# but never imported in this file — it raises NameError as-is.
f, (ax) = plt.subplots(figsize=(20,20))
sns.heatmap(x,
            ax=ax,
            annot=False,
            annot_kws={'size':9,'weight':'normal', 'color':'k'},fmt='.3f',
            cmap='RdBu_r',
            center=0,
            square=True,
            linewidths = False,
            linecolor= [0.6,0.6,0.6],
            mask=None,
            vmin=-1,
            vmax=1)
#plt.subplots_adjust(top = 1, bottom = 0.5, right = 1, left = 0.5, hspace = 0, wspace = 0)
#plt.savefig(r'D:\workstation_b\彦鸽姐\20190927\aa.tiff',
#            transparent=True, dpi=300, pad_inches = 0)
dongmengshi/easylearn | eslearn/statistical analysis/lc_ttest2.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 18:55:25 2018
t test
method='independent'
OR
method='related'
@author: lenovo
"""
from scipy.stats import ttest_ind
from scipy.stats import ttest_rel
import numpy as np
#
def ttest2(a, b, method='independent'):
    """Two-sample t test.

    Args:
        a, b: array-like samples (must be paired/equal length for 'related').
        method: 'independent' (default) for an unpaired test (ttest_ind)
            or 'related' for a paired test (ttest_rel); NaNs are omitted.

    Returns:
        (t, p): t statistic and two-sided p value.

    Raises:
        ValueError: if `method` is neither 'independent' nor 'related'.
    """
    if method == 'independent':
        t, p = ttest_ind(a, b, axis=0, nan_policy='omit')
    elif method == 'related':
        t, p = ttest_rel(a, b, axis=0, nan_policy='omit')
    else:
        # BUGFIX: the original printed a message and then raised
        # UnboundLocalError on `return (t, p)`; fail loudly instead.
        raise ValueError(
            "method must be 'independent' or 'related', got %r" % (method,))
    return (t, p)
|
dongmengshi/easylearn | eslearn/utils/lc_screening_subject_folder_V2.py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 23 20:27:50 2018
筛选大表的item和subjects
最后得到诊断为[1,2,3,4],扫描质量良好,而且不重复的被试
inputs:
file_all:大表
basicIndex_iloc=[0,11,19,20,21,22,23,27,28,29,30]:基本信息列
basicIndex_str=['学历(年)','中国人利手量表']:基本信息名
hamd17Index_iloc=np.arange(104,126,1),
hamaIndex_iloc=np.arange(126,141,1),
yarsIndex_iloc=np.arange(141,153,1),
bprsIndex_iloc=np.arange(153,177,1)
diagnosis_column_name='诊断':诊断的列名
quality_column_name='Resting_quality'
note1_column_name='诊断备注'
note2_column_name='备注'
note1_keyword='复扫':重复备注文字
outputs:
folder:筛选出来的ID
basic:筛选出来的基本信息
hamd17,hamm,yars,bprs:筛选出来的量表
logicIndex_scale:量表的逻辑index
logicIndex_repeat:重复量表的index
@author: lenovo
"""
# ===============================================
import pandas as pd
#import re
import os
import numpy as np
class select_SubjID():
    """Screen subjects and scale items from the master clinical spreadsheet.

    Keeps subjects whose diagnosis is in `diagnosis_label`, whose resting
    scan quality equals `quality_keyword`, and who are not flagged as
    repeated scans ('复扫') in either note column; then subsets the basic
    info and the HAMD-17 / HAMA / YMRS / BPRS scales to those subjects.
    """
    # initial parameters
    def __init__(self,
                 file_all=r'..\大表.xlsx',
                 basicIndex_iloc=[0, 11, 19, 20, 21, 22, 23, 27, 28, 29, 30],
                 basicIndex_str=['学历(年)', '中国人利手量表'],
                 hamd17Index_iloc=np.arange(104, 126, 1),
                 hamaIndex_iloc=np.arange(126, 141, 1),
                 yarsIndex_iloc=np.arange(141, 153, 1),
                 bprsIndex_iloc=np.arange(153, 177, 1),
                 diagnosis_column_name='诊断',
                 diagnosis_label=[1, 2, 3, 4],
                 quality_column_name='Resting_quality',
                 quality_keyword='Y',
                 note1_column_name='诊断备注',
                 note1_keyword='复扫',
                 note2_column_name='备注',
                 note2_keyword='复扫'):
        # ======================================
        # store all screening parameters on the instance
        self.file_all = file_all
        self.basicIndex_iloc = basicIndex_iloc
        self.basicIndex_str = basicIndex_str
        self.hamd17Index_iloc = hamd17Index_iloc
        self.hamaIndex_iloc = hamaIndex_iloc
        self.yarsIndex_iloc = yarsIndex_iloc
        self.bprsIndex_iloc = bprsIndex_iloc
        self.diagnosis_column_name = diagnosis_column_name
        self.diagnosis_label = diagnosis_label
        self.quality_column_name = quality_column_name
        self.quality_keyword = quality_keyword
        self.note1_column_name = note1_column_name
        self.note1_keyword = note1_keyword
        self.note2_column_name = note2_column_name
        self.note2_keyword = note2_keyword
        print('Initialized!\n')
    # ====================================================
    def loadExcel(self):
        # load all clinical data in excel
        self.allClinicalData = pd.read_excel(self.file_all)
        return self
    # ===============================================
    def select_item(self):
#        ini=np.int32(1)
        # select item columns: string indices go through .loc,
        # integer (int or np.int32) indices go through .iloc
        if isinstance(self.basicIndex_iloc[0], str):
            basic1 = self.allClinicalData.loc[:, self.basicIndex_iloc]
        elif isinstance(self.basicIndex_iloc[0], np.int32):
            basic1 = self.allClinicalData.iloc[:, self.basicIndex_iloc]
        elif isinstance(self.basicIndex_iloc[0], int):
            basic1 = self.allClinicalData.iloc[:, self.basicIndex_iloc]
        else:
            print('basicIndex 的输入有误!\n')
        basic2 = self.allClinicalData[self.basicIndex_str]
        self.basic = pd.concat([basic1, basic2], axis=1)
        if isinstance(self.hamd17Index_iloc[0], str):
            self.hamd17 = self.allClinicalData.loc[:, self.hamd17Index_iloc]
        elif isinstance(self.hamd17Index_iloc[0], int):
            self.hamd17 = self.allClinicalData.iloc[:, self.hamd17Index_iloc]
        elif isinstance(self.hamd17Index_iloc[0], np.int32):
            self.hamd17 = self.allClinicalData.iloc[:, self.hamd17Index_iloc]
        else:
            print('hamd17Index_iloc 的输入有误!\n')
        if isinstance(self.hamaIndex_iloc[0], str):
            self.hama = self.allClinicalData.loc[:, self.hamaIndex_iloc]
        elif isinstance(self.hamaIndex_iloc[0], int):
            self.hama = self.allClinicalData.iloc[:, self.hamaIndex_iloc]
        elif isinstance(self.hamaIndex_iloc[0], np.int32):
            self.hama = self.allClinicalData.iloc[:, self.hamaIndex_iloc]
        else:
            print('hamaIndex_iloc 的输入有误!\n')
        if isinstance(self.yarsIndex_iloc[0], str):
            self.yars = self.allClinicalData.loc[:, self.yarsIndex_iloc]
        elif isinstance(self.yarsIndex_iloc[0], int):
            self.yars = self.allClinicalData.iloc[:, self.yarsIndex_iloc]
        elif isinstance(self.yarsIndex_iloc[0], np.int32):
            self.yars = self.allClinicalData.iloc[:, self.yarsIndex_iloc]
        else:
            print('yarsIndex_iloc 的输入有误!\n')
        if isinstance(self.bprsIndex_iloc[0], str):
            self.bprs = self.allClinicalData.loc[:, self.bprsIndex_iloc]
        elif isinstance(self.bprsIndex_iloc[0], int):
            self.bprs = self.allClinicalData.iloc[:, self.bprsIndex_iloc]
        elif isinstance(self.bprsIndex_iloc[0], np.int32):
            self.bprs = self.allClinicalData.iloc[:, self.bprsIndex_iloc]
        else:
            print('bprsIndex_iloc 的输入有误!\n')
#        print('bprs1:{}\n'.format(self.bprs))
        return self
    # ===============================================
    def select_diagnosis(self):
        # diagnosis: OR together a boolean mask per accepted label
        diagnosis = self.allClinicalData[self.diagnosis_column_name]
        logicIndex_diagnosis = pd.DataFrame(
            np.ones([len(self.allClinicalData), 1]) == 0).iloc[:, 0]
        for i, dia in enumerate(self.diagnosis_label):
            dia = diagnosis.loc[:] == dia
            logicIndex_diagnosis = pd.Series(
                logicIndex_diagnosis.values | dia.values)
#        logicIndex_diagnosis=(diagnosis==1 )|(diagnosis==2 )|(diagnosis==3)|(diagnosis==4)
        self.ind_diagnosis = logicIndex_diagnosis.index[logicIndex_diagnosis]
        return self
    def select_quality(self):
        # filter subjects by resting-state scan quality
        logicIndex_quality = self.allClinicalData[self.quality_column_name] == self.quality_keyword
        self.ind_quality = logicIndex_quality.index[logicIndex_quality]
        return self
    def select_note1(self):
        # detect repeated scans via the first note column; missing notes
        # are filled with '未知' so str.contains does not produce NaN
#        note1_keyword=r'.*?复扫.*?'
        note1_column_name = self.allClinicalData[self.note1_column_name]
        note1_column_name = note1_column_name.where(
            note1_column_name.notnull(), '未知')
        index_repeat = note1_column_name.str.contains(self.note1_keyword)
        logicIndex_repeat = [bool(index_repeat_)
                             for index_repeat_ in index_repeat]
        logicIndex_notRepeat = [index_repeat_ ==
                                0 for index_repeat_ in index_repeat]
        self.ind_note1 = self.allClinicalData.index[logicIndex_repeat]
        self.ind_not_note1 = self.allClinicalData.index[logicIndex_notRepeat]
        return self
    def select_note2(self):
        # detect repeated scans via the second note column (same logic)
#        note1_keyword=r'.*?复扫.*?'
        note2_column_name = self.allClinicalData[self.note2_column_name]
        note2_column_name = note2_column_name.where(
            note2_column_name.notnull(), '未知')
        index_repeat = note2_column_name.str.contains(self.note2_keyword)
        logicIndex_repeat = [bool(index_repeat_)
                             for index_repeat_ in index_repeat]
        logicIndex_notRepeat = [index_repeat_ ==
                                0 for index_repeat_ in index_repeat]
        self.ind_note2 = self.allClinicalData.index[logicIndex_repeat]
        self.ind_not_note2 = self.allClinicalData.index[logicIndex_notRepeat]
        return self
    def select_intersection(self):
        # intersect the index sets via inner joins
        # diagnosis ∩ scan quality
        self.ind_selected = pd.DataFrame(
            self.ind_diagnosis).set_index(0).join(
            pd.DataFrame(
                self.ind_quality).set_index(0),
            how='inner')
        # diagnosis ∩ scan quality ∩ not-repeated (note1)
        self.ind_selected = pd.DataFrame(
            self.ind_selected).join(
            pd.DataFrame(
                self.ind_not_note1).set_index(0),
            how='inner')
        # diagnosis ∩ scan quality ∩ not-repeated (note1) ∩ not-repeated (note2)
        self.ind_selected = pd.DataFrame(
            self.ind_selected).join(
            pd.DataFrame(
                self.ind_not_note2).set_index(0),
            how='inner')
        #
        # select the folder IDs of the surviving subjects
        self.folder = self.allClinicalData['folder'].loc[self.ind_selected.index]
        return self
    # ===============================================
    def selcet_subscale_according_index(self):
        # subset each scale by the selected subject index
        self.basic = self.basic.loc[self.ind_selected.index]
        self.hamd17 = self.hamd17.loc[self.ind_selected.index]
        self.hama = self.hama.loc[self.ind_selected.index]
        self.yars = self.yars.loc[self.ind_selected.index]
        self.bprs = self.bprs.loc[self.ind_selected.index]
#        print('bprs2:{}\n'.format(self.bprs))
        return self
# =============================================================================
#     def dropnan(self,scale):
#         # 把量表中的空缺去除
#         nanIndex_scale=scale.isnull()
#         nanIndex_scale=np.sum(nanIndex_scale.values,axis=1)
#         logicIndex_scale=nanIndex_scale==0
#         return logicIndex_scale
#
#     def dropnan_all(self,scale):
#         logicIndex_scale=[dropnan(scale_) for scale_ in scale]
#         return logicIndex_scale
# =============================================================================
    # ===============================================
    def selMain(self):
        """Run the whole screening pipeline and return self with results."""
        print('Running...\n')
        # load
        self = self.loadExcel() # item
        self = self.select_item()
        # diagnosis_column_name
        self = self.select_diagnosis()
        # quality_column_name
        self = self.select_quality()
        # repeat
        self = self.select_note1()
        self = self.select_note2()
        # intersection
        self = self.select_intersection()
        # select
        self = self.selcet_subscale_according_index()
        # folder ID
#        folder=basic['folder']
#        # drop nan
#        logicIndex_scale=\
#        dropnan_all(scale=[hamd17,hama,yars,bprs])
#        print(self.folder)
        print('Done!\n')
        return self
# ===============================================
if __name__ == '__main__':
    print('==================================我是分割线====================================\n')
    # allFile=r"D:\WorkStation_2018\WorkStation_2018_08_Doctor_DynamicFC_Psychosis\Scales\8.30大表.xlsx"
    import selectSubjID_inScale_V2 as select
    current_path = os.getcwd()
    print('当前路径是:[{}]\n'.format(current_path))
    ini_path = os.path.join(current_path, '__ini__.txt')
    print('初始化参数位于:[{}]\n'.format(ini_path))
    print('正在读取初始化参数...\n')
    # read key=value pairs from __ini__.txt and inject them as local names
    ini = open(ini_path).read()
    ini = ini.strip('').split('\n')
    ini = [ini_ for ini_ in ini if ini_.strip()]
    name = locals()
    # NOTE(review): eval() executes arbitrary code from the config file —
    # only use with trusted __ini__.txt files (ast.literal_eval is safer).
    for ini_param in ini:
        name[ini_param.strip().split('=')[0]] = eval(
            ini_param.strip().split('=')[1])
        print('{}={}\n'.format(ini_param.strip().split('=')
                               [0], name[ini_param.strip().split('=')[0]]))
    print('初始化参数读取完成!\n')
    sel = select.select_SubjID(
        file_all=file_all,
        basicIndex_iloc=basicIndex_iloc,
        basicIndex_str=basicIndex_str,
        hamd17Index_iloc=hamd17Index_iloc,
        hamaIndex_iloc=hamaIndex_iloc,
        yarsIndex_iloc=yarsIndex_iloc,
        bprsIndex_iloc=bprsIndex_iloc,
        diagnosis_column_name=diagnosis_column_name,
        diagnosis_label=diagnosis_label,
        quality_column_name=quality_column_name,
        quality_keyword=quality_keyword,
        note1_column_name=note1_column_name,
        note1_keyword=note1_keyword,
        note2_column_name=note2_column_name,
        note2_keyword=note2_keyword)
    results = sel.selMain()
    # check results
    results_dict = results.__dict__
    print('所有结果为:{}\n'.format(list(results_dict.keys())))
    # save the screened folder IDs next to the script
    results.folder.to_excel('folder.xlsx', header=False, index=False)
    print(
        '###筛选的folder 保存在:[{}]###\n'.format(
            os.path.join(
                current_path,
                'folder.xlsx')))
    print('作者:黎超\n邮箱:<EMAIL>617@163.com\n')
    input("######按任意键推出######\n")
    print('==================================我是分割线====================================\n')
dongmengshi/easylearn | eslearn/utils/selectSubjID_inScale.py | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 10:43:28 2018
筛选大表的item和subjects
最后得到诊断为[1,2,3,4],扫描质量良好,而且不重复的被试
outputs:
folder:筛选出来的ID
basic:筛选出来的基本信息
hamd17,hamm,yars,bprs:筛选出来的量表
logicIndex_scale:量表的逻辑index
logicIndex_repeat:重复量表的index
@author: lenovo
"""
# ===============================================
import pandas as pd
#import re
import numpy as np
def loadExcel(file_all):
    """Read the master clinical spreadsheet into a DataFrame."""
    return pd.read_excel(file_all)
# ===============================================
def select_item(allClinicalData,
                basicIndex_iloc=[0, 11, 19, 20, 21, 22, 23, 27, 28, 29, 30],
                basicIndex_str=['学历(年)', '中国人利手量表'],
                hamd12Index_iloc=np.arange(104, 126, 1),
                hamaIndex_iloc=np.arange(126, 141, 1),
                yarsIndex_iloc=np.arange(141, 153, 1),
                bprsIndex_iloc=np.arange(153, 177, 1)):
    """Slice the master sheet into the basic-info columns and the four
    clinical sub-scales.

    ``basic`` combines positional columns (basicIndex_iloc) with the two
    named columns (basicIndex_str); the sub-scales (HAMD-17, HAMA, YARS,
    BPRS) are taken by column position. Returns a 5-tuple of DataFrames.
    """
    basic = pd.concat([allClinicalData.iloc[:, basicIndex_iloc],
                       allClinicalData[basicIndex_str]], axis=1)
    subscales = [allClinicalData.iloc[:, idx]
                 for idx in (hamd12Index_iloc, hamaIndex_iloc,
                             yarsIndex_iloc, bprsIndex_iloc)]
    return (basic, *subscales)
# ===============================================
def diagnosis(diagnosis):
    """Boolean mask over a diagnosis Series: True for codes 1, 2, 3 or 4.

    NaN entries compare as no-match and therefore come back False.
    """
    return diagnosis.isin([1, 2, 3, 4])
def select_quality(quality):
    """Boolean mask: True where the resting-scan quality flag equals 'Y'."""
    return quality == 'Y'
def select_repeat(note, repeatMarker='复扫'):
    """Split the note column into repeat-scan / non-repeat boolean lists.

    Missing notes are replaced with the placeholder '未知' (unknown) so the
    substring test never sees NaN. Returns (logicIndex_repeat,
    logicIndex_notRepeat), two plain Python lists of booleans.
    """
    filled = note.fillna('未知')
    hits = filled.str.contains(repeatMarker)
    logicIndex_repeat = [bool(hit) for hit in hits]
    logicIndex_notRepeat = [not hit for hit in hits]
    return logicIndex_repeat, logicIndex_notRepeat
def select_intersection(
        logicIndex_diagnosis,
        logicIndex_quality,
        logicIndex_notRepeat):
    """AND the three screening masks into one final selection mask."""
    return logicIndex_diagnosis & logicIndex_quality & logicIndex_notRepeat
# ===============================================
def selcetSubj_accordingLogicIndex(index_seleled,
                                   basic, hamd17, hama, yars, bprs):
    """Apply the final boolean mask to every table at once.

    Returns the five tables in the same order, each restricted to the
    rows where ``index_seleled`` is True.
    """
    return tuple(frame[index_seleled]
                 for frame in (basic, hamd17, hama, yars, bprs))
# ===============================================
def dropnan(scale):
    """Row mask that is True only for rows with no missing values."""
    return scale.isnull().values.sum(axis=1) == 0
def dropnan_all(scale):
    """Complete-case mask for every table in the list (one mask per table)."""
    return [dropnan(one_table) for one_table in scale]
# ===============================================
def selMain(allFile):
    """End-to-end subject screening for one spreadsheet.

    Loads the sheet, slices out the basic info and the four sub-scales,
    screens by diagnosis (codes 1-4), resting-scan quality ('Y') and
    repeat-scan notes, intersects the masks and applies them.

    Returns (folder, basic, hamd17, hama, yars, bprs, logicIndex_scale,
    logicIndex_repeat): the surviving folder IDs, the screened tables,
    the per-scale complete-case masks and the repeat-scan mask.
    """
    allClinicalData = loadExcel(allFile)
    basic, hamd17, hama, yars, bprs = select_item(
        allClinicalData,
        basicIndex_iloc=[0, 11, 19, 20, 21, 22, 23, 27, 28, 29, 30],
        basicIndex_str=['学历(年)', '中国人利手量表'],
        hamd12Index_iloc=np.arange(104, 126, 1),
        hamaIndex_iloc=np.arange(126, 141, 1),
        yarsIndex_iloc=np.arange(141, 153, 1),
        bprsIndex_iloc=np.arange(153, 177, 1))
    # Individual screening masks.
    logicIndex_diagnosis = diagnosis(diagnosis=basic['诊断'])
    logicIndex_quality = select_quality(quality=basic['Resting_quality'])
    logicIndex_repeat, logicIndex_notRepeat = select_repeat(
        note=basic['诊断备注'], repeatMarker='复扫')
    # Combine and apply.
    index_seleled = select_intersection(
        logicIndex_diagnosis, logicIndex_quality, logicIndex_notRepeat)
    basic, hamd17, hama, yars, bprs = selcetSubj_accordingLogicIndex(
        index_seleled, basic, hamd17, hama, yars, bprs)
    folder = basic['folder']
    logicIndex_scale = dropnan_all(scale=[hamd17, hama, yars, bprs])
    return folder, basic, hamd17, hama, yars, bprs, logicIndex_scale, logicIndex_repeat
# ===============================================
if __name__ == '__main__':
    # Demo run on the master sheet; prints the surviving folder IDs.
    file_all = r"D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\10-24大表.xlsx"
    (folder, basic, hamd17, hama, yars, bprs,
     logicIndex_scale, logicIndex_repeat) = selMain(file_all)
    print(folder)
    print('Done!')
|
dongmengshi/easylearn | eslearn/statistical analysis/lc_anova.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 15:40:45 2018
方差分析
@author: lenovo
"""
import scipy.stats as stats
def oneway_anova(data, *args, **kwargs):
    """One-way ANOVA across groups (wrapper around scipy.stats.f_oneway).

    Parameters
    ----------
    data : list/tuple of array-likes, or a single array-like
        When a list or tuple, each element is one group (an N*M matrix:
        N = sample size, M = number of variables), matching the
        documented contract. Otherwise ``data`` is the first group and
        the remaining groups are passed positionally via ``*args``.

    Returns
    -------
    f, p : the F statistic(s) and p value(s).
    """
    if isinstance(data, (list, tuple)):
        # Bug fix: f_oneway expects each group as a separate positional
        # argument; previously the whole list was passed as ONE sample.
        f, p = stats.f_oneway(*data, *args, **kwargs)
    else:
        f, p = stats.f_oneway(data, *args, **kwargs)
    return f, p
if __name__ == '__main__':
import numpy as np
data = [] |
dongmengshi/easylearn | eslearn/utils/copy_file_to_folder.py | <filename>eslearn/utils/copy_file_to_folder.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 20:46:52 2019
将一个文件夹中的ROI文件复制到相应的被试文件夹下面
@author: lenovo
"""
import os
import shutil
import pandas as pd
"""input"""
file_folder_path = r"D:\dms-lymph-nodes\merge_noContrast"
out_folder_path = r"D:\dms-lymph-nodes\finish"
def read_files_name():
    """List every entry of the source folder together with its full path.

    Returns (names, paths) built from the module-level file_folder_path.
    """
    all_files_name = os.listdir(file_folder_path)
    all_files_path = [os.path.join(file_folder_path, name)
                      for name in all_files_name]
    return all_files_name, all_files_path
def extract_uid(all_files_name):
    """UID of each file = the part of its name before the first underscore."""
    return [name.split('_')[0] for name in all_files_name]
def copy_file_to_folder(all_files_name, all_files_path, uid):
    """Copy each source file into the subject folder whose name contains its UID.

    Destination folders are the entries of the module-level out_folder_path;
    matching is a substring test of the UID against the folder name.
    NOTE(review): ``out_path.iloc[0]`` assumes at least one folder matches
    each UID (and silently uses the first if several match) -- confirm.
    """
    # Destination subject folders under out_folder_path.
    all_folders_name = os.listdir(out_folder_path)
    all_folders_path = [
        os.path.join(
            out_folder_path,
            folder_name) for folder_name in all_folders_name]
    all_folders_path = pd.Series(all_folders_path)
    """copy..."""
    print("copying...")
    for i, one_uid in enumerate(uid):
        print("正在复制第{}/{}个文件".format(i + 1, len(uid)))
        all_folders_name = pd.Series(all_folders_name)
        # Substring match of this UID against every folder name.
        my_id = all_folders_name.str.contains(one_uid)
        out_path = all_folders_path[my_id]
        in_file = all_files_path[i]
        out_file = os.path.join(out_path.iloc[0], all_files_name[i])
        shutil.copyfile(in_file, out_file)
    # NOTE: this for/else has no break, so the else always runs after
    # the loop completes -- it is just the "done" message.
    else:
        print("finished!\n")
def delete():
    """Delete the first '.nii'-matching file found in every subject folder.

    Folders are the entries of the module-level out_folder_path; folders
    without a matching file are reported and skipped.
    """
    # Enumerate the subject folders.
    all_folders_name = os.listdir(out_folder_path)
    all_folders_path = [
        os.path.join(
            out_folder_path,
            folder_name) for folder_name in all_folders_name]
    all_folders_path = pd.Series(all_folders_path)
    print("deleting...")
    for i in range(len(all_folders_path)):
        # Fixed: progress now counts 1..N, consistent with
        # copy_file_to_folder (it previously started at 0).
        print("正在删除第{}/{}个文件".format(i + 1, len(all_folders_path)))
        all_files_of_one_subj = pd.Series(os.listdir(all_folders_path.iloc[i]))
        # NOTE(review): '.' in ".nii" is a regex wildcard here, so names
        # like "xnii" would also match; consider regex=False if that is
        # not intended (behavior kept as-is).
        ind = all_files_of_one_subj.str.contains(".nii")
        remove_file = all_files_of_one_subj[ind]
        try:
            # Several files may match; only the first one is removed.
            remove_file_path = os.path.join(
                all_folders_path[i], remove_file.iloc[0])
            os.remove(remove_file_path)
        except IndexError:
            # No matching file in this folder -- report and continue.
            print("{}文件夹下没有需要删除的文件".format(all_folders_path[i]))
def main(if_copy=0, if_del=0):
    """Run the copy step and/or the delete step, controlled by the flags."""
    if if_copy:
        names, paths = read_files_name()
        copy_file_to_folder(names, paths, extract_uid(names))
    if if_del:
        delete()
if __name__ == "__main__":
main(0, 1)
|
dongmengshi/easylearn | eslearn/machine_learning/test/GCNNCourseCodes/myutils.py | <gh_stars>0
"""
@author: <NAME>
LITIS Lab, Rouen, France
<EMAIL>
"""
import numpy as np
from scipy.io import loadmat
import networkx as nx
import numpy.linalg as linalg
from sklearn.preprocessing import StandardScaler
def loadCobraData(fname='cobradat.mat'):
    """Unpack the COBRA .mat dataset into plain Python containers.

    Returns (A, C, F, TT, Atom, Anames, Vnames, FILE, NAME): adjacency
    and connectivity matrices per molecule, the global descriptor matrix
    and its column names, atomic descriptors and names, the per-molecule
    atom symbol lists, and the source file / molecule names.
    """
    mat = loadmat(fname)
    # Per-molecule adjacency / connectivity matrices (same count as A).
    n_mol = mat['A'].shape[0]
    A = [mat['A'][i][0] for i in range(n_mol)]
    C = [mat['C'][i][0] for i in range(n_mol)]
    # Global feature matrix and its descriptor names.
    F = mat['F']
    Vnames = [mat['Vnames'][0][i][0]
              for i in range(mat['Vnames'][0].shape[0])]
    # Source file names and molecule names (parallel lists).
    n_file = mat['FILE'].shape[0]
    FILE = [mat['FILE'][i][0][0] for i in range(n_file)]
    NAME = [mat['NAME'][i][0][0] for i in range(n_file)]
    # Atomic descriptor names.
    Anames = [mat['Anames'][0][i][0]
              for i in range(mat['Anames'].shape[1])]
    # Atomic descriptors plus the atom symbol list of each molecule.
    n_tt = mat['TT'].shape[0]
    TT = [mat['TT'][i][0] for i in range(n_tt)]
    Atom = [[mat['Atom'][i][0][j][0][0]
             for j in range(mat['Atom'][i][0].shape[0])]
            for i in range(n_tt)]
    return A, C, F, TT, Atom, Anames, Vnames, FILE, NAME
def loadCobraGraphAsNetworkx(fname='cobradat.mat'):
    """Build one networkx graph per molecule from the COBRA .mat dataset.

    Edges come from the adjacency matrix A (upper triangle, value 1);
    every node gets a 'Label' attribute holding its chemical element
    symbol. Returns (G, FILE): the graph list and the source file names.
    """
    # read dataset
    A,C,F,TT,Atom,Anames,Vnames,FILE,NAME=loadCobraData(fname)
    # NOTE(review): N and F are assigned but never used afterwards
    # (F even shadows the global descriptor matrix loaded above).
    G=[];N=[];F=[]
    #an=0
    for i in range(0,len(FILE)):
        name = FILE[i]
        atm=Atom[i]
        AA=A[i]
        # Collect the upper-triangle edges of the adjacency matrix;
        # node ids are string indices of the atoms.
        edge=[]
        for j in range(0,len(atm)-1):
            for k in range(j,len(atm)):
                if AA[j,k]==1:
                    edge.append([str(j), str(k)])
        graph = nx.from_edgelist(edge)
        # Map each atom string to an element label. Two-character symbols
        # (Cl, Br, Si) are distinguished from the one-character ones.
        # NOTE(review): assumes every atom string has >= 2 characters
        # (atm[j][1] would raise IndexError otherwise) -- TODO confirm.
        feat={}
        for j in range(0,len(atm)):
            if atm[j][0]=='C' and atm[j][1]!='l':
                feat[str(j)]={'Label': u'C'} #, 'label': str(j)}
            elif atm[j][0]=='H':
                feat[str(j)]={'Label': u'H'} #, 'label': str(j)}
            elif atm[j][0]=='O':
                feat[str(j)]={'Label': u'O'} #, 'label': str(j)}
            elif atm[j][0]=='N':
                feat[str(j)]={'Label': u'N'} #, 'label': str(j)}
            elif atm[j][0]=='F':
                feat[str(j)]={'Label': u'F'} #, 'label': str(j)}
            elif atm[j][0:2]=='Br':
                feat[str(j)]={'Label': u'Br'} #, 'label': str(j)}
            elif atm[j][0]=='S' and atm[j][1]!='i':
                feat[str(j)]={'Label': u'S'} #, 'label': str(j)}
            elif atm[j][0:2]=='Cl':
                feat[str(j)]={'Label': u'Cl'} #, 'label': str(j)}
            elif atm[j][0:2]=='Si':
                feat[str(j)]={'Label': u'Si'} #, 'label': str(j)}
            else:
                # Unknown element -> generic label X.
                feat[str(j)]={'Label': u'X'} #, 'label': str(j)}
        #an+=1
        nx.set_node_attributes(graph, feat)
        G.append(graph)
    return G,FILE
def normalize_wrt_train(trX,tsX):
    """Normalize signal data S and global data GF respect to train set
    trX is list of [S,U,B,Nd,GF]

    The signal rows are standardized with a StandardScaler fitted on the
    training set only; GF is z-scored with training mean/std. The first
    50 test columns are clipped into the training min/max range and
    column 17 is forced to the constant 1 after scaling.
    NOTE(review): Nd[i][0] appears to store 1/(number of valid rows) of
    graph i, so n = round(1/Nd[i][0]) recovers the row count -- confirm.
    NOTE(review): columns 17 and 0..49 are magic numbers tied to this
    dataset's feature layout -- confirm before reuse.
    """
    # Stack the valid rows of every training graph into one matrix.
    n=int(np.round(1/trX[3][0][0]))
    trainX=trX[0][0][0:n]
    for i in range(1,len(trX[0])):
        n=int(np.round(1/trX[3][i][0]))
        trainX=np.vstack((trainX,trX[0][i][0:n]))
    # Same stacking for the test set.
    n=int(np.round(1/tsX[3][0][0]))
    testX=tsX[0][0][0:n]
    for i in range(1,len(tsX[0])):
        n=int(np.round(1/tsX[3][i][0]))
        testX=np.vstack((testX,tsX[0][i][0:n]))
    # Standardize both sets with statistics from the training set only.
    scaler = StandardScaler()
    scaler.fit(trainX)
    trainX=scaler.transform(trainX)
    testX=scaler.transform(testX)
    # Column 17 pinned to 1 (bias-like feature?) -- TODO confirm why.
    trainX[:,17]=1
    testX[:,17]=1
    # Clip the first 50 test columns into the training value range.
    mn=trainX.min(axis=0)
    mx=trainX.max(axis=0)
    for i in range(0,50):
        testX[np.where(testX[:,i]<mn[i]),i]=mn[i]
        testX[np.where(testX[:,i]>mx[i]),i]=mx[i]
    # Z-score the global descriptors GF with training mean/std.
    mn=trX[4].mean(axis=0)
    sd=trX[4].std(axis=0)
    trX[4]=(trX[4]-mn)/sd
    tsX[4]=(tsX[4]-mn)/sd
    # Write the normalized rows back into the per-graph arrays (in place).
    b=0
    for i in range(0,len(trX[0])):
        n=int(np.round(1/trX[3][i][0]))
        trX[0][i][0:n]=trainX[b:b+n,:]
        b=b+n
    b=0
    for i in range(0,len(tsX[0])):
        n=int(np.round(1/tsX[3][i][0]))
        tsX[0][i][0:n]=testX[b:b+n,:]
        b=b+n
    return trX,tsX
def laplacian(W, normalized=0):
    """Graph Laplacian of the adjacency matrix W.

    normalized == 0 -> combinatorial Laplacian   L = D - W
    normalized == 1 -> symmetric normalized      L = I - D^-1/2 W D^-1/2
    otherwise       -> normalized adjacency          D^-1/2 W D^-1/2
    """
    W = 1.0 * W                     # force float arithmetic
    d = W.sum(axis=0)               # node degrees
    if normalized == 0:
        return np.diag(d) - W
    # Tiny epsilon guards 1/sqrt(d) against zero-degree nodes.
    d = d + np.spacing(np.array(0, W.dtype))
    D = np.diag(1 / np.sqrt(d))
    if normalized == 1:
        I = np.eye(d.size, dtype=W.dtype)
        return I - D.dot(W).dot(D)
    return D.dot(W).dot(D)
def eigenValuesVectors(A, sorted=True):
    """Eigen-decomposition of a symmetric matrix A.

    With ``sorted`` (the default) the eigenvalues come back in ascending
    order (real parts only), eigenvectors are reordered to match, and any
    negative eigenvalue is clamped to zero (numerical noise).
    """
    eigenValues, eigenVectors = linalg.eigh(A)
    if sorted:
        # Descending argsort of the negated values, reversed -> ascending.
        order = (-eigenValues).argsort()[::-1]
        eigenValues = np.real(eigenValues[order])
        eigenVectors = np.real(eigenVectors[:, order])
        eigenValues[eigenValues < 0] = 0
    return eigenValues, eigenVectors
def bspline_basis(K, v, x, degree=3):
    """Evaluate K B-spline basis functions of the given degree at points x.

    The knot vector spans [v.min(), v.max()] with `degree` repeated end
    knots (a clamped/open spline). Returns an array of shape (len(x), K).
    """
    def cox_deboor(k, d):
        # Cox-de Boor recursion for basis function k of degree d.
        if d == 0:
            # Indicator of the half-open knot interval [kv[k], kv[k+1]).
            ret= np.zeros((x.shape[0],),dtype=np.float)
            ret[np.where( (x - kv[k] >= 0) * (x - kv[k + 1] < 0)==True)]=1
        else:
            denom1 = kv[k + d] - kv[k]
            term1 = 0
            if denom1 > 0:
                term1 = ((x - kv[k]) / denom1) * cox_deboor(k, d - 1)
            denom2 = kv[k + d + 1] - kv[k + 1]
            term2 = 0
            if denom2 > 0:
                term2 = ((-(x - kv[k + d + 1]) / denom2) * cox_deboor(k + 1, d - 1))
            ret= term1 + term2
        return ret
    # NOTE(review): np.float was removed in NumPy >= 1.24; this code needs
    # an older NumPy (or s/np.float/float/) to run -- flagged, not changed.
    basis=np.zeros((x.shape[0],K))
    # Clamped knot vector: repeated end knots + uniform interior knots.
    kv1 = v.min() * np.ones((degree,))
    kv2 = np.linspace(v.min(), v.max(), K-degree+1)
    kv3 = v.max() * np.ones((degree,))
    kv = np.hstack((kv1 ,kv2 ,kv3))
    for k in range(0,K):
        basis[:,k]=cox_deboor(k, degree)
    return basis
#%basis(end,end)=1;
def prepare_data(A,K,mxeigv=None,degree=2):
    """Spectral preprocessing of one adjacency matrix.

    Computes the Laplacian eigendecomposition and a (len(V), K) weight
    matrix B that distributes each eigenvalue onto a uniform grid of K
    support points over [-0.0001, mxeigv]. Returns (U, B, V):
    eigenvectors, grid weights, eigenvalues.
    NOTE(review): `degree` is currently unused (the B-spline variant was
    in the original but is disabled).
    """
    L=laplacian(A)
    V,U=eigenValuesVectors(L)
    if mxeigv is None:
        mxeigv=V.max()
    # Uniform grid of K support points over the eigenvalue range.
    nv=np.linspace(-0.0001,mxeigv,K)
    B=np.zeros((V.shape[0],nv.shape[0]))
    for i in range(0,V.shape[0]):
        # i1/i2: grid points bracketing eigenvalue V[i].
        i1=np.where(nv<=V[i])[0][-1]
        i2=np.where(nv>=V[i])[0]
        if len(i2)==0:
            # Eigenvalue above the grid: leave its row all-zero.
            continue
        i2=i2[0]
        if i1==i2:
            # Eigenvalue sits exactly on a grid point.
            B[i,i1]=1
        else:
            # Weights between the two bracketing grid points.
            # NOTE(review): the weight on the UPPER point grows as V[i]
            # approaches the LOWER point, which is inverted relative to
            # standard linear interpolation -- confirm this is intended.
            B[i,i2]=(nv[i2]-V[i])/(nv[i2]-nv[i1])
            B[i,i1]=1-B[i,i2]
    # Disabled alternative from the original, kept for reference:
    # B=bspline_basis(K,nv,V,degree=degree)
    return U,B,V
|
dongmengshi/easylearn | eslearn/utils/lc_selectFile.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 15:01:44 2018
used for select target files
refer to and thank [https://www.jianshu.com/p/91453c76dbc3]
@author: <NAME>
"""
# import
from nipype import SelectFiles, Node
# def
def selectFile(rootPath=r'I:\Data_Code\Doctor\RealignParameter'):
    """Collect FD_Jenkinson_* files from every *_* subject folder via nipype.

    Builds a SelectFiles node rooted at ``rootPath``, runs it, and returns
    the matched path(s) from the node outputs.
    """
    templates = {'path': '{folder}\\{id}'}
    sf = Node(SelectFiles(templates), name='selectfiles')
    sf.inputs.base_directory = rootPath
    # Values for the {folder}/{id} placeholders in the template above.
    sf.inputs.folder = '*_*'
    sf.inputs.id = 'FD_Jenkinson_*'
    return sf.run().outputs.__dict__['path']
if __name__ == '__main__':
    # Demo: run the selection with the default root path.
    path = selectFile()
|
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_classify_FC.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 22:15:54 2018
1.以功能连接/动态功能连接矩阵为特征,来进行分类
2.本程序使用的算法为svc(交叉验证)
3.当特征是动态连接时,使用标准差或者均数等来作为特征。也可以自己定义
4.input:
所有人的.mat FC/dFC
5.output:
机器学习的相应结果,以字典形式保存再result中。
@author: <NAME>
"""
import sys
sys.path.append(
r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Utils')
sys.path.append(
r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Machine_learning\classfication')
from sklearn.model_selection import train_test_split
import time
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import numpy as np
import re
import os
from lc_read_write_mat import read_mat, write_mat
class classify_using_FC():
    """SVC classification of subjects from (dynamic) functional connectivity.

    Workflow: load every subject's FC/dFC .mat file (reducing a dynamic FC
    series to its std or mean), build the label vector from the clinical
    spreadsheet, balance the two requested groups by up-/down-sampling,
    and run RFE + cross-validated SVC (project module lc_svc_rfe_cv_V2).

    NOTE: ``sel`` is this project's spelling of ``self`` throughout.
    """

    def __init__(sel,
                 k=5,
                 file_path=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zDynamic\DynamicFC_length17_step1_screened',
                 dataset_name=None,  # variable name inside each .mat file
                 scale=r'D:\WorkStation_2018\WorkStation_dynamicFC\Scales\8.30大表.xlsx',
                 save_path=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zDynamic',
                 feature='std17',  # dFC summary to use ('mean'/'std'/'staticFC')
                 folder_label_name='folder_label17.xlsx',
                 which_group_to_classify=[2, 3],
                 how_resample='up_resample',  # strategy when group sizes differ
                 mask=np.ones([114, 114]),  # mask over the feature matrix
                 n_processess=10,
                 if_save_post_mat=1,  # save the post-processed feature .mat?
                 random_state=2):
        sel.k = k
        sel.file_path = file_path
        sel.dataset_name = dataset_name
        sel.scale = scale
        sel.save_path = save_path
        sel.feature = feature
        sel.folder_label_name = folder_label_name
        sel.which_group_to_classify = which_group_to_classify
        sel.how_resample = how_resample
        sel.mask = mask
        # Keep only the upper triangle: the lower half duplicates it.
        sel.mask = np.triu(sel.mask, 1) == 1
        # sel.mask=np.ones([1,4])==1  # (alternative mask kept from original)
        sel.n_processess = n_processess
        sel.if_save_post_mat = if_save_post_mat
        sel.random_state = random_state

    def load_allmat(sel):
        """Load every subject's feature matrix (multi-threaded), or reload
        the pooled cache file if it already exists."""
        s = time.time()
        print('loading all mat...\n')
        # Fast path: the pooled feature .mat was saved by a previous run.
        if os.path.exists(os.path.join(sel.save_path, sel.feature + '.mat')):
            sel.mat = pd.DataFrame(read_mat(os.path.join(
                sel.save_path, sel.feature + '.mat'), None))
            print('Already have {}\nloaded all mat!\nrunning time={:.2f}'.format(
                sel.feature + '.mat', time.time() - s))
        else:
            sel.all_mat = os.listdir(sel.file_path)
            all_mat_path = [os.path.join(sel.file_path, all_mat_)
                            for all_mat_ in sel.all_mat]
            # Cap the thread count at (cores - 1).
            cores = multiprocessing.cpu_count()
            if sel.n_processess > cores:
                sel.n_processess = cores - 1
            len_all = len(all_mat_path)
            sel.mat = pd.DataFrame([])
            # Which per-subject summary to keep: index 0 = std, 1 = mean.
            if re.match('mean', sel.feature):
                ith = 1
            elif re.match('std', sel.feature):
                ith = 0
            elif re.match('static', sel.feature):
                ith = 0
            else:
                print('###还未添加其他衡量dFC的指标,默认使用std###\n')
                ith = 0
            # load mat...
            with ThreadPoolExecutor(sel.n_processess) as executor:
                for i, all_mat_ in enumerate(all_mat_path):
                    task = executor.submit(
                        sel.load_onemat_and_processing, i, all_mat_, len_all, s)
                    # One row per subject; .T turns the 1-D vector into a row.
                    sel.mat = pd.concat(
                        [sel.mat, pd.DataFrame(task.result()[ith]).T], axis=0)
            # Cache the pooled feature matrix for the next run.
            if sel.if_save_post_mat:
                write_mat(fileName=os.path.join(sel.save_path, sel.feature + '.mat'),
                          dataset_name=sel.feature,
                          dataset=np.mat(sel.mat.values))
                print('saved all {} mat!\n'.format(sel.feature))
        return sel

    def load_onemat_and_processing(sel, i, all_mat_, len_all, s):
        """Load one subject's .mat, summarize and flatten it; prints progress."""
        # load mat
        mat = read_mat(all_mat_, sel.dataset_name)
        # Reduce the dynamic series to std/mean (skipped for static FC).
        if re.match('static', sel.feature):
            mat_std, mat_mean = mat, []
        else:
            mat_std, mat_mean = sel.calc_std(mat)
        # Flatten the masked matrix into a 1-D feature vector.
        if re.match('static', sel.feature):
            mat_std_1d, mat_mean_1d = sel.postprocessing_features(mat_std), []
        else:
            mat_std_1d = sel.postprocessing_features(mat_std)
            mat_mean_1d = sel.postprocessing_features(mat_mean)
        # Progress report every 20 subjects; ETA every 50.
        if i % 20 == 0 or i == 0:
            print('{}/{}\n'.format(i, len_all))
        if i % 50 == 0 and i != 0:
            e = time.time()
            remaining_running_time = (e - s) * (len_all - i) / i
            print('\nremaining time={:.2f} seconds \n'.format(
                remaining_running_time))
        return mat_std_1d, mat_mean_1d

    def calc_std(sel, mat):
        # Std / mean over the time (3rd) axis of the dynamic FC stack.
        mat_std = np.std(mat, axis=2)
        mat_mean = np.mean(mat, axis=2)
        return mat_std, mat_mean

    def postprocessing_features(sel, mat):
        # Keep only the masked entries (upper triangle), flattened to 1-D.
        return mat[sel.mask]

    def gen_label(sel):
        """Build (or reload from cache) the folder-ID / diagnosis label table."""
        if os.path.exists(os.path.join(sel.save_path, sel.folder_label_name)):
            sel.label = pd.read_excel(os.path.join(
                sel.save_path, sel.folder_label_name))['诊断']
            print('\nAlready have {}\n'.format(sel.folder_label_name))
        else:
            # identify label for each subj: extract the numeric id from each
            # file name and inner-join it with the 'folder' column.
            id_subj = pd.Series(sel.all_mat).str.extract('([1-9]\d*)')
            scale = pd.read_excel(sel.scale)
            id_subj = pd.DataFrame(id_subj, dtype=type(scale['folder'][0]))
            sel.label = pd.merge(
                scale, id_subj, left_on='folder', right_on=0, how='inner')['诊断']
            sel.folder = pd.merge(
                scale, id_subj, left_on='folder', right_on=0, how='inner')['folder']
            # save folder and label
            if sel.if_save_post_mat:
                sel.label_folder = pd.concat([sel.folder, sel.label], axis=1)
                sel.label_folder.to_excel(os.path.join(
                    sel.save_path, sel.folder_label_name), index=False)
        return sel

    def machine_learning(sel):
        """Balance the two requested groups, then run RFE + cross-validated SVC.

        Returns the result object's __dict__ from lc_svc_rfe_cv_V2.
        """
        # Labels of the two groups to classify, in order.
        y = pd.concat([sel.label[sel.label.values == sel.which_group_to_classify[0]],
                       sel.label[sel.label.values == sel.which_group_to_classify[1]]])
        y = y.values
        # Features: reload the cached pooled matrix if present.
        if os.path.exists(os.path.join(sel.save_path, sel.feature + '.mat')):
            sel.mat = pd.DataFrame(read_mat(os.path.join(
                sel.save_path, sel.feature + '.mat'), None))
        x = pd.concat([sel.mat.iloc[sel.label.values == sel.which_group_to_classify[0], :],
                       sel.mat.iloc[sel.label.values == sel.which_group_to_classify[1], :]])
        x = x.values
        # Binarize y: first group -> 0, second group -> 1.
        y[y == sel.which_group_to_classify[0]] = 0
        y[y == sel.which_group_to_classify[1]] = 1
        # Report the class sizes before balancing.
        print('未平衡前的样本={}:{}\n'.format(sum(y == 0), sum(y == 1)))
        sel.origin_sample_size = '{}:{}'.format(sum(y == 0), sum(y == 1))
        # Balance the classes: ind_up = smaller class, ind_down = larger.
        ind_up, ind_down = np.argmin([sum(y == 0), sum(y == 1)]), np.argmax([
            sum(y == 0), sum(y == 1)])
        num_up = np.abs(sum(y == 0) - sum(y == 1))
        if sel.how_resample == 'up_resample':
            # Duplicate the first num_up samples of the smaller class.
            y_need_up = y[y == ind_up]
            x_need_up = x[y == ind_up]
            x = np.vstack([x[y == ind_down], x_need_up[:num_up, :], x_need_up])
            # dropna, keeping y aligned with the surviving x rows.
            x = pd.DataFrame(x)
            x = x.dropna()
            ind = list(x.index)
            x = x.values
            y = np.hstack([y[y == ind_down], y_need_up[:num_up], y_need_up])
            y = pd.DataFrame(y).loc[ind].values
        else:
            # Drop the first num_up samples of the larger class.
            y_need_down = y[y == ind_down]
            x_need_down = x[y == ind_down]
            # dropna, keeping y aligned with the surviving x rows.
            x = np.vstack([x_need_down[num_up:, :], x[y == ind_up]])
            x = pd.DataFrame(x)
            x = x.dropna()
            ind = list(x.index)
            x = x.values
            y = np.hstack([y_need_down[num_up:], y[y == ind_up]])
            y = pd.DataFrame(y).loc[ind].values
        print('平衡后的样本={}:{}\n'.format(sum(y == 0), sum(y == 1)))
        sel.balanced_sample_size = '{}:{}'.format(sum(y == 0), sum(y == 1))
        # Disabled experiments kept from the original:
        # rand_ind=np.random.permutation(len(y))  # permute y
        # y=y[rand_ind]
        # x_train, x_test, y_train, y_test = \
        #     train_test_split(x, y, random_state=sel.random_state)
        # RFE + cross-validated SVC from the project toolbox.
        import lc_svc_rfe_cv_V2 as lsvc
        model = lsvc.svc_rfe_cv(k=sel.k, pca_n_component=0.85)
        results = model.main_svc_rfe_cv(x, y)
        results = results.__dict__
        return results
if __name__ == '__main__':
    # Example run: static FC features, groups 1 vs 3, down-sampling the
    # larger group to balance the classes.
    import lc_classify_FC as Clasf
    sel = Clasf.classify_using_FC(
        k=5,
        file_path=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic\x_test206',
        dataset_name=None,
        scale=r'D:\WorkStation_2018\WorkStation_dynamicFC\Scales\8.30大表.xlsx',
        save_path=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zStatic',
        feature='static',
        folder_label_name='folder_label_static_add.xlsx',
        which_group_to_classify=[1, 3],
        how_resample='down_resample',
        mask=np.ones([114, 114]),
        n_processess=10,
        if_save_post_mat=1,
        random_state=2)
    results = sel.load_allmat()
    results = sel.gen_label()
    result = sel.machine_learning()
|
dongmengshi/easylearn | eslearn/utils/lc_copy_selected_file_V4.py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 13:05:28 2018:
在版本3的基础上,根据pandas的join方法来求交集
根据从量表中筛选的样本,来获得符合要求的原始数据的路径
数据结构neuroimageDataPath//subject00001//files
也可以是任何的数据结构,只要给定subjName在哪里就行
总之,最后把file复制到其他地方(可以限定某个file)
input:
# 1 referencePath:需要复制的被试名字所在text文件(大表中的folder)
# 2 regularExpressionOfsubjName_forReference:如提取量表中subjName的正则表达式
# 3 folderNameContainingFile_forSelect:想把被试的哪个模态/或那个文件夹下的文件复制出来(如同时有'resting'和'dti'时,选择那个模态)
# 4 num_countBackwards:subjName在倒数第几个block内(第一个计数为1)
# 如'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt'
# 的subjName在倒数第3个中
# 5 regularExpressionOfSubjName_forNeuroimageDataFiles:用来筛选mri数据中subject name字符串的正则表达式
# 6 keywordThatFileContain:用来筛选file的正则表达式或keyword
# 7 neuroimageDataPath:原始数据的根目录
# 8 savePath: 将原始数据copy到哪个大路径
# n_processess=5几个线程
# 9 ifSaveLog:是否保存复制log
# 10 ifCopy:是否执行复制功能
# 11 ifMove:是否移动(0)
# 12 saveInToOneOrMoreFolder:保存到每个被试文件夹下,还是保存到一个文件夹下
# 13 saveNameSuffix:文件保存的尾缀('.nii')
# 14 ifRun:是否真正对文件执行移动或复制(0)
# 总体来说被复制的文件放在如下的路径:savePath/saveFolderName/subjName/files
@author: <NAME>
"""
# =========================================================================
# import
import sys
import shutil
import os
import time
# from lc_selectFile_ import selectFile
import pandas as pd
import numpy as np
from sklearn.externals.joblib import Parallel, delayed
# =========================================================================
# def
class copy_fmri():
def __init__(self,
referencePath=r'E:\wangfeidata\folder.txt',
regularExpressionOfsubjName_forReference='([1-9]\d*)',
folderNameContainingFile_forSelect='',
num_countBackwards=2,
regularExpressionOfSubjName_forNeuroimageDataFiles='([1-9]\d*)',
keywordThatFileContain='nii',
neuroimageDataPath=r'E:\wangfeidata\FunImgARWD',
savePath=r'E:\wangfeidata',
n_processess=5,
ifSaveLog=1,
ifCopy=0,
ifMove=0,
saveInToOneOrMoreFolder='saveToEachSubjFolder',
saveNameSuffix='.nii',
ifRun=0):
# 核对参数信息
if ifCopy == 1 & ifMove == 1:
print('### Cannot copy and move at the same time! ###\n')
print('### please press Ctrl+C to close the progress ###\n')
time.sleep(5)
# print('==========================================================')
# print('\nThe num_countBackwards that to screen subject name is {} !'.format(num_countBackwards))
# print('\nKeyword of folder name that containing the files is {} !'.format(folderNameContainingFile_forSelect))
# print('regularExpressionOfSubjName_forNeuroimageDataFiles is {}'.format(regularExpressionOfSubjName_forNeuroimageDataFiles))
# print('ifCopy is {}'.format(ifCopy))
# print('saveInToOneOrMoreFolder is {}'.format(saveInToOneOrMoreFolder))
# print('==========================================================')
# input("***请核对以上信息是否准确,否则复制出错!***")
# =========================================================================
# accept excel or csv
self.referencePath = referencePath
try:
self.subjName_forSelect = pd.read_excel(
referencePath, dtype='str', header=None, index=None)
except:
self.subjName_forSelect = pd.read_csv(
referencePath, dtype='str', header=None)
#
print('###提取subjName_forSelect中的匹配成分,默认为数字###\n###当有多个匹配时默认是第1个###\n')
ith = 0
if regularExpressionOfsubjName_forReference:
self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0]\
.str.findall('[1-9]\d*')
self.subjName_forSelect = [self.subjName_forSelect_[ith]
for self.subjName_forSelect_ in
self.subjName_forSelect
if len(self.subjName_forSelect_)]
# 提取subjName_forSelect完毕
self.folderNameContainingFile_forSelect = folderNameContainingFile_forSelect
self.num_countBackwards = num_countBackwards
self.regularExpressionOfSubjName_forNeuroimageDataFiles = regularExpressionOfSubjName_forNeuroimageDataFiles
self.keywordThatFileContain = keywordThatFileContain
self.neuroimageDataPath = neuroimageDataPath
self.savePath = savePath
self.n_processess = n_processess
self.ifSaveLog = ifSaveLog
self.ifCopy = ifCopy
self.ifMove = ifMove
self.saveInToOneOrMoreFolder = saveInToOneOrMoreFolder
self.saveNameSuffix = saveNameSuffix
self.ifRun = ifRun
# ===================================================================
def walkAllPath(self):
allWalkPath = os.walk(self.neuroimageDataPath)
# allWalkPath=[allWalkPath_ for allWalkPath_ in allWalkPath]
return allWalkPath
def fetch_allFilePath(self, allWalkPath):
allFilePath = []
for onePath in allWalkPath:
for oneFile in onePath[2]:
path = os.path.join(onePath[0], oneFile)
allFilePath.append(path)
return allFilePath
def fetch_allSubjName(self, allFilePath):
'''
num_countBackwards:subjName在倒数第几个block内(第一个计数为1)
# 如'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt'
# 的subjName在倒数第3个中
'''
# allWalkPath=sel.walkAllPath()
# allFilePath=sel.fetch_allFilePath(allWalkPath)
allSubjName = allFilePath
for i in range(self.num_countBackwards - 1):
allSubjName = [os.path.dirname(allFilePath_)
for allFilePath_ in allSubjName]
allSubjName = [os.path.basename(allFilePath_)
for allFilePath_ in allSubjName]
allSubjName = pd.DataFrame(allSubjName)
# allSubjName=allSubjName.iloc[:,0].where(allSubjName.iloc[:,0]!='').dropna()
# allSubjName=pd.DataFrame(allSubjName)
return allSubjName
def fetch_folerNameContainingFile(self, allFilePath):
'''
如果file上一级folder不是subject name,那么就涉及到选择那个文件夹下的file
此时先确定每一个file上面的folder name(可能是模态名),然后根据你的关键词来筛选
'''
folerNameContainingFile = [os.path.dirname(
allFilePath_) for allFilePath_ in allFilePath]
folerNameContainingFile = [os.path.basename(
folderName) for folderName in folerNameContainingFile]
return folerNameContainingFile
def fetch_allFileName(self, allFilePath):
'''
获取把所有file name,用于后续的筛选。
适用场景:假如跟file一起的有我们不需要的file,
比如混杂在dicom file中的有text文件,而这些text是我们不想要的。
'''
allFileName = [os.path.basename(allFilePath_)
for allFilePath_ in allFilePath]
return allFileName
# ===================================================================
    def screen_pathLogicalLocation_accordingTo_yourSubjName(self, allSubjName):
        """One-column boolean DataFrame: True where the subject name extracted
        from the neuroimaging path exactly matches a reference subject name.
        """
        # Exact matching: a row only counts when the regex capture equals
        # a reference name completely.
        # make sure subjName_forSelect is pd.Series and its content is string
        if type(self.subjName_forSelect) is type(pd.DataFrame([1])):
            self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0]
        if type(self.subjName_forSelect[0]) is not str:
            self.subjName_forSelect = pd.Series(
                self.subjName_forSelect, dtype='str')
        # Both sides of the comparison must share the same dtype.
        try:
            allSubjName = allSubjName.iloc[:, 0].str.findall(
                self.regularExpressionOfSubjName_forNeuroimageDataFiles)
            # After the regex, unmatched rows hold an empty list; treat
            # them as no-match (None) so they drop out of the join below.
            allSubjName_temp = []
            for name in allSubjName.values:
                if name:
                    allSubjName_temp.append(name[0])
                else:
                    allSubjName_temp.append(None)
            allSubjName = allSubjName_temp
            allSubjName = pd.DataFrame(allSubjName)
            self.subjName_forSelect = pd.DataFrame(self.subjName_forSelect)
            # Intersection via an index join on the extracted names.
            intersect = allSubjName.set_index(0).join(
                self.subjName_forSelect.set_index(0), how='right')
            intersect = pd.Series(intersect.index)
        # NOTE(review): bare except + sys.exit hides the real error type;
        # consider narrowing this to the expected pandas/type errors.
        except:
            print('subjName mismatch subjName_forSelected!\nplease check their type')
            sys.exit(0)
        if any(intersect):
            # One boolean column per matched name, then combined: a row is
            # selected when it equals exactly one of the matched names.
            allSubjName = pd.DataFrame(allSubjName)
            logic_loc = [allSubjName == intersect_ for intersect_ in intersect]
            if len(logic_loc) > 1:
                logic_loc = pd.concat(logic_loc, axis=1)
                logic_loc = np.sum(logic_loc, axis=1)
                logic_loc = logic_loc == 1
            else:
                logic_loc = logic_loc
            logic_loc = pd.DataFrame(logic_loc)
        else:
            # Nothing matched: all-False mask.
            logic_loc = np.zeros([len(allSubjName), 1]) == 1
            logic_loc = pd.DataFrame(logic_loc)
        return logic_loc
def screen_pathLogicalLocation_accordingTo_folerNameContainingFile(
self, folerNameContainingFile):
# 匹配folerNameContainingFile:注意此处用的连续模糊匹配,只要含有这个关键词,则匹配
if self.folderNameContainingFile_forSelect:
logic_loc = [
self.folderNameContainingFile_forSelect in oneName_ for oneName_ in folerNameContainingFile]
logic_loc = pd.DataFrame(logic_loc)
else:
logic_loc = np.ones([len(folerNameContainingFile), 1]) == 1
logic_loc = pd.DataFrame(logic_loc)
return logic_loc
def screen_pathLogicalLocation_accordingTo_fileName(self, allFileName):
# 匹配file name:注意此处用的连续模糊匹配,只要含有这个关键词,则匹配
if self.keywordThatFileContain:
logic_loc = [
self.keywordThatFileContain in oneName_ for oneName_ in allFileName]
logic_loc = pd.DataFrame(logic_loc)
else:
logic_loc = np.ones([len(allFileName), 1]) == 1
logic_loc = pd.DataFrame(logic_loc)
return logic_loc
def fetch_totalLogicalLocation(self,
                               logicLoc_subjName, logicLoc_folderNameContaningFile, logicLoc_fileName):
    """AND-combine the three per-criterion boolean columns.

    A row is selected only when all three criteria (subject name,
    containing-folder keyword, file-name keyword) match.

    Returns:
        pd.Series of bool, one entry per candidate path.
    """
    combined = pd.concat([logicLoc_subjName,
                          logicLoc_folderNameContaningFile,
                          logicLoc_fileName],
                         axis=1)
    n_criteria = combined.shape[1]
    # True only where every criterion column is True.
    return combined.sum(axis=1) == n_criteria
def fetch_selectedFilePath_accordingPathLogicalLocation(self,
                                                        allFilePath, allSubjName, logic_loc):
    """Apply the combined boolean mask to both paths and subject names.

    Returns:
        tuple: (selected file paths, selected subject names), both as
        single-column DataFrames with NaN rows dropped, row-aligned.
    """
    # Selected file paths.
    selected_path = pd.DataFrame(allFilePath)[logic_loc].dropna()
    # Corresponding subject names.
    selected_name = pd.DataFrame(allSubjName)[logic_loc].dropna()
    return selected_path, selected_name
# ===================================================================
def copy_allDicomsOfOneSubj(
        self,
        i,
        subjName,
        allSelectedSubjName,
        allSelectedFilePath):
    """Copy or move every selected file belonging to one subject.

    Destination depends on ``self.saveInToOneOrMoreFolder``:
      * 'saveToEachSubjFolder' -- one sub-folder per subject under savePath.
      * 'saveToOneFolder'      -- a single folder path built from the subject
        name plus ``saveNameSuffix`` under savePath.
    ``ifCopy``/``ifMove`` select the action; setting both is rejected.

    Args:
        i: zero-based position of this subject (progress display only).
        subjName: unique subject identifier being processed.
        allSelectedSubjName: DataFrame of subject names, one row per file.
        allSelectedFilePath: DataFrame of file paths, row-aligned with names.
    """
    n_allSelectedSubj = len(allSelectedSubjName)
    print('Copying the {}/{}th subject: {}...'.format(i +
                                                      1, n_allSelectedSubj, subjName))
    # Each subject's files are saved into its own subjxxx folder.
    if self.saveInToOneOrMoreFolder == 'saveToEachSubjFolder':
        output_folder = os.path.join(self.savePath, subjName)
        # Create the subjxxx folder on first use.
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
    # All files are saved into one folder (path named subjxxx + suffix).
    if self.saveInToOneOrMoreFolder == 'saveToOneFolder':
        output_folder = os.path.join(self.savePath,
                                     subjName + self.saveNameSuffix)
    # Copy OR move OR do nothing, depending on the flags.
    # Rows of this subject in the selected-files table.
    fileIndex = allSelectedSubjName[(
        allSelectedSubjName.values == subjName)].index.tolist()
    if self.ifCopy == 1 and self.ifMove == 0:
        [shutil.copy(allSelectedFilePath.loc[fileIndex_, :][0],
                     output_folder) for fileIndex_ in fileIndex]
    elif self.ifCopy == 0 and self.ifMove == 1:
        [shutil.move(allSelectedFilePath.loc[fileIndex_, :][0],
                     output_folder) for fileIndex_ in fileIndex]
    elif self.ifCopy == 0 and self.ifMove == 0:
        print('### No copy and No move ###\n')
    else:
        print('### Cannot copy and move at the same time! ###\n')
    print('OK!\n')
#
def copy_allDicomsOfAllSubj_multiprocess(self, allSelectedSubjName,
                                         allSelectedFilePath):
    """Copy/move the selected files of every subject, optionally in parallel.

    Uses the joblib threading backend; falls back to one worker when the
    number of unique subjects is small (<= 500), where thread start-up cost
    would dominate the actual copying.
    """
    # Create the destination root folder.
    if not os.path.exists(self.savePath):
        os.makedirs(self.savePath)
    # Multi-threaded copy; time the whole run.
    s = time.time()
    # Unique subject names (one task per subject).
    uniSubjName = allSelectedSubjName.iloc[:, 0].unique()
    # With few subjects, do not spawn multiple workers.
    if len(uniSubjName) <= 500:
        self.n_processess = 1
    print('Copying...\n')
    Parallel(n_jobs=self.n_processess, backend='threading')(delayed(self.copy_allDicomsOfOneSubj)(i, subjName, allSelectedSubjName, allSelectedFilePath)
                                                            for i, subjName in enumerate(uniSubjName))
    e = time.time()
    print('Done!\nRunning time is {:.1f}'.format(e - s))
# ===================================================================
def main_run(self):
    """End-to-end pipeline: walk, filter, (optionally) log, then copy/move.

    Returns:
        tuple: (allFilePath, allSubjName, logic_loc,
                allSelectedFilePath, allSelectedSubjName)
    """
    # All candidate paths and names.
    allWalkPath = self.walkAllPath()
    allFilePath = self.fetch_allFilePath(allWalkPath)
    allSubjName = self.fetch_allSubjName(allFilePath)
    allFileName = self.fetch_allFileName(allFilePath)
    # Selection inputs.
    folderNameContainingFile = self.fetch_folerNameContainingFile(
        allFilePath)
    # logicLoc_subjName: boolean locations from matching subject names, etc.
    # fileName != subjName: a file may be xxx.nii while the subject is subjxxx.
    logicLoc_subjName = self.screen_pathLogicalLocation_accordingTo_yourSubjName(
        allSubjName)
    logicLoc_folderNameContaningFile = self.screen_pathLogicalLocation_accordingTo_folerNameContainingFile(
        folderNameContainingFile)
    logicLoc_fileName = self.screen_pathLogicalLocation_accordingTo_fileName(
        allFileName)
    logic_loc = self.fetch_totalLogicalLocation(
        logicLoc_subjName, logicLoc_folderNameContaningFile, logicLoc_fileName)
    allSelectedFilePath, allSelectedSubjName = self.fetch_selectedFilePath_accordingPathLogicalLocation(
        allFilePath, allSubjName, logic_loc)
    # Save text logs so the selection can be checked afterwards.
    if self.ifSaveLog:
        now = time.localtime()
        now = time.strftime("%Y-%m-%d %H:%M:%S", now)
        #
        uniSubjName = allSelectedSubjName.iloc[:, 0].unique()
        uniSubjName = [uniSubjName_ for uniSubjName_ in uniSubjName]
        uniSubjName = pd.DataFrame(uniSubjName)
        allSelectedFilePath.to_csv(
            os.path.join(
                self.savePath,
                'log_allSelectedFilePath.txt'),
            index=False,
            header=False)
        allSelectedSubjPath = [os.path.dirname(
            allSelectedFilePath_) for allSelectedFilePath_ in allSelectedFilePath.iloc[:, 0]]
        allSelectedSubjPath = pd.DataFrame(
            allSelectedSubjPath).drop_duplicates()
        allSelectedSubjPath.to_csv(
            os.path.join(
                self.savePath,
                'log_allSelectedSubjPath.txt'),
            index=False,
            header=False)
        uniSubjName.to_csv(
            os.path.join(
                self.savePath,
                'log_allSelectedSubjName.txt'),
            index=False,
            header=False)
        # NOTE(review): self.difName is referenced here but its assignment
        # appears commented out upstream -- this likely raises AttributeError
        # when ifSaveLog is enabled; confirm before relying on it.
        self.difName.to_csv(
            os.path.join(
                self.savePath,
                'log_difdSubjName.txt'),
            index=False,
            header=False)
        allSubjName.to_csv(
            os.path.join(
                self.savePath,
                'log_allSubjName.txt'),
            index=False,
            header=False)
        # Single worker when the subject count is small.
        if len(uniSubjName) <= 100:
            self.n_processess = 1
        # Append the run configuration to copy_inputs.txt for provenance.
        f = open(os.path.join(self.savePath, "copy_inputs.txt"), 'a')
        f.write("\n\n")
        f.write('====================' + now + '====================')
        f.write("\n\n")
        f.write("referencePath is: " + self.referencePath)
        f.write("\n\n")
        f.write(
            "folderNameContainingFile_forSelect are: " +
            self.folderNameContainingFile_forSelect)
        f.write("\n\n")
        f.write("num_countBackwards is: " + str(self.num_countBackwards))
        f.write("\n\n")
        f.write("regularExpressionOfSubjName_forNeuroimageDataFiles is: " +
                str(self.regularExpressionOfSubjName_forNeuroimageDataFiles))
        f.write("\n\n")
        f.write("keywordThatFileContain is: " +
                str(self.keywordThatFileContain))
        f.write("\n\n")
        f.write("neuroimageDataPath is: " + self.neuroimageDataPath)
        f.write("\n\n")
        f.write("savePath is: " + self.savePath)
        f.write("\n\n")
        f.write("n_processess is: " + str(self.n_processess))
        f.write("\n\n")
        f.close()
    # Copy/move only when explicitly requested (ifRun=0 is a dry run).
    if self.ifRun:
        self.copy_allDicomsOfAllSubj_multiprocess(
            allSelectedSubjName, allSelectedFilePath)
    return allFilePath, allSubjName, logic_loc, allSelectedFilePath, allSelectedSubjName
if __name__ == '__main__':
    # Example driver (paths are machine-specific): build a copy_fmri selector
    # for .nii files of the subjects listed in folder.txt and run it as a
    # dry run (ifRun=0 -> only selection/logging, nothing is copied).
    import lc_copy_selected_file_V4 as copy
    # basic['folder'].to_csv(r'I:\dynamicALFF\folder.txt',header=False,index=False)
    path=r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018_07_DynamicFC_insomnia\FunImgARWS'
    folder=r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018_07_DynamicFC_insomnia\folder.txt'
    sel=copy.copy_fmri(referencePath=folder,
                       regularExpressionOfsubjName_forReference='([1-9]\d*)',
                       folderNameContainingFile_forSelect='',
                       num_countBackwards=2,
                       regularExpressionOfSubjName_forNeuroimageDataFiles='([1-9]\d*)',\
                       keywordThatFileContain='nii',
                       neuroimageDataPath=path,
                       savePath=r'D:\WorkStation_2018\Workstation_Old\WorkStation_2018_07_DynamicFC_insomnia\test',
                       n_processess=5,
                       ifSaveLog=1,
                       ifCopy=1,
                       ifMove=0,
                       saveInToOneOrMoreFolder='saveToEachSubjFolder',
                       saveNameSuffix='',
                       ifRun=0)
    allFilePath,allSubjName,\
    logic_loc,allSelectedFilePath,allSelectedSubjName=\
        sel.main_run()
    print('Done!')
|
dongmengshi/easylearn | eslearn/utils/lc_meanFD_compare.py | <filename>eslearn/utils/lc_meanFD_compare.py
# -*- coding: utf-8 -*-
"""
Created on Mon May 20 21:33:44 2019
@author: lichao
"""
import sys
sys.path.append(r'F:\黎超\dynamicFC\Code\lc_rsfmri_tools_python')
import pandas as pd
from Statistics.lc_anova import oneway_anova
class CompareMean(object):
    """Compare the mean FD value between SZ, BD, MDD and HC using ANOVA.

    Workflow: ``_loadexcel -> _fetch_intersectionID -> _group_meanvalue ->
    _anova`` / ``_anova_abs``.  NOTE: ``sel`` is used in place of the
    conventional ``self`` throughout.
    """

    def __init__(sel):
        # Hard-coded inputs: one Excel ID list per diagnostic group plus the
        # mean-FD table (first column 'ID', remaining columns FD values).
        sel.sz_id = r'D:\WorkStation_2018\WorkStation_dynamicFC\Scales\SZ.xlsx'
        sel.bd_id = r'D:\WorkStation_2018\WorkStation_dynamicFC\Scales\BD.xlsx'
        sel.mdd_id = r'D:\WorkStation_2018\WorkStation_dynamicFC\Scales\MDD.xlsx'
        sel.hc_id = r'D:\WorkStation_2018\WorkStation_dynamicFC\Scales\HC.xlsx'
        sel.meanvalue = r'D:\WorkStation_2018\WorkStation_dynamicFC\Scales\meanFD.xlsx'
        # concat all id (group order: SZ, BD, MDD, HC)
        sel.all_id_path = [sel.sz_id, sel.bd_id, sel.mdd_id, sel.hc_id]

    def _loadexcel(sel):
        # Load every group's ID sheet, then replace the meanvalue *path*
        # attribute with the loaded DataFrame (attribute reused on purpose).
        sel.id = []
        for idxpath in sel.all_id_path:
            sel.id.append(sel.loadexcel(idxpath))
        sel.meanvalue = pd.read_excel(sel.meanvalue)

    def loadexcel(sel, idxpath):
        # Read one ID sheet; the first column holds the subject IDs.
        idx = pd.read_excel(idxpath, header=None)
        return idx

    def _fetch_intersectionID(sel):
        """Some subjects may lack a mean-FD entry, so keep only the
        intersection between each group's IDs and the mean-FD table IDs.
        """
        sel.all_meanFD_id = [list(
            set(list(idx.iloc[:, 0])) & set(sel.meanvalue['ID'])) for idx in sel.id]

    def _group_meanvalue(sel):
        """Build the per-group value arrays handed to the ANOVA."""
        sel.grouped_meanvalue = [sel.extract_meanvalue_accord_id(
            idx).values for idx in sel.all_meanFD_id]

    def extract_meanvalue_accord_id(sel, idx):
        # Rows of the mean-FD table whose ID is in idx; drop the ID column.
        meanvalue = sel.meanvalue[sel.meanvalue['ID'].isin(idx)].iloc[:, 1:]
        return meanvalue

    def _anova(sel):
        # One-way ANOVA across the four groups (signed values).
        f, p = oneway_anova(*sel.grouped_meanvalue)
        return f, p

    def _anova_abs(sel):
        """Head motion is signed, but direction is irrelevant for the
        statistics, so run the ANOVA on absolute mean values instead.
        """
        grpmva = [abs(gmv) for gmv in sel.grouped_meanvalue]
        (f, p) = oneway_anova(*grpmva)
        return (f, p)
if __name__ == '__main__':
    # Run the full pipeline and test group differences in |mean FD|.
    sel = CompareMean()
    sel._loadexcel()
    sel._fetch_intersectionID()
    sel._group_meanvalue()
    f, p = sel._anova_abs()
|
dongmengshi/easylearn | eslearn/utils/lc_match_maskVSorigin.py | <reponame>dongmengshi/easylearn
# utf-8
"""
Author: <NAME>
Email: <EMAIL>
For: Radiomics
Usage: 将原始series(underlay, dicomfile)与ROI(mask/overlay)对应起来
INPUT: root_roi and root_origin
ROI:
/root_roi
/06278717_WANGTIEHAN_R03855066_S1_Merge
/ROI_1
/06278717_WANGTIEHAN_R03855066_S1_Merge.nii # only one file
/ROI_2
/06278717_WANGTIEHAN_R03855066_S1_Merge.nii
/ROI_n
/07022112_LIZHONGLIANG_R04164650_S1_Merge
/ROI_1
/ROI_3
/ROI_n
Origin:
/root_origin
/06278717_WANGTIEHAN_R03855066
/06278717_WANGTIEHAN_R03855066_S1
/06278717_WANGTIEHAN_R03855066_S2
/06278717_WANGTIEHAN_R03855066_Sn
/07022112_LIZHONGLIANG_R04164650
----------------------------------------------------------------------------------
OUTPUT: sorted results according to ROI order
/savepath
/ROI
/ROI_1 # containing all ROI_1 file of all subjects
/06278717_WANGTIEHAN_R03855066_S1_Merge.nii
/06278717_WANGTIEHAN_R03855066_S1_Merge.nii
/06278717_WANGTIEHAN_R03855066_S1_Merge.nii
/ID_of_subject_n.nii
/ROI_n
/Origin
/ROI_1 # containing all ROI_1 series of all subjects that match the ROI
/06278717_WANGTIEHAN_R03855066_S1
/06278717_WANGTIEHAN_R03855066_S2
/06278717_WANGTIEHAN_R03855066_Sn
/ROI_n
----------------------------------------------------------------------------------
"""
import os
import shutil
import numpy as np
# input
root_roi = r'D:\dms-lymph-nodes\mask'
root_origin = r'D:\dms-lymph-nodes\1_finish'
def get_maskfiles(root_roi=r'D:\dms-lymph-nodes\mask'):
    """Scan the ROI root and index every subject's ROI sub-folders.

    Expected layout (see module docstring):
    root_roi/<subject>/<ROI_n>/<one .nii file>.

    Returns:
        roipath: flat list of every <subject>/<ROI_n> folder path.
        uni_roi: unique ROI folder names (e.g. ROI_1, ROI_2, ...).
        uid_roipath: subject UID per path (folder-name text before 'Merge').
        roiname_location: per-unique-ROI boolean arrays indexing ``roipath``.
    """
    subjname = os.listdir(root_roi)
    subjpath = [os.path.join(root_roi, name) for name in subjname]
    roiname = [os.listdir(path) for path in subjpath]
    roipath = []
    for path, name in zip(subjpath, roiname):
        roipath.append([os.path.join(path, n) for n in name])
    # flatten the per-subject lists into one list
    roipath = flatten_list(roipath)
    # extract uid (Windows paths assumed: split on backslash)
    uid_roipath = [mystr.split('\\')[-2] for mystr in roipath]
    uid_roipath = [mystr.split('Merge')[0][0:-1] for mystr in uid_roipath]
    flatten_roiname = np.array([mystr.split('\\')[-1] for mystr in roipath])
    uni_roi = np.unique(flatten_roiname)
    # Boolean mask over roipath for each unique ROI name.
    roiname_location = [flatten_roiname == ur for ur in uni_roi]
    return roipath, uni_roi, uid_roipath, roiname_location
def get_originalfiles(root_origin=r'D:\dms-lymph-nodes\1_finish'):
    """Scan the original-series root and index every series folder.

    Expected layout: root_origin/<subject>/<subject_Sn> series folders.

    Returns:
        seriespath: flat list of every series folder path.
        uid_seriespath: the series folder name (UID) for each path.
    """
    subjname = os.listdir(root_origin)
    subjpath = [os.path.join(root_origin, name) for name in subjname]
    seriesname = [os.listdir(path) for path in subjpath]
    seriespath = []
    for path, name in zip(subjpath, seriesname):
        seriespath.append([os.path.join(path, n) for n in name])
    # flatten the per-subject lists into one list
    seriespath = flatten_list(seriespath)
    # extract uid: the last path component (Windows paths assumed)
    uid_seriespath = [mystr.split('\\')[-1] for mystr in seriespath]
    return seriespath, uid_seriespath
def flatten_list(mylist):
    """Flatten one nesting level: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for inner in mylist:
        flat.extend(inner)
    return flat
def match(roipath, uid_roipath, seriespath, uid_seriespath, roiname_location):
    """For every ROI, locate the original series matching each ROI UID.

    Returns a nested list: one entry per unique ROI, each holding one boolean
    array per ROI UID flagging the equal positions in ``uid_seriespath``.
    (``roipath`` and ``seriespath`` are accepted for interface symmetry but
    are not read here.)
    """
    roi_uids = np.array(uid_roipath)
    series_uids = np.array(uid_seriespath)
    matched_series_location = []
    for logic in roiname_location:
        per_roi = [np.where(series_uids == uid, True, False)
                   for uid in roi_uids[logic]]
        matched_series_location.append(per_roi)
    return matched_series_location
def move(savepath, uni_roi, roipath, seriespath, roiname_location, matched_series_location):
    """Copy ROI files and their matching original series into ``savepath``,
    grouped by ROI name (see the module docstring for the output layout).

    Existing destinations are skipped, so the function is safe to re-run.
    """
    count = 1
    nsubj = len(uni_roi)
    for rn, rl in zip(uni_roi, roiname_location):
        #%% ROI files
        # Create the destination folder for this ROI name.
        newfolder = os.path.join(savepath, 'ROI', rn)
        if not os.path.exists(newfolder):
            os.makedirs(newfolder)
        # Copy every subject's file for this ROI.
        oldfolder = np.array(roipath)[rl]
        # because one ROI folder only one file, so [0]
        oldfile = [os.listdir(folder)[0] for folder in oldfolder]
        oldfilepath = [os.path.join(folder, file)
                       for folder, file in zip(oldfolder, oldfile)]
        newfilepath = [os.path.join(newfolder, file) for file in oldfile]
        count_inner = 1
        nfile = len(newfilepath)
        for old, new in zip(oldfilepath, newfilepath):
            print(
                f'Running for ROI {count}/{nsubj} [subprocessing {count_inner}/{nfile}] ...\n')
            if os.path.exists(new):
                print(f'{new} exists!\n')
            else:
                shutil.copy(old, new)
            count_inner += 1
        count += 1
    else:
        # for/else: runs once after the loop finishes normally.
        print(f'Processing ROI completed!\n')
    #%% Original series
    count = 1
    for rn, sl in zip(uni_roi, matched_series_location):
        # Create the destination folder for this ROI's matched series.
        newfolder = os.path.join(savepath, 'Origin', rn)
        if not os.path.exists(newfolder):
            os.makedirs(newfolder)
        # Copy each matched series folder (first match per UID).
        oldfolder = [np.array(seriespath)[asl][0] for asl in sl]
        newsubfolder = [os.path.join(
            newfolder, os.path.basename(old)) for old in oldfolder]
        count_inner = 1
        nfile = len(newsubfolder)
        for old, new in zip(oldfolder, newsubfolder):
            print(
                f'Running for Origin {count}/{nsubj} [subprocessing {count_inner}/{nfile}] ...\n')
            if os.path.exists(new):
                print(f'{new} exists!\n')
            else:
                shutil.copytree(old, new)
            count_inner += 1
        count += 1
    else:
        print(f'Processing Origin completed!\n')
#%%
if __name__ == '__main__':
    # Command-line entry; example invocation below.
    """
    python lc_match_maskVSorigin.py I:\\Project_Lyph\\ROI_venus D:\\dms-lymph-nodes\\1_finish I:\\Project_Lyph\\Grouped_ROI_venous
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('root_roi', type=str, help='ROI文件夹')
    parser.add_argument('root_origin', type=str, help='原始文件夹')
    parser.add_argument('savepath', type=str, help='保存结果的路径')
    args = parser.parse_args()
    #%% Index ROI and original files, match them, then copy/group the output.
    roipath, uni_roi, uid_roipath, roiname_location = get_maskfiles(root_roi=args.root_roi)
    seriespath, uid_seriespath = get_originalfiles( root_origin=args.root_origin)
    matched_series_location = match(roipath, uid_roipath, seriespath, uid_seriespath, roiname_location)
    move(args.savepath, uni_roi, roipath, seriespath, roiname_location, matched_series_location)
|
dongmengshi/easylearn | eslearn/utils/extract_roi160info.py | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 6 19:57:57 2019
@author: lenovo
"""
import numpy as np
import pandas as pd
# roi -> network mapping: parse the 160-ROI label text file (appears to be
# the Dosenbach 160 atlas -- see dos160_labels.csv below) and export the
# network label column aligned with the CSV's ROI order.
with open(r'F:\Data\ASD\ROI_160.txt', 'r') as f:
    info = f.readlines()
info = pd.Series(info)
# ROI name: strip coordinates/indices, keep the textual label part.
roi = info.str.findall('\d.*[a-zA-Z]*\d')
roi = pd.Series([rn[0] for rn in roi])
roi = roi.str.findall('[a-zA-Z].*[a-zA-Z]')
s=[]
for roi_ in roi:
    s.append([ss for ss in roi_ if ss.strip() != ''])
# Join two-part names with a space; keep single-part names unchanged.
s = [s[0]+ ' ' + s[1] if len(s) == 2 else s[0] for s in s]
roi = pd.Series(s)
# Network label: last whitespace-separated token of each line.
net = pd.Series([info_.split(' ')[-1].strip() for info_ in info])
# Align with the label CSV and write the reordered network column to Excel.
exl = pd.read_csv(r'F:\Data\ASD\dos160_labels.csv')
roi_exl = exl.iloc[:,1]
roi_exl=list(roi_exl)
roi = list(roi)
ll = [roi_exl.index(roi_) for roi_ in roi]
net_exl = net[ll]
net_exl.to_excel(r'F:\Data\ASD\network.xlsx')
|
dongmengshi/easylearn | eslearn/utils/el_call_powershell.py | """ Call PowerShell
This class is copy from scapy
"""
import os
from glob import glob
import subprocess as sp
class PowerShell:
    """Minimal wrapper around a persistent Windows PowerShell subprocess.

    Copied from scapy.  Use as a context manager; commands are written to
    PowerShell's stdin and the combined stdout/stderr text is returned.
    Windows-only (relies on sp.STARTUPINFO).
    """

    def __init__(self, coding, ):
        # Start PowerShell reading commands from stdin, without headers.
        cmd = [self._where('PowerShell.exe'),
               "-NoLogo", "-NonInteractive",  # Do not print headers
               "-Command", "-"]  # Listen commands from stdin
        # Hide the console window of the child process.
        startupinfo = sp.STARTUPINFO()
        startupinfo.dwFlags |= sp.STARTF_USESHOWWINDOW
        # stderr is merged into stdout, so ``errs`` below is always None.
        self.popen = sp.Popen(cmd, stdout=sp.PIPE, stdin=sp.PIPE, stderr=sp.STDOUT, startupinfo=startupinfo)
        self.coding = coding

    def __enter__(self):
        return self

    def __exit__(self, a, b, c):
        # Terminate the child process on context exit.
        self.popen.kill()

    def run(self, cmd, timeout=15):
        """Send one command; return (decoded output, stderr placeholder)."""
        b_cmd = cmd.encode(encoding=self.coding)
        try:
            b_outs, errs = self.popen.communicate(b_cmd, timeout=timeout)
        except sp.TimeoutExpired:
            # Kill the hung process and collect whatever output remains.
            self.popen.kill()
            b_outs, errs = self.popen.communicate()
        outs = b_outs.decode(encoding=self.coding)
        return outs, errs

    @staticmethod
    def _where(filename, dirs=None, env="PATH"):
        """Find file in current dir, in deep_lookup cache or in system path"""
        if dirs is None:
            dirs = []
        if not isinstance(dirs, list):
            dirs = [dirs]
        if glob(filename):
            return filename
        paths = [os.curdir] + os.environ[env].split(os.path.pathsep) + dirs
        try:
            # First glob match across the search path wins.
            return next(os.path.normpath(match)
                        for path in paths
                        for match in glob(os.path.join(path, filename))
                        if match)
        except (StopIteration, RuntimeError):
            raise IOError("File not found: %s" % filename)
if __name__ == '__main__':
    # Example: regenerate a Qt .py module from its .ui file via pyuic5.
    cmd = "pyuic5 -o D:/My_Codes/easylearn-fmri/eslearn/GUI/easylearn_machine_learning_gui.py D:/My_Codes/easylearn-fmri/eslearn/GUI/easylearn_machine_learning_gui.ui"
    with PowerShell('GBK') as ps:
        outs, errs = ps.run(cmd)
    print('error:', os.linesep, errs)
    print('output:', os.linesep, outs)
dongmengshi/easylearn | eslearn/machine_learning/parallel_processing/lc_parallelCompute.py | <filename>eslearn/machine_learning/parallel_processing/lc_parallelCompute.py
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 19:47:54 2018
@author: lenovo
"""
#from joblib import Parallel, delayed
import time
import numpy as np
from math import sqrt
# NOTE(review): the %time lines below are IPython magics -- this file only
# runs inside IPython, and the Parallel/delayed import above is commented out.
# small data
%time result1 = Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10000))
%time result2 = Parallel(n_jobs=8)(delayed(sqrt)(i**2) for i in range(10000))
#big data
%time result = Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(1000000))
%time result = Parallel(n_jobs=-1)(delayed(sqrt)(i**2) for i in range(1000000))
def add(x):
    """Allocate and return an x-by-1 zero array.

    Fix: the original allocated the array but never returned it, so every
    joblib worker produced ``None``; returning makes the work observable
    while keeping the call signature unchanged.
    """
    return np.zeros([x, 1])
# Time the threaded allocation benchmark.
# NOTE(review): time.clock was removed in Python 3.8 -- use time.perf_counter.
start_time=time.clock()
a=Parallel(n_jobs=2,backend="threading")(delayed(add)(i) for i in range(100000))
end_time=time.clock()
print('耗时{:.1f}秒'.format(end_time-start_time))
|
dongmengshi/easylearn | eslearn/SSD_classification/Stat/lc_describe_info_feu.py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 17:03:05 2020
This code is used to describe information for first episode unmedicated SSD
@author: lenovo
"""
import pandas as pd

# Machine-specific inputs: covariates for unmedicated SSD + HC, and the
# full clinical-scale spreadsheet.
info_file = r'D:\WorkStation_2018\SZ_classification\Scale\cov_unmedicated_sp_and_hc_550.txt'
scale_file = r'D:\WorkStation_2018\SZ_classification\Scale\10-24大表.xlsx'
info = pd.read_csv(info_file)
scale = pd.read_excel(scale_file)
# Inner-join on 'folder' and keep the demographic/clinical columns of interest.
info_all = pd.merge(info, scale, left_on='folder', right_on='folder', how='inner')[['folder', 'diagnosis', 'age', 'sex', 'BPRS_Total', '病程月']]
# Descriptive statistics per diagnosis, plus sex counts per group (0=HC, 1=SSD).
info_descrb = info_all.groupby('diagnosis').describe()
sex_hc = info_all[info_all['diagnosis'] == 0]['sex'].value_counts()
sex_ssd = info_all[info_all['diagnosis'] == 1]['sex'].value_counts()
import pandas as pd
from matplotlib impo
|
dongmengshi/easylearn | eslearn/utils/lc_splitX_accord_sorted_y.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 21:07:53 2018
@author: lenovo
"""
import numpy as np
def splitX_accord_sorted_y(y, k):
    """Split sample indices into k folds balanced over sorted y.

    y's positions are sorted ascending by value and then dealt round-robin
    into k folds, so each fold spans the whole range of y.

    Args:
        y: 1-D array of target values, or a 1-row 2-D array (row vector).
        k: number of folds.

    Returns:
        list of k index arrays (positions into the original y).
    """
    y = np.asarray(y)
    if y.ndim > 1 and y.shape[0] == 1:
        # Fix: the original ``y.reshape(y.shape[1], 0)`` raised ValueError
        # for any non-empty row vector; flatten to 1-D instead.
        y = y.ravel()
    ind_y_sorted = np.argsort(y, axis=0)  # ascending
    ind_orig = []
    for i in range(k):
        ind_one_fold = np.arange(i, y.size, k)  # every k-th sorted position
        ind_orig.append(ind_y_sorted[ind_one_fold])
    return ind_orig
|
dongmengshi/easylearn | eslearn/visualization/lc_scatterplot.py | <reponame>dongmengshi/easylearn<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 15:29:02 2018
sns.set(stynamele='ticks', palette='muted', color_codes=True, font_scale=1.5)
sns.set_stynamele('dark')
主题 stynamele:darkgrid, whitegrid, dark, white, ticks,默认为darkgrid。
sns.set_palette:deep, muted, bright, pastel, dark, colorblind
sns.set_contexnamet('notebook', rc={'lines.linewidth':1.5})
sns.despine():
对于白底(white,whitegrid)以及带刻度(ticks)而言,顶部的轴是不需要的,默认为去掉顶部的轴;
sns.despine(left=True):去掉左部的轴,也即 yname 轴;
注意这条语句要放在 plot 的动作之后,才会起作用;
@author: <NAME>
"""
# 载入绘图模块
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
# Scatter plot with a fitted regression line (distributions optional).
def scatter_LC(df, x='x', y='y', color='g', marker='o'):
    """Draw a seaborn regression scatter plot of df[y] against df[x].

    Args:
        df: DataFrame holding the two columns to plot.
        x, y: column names in df.
        color, marker: matplotlib style options passed to sns.regplot.
    """
    sns.set(context='paper', style='whitegrid', palette='colorblind', font='sans-serif',font_scale=1, color_codes=False, rc=None)
    # sns.JointGrid(data=df, x=x, y=y).plot(sns.regplot, sns.distplot)
    sns.regplot(data=df, x=x, y=y, fit_reg=1, color=color, marker=marker)
    # Axis styling: remove top/right spines, resize tick and axis labels.
    ax = plt.gca()
    sns.despine()
    xticklabel = ax.get_xticklabels()
    yticklabel = ax.get_yticklabels()
    xlabel = ax.get_xlabel()
    ylabel = ax.get_ylabel()
    plt.setp(xticklabel, size=10,rotation=0, horizontalalignment='right')
    plt.setp(yticklabel, size=10,rotation=0, horizontalalignment='right')
    plt.xlabel(xlabel, size=15, rotation=0)
    plt.ylabel(ylabel, size=15, rotation=0)
    # plt.show()
if __name__ == "__main__":
    # Example: plot patient ROI signal against a scale score (machine-specific paths).
    plt.figure(figsize=(10,8))
    signal_p = r'D:\WorkStation_2018\Workstation_Old\Workstation_2019_Insomnia_caudate_GCA\GCA\Y2X\ROISignals_T2\ROISignals_ROISignal_patients.txt'
    signal_c = r'D:\WorkStation_2018\Workstation_Old\Workstation_2019_Insomnia_caudate_GCA\GCA\Y2X\ROISignals_T2\ROISignals_ROISignal_controls.txt'
    s = r'D:\WorkStation_2018\Workstation_Old\Workstation_2019_Insomnia_caudate_GCA\GCA\Y2X\ROISignals_T2\sas.txt'
    df_signal_p = pd.read_csv(signal_p,header=None)
    df_signal_c = pd.read_csv(signal_c, header=None)
    df_scale = pd.read_csv(s,header=None)
    df = pd.concat([df_signal_p,df_signal_c],axis=0)
    dia = np.hstack([np.zeros(31,), np.ones(47,)])
    df['dia'] = pd.DataFrame(dia)
    # NOTE(review): df is rebuilt here, discarding the 'dia' column above -- confirm intent.
    df = pd.concat([df_signal_p,df_scale],axis=1)
    df.columns = ['x','y']
    scatter_LC(df, 'x', 'y', color='#008B8B', marker='o')
    plt.show()
    # NOTE(review): savefig after show() usually writes a blank figure -- verify.
    plt.savefig('pDMN_sas.tif', dpi=600)
|
dongmengshi/easylearn | eslearn/machine_learning/test/gcn_test_1.py | <filename>eslearn/machine_learning/test/gcn_test_1.py
"""
https://blog.csdn.net/skj1995/article/details/103780873
"""
import os.path as osp
import scipy.io as sio
import numpy as np
import torch
import torch.nn.functional as F
from torch_geometric.datasets import TUDataset
from torch_geometric.data import DataLoader
from torch_geometric.nn import GraphConv, TopKPooling, GCNConv
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
from torch_geometric.data import Data
# read data: graphs stored as a MATLAB .mat file (ENZYMES course data)
a=sio.loadmat(r'D:\My_Codes\easylearn-fmri\eslearn\machine_learning\test\GCNNCourseCodes\enzymes.mat')
# list of adjacency matrix
mat = a['A'][0]
# list of features
features = a['F'][0]
# label of graphs
target = a['Y'][0]
# test train index for 10-fold test (loaded but unused below)
TRid=a['tr']
TSid=a['ts']
# Generate Data
def generate_data(x, m, y):
    """Build a torch_geometric Data object from one graph.

    Args:
        x: node-feature matrix.
        m: dense adjacency matrix.
        y: scalar graph label.
    """
    # Convert the dense adjacency to a [2, E] COO edge index.
    m = np.where(m)
    m = np.array(m)
    edge_index = torch.tensor(m, dtype=torch.long)
    x = torch.tensor(x, dtype=torch.float)
    y = torch.tensor([y], dtype=torch.float)
    data = Data(x=x, edge_index=edge_index, y=y)
    return data
# Build Data objects, shuffle them, and hold out 1/10 for testing.
dataset = [generate_data(x, m, y) for (x, m, y) in zip(features, mat, target)]
perm = torch.randperm(len(dataset))
dataset1 = [dataset[i] for i in perm]
dataset = dataset1
del dataset1
n = len(dataset) // 10
test_dataset = dataset[:n]
train_dataset = dataset[n:]
test_loader = DataLoader(test_dataset, batch_size=60)
train_loader = DataLoader(train_dataset, batch_size=20)
# # View data
# for data_for_check in train_loader:
# print("data_for_check=",data_for_check)
# print("data_for_check.batch=",data_for_check.batch)
# print("data_for_check.batch.shape=",data_for_check.batch.shape)
# print("data_for_check.x.shape=",data_for_check.x.shape)
# print("data_for_check.num_features=",data_for_check.num_features)
# print("\n")
# Net
# Construct network
# Graph network: two GraphConv layers followed by a 3-layer MLP head.
class Net(torch.nn.Module):
    def __init__(self, num_feature, num_class):
        """num_feature: input node-feature size; num_class: output classes."""
        super(Net, self).__init__()
        self.conv1 = GraphConv(num_feature, 128)
        self.conv2 = GraphConv(128, 64)
        self.lin1 = torch.nn.Linear(64, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, num_class)

    def forward(self, data):
        # NOTE(review): the output is per-node log-probabilities; no
        # graph-level pooling is applied even though pooling ops are
        # imported at module level -- confirm this is intended.
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.log_softmax(self.lin3(x), dim=-1)
        return x
# Model/optimizer setup (3 input features, 6 output classes).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net(3, 6).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
def train(epoch):
    """One pass over train_loader; returns the mean NLL loss per graph."""
    model.train()
    loss_all = 0
    for data in train_loader:
        # print("raw_data.y.shape",data.y.shape)
        data = data.to(device)
        optimizer.zero_grad()
        output = model(data)
        # print("output.shape=", output.shape)
        # print("data.y.shape=", data.y.shape)
        data.y = data.y.long()  # NLL loss expects integer class targets
        loss = F.nll_loss(output, data.y)
        loss.backward()
        # Weight the batch loss by the number of graphs it contains.
        loss_all += data.num_graphs * loss.item()
        optimizer.step()
    return loss_all / len(train_dataset)
def test(loader):
    """Classification accuracy of the current model over ``loader``."""
    model.eval()
    correct = 0
    for data in loader:
        data = data.to(device)
        _, pred = model(data).max(dim=1)  # argmax over class log-probs
        correct += pred.eq(data.y).sum().item()
    return correct / len(loader.dataset)
# Train for 10 epochs, reporting train/test accuracy after each epoch.
for epoch in range(10):
    loss = train(epoch)
    train_acc = test(train_loader)
    test_acc = test(test_loader)
    print('Epoch: {:03d}, Loss: {:.5f}, Train Acc: {:.5f}, Test Acc: {:.5f}'.format(epoch, loss, train_acc, test_acc))
|
dongmengshi/easylearn | eslearn/visualization/lc_bar_plot_for_fc.py | # -*- coding: utf-8 -*-
"""
Created on Fri May 31 17:01:21 2019
@author: lenovo
"""
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.append(
r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
from Utils.lc_read_write_Mat import read_mat
from Plot.lc_barplot import BarPlot
class ExtractFC():
    """
    Backgrounds: For plot bar of my paper about dynamic FC
    Goal: Extract fc from fc matrix according to a given mask
    How: Extract data in mask from 2D fc matrix to a 1D vector for each fc file
         Identify group and 2 roi names for each extracted data vector
    NOTE: All fc files live in one folder; ``sel`` is used in place of ``self``.
    Attrs:
        fc_folder: directory of folder that contains fc files
        mask: mask used to extract fc vector from fc matrix
        roiname: excel file that contains each node name in network
        roiname_col: roi name in which column, e.g. 3
        group_name: group name given to the fc vector, e.g. 'SZ'/'BD'/'MDD'
    Returns:
        extracted_data:
            A 2D matrix, one row per fc file;
            dim = N*(M+1), N = number of fc files, M = number of 'True' in mask,
            '+1' is the Groups column
    """

    def __init__(sel, fc_folder=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zDynamic\state\allState17_4\state4_all\state1\state1_HC',
                 mask=r'D:\WorkStation_2018\WorkStation_dynamicFC\Data\zDynamic\state\allState17_4\state4_all\state1\result1\shared_1and2and3_fdr.mat',
                 roiname=r'D:\My_Codes\Github_Related\Github_Code\Template_Yeo2011\17network_label.xlsx',
                 group_name='HC'):
        sel.fc_folder = fc_folder
        sel.mask = mask
        sel.roiname = roiname
        sel.group_name = group_name
        sel._huename = 'Groups'  # hue column name used by the bar plot

    def _get_all_data(sel):
        # Lazily load every fc matrix in fc_folder plus the mask matrix.
        path = os.listdir(sel.fc_folder)
        path = (os.path.join(sel.fc_folder, pt) for pt in path)
        print('loading fc matrix of all fc files\n')
        sel._data = (read_mat(pt) for pt in path)  # generator: read on demand
        # get mask data
        sel._maskdata = read_mat(sel.mask)

    def _extract(sel):
        sel._get_all_data()
        print('extracting data from mask\n')
        # Keep only the matrix entries where the mask equals 1 (one row per file).
        extracted_data = [np.array(data)[np.array(
            sel._maskdata == 1)] for data in sel._data]
        sel._extracted_data = pd.DataFrame(extracted_data)

    def _add_roipairname(sel):
        """
        add roi pair name to each fc
        FIX: auto identify which hemisphere and roi name? case-sensitive!
        FIX: col 2 and col 1
        """
        # extract roi pair name from excel:
        # column 0 encodes the hemisphere, column 1 the node name
        roi = pd.read_excel(sel.roiname, header=None)
        which_hemisphere = roi.iloc[:, 0]
        which_hemisphere = [
            hem.split('_')[1][0] + ' ' for hem in which_hemisphere]
        roiname = roi.iloc[:, 1]
        roiname = which_hemisphere + roiname
        roiname = [n.strip() for n in roiname]
        # Make each fc name unique so that sns.barplot plots every fc.
        roiname = [n + '(' + str(i) + ')' for i, n in enumerate(roiname)]
        idxi, idxj = np.where(sel._maskdata == 1)
        roipairname = [roiname[i] + '--' + roiname[j]
                       for i, j in zip(idxi, idxj)]
        sel._extracted_data.columns = roipairname
        return sel._extracted_data

    def _add_groupname(sel):
        # Prepend a 'Groups' column holding the group label for every row.
        groupname = pd.DataFrame(
            [sel.group_name] * np.shape(sel._extracted_data)[0])
        extracted_data = pd.concat([groupname, sel._extracted_data], axis=1)
        # change columns name
        colname = list(extracted_data.columns)
        colname[0] = sel._huename
        extracted_data.columns = colname
        return extracted_data

    def _extract_one(sel, rootpath, whichstate, group_name):
        """
        Equal to the main function of the ExtractFC class.
        Only for _extract_all.
        """
        # NOTE(review): ``sel`` is rebound to a fresh ExtractFC instance here,
        # discarding the caller's configuration -- confirm this is intended.
        sel = ExtractFC(fc_folder=os.path.join(rootpath, whichstate, whichstate + '_' + group_name),
                        mask=os.path.join(rootpath, whichstate,
                                          'result1', 'shared_1and2and3_fdr.mat'),
                        roiname=r'D:\WorkStation_2018\Workstation_dynamic_FC_V2\Data\Network_and_plot_para\17network_label.xlsx',
                        group_name=group_name)
        sel._extract()
        sel._add_roipairname()
        data = sel._add_groupname()
        return data

    def _extract_all(sel,
                     rootpath=r'D:\WorkStation_2018\WorkStation_dynamicFC\Workstation_dynamic_fc_baobaoComputer\Data\Dynamic',
                     whichstate='state1'):
        """
        Extract all groups' fc data for one state and concat them.
        Only used for my paper.
        """
        # hc
        group_name = 'HC'
        data_hc = sel._extract_one(rootpath, whichstate, group_name)
        # mdd
        group_name = 'MDD'
        data_mdd = sel._extract_one(rootpath, whichstate, group_name)
        # bd
        group_name = 'BD'
        data_bd = sel._extract_one(rootpath, whichstate, group_name)
        # sz
        group_name = 'SZ'
        data_sz = sel._extract_one(rootpath, whichstate, group_name)
        # Stack all groups row-wise and renumber the index.
        data_all = pd.concat([data_hc, data_mdd, data_bd, data_sz], axis=0)
        data_all.index = np.arange(0, np.shape(data_all)[0])
        return data_all

    def _group_fc_accordingto_fctype(sel, data_all):
        """
        group fc according to intra- or inter-network fc
        """
        colname = pd.Series(data_all.columns)
        colname = colname[1:]  # skip the 'Groups' column
        fcname = [n.split('--') for n in colname]
        idx = []
        for i, nn in enumerate(fcname):
            # Same network token on both ends -> intra-network connection.
            pn = [n.split(' ')[1] for n in nn]
            if pn[0] == pn[1]:
                idx.append(i)
        colname_intra = colname.iloc[idx]
        colname_intre = list(set(data_all.columns) - set(colname_intra))
        colname_intra = list(colname_intra.iloc[:])
        colname_intra.append(sel._huename)  # keep 'Groups' with the intra set
        data_all_intra = data_all[colname_intra]
        data_all_inter = data_all[colname_intre]
        return data_all_intra, data_all_inter

    def _order_fc_accordingto_networkname(sel, data):
        """
        Order the fc columns according to network name so the
        bars come out sorted by network.
        """
        colname = list(data.columns)
        fcname = [name.split(' ')[1] if len(name.split(' '))
                  > 1 else name.split(' ')[0] for name in colname]
        # Stable sort of column positions by network name.
        idx = [i for i, v in sorted(enumerate(fcname), key=lambda x:x[1])]
        sorted_colname = [colname[id] for id in idx]
        sorted_data = data[sorted_colname]
        return sorted_data
class BarPlotForFC(BarPlot):
    """
    Plot bars for my paper about dynamic FC;
    bars are grouped into intra- and inter-network connections.
    NOTE: ``sel`` is used in place of ``self``.
    """

    def __init__(sel, x_location=np.arange(1, 39), savename='fig.tiff'):
        super().__init__()
        sel.x_location = x_location
        sel.hue_name = 'Groups'
        sel.hue_order = None
        sel.if_save_figure = 0
        sel.savename = savename
        sel.x_name = 'FC'
        sel.y_name = 'Z value'
        # NOTE(review): the trailing comma makes this the tuple (1,), which
        # overrides the 0 above and is always truthy -- confirm the intent.
        sel.if_save_figure = 1,

    def prepdata(sel, df):
        # Delegate to the BarPlot base-class preparation step.
        df = sel.data_preparation(df)
        return df

    def _plot(sel, data):
        sel.plot(data)
if __name__ == '__main__':
    # Extract state-4 FC for all groups, split into intra-/inter-network
    # column sets, order columns by network, and prepare bar-plot data.
    sel = ExtractFC()
    data_all = sel._extract_all(
        rootpath=r'D:\WorkStation_2018\Workstation_dynamic_FC_V2\Data\Dynamic',
        whichstate='state4')
    data_all = sel._group_fc_accordingto_fctype(data_all)
    data_intra = data_all[0]
    data_inter = data_all[1]
    data_intra = sel._order_fc_accordingto_networkname(data_intra)
    # Plot preparation: column order without the 'Groups' hue column.
    loc = list(set(data_intra.columns) - set(['Groups']))
    # Restore the original column order (set() destroys it).
    loc.sort(key = list(data_intra.columns).index)
    sel = BarPlotForFC(x_location=loc, savename=r'D:\WorkStation_2018\Workstation_dynamic_FC_V2\Figure\Supp\Bar\bar_intranetwork_s4.tiff')
    prepdata = sel.prepdata(data_intra)
    # sel._plot(prepdata)
    loc = list(set(data_inter.columns) - set(['Groups']))
    loc.sort(key = list(data_inter.columns).index)
    sel=BarPlotForFC(x_location=loc, savename=r'D:\WorkStation_2018\Workstation_dynamic_FC_V2\Figure\Supp\Bar\bar_internetwork_s4.tiff')
    prepdata=sel.prepdata(data_inter)
    # sel._plot(prepdata)
|
dongmengshi/easylearn | eslearn/utils/lc_selectFile_permSVC.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 31 15:07:39 2018
@author: lenovo
"""
# import
from nipype import SelectFiles, Node
# def
def selectFile(rootPath=r'D:\其他\老舅财务\allData'):
    """Collect all .mat file paths under *rootPath* with a nipype SelectFiles node.

    Args:
        rootPath: base directory searched for '*.mat' files.

    Returns:
        The matched path(s) reported by the node's 'path' output.
    """
    file_templates = {'path': '*.mat'}
    # Wrap SelectFiles in a Node so it can be run stand-alone.
    selector = Node(SelectFiles(file_templates), name='selectfiles')
    selector.inputs.base_directory = rootPath  # dataset root folder
    run_result = selector.run()
    return run_result.outputs.__dict__['path']
|
dongmengshi/easylearn | eslearn/machine_learning/test/easylearn_logger.py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 3 20:07:19 2020
https://zhuanlan.zhihu.com/p/78452993
@author: lenovo
"""
import torch
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
from torch_geometric.nn import MessagePassing
from torch_geometric.datasets import TUDataset
# dataset = TUDataset(root='/tmp/ENZYMES', name='ENZYMES')
class EdgeConv(MessagePassing):
    """Edge-convolution layer built on torch_geometric's MessagePassing."""

    def __init__(self, F_in, F_out):
        # Aggregate neighbour messages with an element-wise max.
        super(EdgeConv, self).__init__(aggr='max')
        # Per-edge MLP: concat(x_i, x_j - x_i) -> F_out -> F_out.
        first_layer = Lin(2 * F_in, F_out)
        second_layer = Lin(F_out, F_out)
        self.mlp = Seq(first_layer, ReLU(), second_layer)

    def forward(self, x, edge_index):
        # x: [N, F_in]; edge_index: [2, E]; returns [N, F_out].
        return self.propagate(edge_index, x=x)

    def message(self, x_i, x_j):
        # x_i, x_j: [E, F_in].  Build per-edge features as the target node's
        # features concatenated with the difference to the source node,
        # giving shape [E, 2 * F_in], then run them through the MLP.
        combined = torch.cat([x_i, x_j - x_i], dim=1)
        return self.mlp(combined)  # [E, F_out]
|
dongmengshi/easylearn | eslearn/visualization/lc_violinplot_yg.py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 22:21:27 2018
小提琴图:把升高的脑区和降低的脑区分开做小提琴图
增加的脑区:
x_location=np.arange(13,17,1)
减低的脑区
x_location=np.arange(5,13,1)
@author: lenovo
"""
import sys
sys.path.append(r'D:\myCodes\MVPA_LIChao\MVPA_Python\plot')
import lc_violinplot as violinplot
import lc_barplot as barplot
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# NOTE(review): pandas and numpy are imported a second time below (harmless).
import pandas as pd
import numpy as np

# ROI signal text files: controls (47 subjects) and patients (31 subjects).
data1 = r'D:\WorkStation_2018\Workstation_Old\Workstation_2019_Insomnia_caudate_GCA\Y2X\ROISignal_OFC_controls\ROISignals_ROISignal_controls.txt'
data2 = r'D:\WorkStation_2018\Workstation_Old\Workstation_2019_Insomnia_caudate_GCA\Y2X\ROISignal_OFC_patients\ROISignals_ROISignal_patients.txt'
data1 = pd.read_csv(data1, header=None)
data2 = pd.read_csv(data2, header=None)
# Stack controls on top of patients and rebuild a 0..77 row index.
df = pd.concat([data1, data2], axis=0)
df.index = np.arange(0, 78)
# Group labels: 0 = controls (first 47 rows), 1 = patients (last 31 rows).
df['group'] = pd.DataFrame(np.hstack([np.zeros(47,), np.ones(31,)]))
# NOTE(review): plt.plot(figsize=...) does not set the figure size;
# plt.figure(figsize=(4, 9)) was presumably intended — confirm.
plt.plot(figsize=(4, 9))
ax = sns.barplot(x='group',
                 y=0,
                 data=df,
                 orient="v")
ax1 = plt.gca()
ax1.patch.set_facecolor("w")  # white axes background
# Grid (disabled)
# plt.grid(axis="y", ls='--', c='k')
# Tick-label size and orientation
xticklabel = ax.get_xticklabels()
yticklabel = ax.get_yticklabels()
plt.setp(xticklabel, size=7, rotation=45, horizontalalignment='right')
plt.setp(yticklabel, size=10, rotation=0, horizontalalignment='right')
sns.despine()  # remove top and right spines
plt.savefig('bar.tif', dpi=600)
dongmengshi/easylearn | eslearn/utils/download_fcon1000_clinicaldata.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 4 20:47:55 2019
# download clinical info of fcon1000
# Author: <NAME>
"""
import os
import urllib
def get_sitename(root_dir=r'F:\Data\fcon_1000'):
    """Return the names of all site folders under ``root_dir``.

    Args:
        root_dir: folder that contains one sub-folder per fcon1000 site.
            Parameterized (was hard-coded) for reuse and testing; the default
            preserves the original behavior.

    Returns:
        list[str]: entry names of ``root_dir``, in ``os.listdir`` order.
    """
    return os.listdir(root_dir)
def get_url(sitename):
    """Build the participants.tsv download URL for each site.

    Args:
        sitename: iterable of fcon1000 site names.

    Returns:
        list[str]: one URL per site, under the FCON1000 S3 bucket.
    """
    root_path = 'https://fcp-indi.s3.amazonaws.com/data/Projects/FCON1000/'
    # f-string instead of the original no-op ''.join(...) wrapped around an
    # already-concatenated string.
    return [f'{root_path}{one_sitename}/participants.tsv' for one_sitename in sitename]
def download_info(file_url, root_dir=r'F:\Data\fcon_1000'):
    """Download participants.tsv for every site URL into ``root_dir``.

    Each URL is expected to end with ``.../<site>/participants.tsv``; the file
    is saved to ``<root_dir>/<site>/participants.tsv``.  Files that already
    exist locally are skipped; failed downloads are reported and skipped.

    Args:
        file_url: list of download URLs (see :func:`get_url`).
        root_dir: local root folder holding one sub-folder per site.
            Parameterized (was hard-coded); the default preserves behavior.
    """
    # Local import: a bare top-level `import urllib` does not guarantee the
    # request/error submodules are loaded.
    import urllib.request
    import urllib.error
    nf = len(file_url)
    for i, fu in enumerate(file_url):
        print(f'downloading {i+1}/{nf}\n')
        site = os.path.basename(os.path.dirname(fu))
        save_file = os.path.join(root_dir, site, 'participants.tsv')
        if os.path.exists(save_file):
            # Already downloaded — skip.
            continue
        try:
            urllib.request.urlretrieve(fu, save_file)
        except (urllib.error.URLError, OSError):
            # BUG FIX: was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.  Report and move on.
            print(f'no {fu}')
            continue
# NOTE(review): this print executes at *import* time, not after the
# downloads — presumably it was meant to follow a download_info() call.
print ('Done!')
# Make module executable (no default action is wired up yet).
if __name__ == '__main__':
    pass
|
dongmengshi/easylearn | eslearn/GUI/easylearn_machine_learning_run.py | # -*- coding: utf-8 -*-
"""The GUI of the machine_learning module of easylearn
Created on 2020/04/15
@author: <NAME>
Email:<EMAIL>
GitHub account name: lichao312214129
Institution (company): Brain Function Research Section, The First Affiliated Hospital of China Medical University, Shenyang, Liaoning, PR China.
@author: <NAME>
Email:<EMAIL>
GitHub account name: dongmengshi
Institution (company): Department of radiology, The First Affiliated Hospital of China Medical University, Shenyang, Liaoning, PR China.
License: MIT
"""
import sys
import os
import json
import cgitb
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QFileDialog
from eslearn.stylesheets.PyQt5_stylesheets import PyQt5_stylesheets
from easylearn_machine_learning_gui import Ui_MainWindow
class EasylearnMachineLearningRun(QMainWindow, Ui_MainWindow):
    """Main window for the machine-learning settings module of easylearn.

    Lets the user pick a classifier and its hyper-parameter ranges, and
    load/save them from/to a JSON configuration file.
    """

    def __init__(self, working_directory=None):
        # NOTE(review): working_directory is accepted but not used in this
        # constructor — confirm whether it should be stored on self.
        QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        # State used by get_current_inputs/load_configuration/save_configuration.
        self.machine_learning = {}
        self.configuration_file = ""
        # Apply the default appearance (dark skin, title, icon).
        self.set_run_appearance()
        # Debug: render uncaught exceptions as readable tracebacks.
        cgitb.enable(display=1, logdir=None)
        # Skins: menu-action text -> PyQt5_stylesheets style name.
        self.skins = {"Dark": "style_Dark", "Black": "style_black", "DarkOrange": "style_DarkOrange",
                      "Gray": "style_gray", "Blue": "style_blue", "Navy": "style_navy", "Classic": "style_Classic"}
        # Every skin action re-runs set_run_appearance, which reads the
        # triggering action's text to pick the style sheet.
        self.actionDark.triggered.connect(self.set_run_appearance)
        self.actionBlack.triggered.connect(self.set_run_appearance)
        self.actionDarkOrange.triggered.connect(self.set_run_appearance)
        self.actionGray.triggered.connect(self.set_run_appearance)
        self.actionBlue.triggered.connect(self.set_run_appearance)
        self.actionNavy.triggered.connect(self.set_run_appearance)
        self.actionClassic.triggered.connect(self.set_run_appearance)
        # Classification method name -> page index in the settings stacked widget.
        self.classification_stackedwedge_dict = {
            "Logistic regression": 0, "Support vector machine": 1, "Ridge classification": 2,
            "Gaussian process": 3, "Random forest": 4, "AdaBoost": 5
        }
        # Selecting a classifier switches to its settings page.
        self.radioButton_classificaton_lr.clicked.connect(self.switche_stacked_wedge_for_classification)
        self.radioButton_classification_svm.clicked.connect(self.switche_stacked_wedge_for_classification)
        self.radioButton_classification_ridge.clicked.connect(self.switche_stacked_wedge_for_classification)
        self.radioButton_classification_gaussianprocess.clicked.connect(self.switche_stacked_wedge_for_classification)
        self.radioButton_classification_randomforest.clicked.connect(self.switche_stacked_wedge_for_classification)
        self.radioButton_classification_adaboost.clicked.connect(self.switche_stacked_wedge_for_classification)
def set_run_appearance(self):
"""Set style_sheets
"""
qss_special = """QPushButton:hover
{
font-weight: bold; font-size: 15px;
}
"""
self.setWindowTitle('Feature Engineering')
self.setWindowIcon(QIcon('../logo/logo-upper.jpg'))
sender = self.sender()
if sender:
if (sender.text() in list(self.skins.keys())):
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style=self.skins[sender.text()]))
if sender.text() == "Classic":
self.setStyleSheet("")
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
# Make the stackedWidg to default at the begining
def get_current_inputs(self):
"""Get all current inputs
Attrs:
-----
self.machine_learning: dictionary
all machine_learning parameters that the user input.
"""
self.all_backup_inputs = {
"classification": {
self.radioButton_classificaton_lr:{
"Logistic regression": {
"maxl1ratio": {"value": self.doubleSpinBox_clf_lr_maxl1ratio.text(), "wedget": self.doubleSpinBox_clf_lr_maxl1ratio},
"minl1ratio": {"value": self.doubleSpinBox_clf_lr_maxl1ratio.text(), "wedget": self.doubleSpinBox_clf_lr_minl1ration},
"numberl1ratio": {"value": self.spinBox__clf_lr_numl1ratio.text(), "wedget": self.spinBox__clf_lr_numl1ratio},
},
},
self.radioButton_classification_svm:{
"Support vector machine": {
"minl1ratio": {"value": self.doubleSpinBox_clf_svm_minc.text(), "wedget": self.doubleSpinBox_clf_svm_minc},
"maxl1ratio": {"value": self.doubleSpinBox_clf_svm_maxc.text(), "wedget": self.doubleSpinBox_clf_svm_maxc},
"numc": {"value": self.spinBox_clf_svm_numc.text(), "wedget": self.spinBox_clf_svm_numc},
"maxgamma": {"value": self.lineEdit_clf_svm_maxgamma.text(), "wedget": self.lineEdit_clf_svm_maxgamma},
"mingamma": {"value": self.lineEdit_clf_svm_mingamma.text(), "wedget": self.lineEdit_clf_svm_mingamma},
"numgamma": {"value": self.spinBox_clf_svm_numgamma.text(), "wedget": self.spinBox_clf_svm_numgamma},
},
},
self.radioButton_classification_ridge:{
"Ridge classification": {
"minalpha": {"value": self.doubleSpinBox_clf_ridgeclf_minalpha.text(), "wedget": self.doubleSpinBox_clf_ridgeclf_minalpha},
"maxalpha": {"value": self.doubleSpinBox_clf_ridgeclf_maxalpha.text(), "wedget": self.doubleSpinBox_clf_ridgeclf_maxalpha},
"numalpha": {"value": self.spinBox_clf_ridgeclf_numalpha.text(), "wedget": self.spinBox_clf_ridgeclf_numalpha},
},
},
self.radioButton_classification_gaussianprocess:{
"Gaussian process": {},
},
self.radioButton_classification_randomforest:{
"Random forest": {
"minestimators": {"value": self.spinBox_clf_randomforest_minestimators.text(), "wedget": self.spinBox_clf_randomforest_minestimators},
"maxestimators": {"value": self.spinBox_clf_randomforest_maxestimators.text(), "wedget": self.spinBox_clf_randomforest_maxestimators},
"maxdepth": {"value": self.spinBox_clf_randomforest_maxdepth.text(), "wedget": self.spinBox_clf_randomforest_maxdepth},
},
},
self.radioButton_classification_adaboost:{
"AdaBoost": {
"minestimators": {"value": self.spinBox_clf_adaboost_minestimators.text(), "wedget": self.spinBox_clf_adaboost_minestimators},
"maxestimators": {"value": self.spinBox_clf_adaboost_maxestimators.text(), "wedget": self.spinBox_clf_adaboost_maxestimators},
},
},
},
"regression": {
self.radioButton_pca: {
"Principal component analysis": {
"min": {"value": self.doubleSpinBox_pca_maxcomponents.text(), "wedget": self.doubleSpinBox_pca_maxcomponents},
"max": {"value": self.doubleSpinBox_pca_mincomponents.text(), "wedget": self.doubleSpinBox_pca_mincomponents},
"number": {"value": self.spinBox_pcanum.text(), "wedget": self.spinBox_pcanum}
},
},
self.radioButton_ica: {
"Independent component analysis": {
"min": {"value": self.doubleSpinBox_ica_minics.text(), "wedget": self.doubleSpinBox_ica_minics},
"max": {"value": self.doubleSpinBox_ica_maxics.text(), "wedget": self.doubleSpinBox_ica_maxics},
"number": {"value": self.spinBox_icnum.text(), "wedget": self.spinBox_icnum},
}
},
self.radioButton_lda: {"lda": {}},
self.radioButton_nmf: {
"Non-negative matrix factorization": {
"min": {"value": self.doubleSpinBox_nmf_mincompnents.text(), "wedget": self.doubleSpinBox_nmf_mincompnents},
"max": {"value": self.doubleSpinBox_nmf_maxcomponents.text(), "wedget": self.doubleSpinBox_nmf_maxcomponents},
"number": {"value": self.spinBox_icnum.text(), "wedget": self.spinBox_icnum},
}
},
self.radioButton_none: {"none": {}}
},
"feature_selection": {
self.radioButton_variance_threshold: {
"Variance threshold": {
"min": {"value": self.doubleSpinBox_variancethreshold_min.text(), "wedget": self.doubleSpinBox_variancethreshold_min},
"max": {"value": self.doubleSpinBox_variancethreshold_max.text(), "wedget": self.doubleSpinBox_variancethreshold_max},
"number": {"value": self.spinBox_variancethreshold_num.text(), "wedget": self.spinBox_variancethreshold_num}
}
},
self.radioButton_correlation: {
"Correlation": {
"min": {"value": self.doubleSpinBox_correlation_minabscoef.text(), "wedget": self.doubleSpinBox_correlation_minabscoef},
"max": {"value": self.doubleSpinBox_correlation_maxabscoef.text(), "wedget": self.doubleSpinBox_correlation_maxabscoef},
"number": {"value": self.spinBox_correlation_num.text(), "wedget": self.spinBox_correlation_num},
}
},
self.radioButton_distancecorrelation: {
"Distance correlation": {
"min": {"value": self.doubleSpinBox_distancecorrelation_minabscoef.text(), "wedget": self.doubleSpinBox_distancecorrelation_minabscoef},
"max": {"value": self.doubleSpinBox_distancecorrelation_maxabscoef.text(), "wedget": self.doubleSpinBox_distancecorrelation_maxabscoef},
"number": {"value": self.spinBox_distancecorrelation_num.text(), "wedget": self.spinBox_distancecorrelation_num},
}
},
self.radioButton_fscore: {
"F-Score (classification)": {
"max":{"value": self.doubleSpinBox_fscore_maxnum.text(), "wedget": self.doubleSpinBox_fscore_maxnum},
"min": {"value":self.doubleSpinBox_fscore_minnum.text(), "wedget": self.doubleSpinBox_fscore_minnum},
"number": {"value":self.spinBox_fscore_num.text(), "wedget": self.spinBox_fscore_num},
}
},
self.radioButton_mutualinfo_cls: {
"Mutual information (classification)": {
"max": {"value": self.doubleSpinBox_mutualinfocls_maxnum.text(), "wedget": self.doubleSpinBox_mutualinfocls_maxnum},
"min": {"value": self.doubleSpinBox_mutualinfocls_minnum.text(), "wedget": self.doubleSpinBox_mutualinfocls_minnum},
"number": {"value": self.spinBox_mutualinfocls_num.text(), "wedget": self.spinBox_mutualinfocls_num},
"n_neighbors": {"value": self.spinBox_mutualinfocls_neighbors.text(), "wedget": self.spinBox_mutualinfocls_neighbors},
}
},
self.radioButton_mutualinfo_regression: {
"Mutual information (regression)": {
"max": {"value": self.doubleSpinBox_mutualinforeg_maxnum.text(), "wedget": self.doubleSpinBox_mutualinforeg_maxnum},
"min": {"value": self.doubleSpinBox_mutualinforeg_minnum.text(), "wedget": self.doubleSpinBox_mutualinforeg_minnum},
"number": {"value": self.spinBox_mutualinforeg_num.text(), "wedget": self.spinBox_mutualinforeg_num},
"n_neighbors": {"value": self.spinBox_mutualinforeg_neighbors.text(), "wedget": self.spinBox_mutualinforeg_neighbors},
}
},
self.radioButton_relieff: {
"ReliefF": {
"max": {"value": self.doubleSpinBox_relieff_max.text(), "wedget": self.doubleSpinBox_relieff_max},
"min": {"value": self.doubleSpinBox_relieff_min.text(), "wedget": self.doubleSpinBox_relieff_min},
"number": {"value": self.spinBox_relief_num.text(), "wedget": self.spinBox_relief_num},
}
},
self.radioButton_anova: {
"ANOVA": {
"max": {"value": self.doubleSpinBox_anova_alpha_max.text(), "wedget": self.doubleSpinBox_anova_alpha_max},
"min": {"value": self.doubleSpinBox_anova_alpha_min.text(), "wedget": self.doubleSpinBox_anova_alpha_min},
"number": {"value": self.spinBox_anova_num.text(), "wedget": self.spinBox_anova_num},
"multiple_correction": {"value": self.comboBox_anova_multicorrect.currentText(), "wedget": self.comboBox_anova_multicorrect},
}
},
self.radioButton_rfe: {
"RFE": {
"step": {"value": self.doubleSpinBox_rfe_step.text(), "wedget": self.doubleSpinBox_rfe_step},
"n_folds": {"value": self.spinBox_rfe_nfold.text(), "wedget": self.spinBox_rfe_nfold},
"estimator": {"value": self.comboBox_rfe_estimator.currentText(), "wedget": self.comboBox_rfe_estimator},
"n_jobs": {"value": self.spinBox_rfe_njobs.text(), "wedget": self.spinBox_rfe_njobs}
}
},
self.radioButton_l1: {
"L1 regularization (Lasso)": {
"max": {"va1ue": self.doubleSpinBox_l1_alpha_max.text(), "wedget": self.doubleSpinBox_l1_alpha_max},
"min": {"va1ue": self.doubleSpinBox_l1_alpha_min.text(), "wedget": self.doubleSpinBox_l1_alpha_min},
"number": {"va1ue": self.spinBox_l1_num.text(), "wedget": self.spinBox_l1_num}
}
},
self.radioButton_elasticnet: {
"L1 + L2 regularization (Elastic net regression)": {
"max_alpha": {"value": self.doubleSpinBox_elasticnet_alpha_max.text(), "wedget": self.doubleSpinBox_elasticnet_alpha_max},
"min_alpha": {"value": self.doubleSpinBox_elasticnet_alpha_min.text(), "wedget": self.doubleSpinBox_elasticnet_alpha_min},
"number_alpha": {"value": self.spinBox_elasticnet_num.text(), "wedget": self.spinBox_elasticnet_num},
"max_l1ratio": {"value": self.doubleSpinBox_elasticnet_l1ratio_max.text(), "wedget": self.doubleSpinBox_elasticnet_l1ratio_max},
"min_l1ratio": {"value": self.doubleSpinBox_elasticnet_l1ratio_min.text(), "wedget": self.doubleSpinBox_elasticnet_l1ratio_min},
"Number_l1ratio": {"value": self.spinBox_l1ratio_num.text(), "wedget": self.spinBox_l1ratio_num},
}
}
},
"unbalance_treatment": {
self.radioButton_randover: {"randover": {}},
self.radioButton_smoteover: {"somteover": {}},
self.radioButton_smotencover: {"somtencover": {}},
self.radioButton_bsmoteover: {"bsmoteover": {}},
self.radioButton_randunder: {"randunder": {}},
self.radioButton_extractionunder: {"extractionunder": {}},
self.radioButton_cludterunder: {"clusterunder": {}},
self.radioButton_nearmissunder: {"nearmissunder": {}},
}
}
#%% ----------------------------------get current inputs---------------------------------------
for key_feature_engineering in self.all_backup_inputs:
for keys_one_feature_engineering in self.all_backup_inputs[key_feature_engineering]:
if keys_one_feature_engineering.isChecked():
self.machine_learning[key_feature_engineering] = self.all_backup_inputs[key_feature_engineering][keys_one_feature_engineering]
    def load_configuration(self):
        """Load a JSON configuration file and refresh the GUI from it.

        Asks the user for a file.  If its "machine_learning" section is
        non-empty while the GUI already holds settings, the user is asked
        whether to overwrite them; otherwise the non-empty side wins.
        Invalid JSON resets ``self.configuration_file`` to "".
        """
        # Get current inputs before loading, so the loaded
        # configuration["machine_learning"] can be compared with the current
        # self.machine_learning.
        self.get_current_inputs()
        self.configuration_file, filetype = QFileDialog.getOpenFileName(
            self, "Select configuration file", os.getcwd(),
            "Text Files (*.json);;All Files (*);;")
        # Read configuration_file if one was selected.
        if self.configuration_file != "":
            with open(self.configuration_file, 'r', encoding='utf-8') as config:
                self.configuration = config.read()
            # Check the configuration is valid JSON, then transform it to a dict.
            # If it is not valid JSON, reset configuration_file to "".
            try:
                self.configuration = json.loads(self.configuration)
                # GUI already holds machine_learning settings?
                if (self.machine_learning != {}):
                    # Loaded section is non-empty: ask before overwriting the
                    # on-screen settings with the loaded ones.
                    if (list(self.configuration["machine_learning"].keys()) != []):
                        reply = QMessageBox.question(
                            self, "Data loading configuration already exists",
                            "The machine_learning configuration is already exists, do you want to rewrite it with the loaded configuration?",
                            QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                        if reply == QMessageBox.Yes:
                            self.machine_learning = self.configuration["machine_learning"]
                            self.refresh_gui()
                    # Loaded section is empty: keep the on-screen settings and
                    # copy them into the loaded configuration instead.
                    else:
                        self.configuration["machine_learning"] = self.machine_learning
                else:
                    # No on-screen settings yet: take whatever was loaded.
                    self.machine_learning = self.configuration["machine_learning"]
                    self.refresh_gui()
            except json.decoder.JSONDecodeError:
                QMessageBox.warning( self, 'Warning', f'{self.configuration_file} is not valid JSON')
                self.configuration_file = ""
        else:
            QMessageBox.warning( self, 'Warning', 'Configuration file was not selected')
def refresh_gui(self):
""" Refresh gui the display the loaded configuration in the GUI
"""
print("refresh_gui")
# Generate a dict for switch stacked wedgets
switch_dict = {
"feature_preprocessing": self.switche_stacked_wedge_for_preprocessing,
"dimreduction": self.switche_stacked_wedge_for_classification,
"feature_selection": self.switche_stacked_wedge_for_feature_selection,
}
for keys_one_feature_engineering in self.all_backup_inputs: # 4 feature eng module loop
for wedget in self.all_backup_inputs[keys_one_feature_engineering].keys(): # all wedgets in one feature eng loop
for method in self.all_backup_inputs[keys_one_feature_engineering][wedget].keys():
if keys_one_feature_engineering in self.machine_learning.keys():
if method in list(self.machine_learning[keys_one_feature_engineering].keys()):
# Make the wedget checked according loaded param
wedget.setChecked(True)
# Make setting to loaded text
for key_setting in self.machine_learning[keys_one_feature_engineering][method]:
print(keys_one_feature_engineering)
print(wedget)
print(key_setting)
print(self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting].keys())
if "wedget" in list(self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting].keys()):
loaded_text = self.machine_learning[keys_one_feature_engineering][method][key_setting]["value"]
print(f"method = {method}, setting = {key_setting}, loaded_text={loaded_text}")
# Identity wedget type, then using different methods to "setText"
# NOTE. 所有控件在设计时,尽量保留原控件的名字在命名的前部分,这样下面才好确定时哪一种类型的控件,从而用不同的赋值方式!
if "lineEdit" in self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].objectName():
self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].setText(loaded_text)
elif "doubleSpinBox" in self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].objectName():
self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].setValue(float(loaded_text))
elif "spinBox" in self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].objectName():
self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].setValue(int(loaded_text))
elif "comboBox" in self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].objectName():
self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].setCurrentText(loaded_text)
# Switch stacked wedget
switch_dict[keys_one_feature_engineering](True, method)
def save_configuration(self):
"""Save configuration
"""
# Get current inputs before saving machine_learning parameters
self.get_current_inputs()
# Delete wedgets object from self.machine_learning dict
for feature_engineering_name in list(self.machine_learning.keys()):
for method_name in list(self.machine_learning[feature_engineering_name].keys()):
for setting in self.machine_learning[feature_engineering_name][method_name]:
for content in list(self.machine_learning[feature_engineering_name][method_name][setting].keys()):
if "wedget" in list(self.machine_learning[feature_engineering_name][method_name][setting].keys()):
self.machine_learning[feature_engineering_name][method_name][setting].pop("wedget")
if self.configuration_file != "":
try:
# self.configuration = json.dumps(self.configuration, ensure_ascii=False)
self.configuration["machine_learning"] = self.machine_learning
self.configuration = json.dumps(self.configuration)
with open(self.configuration_file, 'w', encoding="utf-8") as config:
config.write(self.configuration)
except json.decoder.JSONDecodeError:
QMessageBox.warning( self, 'Warning', f'{self.configuration}'+ ' is not a valid JSON!')
else:
QMessageBox.warning( self, 'Warning', 'Please choose a configuration file first (press button at top left corner)!')
def switche_stacked_wedge_for_classification(self, signal_bool, method=None):
if self.sender():
if not method:
self.stackedWidget_setting.setCurrentIndex(self.classification_stackedwedge_dict[self.sender().text()])
else:
self.stackedWidget_setting.setCurrentIndex(self.classification_stackedwedge_dict[method])
else:
self.stackedWidget_setting.setCurrentIndex(-1)
# def closeEvent(self, event):
# """This function is called when exit icon of the window is clicked.
# This function make sure the program quit safely.
# """
# # Set qss to make sure the QMessageBox can be seen
# reply = QMessageBox.question(self, 'Quit',"Are you sure to quit?",
# QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
# if reply == QMessageBox.Yes:
# event.accept()
# else:
# event.ignore()
# Run the settings window as a standalone application.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    md = EasylearnMachineLearningRun()
    md.show()
    sys.exit(app.exec_())
|
dongmengshi/easylearn | eslearn/stylesheets/PyQt5_stylesheets/PyQt5_stylesheets/pyqt5_style_Classic_rc.py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x05\xbe\
\x74\
\x61\x62\x57\x69\x64\x67\x65\x74\x7b\x0d\x0a\x20\x20\x20\x20\x66\
\x6f\x6e\x74\x2d\x73\x69\x7a\x65\x3a\x20\x31\x35\x70\x78\x3b\x0d\
\x0a\x7d\x0d\x0a\x0d\x0a\x0d\x0a\x51\x57\x69\x64\x67\x65\x74\x20\
\x7b\x0d\x0a\x20\x20\x20\x20\x66\x6f\x6e\x74\x2d\x73\x69\x7a\x65\
\x3a\x20\x31\x35\x70\x78\x3b\x0d\x0a\x7d\x0d\x0a\x0d\x0a\x51\x54\
\x61\x62\x6c\x65\x56\x69\x65\x77\x20\x7b\x0d\x0a\x20\x20\x20\x20\
\x66\x6f\x6e\x74\x2d\x73\x69\x7a\x65\x3a\x20\x31\x35\x70\x78\x3b\
\x0d\x0a\x20\x20\x20\x20\x61\x6c\x74\x65\x72\x6e\x61\x74\x65\x2d\
\x62\x61\x63\x6b\x67\x72\x6f\x75\x6e\x64\x2d\x63\x6f\x6c\x6f\x72\
\x3a\x20\x23\x45\x45\x45\x45\x46\x46\x3b\x0d\x0a\x7d\x0d\x0a\x0d\
\x0a\x42\x72\x6f\x77\x73\x65\x72\x20\x51\x50\x75\x73\x68\x42\x75\
\x74\x74\x6f\x6e\x20\x7b\x0d\x0a\x20\x20\x20\x20\x66\x6f\x6e\x74\
\x2d\x73\x69\x7a\x65\x3a\x20\x31\x35\x70\x78\x3b\x0d\x0a\x20\x20\
\x20\x20\x6d\x69\x6e\x2d\x77\x69\x64\x74\x68\x3a\x20\x31\x35\x70\
\x78\x3b\x0d\x0a\x7d\x0d\x0a\x0d\x0a\x43\x6f\x6c\x6f\x72\x42\x75\
\x74\x74\x6f\x6e\x3a\x3a\x65\x6e\x61\x62\x6c\x65\x64\x20\x7b\x0d\
\x0a\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x3a\x20\x31\x70\x78\
\x20\x73\x6f\x6c\x69\x64\x20\x23\x34\x34\x34\x34\x34\x34\x3b\x0d\
\x0a\x7d\x0d\x0a\x0d\x0a\x43\x6f\x6c\x6f\x72\x42\x75\x74\x74\x6f\
\x6e\x3a\x3a\x64\x69\x73\x61\x62\x6c\x65\x64\x20\x7b\x0d\x0a\x20\
\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x3a\x20\x31\x70\x78\x20\x73\
\x6f\x6c\x69\x64\x20\x23\x41\x41\x41\x41\x41\x41\x3b\x0d\x0a\x7d\
\x0d\x0a\x0d\x0a\x0d\x0a\x42\x72\x6f\x77\x73\x65\x72\x20\x51\x47\
\x72\x6f\x75\x70\x42\x6f\x78\x20\x7b\x0d\x0a\x20\x20\x20\x20\x62\
\x61\x63\x6b\x67\x72\x6f\x75\x6e\x64\x2d\x63\x6f\x6c\x6f\x72\x3a\
\x20\x71\x6c\x69\x6e\x65\x61\x72\x67\x72\x61\x64\x69\x65\x6e\x74\
\x28\x78\x31\x3a\x20\x30\x2c\x20\x79\x31\x3a\x20\x30\x2c\x20\x78\
\x32\x3a\x20\x30\x2c\x20\x79\x32\x3a\x20\x31\x2c\x0d\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x73\x74\x6f\x70\x3a\x20\x30\x20\x23\x45\x30\x45\
\x30\x45\x30\x2c\x20\x73\x74\x6f\x70\x3a\x20\x31\x20\x23\x46\x46\
\x46\x46\x46\x46\x29\x3b\x0d\x0a\x20\x20\x20\x20\x62\x6f\x72\x64\
\x65\x72\x3a\x20\x32\x70\x78\x20\x73\x6f\x6c\x69\x64\x20\x23\x39\
\x39\x39\x39\x39\x39\x3b\x0d\x0a\x20\x20\x20\x20\x62\x6f\x72\x64\
\x65\x72\x2d\x72\x61\x64\x69\x75\x73\x3a\x20\x35\x70\x78\x3b\x0d\
\x0a\x20\x20\x20\x20\x6d\x61\x72\x67\x69\x6e\x2d\x74\x6f\x70\x3a\
\x20\x31\x65\x78\x3b\x20\x2f\x2a\x20\x6c\x65\x61\x76\x65\x20\x73\
\x70\x61\x63\x65\x20\x61\x74\x20\x74\x68\x65\x20\x74\x6f\x70\x20\
\x66\x6f\x72\x20\x74\x68\x65\x20\x74\x69\x74\x6c\x65\x20\x2a\x2f\
\x0d\x0a\x20\x20\x20\x20\x66\x6f\x6e\x74\x2d\x73\x69\x7a\x65\x3a\
\x20\x32\x30\x70\x78\x3b\x0d\x0a\x20\x20\x20\x20\x63\x6f\x6c\x6f\
\x72\x3a\x20\x62\x6c\x61\x63\x6b\x3b\x0d\x0a\x7d\x0d\x0a\x0d\x0a\
\x42\x72\x6f\x77\x73\x65\x72\x20\x51\x47\x72\x6f\x75\x70\x42\x6f\
\x78\x3a\x3a\x74\x69\x74\x6c\x65\x20\x7b\x0d\x0a\x20\x20\x20\x20\
\x73\x75\x62\x63\x6f\x6e\x74\x72\x6f\x6c\x2d\x6f\x72\x69\x67\x69\
\x6e\x3a\x20\x6d\x61\x72\x67\x69\x6e\x3b\x0d\x0a\x20\x20\x20\x20\
\x73\x75\x62\x63\x6f\x6e\x74\x72\x6f\x6c\x2d\x70\x6f\x73\x69\x74\
\x69\x6f\x6e\x3a\x20\x74\x6f\x70\x20\x63\x65\x6e\x74\x65\x72\x3b\
\x20\x2f\x2a\x20\x70\x6f\x73\x69\x74\x69\x6f\x6e\x20\x61\x74\x20\
\x74\x68\x65\x20\x74\x6f\x70\x20\x63\x65\x6e\x74\x65\x72\x20\x2a\
\x2f\x0d\x0a\x20\x20\x20\x20\x70\x61\x64\x64\x69\x6e\x67\x3a\x20\
\x30\x20\x33\x70\x78\x3b\x0d\x0a\x20\x20\x20\x20\x66\x6f\x6e\x74\
\x2d\x73\x69\x7a\x65\x3a\x20\x31\x35\x70\x78\x3b\x0d\x0a\x20\x20\
\x20\x20\x63\x6f\x6c\x6f\x72\x3a\x20\x62\x6c\x61\x63\x6b\x3b\x0d\
\x0a\x7d\x0d\x0a\x0d\x0a\x50\x6c\x75\x67\x69\x6e\x49\x74\x65\x6d\
\x20\x7b\x0d\x0a\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x3a\x20\
\x32\x70\x78\x20\x73\x6f\x6c\x69\x64\x20\x62\x6c\x61\x63\x6b\x3b\
\x0d\x0a\x20\x20\x20\x20\x62\x61\x63\x6b\x67\x72\x6f\x75\x6e\x64\
\x3a\x20\x77\x68\x69\x74\x65\x3b\x0d\x0a\x7d\x0d\x0a\x0d\x0a\x0d\
\x0a\x50\x6c\x75\x67\x69\x6e\x49\x74\x65\x6d\x20\x46\x72\x61\x6d\
\x65\x20\x7b\x0d\x0a\x20\x20\x20\x20\x62\x61\x63\x6b\x67\x72\x6f\
\x75\x6e\x64\x3a\x20\x23\x43\x43\x43\x43\x43\x43\x3b\x0d\x0a\x7d\
\x0d\x0a\x0d\x0a\x0d\x0a\x54\x61\x62\x42\x75\x74\x74\x6f\x6e\x20\
\x7b\x0d\x0a\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x3a\x20\x31\
\x70\x78\x20\x73\x6f\x6c\x69\x64\x20\x23\x38\x66\x38\x66\x39\x31\
\x3b\x0d\x0a\x20\x20\x20\x20\x62\x6f\x72\x64\x65\x72\x2d\x72\x61\
\x64\x69\x75\x73\x3a\x20\x32\x70\x78\x3b\x0d\x0a\x20\x20\x20\x20\
\x70\x61\x64\x64\x69\x6e\x67\x3a\x20\x33\x70\x78\x3b\x0d\x0a\x20\
\x20\x20\x20\x6d\x69\x6e\x2d\x77\x69\x64\x74\x68\x3a\x20\x31\x32\
\x30\x70\x78\x3b\x0d\x0a\x7d\x0d\x0a\x0d\x0a\x54\x61\x62\x42\x75\
\x74\x74\x6f\x6e\x3a\x3a\x63\x68\x65\x63\x6b\x65\x64\x20\x7b\x0d\
\x0a\x20\x20\x20\x20\x62\x61\x63\x6b\x67\x72\x6f\x75\x6e\x64\x2d\
\x63\x6f\x6c\x6f\x72\x3a\x20\x71\x6c\x69\x6e\x65\x61\x72\x67\x72\
\x61\x64\x69\x65\x6e\x74\x28\x78\x31\x3a\x20\x30\x2c\x20\x79\x31\
\x3a\x20\x30\x20\x2c\x20\x78\x32\x3a\x20\x30\x2c\x20\x79\x32\x3a\
\x20\x31\x2c\x0d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x6f\x70\x3a\
\x20\x30\x20\x23\x39\x61\x39\x62\x39\x65\x2c\x20\x73\x74\x6f\x70\
\x3a\x20\x31\x20\x23\x62\x61\x62\x62\x62\x65\x29\x3b\x0d\x0a\x7d\
\x0d\x0a\x0d\x0a\x0d\x0a\x54\x61\x62\x42\x75\x74\x74\x6f\x6e\x3a\
\x3a\x70\x72\x65\x73\x73\x65\x64\x20\x7b\x0d\x0a\x20\x20\x20\x20\
\x62\x61\x63\x6b\x67\x72\x6f\x75\x6e\x64\x2d\x63\x6f\x6c\x6f\x72\
\x3a\x20\x71\x6c\x69\x6e\x65\x61\x72\x67\x72\x61\x64\x69\x65\x6e\
\x74\x28\x78\x31\x3a\x20\x30\x2c\x20\x79\x31\x3a\x20\x30\x20\x2c\
\x20\x78\x32\x3a\x20\x30\x2c\x20\x79\x32\x3a\x20\x31\x2c\x0d\x0a\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x73\x74\x6f\x70\x3a\x20\x30\x20\x23\x39\
\x61\x39\x62\x39\x65\x2c\x20\x73\x74\x6f\x70\x3a\x20\x31\x20\x23\
\x62\x61\x62\x62\x62\x65\x29\x3b\x0d\x0a\x7d\x0d\x0a\
"
qt_resource_name = b"\
\x00\x11\
\x0b\x14\x5d\x13\
\x00\x50\
\x00\x79\x00\x51\x00\x74\x00\x35\x00\x5f\x00\x73\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x73\x00\x68\x00\x65\x00\x65\x00\x74\x00\x73\
\
\x00\x11\
\x0e\x6d\x09\x43\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x5f\x00\x43\x00\x6c\x00\x61\x00\x73\x00\x73\x00\x69\x00\x63\x00\x2e\x00\x71\x00\x73\x00\x73\
\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x28\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x28\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x71\x7c\xe2\x50\xa2\
"
# Parse the running Qt version, e.g. "5.11.2" -> [5, 11, 2].
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
# The compiled-resource (RCC) binary format changed in Qt 5.8;
# pick the structure matching the runtime Qt.
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)

# Resources are registered automatically when this module is imported.
qInitResources()
|
dongmengshi/easylearn | eslearn/utils/lc_screening_subject_folder_V3.py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 25 21:53:10 2018
改自selectSubjID_inScale_V2
根据给定的条件筛选大表的item和subjects' folder
inputs:
file_all:大表
column_basic1=[0,11,19,20,21,22,23,27,28,29,30]:基本信息列
column_basic2=['学历(年)','中国人利手量表']:基本信息名
column_hamd17=np.arange(104,126,1),
column_hama=np.arange(126,141,1),
column_yars=np.arange(141,153,1),
column_bprs=np.arange(153,177,1)
column_diagnosis='诊断':诊断的列名
column_quality='Resting_quality'
column_note1='诊断备注'
column_note2='备注'
note1_keyword='复扫':重复备注文字
outputs:
folder:筛选出来的ID
basic:筛选出来的基本信息
hamd17,hamm,yars,bprs:筛选出来的量表
logicIndex_scale:量表的逻辑index
logicIndex_repeat:重复量表的index
...
to fetch other output,please check results_dict
@author: <NAME>
new feature:任意条件筛选
"""
# ===============================================
import sys
# sys.path.append(r'D:\myCodes\MVPA_LIChao\MVPA_Python\workstation')
import pandas as pd
import re
import os
import numpy as np
class select_SubjID():
    """Screen scale items and subject folders from a master clinical spreadsheet.

    The ``column_*`` selectors pick which item columns (basic info and
    sub-scales) to extract; ``screening_dict`` defines row-level
    include/exclude conditions, keyed by column name.  After ``selMain``
    runs, the screened sub-scales live on ``self.basic``, ``self.hamd17``,
    ``self.hama``, ``self.yars``, ``self.bprs`` and the surviving subject
    IDs on ``self.folder``.
    """

    def __init__(self,
                 file_all=r"D:\WorkStation_2018\WorkStation_2018_08_Doctor_DynamicFC_Psychosis\Scales\8.30大表.xlsx",
                 # Basic-info and scale columns; row screening is NOT applied
                 # to these selectors, only to the screening_dict columns.
                 column_basic1=[0, 11, 19, 20, 21, 22, 23, 27, 28, 29, 30],
                 column_basic2=['学历(年)', '中国人利手量表', '诊断备注', '备注'],
                 column_hamd17=np.arange(104, 126, 1),
                 column_hama=np.arange(126, 141, 1),
                 column_yars=np.arange(141, 153, 1),
                 column_bprs=np.arange(153, 177, 1),
                 # Row-screening conditions, one entry per column:
                 #   {column_name: {condition_value: [include_or_exclude, match_method]}}
                 # match_method is 'exact' or 'fuzzy' (substring).
                 # NOTE: within a single column only ONE strategy is supported
                 # (all-include or all-exclude); mixing both may mis-screen.
                 screening_dict={
                     '诊断': {1: ['include', 'exact'], 2: ['include', 'exact'], 3: ['include', 'exact'], 4: ['include', 'exact']},
                     'Resting_quality': {'Y': ['include', 'exact']},
                     '诊断备注': {'复扫': ['exclude', 'fuzzy'], '糖尿病': ['exclude', 'fuzzy'], '不能入组': ['exclude', 'fuzzy']},
                     '备注': {'复扫': ['exclude', 'fuzzy']}
                 }
                 ):
        self.file_all = file_all
        self.column_basic1 = column_basic1
        self.column_basic2 = column_basic2
        self.column_hamd17 = column_hamd17
        self.column_hama = column_hama
        self.column_yars = column_yars
        self.column_bprs = column_bprs
        self.screening_dict = screening_dict
        print('Initialized!\n')

    def loadExcel(self):
        """Load the whole clinical spreadsheet into ``self.allClinicalData``."""
        self.allClinicalData = pd.read_excel(self.file_all)
        return self

    def extract_one_series(self, column_var):
        """Select columns; ``column_var`` may hold integer positions or name strings.

        NOTE(review): when called with a single string key (as in
        ``screen_data_according_conditions_in_dict_all``), ``column_var[0]``
        is the key's first character, which is still a str, so the .loc
        branch is taken — presumably intended; verify against callers.
        """
        if isinstance(column_var[0], str):
            data = self.allClinicalData.loc[:, column_var]
        elif isinstance(self.column_basic1[0], np.int32):
            data = self.allClinicalData.iloc[:, column_var]
        elif isinstance(self.column_basic1[0], int):
            data = self.allClinicalData.iloc[:, column_var]
        else:
            print('basicIndex 的输入有误!\n')
        return data

    def select_item(self):
        """Extract the configured item columns (no row screening applied here)."""
        basic1 = self.extract_one_series(self.column_basic1)
        basic2 = self.extract_one_series(self.column_basic2)
        self.basic = pd.concat([basic1, basic2], axis=1)
        self.hamd17 = self.extract_one_series(self.column_hamd17)
        self.hama = self.extract_one_series(self.column_hama)
        self.yars = self.extract_one_series(self.column_yars)
        self.bprs = self.extract_one_series(self.column_bprs)
        return self

    def screen_data_according_conditions_in_dict_one(
            self, series_for_screening, condition_in_dict):
        """Screen ONE column against its condition dict; return surviving index.

        Condition values may be strings or numbers.  Per-condition results
        are merged with ``DataFrame.join``: union ('outer') for includes,
        intersection ('inner') for excludes.
        """
        # str.contains cannot handle NaN, so replace nulls with '未知' first.
        series_for_screening = series_for_screening.mask(
            series_for_screening.isnull(), '未知')
        # Empty frame indexed like the input; conditions join into it below.
        screened_ind_all = pd.DataFrame([])
        for condition_name in condition_in_dict:
            screened_ind = pd.DataFrame([], index=series_for_screening.index)
            # --- exact match (typically numeric diagnosis codes) ---
            if condition_in_dict[condition_name][-1] == 'exact':
                if condition_in_dict[condition_name][0] == 'exclude':
                    screened_ind = screened_ind.loc[series_for_screening.index[series_for_screening != condition_name]]
                elif condition_in_dict[condition_name][0] == 'include':
                    screened_ind = screened_ind.loc[series_for_screening.index[series_for_screening == condition_name]]
            # --- fuzzy (substring) match ---
            elif condition_in_dict[condition_name][-1] == 'fuzzy':
                if condition_in_dict[condition_name][0] == 'exclude':
                    screened_ind_tmp = series_for_screening.mask(
                        series_for_screening.str.contains(condition_name), None).dropna()
                    screened_ind = screened_ind.loc[screened_ind_tmp.dropna(
                    ).index]
                elif condition_in_dict[condition_name][0] == 'include':
                    screened_ind_tmp = series_for_screening.where(
                        series_for_screening.str.contains(condition_name), None)
                    screened_ind = screened_ind.loc[screened_ind_tmp.dropna(
                    ).index]
            # unknown match method: configuration error
            else:
                print(
                    '__ini__ is wrong!\n### may be words "exact OR fuzzy" is wrong ###\n')
            # Merge this condition's survivors into the running result.
            # NOTE: mixing 'exclude' and 'include' on one column may
            # mis-screen here — use a single strategy per column.
            if screened_ind_all.index.empty:
                screened_ind_all = screened_ind_all.join(
                    pd.DataFrame(screened_ind), how='outer')
            else:
                if condition_in_dict[condition_name][0] == 'exclude':
                    screened_ind_all = screened_ind_all.join(
                        pd.DataFrame(screened_ind), how='inner')
                elif condition_in_dict[condition_name][0] == 'include':
                    screened_ind_all = screened_ind_all.join(
                        pd.DataFrame(screened_ind), how='outer')
        return screened_ind_all

    def screen_data_according_conditions_in_dict_all(self):
        """Screen every column in ``screening_dict`` and intersect the indices.

        The final ``self.index_selected`` contains only rows satisfying ALL
        per-column conditions (successive inner joins).
        """
        self.index_selected = pd.DataFrame(
            [], index=self.allClinicalData.index)
        for key in self.screening_dict:
            condition_in_dict = self.screening_dict[key]
            series_for_screening = self.extract_one_series(key)
            screened_ind_all = self.screen_data_according_conditions_in_dict_one(
                series_for_screening, condition_in_dict)
            # inner join == set intersection across columns
            self.index_selected = self.index_selected.join(
                screened_ind_all, how='inner')
        return self

    def selcet_subscale_according_index(self):
        """Restrict every extracted sub-scale to the screened row index."""
        self.basic = self.basic.loc[self.index_selected.index]
        self.hamd17 = self.hamd17.loc[self.index_selected.index]
        self.hama = self.hama.loc[self.index_selected.index]
        self.yars = self.yars.loc[self.index_selected.index]
        self.bprs = self.bprs.loc[self.index_selected.index]
        return self

    def selMain(self):
        """Run the full pipeline: load -> select items -> screen rows -> slice."""
        print('Running...\n')
        # load spreadsheet
        self = self.loadExcel()
        # extract item columns
        self = self.select_item()
        # row screening according to screening_dict
        self.screen_data_according_conditions_in_dict_all()
        # restrict sub-scales to screened rows
        self = self.selcet_subscale_according_index()
        # surviving subject folder IDs
        self.folder = self.basic['folder']
        print('Done!\n')
        return self
# ===============================================
if __name__ == '__main__':
    print('==================================我是分割线====================================\n')
    import lc_screening_subject_folder_V3 as select
    # Parameters are read from '__ini__.txt' in the current working directory.
    current_path = os.getcwd()
    print('当前路径是:[{}]\n'.format(current_path))
    ini_path = os.path.join(current_path, '__ini__.txt')
    print('初始化参数位于:[{}]\n'.format(ini_path))
    print('正在读取初始化参数...\n')
    ini = open(ini_path).read()
    # Entries are separated by '//'; blank entries and entries containing
    # '#' (comments) are dropped.
    ini = ini.strip('').split('//')
    ini = [ini_ for ini_ in ini if ini_.strip()]
    ini = [ini_ for ini_ in ini if not re.findall('#', ini_)]
    name = locals()
    # SECURITY NOTE(review): each 'key=value' entry is eval()-ed and injected
    # into locals(). Only run this with a trusted __ini__.txt.
    for ini_param in ini:
        name[ini_param.strip().split('=')[0]] = eval(
            ini_param.strip().split('=')[1])
        print('{}={}\n'.format(ini_param.strip().split('=')
                               [0], name[ini_param.strip().split('=')[0]]))
    print('初始化参数读取完成!\n')
    # These names (file_all, column_basic1, ...) were injected by eval above.
    sel = select.select_SubjID(
        file_all=file_all,
        column_basic1=column_basic1,
        column_basic2=column_basic2,
        column_hamd17=column_hamd17,
        column_hama=column_hama,
        column_yars=column_yars,
        column_bprs=column_bprs,
        screening_dict=screening_dict
    )
    results = sel.selMain()
    # inspect available result attributes
    results_dict = results.__dict__
    print('所有结果为:{}\n'.format(list(results_dict.keys())))
    # persist screened folder IDs and screened scale rows to Excel
    results.folder.to_excel('folder.xlsx', header=False, index=False)
    results.allClinicalData.set_index(
        'folder').loc[results.folder].to_excel('screened_scale.xlsx')
    print(
        '###筛选的folder 保存在:[{}]###\n'.format(
            os.path.join(
                current_path,
                'folder.xlsx')))
    print('作者:黎超\n邮箱:<EMAIL>\n')
    input("######按任意键推出######\n")
    print('==================================我是分割线====================================\n')
|
dongmengshi/easylearn | eslearn/utils/lc_copy_selected_file_V5.py | <filename>eslearn/utils/lc_copy_selected_file_V5.py
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 13:05:28 2018:
在版本3的基础上,根据pandas的join方法来求交集
根据从量表中筛选的样本,来获得符合要求的原始数据的路径
数据结构neuroimageDataPath//subject00001//files
也可以是任何的数据结构,只要给定subjName在哪里就行
总之,最后把file复制到其他地方(可以限定某个file)
input:
#1 referencePath:需要复制的被试名字所在text文件(大表中的folder)
#2 regularExpressionOfsubjName_forReference:如提取量表中subjName的正则表达式
#3 folderNameContainingFile_forSelect:想把被试的哪个模态/或那个文件夹下的文件复制出来(如同时有'resting'和'dti'时,选择那个模态)
#4 num_countBackwards:subjName在倒数第几个block内(第一个计数为1)
# 如'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt'
# 的subjName在倒数第3个中
#5 regularExpressionOfSubjName_forNeuroimageDataFiles:用来筛选mri数据中subject name字符串的正则表达式
#6 keywordThatFileContain:用来筛选file的正则表达式或keyword
#7 neuroimageDataPath:原始数据的根目录
#8 savePath: 将原始数据copy到哪个大路径
# n_processess=5几个线程
#9 ifSaveLog:是否保存复制log
#10 ifCopy:是否执行复制功能
#11 ifMove:是否移动(0)
#12 saveInToOneOrMoreFolder:保存到每个被试文件夹下,还是保存到一个文件夹下
#13 saveNameSuffix:文件保存的尾缀('.nii')
#14 ifRun:是否真正对文件执行移动或复制(0)
# 总体来说被复制的文件放在如下的路径:savePath/saveFolderName/subjName/files
@author: <NAME>
new featrue:真多核多线程处理,类的函数统一返回self
匹配file name:正则表达式匹配
"""
# =========================================================================
# import
import multiprocessing
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import numpy as np
import pandas as pd
import time
import os
import shutil
import sys
sys.path.append(r'D:\myCodes\MVPA_LIChao\MVPA_Python\workstation')
#from lc_selectFile_ import selectFile
#from sklearn.externals.joblib import Parallel, delayed
# =========================================================================
# def
class copy_fmri():
    """Copy (or move) selected neuroimaging files for a screened subject list.

    Walks ``neuroimageDataPath``, matches subject names from a reference
    text/Excel file against folder names, optionally filters by containing
    folder name and by file-name keyword, then copies/moves the matched
    files into ``savePath`` — either one folder per subject or one flat
    folder — using a thread pool.
    """

    def __init__(
            self,
            referencePath=r'E:\wangfeidata\folder.txt',
            regularExpressionOfsubjName_forReference='([1-9]\d*)',
            folderNameContainingFile_forSelect='',
            num_countBackwards=2,
            regularExpressionOfSubjName_forNeuroimageDataFiles='([1-9]\d*)',
            keywordThatFileContain='nii',
            neuroimageDataPath=r'E:\wangfeidata\FunImgARWD',
            savePath=r'E:\wangfeidata',
            n_processess=2,
            ifSaveLog=1,
            ifCopy=0,
            ifMove=0,
            saveInToOneOrMoreFolder='saveToEachSubjFolder',
            saveNameSuffix='.nii',
            ifRun=0):
        # ---- store configuration ----
        self.referencePath = referencePath
        self.regularExpressionOfsubjName_forReference = regularExpressionOfsubjName_forReference
        self.folderNameContainingFile_forSelect = folderNameContainingFile_forSelect
        self.num_countBackwards = num_countBackwards
        self.regularExpressionOfSubjName_forNeuroimageDataFiles = regularExpressionOfSubjName_forNeuroimageDataFiles
        self.keywordThatFileContain = keywordThatFileContain
        self.neuroimageDataPath = neuroimageDataPath
        self.savePath = savePath
        self.n_processess = n_processess
        self.ifSaveLog = ifSaveLog
        self.ifCopy = ifCopy
        self.ifMove = ifMove
        self.saveInToOneOrMoreFolder = saveInToOneOrMoreFolder
        self.saveNameSuffix = saveNameSuffix
        self.ifRun = ifRun
        # Sanity check: copy and move are mutually exclusive.
        # NOTE(review): '&' binds tighter than '==' so this parses as a
        # chained comparison 'ifCopy == (1 & ifMove) == 1'; it happens to
        # behave as intended for 0/1 flags but 'and' was presumably meant.
        if self.ifCopy == 1 & self.ifMove == 1:
            print('### Cannot copy and move at the same time! ###\n')
            print('### please press Ctrl+C to close the progress ###\n')
        # Create the output root if needed.
        if not os.path.exists(self.savePath):
            os.makedirs(self.savePath)
        # Read the reference subject list (try Excel first, then CSV/text).
        try:
            self.subjName_forSelect = pd.read_excel(
                self.referencePath, dtype='str', header=None, index=None)
        except BaseException:
            self.subjName_forSelect = pd.read_csv(
                self.referencePath, dtype='str', header=None)
        # Extract the matching component (digits by default) from each
        # reference name; when several matches exist, the first is kept.
        print('###提取subjName_forSelect中的匹配成分,默认为数字###\n###当有多个匹配时默认是第1个###\n')
        ith = 0
        if self.regularExpressionOfsubjName_forReference:
            self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0]\
                .str.findall('[1-9]\d*')
            self.subjName_forSelect = [self.subjName_forSelect_[ith]
                                       for self.subjName_forSelect_ in
                                       self.subjName_forSelect
                                       if len(self.subjName_forSelect_)]

    def walkAllPath(self):
        """Create the os.walk generator over the neuroimaging data root."""
        self.allWalkPath = os.walk(self.neuroimageDataPath)
        return self

    def fetch_allFilePath(self):
        """Flatten the walk into a list of absolute file paths."""
        self.allFilePath = []
        for onePath in self.allWalkPath:
            for oneFile in onePath[2]:
                path = os.path.join(onePath[0], oneFile)
                self.allFilePath.append(path)
        return self

    def fetch_allSubjName(self):
        """Derive each file's subject name from its path.

        ``num_countBackwards`` says in which path block, counting backwards
        from the file name (which counts as 1), the subject name lives.
        E.g. for '...\\00002_resting\\dti\\dic.txt' the subject name
        '00002_resting' is in the 3rd block from the end.
        """
        self.allSubjName = self.allFilePath
        for i in range(self.num_countBackwards - 1):
            self.allSubjName = [os.path.dirname(
                allFilePath_) for allFilePath_ in self.allSubjName]
        self.allSubjName = [os.path.basename(
            allFilePath_) for allFilePath_ in self.allSubjName]
        self.allSubjName = pd.DataFrame(self.allSubjName)
        self.allSubjName_raw = self.allSubjName
        return self

    def fetch_folerNameContainingFile(self):
        """Record each file's immediate parent-folder name.

        Used when the direct parent is a modality folder (e.g. 'dti')
        rather than the subject folder, so files can be filtered by it.
        """
        self.folerNameContainingFile = [os.path.dirname(
            allFilePath_) for allFilePath_ in self.allFilePath]
        self.folerNameContainingFile = [os.path.basename(
            folderName) for folderName in self.folerNameContainingFile]
        return self

    def fetch_allFileName(self):
        """Record all bare file names, for keyword-based filtering later.

        Useful when unwanted files (e.g. stray .txt among DICOMs) sit next
        to the files of interest.
        """
        self.allFileName = [os.path.basename(
            allFilePath_) for allFilePath_ in self.allFilePath]
        return self

    def screen_pathLogicalLocation_accordingTo_yourSubjName(self):
        """Build a boolean index of files whose subject name EXACTLY matches
        one of the reference subject names."""
        # ensure subjName_forSelect is a pd.Series of strings
        if isinstance(self.subjName_forSelect, type(pd.DataFrame([1]))):
            self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0]
        if not isinstance(self.subjName_forSelect[0], str):
            self.subjName_forSelect = pd.Series(
                self.subjName_forSelect, dtype='str')
        # Both sides of the match must share the same dtype.
        try:
            # extract the subject-identifying component from every folder name
            self.allSubjName = self.allSubjName.iloc[:, 0].str.findall(
                self.regularExpressionOfSubjName_forNeuroimageDataFiles)
            # non-matching names give empty lists -> treat as no match (None)
            allSubjName_temp = []
            for name in self.allSubjName.values:
                if name:
                    allSubjName_temp.append(name[0])
                else:
                    allSubjName_temp.append(None)
            self.allSubjName = allSubjName_temp
            self.allSubjName = pd.DataFrame(self.allSubjName)
            self.subjName_forSelect = pd.DataFrame(self.subjName_forSelect)
            # start all-False, flip True wherever a reference name matches
            self.logic_index_subjname = pd.DataFrame(
                np.zeros(len(self.allSubjName)) == 1)
            for i in range(len(self.subjName_forSelect)):
                self.logic_index_subjname = self.logic_index_subjname.mask(
                    self.allSubjName == self.subjName_forSelect.iloc[i, 0], True)
        except BaseException:
            print('subjName mismatch subjName_forSelected!\nplease check their type')
            sys.exit(0)
        return self

    def screen_pathLogicalLocation_accordingTo_folerNameContainingFile(self):
        """Boolean index of files whose parent-folder name CONTAINS the
        configured keyword (substring match); all-True when unset."""
        if self.folderNameContainingFile_forSelect:
            self.logic_index_foler_name_containing_file = [
                self.folderNameContainingFile_forSelect in oneName_ for oneName_ in self.folerNameContainingFile]
            self.logic_index_foler_name_containing_file = pd.DataFrame(
                self.logic_index_foler_name_containing_file)
        else:
            self.logic_index_foler_name_containing_file = np.ones(
                [len(self.folerNameContainingFile), 1]) == 1
            self.logic_index_foler_name_containing_file = pd.DataFrame(
                self.logic_index_foler_name_containing_file)
        return self

    def screen_pathLogicalLocation_accordingTo_fileName(self):
        """Boolean index of files whose name matches the regex/keyword;
        all-True when unset."""
        if self.keywordThatFileContain:
            self.allFileName = pd.Series(self.allFileName)
            self.logic_index_file_name = self.allFileName.str.contains(
                self.keywordThatFileContain)
        else:
            self.logic_index_file_name = np.ones(
                [len(self.allFileName), 1]) == 1
            self.logic_index_file_name = pd.DataFrame(
                self.logic_index_file_name)
        return self

    def fetch_totalLogicalLocation(self):
        """AND the three boolean indices: a row survives only if the file
        name, folder name and subject name checks all pass."""
        self.logic_index_all = pd.concat(
            [
                self.logic_index_file_name,
                self.logic_index_foler_name_containing_file,
                self.logic_index_subjname],
            axis=1)
        # row-sum equals column count  <=>  all three flags are True
        self.logic_index_all = np.sum(
            self.logic_index_all,
            axis=1) == np.shape(
            self.logic_index_all)[1]
        return self

    def fetch_selectedFilePath_accordingPathLogicalLocation(self):
        """Slice paths, extracted subject names and raw folder names down to
        the rows selected by the combined boolean index."""
        # file paths
        self.allFilePath = pd.DataFrame(self.allFilePath)
        self.allSelectedFilePath = self.allFilePath[self.logic_index_all]
        self.allSelectedFilePath = self.allSelectedFilePath.dropna()
        # extracted subject names
        self.allSubjName = pd.DataFrame(self.allSubjName)
        self.allSelectedSubjName = self.allSubjName[self.logic_index_all]
        self.allSelectedSubjName = self.allSelectedSubjName.dropna()
        # raw (un-extracted) folder names
        self.allSubjName_raw = pd.DataFrame(self.allSubjName_raw)
        self.allSelectedSubjName_raw = self.allSubjName_raw[self.logic_index_all]
        self.allSelectedSubjName_raw = self.allSelectedSubjName_raw.dropna()
        return self

    def copy_allDicomsOfOneSubj(self, i, subjName):
        """Copy or move all selected files belonging to ONE subject.

        NOTE(review): if ``saveInToOneOrMoreFolder`` is neither recognised
        value, ``output_folder`` stays unbound and the copy/move below
        raises NameError; the caller pre-validates the value.
        """
        n_allSelectedSubj = len(self.allSelectedSubjName_raw)
        # one folder per subject: savePath/<subjName>/<files>
        if self.saveInToOneOrMoreFolder == 'saveToEachSubjFolder':
            output_folder = os.path.join(self.savePath, subjName)
            if not os.path.exists(output_folder):
                os.makedirs(output_folder)
        # flat layout: savePath/<subjName><suffix>
        elif self.saveInToOneOrMoreFolder == 'saveToOneFolder':
            output_folder = os.path.join(
                self.savePath, subjName + self.saveNameSuffix)
        # all row indices of this subject's files
        fileIndex = self.allSelectedSubjName_raw[(
            self.allSelectedSubjName_raw.values == subjName)].index.tolist()
        # copy OR move OR do nothing, per flags
        if self.ifCopy == 1 and self.ifMove == 0:
            [shutil.copy(self.allSelectedFilePath.loc[fileIndex_, :][0],
                         output_folder) for fileIndex_ in fileIndex]
        elif self.ifCopy == 0 and self.ifMove == 1:
            [shutil.move(self.allSelectedFilePath.loc[fileIndex_, :][0],
                         output_folder) for fileIndex_ in fileIndex]
        elif self.ifCopy == 0 and self.ifMove == 0:
            print('### No copy and No move ###\n')
        else:
            print('### Cannot copy and move at the same time! ###\n')
        print('Copy the {}/{}th subject: {} OK!\n'.format(i +
                                                          1, n_allSelectedSubj, subjName))

    def copy_allDicomsOfAllSubj_multiprocess(self):
        """Fan out per-subject copies over a thread pool (capped at CPU-1)."""
        s = time.time()
        # validate the layout switch up front
        if self.saveInToOneOrMoreFolder == 'saveToEachSubjFolder':
            pass
        elif self.saveInToOneOrMoreFolder == 'saveToOneFolder':
            pass
        else:
            print(
                "###没有指定复制到一个文件夹还是每个被试文件夹###\n###{}跟'saveToOneFolder' OR 'saveToEachSubjFolder'都不符合###".format(
                    self.saveInToOneOrMoreFolder))
        # de-duplicated subject names
        uniSubjName = self.allSelectedSubjName_raw.iloc[:, 0].unique()
        print('Copying...\n')
        # cap worker count at CPU count - 1
        cores = multiprocessing.cpu_count()
        if self.n_processess > cores:
            self.n_processess = cores - 1
        with ThreadPoolExecutor(self.n_processess) as executor:
            for i, subjName in enumerate(uniSubjName):
                task = executor.submit(
                    self.copy_allDicomsOfOneSubj, i, subjName)
        print('=' * 30)
        e = time.time()

    def main_run(self):
        """Full pipeline: walk -> extract names -> screen -> (log) -> copy."""
        # gather all paths and derived names
        self = self.walkAllPath()
        self = self.fetch_allFilePath()
        self = self.fetch_allSubjName()
        self = self.fetch_allFileName()
        # screening (fileName != subjName: e.g. 'xxx.nii' vs 'subjxxx')
        self = self.fetch_folerNameContainingFile()
        self = self.screen_pathLogicalLocation_accordingTo_yourSubjName()
        self = self.screen_pathLogicalLocation_accordingTo_folerNameContainingFile()
        self = self.screen_pathLogicalLocation_accordingTo_fileName()
        self = self.fetch_totalLogicalLocation()
        self = self.fetch_selectedFilePath_accordingPathLogicalLocation()
        # reference subjects for whom no file was found (set difference)
        self.unmatched_ref = \
            pd.DataFrame(list(
                set.difference(set(list(self.subjName_forSelect.astype(np.int32).iloc[:, 0])),
                               set(list(self.allSelectedSubjName.astype(np.int32).iloc[:, 0])))
            )
            )
        print('=' * 50 + '\n')
        print(
            'Files that not found are : {}\n\nThey may be saved in:\n[{}]\n'.format(
                self.unmatched_ref.values,
                self.savePath))
        print('=' * 50 + '\n')
        # optional audit logs written into savePath
        if self.ifSaveLog:
            now = time.localtime()
            now = time.strftime("%Y-%m-%d %H:%M:%S", now)
            uniSubjName = self.allSelectedSubjName.iloc[:, 0].unique()
            uniSubjName = [uniSubjName_ for uniSubjName_ in uniSubjName]
            uniSubjName = pd.DataFrame(uniSubjName)
            self.allSelectedFilePath.to_csv(
                os.path.join(
                    self.savePath,
                    'log_allSelectedFilePath.txt'),
                index=False,
                header=False)
            allSelectedSubjPath = [os.path.dirname(
                allSelectedFilePath_) for allSelectedFilePath_ in self.allSelectedFilePath.iloc[:, 0]]
            allSelectedSubjPath = pd.DataFrame(
                allSelectedSubjPath).drop_duplicates()
            allSelectedSubjPath.to_csv(
                os.path.join(
                    self.savePath,
                    'log_allSelectedSubjPath.txt'),
                index=False,
                header=False)
            uniSubjName.to_csv(
                os.path.join(
                    self.savePath,
                    'log_allSelectedSubjName.txt'),
                index=False,
                header=False)
            self.unmatched_ref.to_csv(
                os.path.join(
                    self.savePath,
                    'log_unmatched_reference.txt'),
                index=False,
                header=False)
            self.allSubjName.to_csv(
                os.path.join(
                    self.savePath,
                    'log_allSubjName.txt'),
                index=False,
                header=False)
            # append a timestamped record of the inputs used for this run
            f = open(os.path.join(self.savePath, "log_copy_inputs.txt"), 'a')
            f.write("\n\n")
            f.write('====================' + now + '====================')
            f.write("\n\n")
            f.write("referencePath is: " + self.referencePath)
            f.write("\n\n")
            f.write(
                "folderNameContainingFile_forSelect are: " +
                self.folderNameContainingFile_forSelect)
            f.write("\n\n")
            f.write("num_countBackwards is: " + str(self.num_countBackwards))
            f.write("\n\n")
            f.write("regularExpressionOfSubjName_forNeuroimageDataFiles is: " +
                    str(self.regularExpressionOfSubjName_forNeuroimageDataFiles))
            f.write("\n\n")
            f.write("keywordThatFileContain is: " +
                    str(self.keywordThatFileContain))
            f.write("\n\n")
            f.write("neuroimageDataPath is: " + self.neuroimageDataPath)
            f.write("\n\n")
            f.write("savePath is: " + self.savePath)
            f.write("\n\n")
            f.write("n_processess is: " + str(self.n_processess))
            f.write("\n\n")
            f.close()
        # finally execute the copy/move, if requested
        if self.ifRun:
            self.copy_allDicomsOfAllSubj_multiprocess()
        return self
if __name__ == '__main__':
    import lc_copy_selected_file_V5 as copy
    # Example run: collect all ALFF maps of the HC group into one folder.
    path = r'J:\dynamicALFF\Results\static_ALFF\ALFF_FunImgARWDFCB'
    folder = r'J:\dynamicFC\state\folder_HC.xlsx'
    save_path = r'J:\dynamicALFF\Results\static_ALFF\test\HC_ALFF'
    sel = copy.copy_fmri(
        referencePath=folder,
        regularExpressionOfsubjName_forReference='([1-9]\d*)',
        folderNameContainingFile_forSelect='',
        num_countBackwards=1,
        regularExpressionOfSubjName_forNeuroimageDataFiles='([1-9]\d*)',
        keywordThatFileContain='^ALFF',
        neuroimageDataPath=path,
        savePath=save_path,
        n_processess=6,
        ifSaveLog=0,
        ifCopy=1,
        ifMove=0,
        saveInToOneOrMoreFolder='saveToOneFolder',
        saveNameSuffix='',
        ifRun=1)
    result = sel.main_run()
|
dongmengshi/easylearn | eslearn/utils/lc_featureSelection_variance.py | <filename>eslearn/utils/lc_featureSelection_variance.py
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 14:38:20 2018
dimension reduction with VarianceThreshold using sklearn.
Feature selector that removes all low-variance features.
@author: lenovo
"""
from sklearn.feature_selection import VarianceThreshold
import numpy as np
#
# Demo data: 100 samples x 10 random features plus 5 constant
# (zero-variance) columns that variance thresholding should remove.
np.random.seed(1)
X = np.random.randn(100, 10)
X = np.hstack([X, np.zeros([100, 5])])
#
def featureSelection_variance(X, thrd):
    """Remove features whose variance does not exceed ``thrd``.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input feature matrix.
    thrd : float
        Variance threshold passed to ``sklearn.feature_selection.VarianceThreshold``.

    Returns
    -------
    X_selected : ndarray
        ``X`` restricted to the columns whose variance exceeds ``thrd``.
    mask : ndarray of bool
        Support mask marking which original columns were kept.
    """
    selector = VarianceThreshold(threshold=thrd)
    X_selected = selector.fit_transform(X)
    mask = selector.get_support()
    return X_selected, mask
# Tiny smoke test: with the default threshold (0.0) the constant first and
# last columns are dropped; .variances_ exposes per-column variances.
X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
selector = VarianceThreshold()
selector.fit_transform(X)
selector.variances_
|
dongmengshi/easylearn | eslearn/utils/lc_read_dicominfo_subjlevel.py | <filename>eslearn/utils/lc_read_dicominfo_subjlevel.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 15:30:50 2019
This script is used to read dicom informations from one subject which contains several series folders.
@author: lenovo
"""
import sys
import os
import numpy

# Make the project root (parent of the current working directory) importable
# so that 'Utils.lc_read_dicominfo_base' resolves when run as a script.
homedir = os.path.dirname(os.getcwd())
sys.path.append(homedir)
from concurrent.futures import ThreadPoolExecutor  # NOTE(review): currently unused
from Utils.lc_read_dicominfo_base import readdcmseries
def run(subjpath, get_seriesinfo):
    """Read DICOM information for every series folder of one subject.

    Parameters
    ----------
    subjpath : str
        One subject's folder path, containing several series sub-folders.
    get_seriesinfo : bool-like
        Forwarded to ``readdcmseries``; whether to extract series info.

    Returns
    -------
    tuple of lists
        (spacing, machine, seriesname, shape, errorseries), one entry per
        series sub-folder.
    """
    entries = os.listdir(subjpath)
    series_dirs = [os.path.join(subjpath, entry) for entry in entries]
    total = len(series_dirs)
    spacing = []
    machine = []
    seriesname = []
    shape = []
    errorseries = []
    for idx, series_dir in enumerate(series_dirs):
        print(f'running {idx+1}/{total} \n: {series_dir}...')
        _, spc, mch, _, shp, err = readdcmseries(series_dir, get_seriesinfo)
        spacing.append(spc)
        machine.append(mch)
        shape.append(shp)
        errorseries.append(err)
        seriesname.append(series_dir)
    return spacing, machine, seriesname, shape, errorseries
def main():
    """CLI entry point: read one subject's DICOM info and save it as text.

    Example:
        python lc_read_dicominfo_subjlevel.py -sp <subjdir> -gs True -op <outdir>

    Writes 'errorseries.txt' and 'dcminfo.txt' into --outpath.
    """
    import argparse

    def _str2bool(value):
        # Fix: the original used type=str, so '-gs False' produced the
        # truthy string 'False'. Parse the flag value explicitly instead.
        return str(value).strip().lower() in ('1', 'true', 'y', 'yes')

    parser = argparse.ArgumentParser()
    parser.add_argument('-sp', '--subjpath', type=str, help='所有被试根目录')
    parser.add_argument('-gs', '--get_seriesinfo', type=_str2bool, help='是否提取dcm series信息')
    parser.add_argument('-op', '--outpath', type=str, help='保存结果的路径')
    args = parser.parse_args()
    # scan every series folder of the subject
    spacing, machine, seriesname, shape, errorseries = run(args.subjpath, args.get_seriesinfo)
    dcminfo = numpy.column_stack([seriesname, machine, spacing, shape])
    # save results
    numpy.savetxt(os.path.join(args.outpath, 'errorseries.txt'), errorseries, fmt='%s', delimiter=' ')
    numpy.savetxt(os.path.join(args.outpath, 'dcminfo.txt'),
                  dcminfo, fmt='%s', delimiter=',',
                  header='series_name, machine, spacing_x, spacing_y, slice_thickness, x, y , z',
                  comments='')
    print('Done!')
if __name__ == '__main__':
    # Direct debug run on a single subject (bypasses the CLI in main()).
    subjpath = r'D:\dms\13567701_CHENSHUANG_R03509555'
    spacing, machine, seriesname, shape, errorseries = run(subjpath, True)
    # (previous ad-hoc batch/debug snippets removed; see main() for the
    # supported save path)
|
dongmengshi/easylearn | eslearn/SSD_classification/ML/lc_pca_svc_pooling.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on 2019/11/20
This script is used to training a linear svc model using training dataset,
and test this model using test dataset with pooling cross-validation stratage.
All datasets (4 datasets) were concatenate into one single dataset, then using cross-validation strategy.
Classfier: Linear SVC
Dimension reduction: PCA
@author: <NAME>
Email: <EMAIL>
"""
import sys
import numpy as np
from sklearn import svm
from sklearn.model_selection import KFold
from sklearn import preprocessing
import eslearn.utils.el_preprocessing as elprep
import eslearn.utils.lc_dimreduction as dimreduction
from eslearn.utils.lc_evaluation_model_performances import eval_performance
class PCASVCPooling():
    """
    Pooling-strategy SZ-vs-HC classification: concatenate four datasets into
    one, then run k-fold cross-validation with per-fold standardization,
    optional PCA, and a linear SVC.

    Parameters:
    ----------
    dataset_our_center_550 : path str
        path of dataset 1.
        NOTE: The first column of the dataset is subject unique index, the second is the diagnosis label(0/1),
        the rest of columns are features. The other dataset are the same as this dataset.
    dataset_206: path str
        path of dataset 2
    dataset_COBRE: path str
        path of dataset 3
    dataset_UCAL: path str
        path of dataset 4
    is_dim_reduction: bool
        if perform dimension reduction (PCA)
    components: float
        How many percentages of the cumulatively explained variance to be retained. This is used to select the top principal components.
    cv: int
        How many folds of the cross-validation.
    out_name: str
        The name of the output results.

    Returns:
    --------
    Classification results, such as accuracy, sensitivity, specificity, AUC and figures that used to report.
    """

    # NOTE: this class uses 'sel' as the conventional 'self' parameter name.
    def __init__(sel,
                 dataset_our_center_550=r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\dataset_550.npy',
                 dataset_206=r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\dataset_206.npy',
                 dataset_COBRE=r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\dataset_COBRE.npy',
                 dataset_UCAL=r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\dataset_UCLA.npy',
                 is_dim_reduction=True,
                 components=0.95,
                 cv=5):
        sel.dataset_our_center_550 = dataset_our_center_550
        sel.dataset_206 = dataset_206
        sel.dataset_COBRE = dataset_COBRE
        sel.dataset_UCAL = dataset_UCAL
        sel.is_dim_reduction = is_dim_reduction
        sel.components = components
        sel.cv = cv

    def main_function(sel):
        """Run the pooled k-fold cross-validation and collect per-fold metrics.

        Populates sel.accuracy / sensitivity / specificity / AUC (one value
        per fold), sel.coef (feature weights, back-projected through PCA when
        used) and sel.special_result ([uid, true label, decision, prediction]
        rows, ordered by fold).
        """
        print('Training model and testing...\n')
        # load the four .npy datasets ([uid, label, features...] per row)
        dataset_our_center_550 = np.load(sel.dataset_our_center_550)
        dataset_206 = np.load(sel.dataset_206)
        dataset_COBRE = np.load(sel.dataset_COBRE)
        dataset_UCAL = np.load(sel.dataset_UCAL)
        # split each dataset into features (col 2..) and labels (col 1)
        features_our_center_550 = dataset_our_center_550[:, 2:]
        features_206 = dataset_206[:, 2:]
        features_COBRE = dataset_COBRE[:, 2:]
        features_UCAL = dataset_UCAL[:, 2:]
        label_our_center_550 = dataset_our_center_550[:, 1]
        label_206 = dataset_206[:, 1]
        label_COBRE = dataset_COBRE[:, 1]
        label_UCAL = dataset_UCAL[:, 1]
        # pool everything into one dataset (the "pooling" strategy)
        data_all = np.concatenate(
            [features_our_center_550, features_206, features_UCAL, features_COBRE], axis=0)
        label_all = np.concatenate(
            [label_our_center_550, label_206, label_UCAL, label_COBRE], axis=0)
        # unique subject IDs; UCLA/COBRE subjects get sentinel uid = -1
        uid_our_center_550 = np.int32(dataset_our_center_550[:, 0])
        uid_206 = np.int32(dataset_206[:, 0])
        uid_all = np.concatenate([uid_our_center_550, uid_206,
                                  np.zeros(len(label_UCAL, )) - 1,
                                  np.zeros(len(label_COBRE, )) - 1], axis=0)
        uid_all = np.int32(uid_all)
        # accumulators across folds
        sel.label_test_all = np.array([], dtype=np.int16)
        train_index = np.array([], dtype=np.int16)
        test_index = np.array([], dtype=np.int16)
        sel.decision = np.array([], dtype=np.int16)
        sel.prediction = np.array([], dtype=np.int16)
        sel.accuracy = np.array([], dtype=np.float16)
        sel.sensitivity = np.array([], dtype=np.float16)
        sel.specificity = np.array([], dtype=np.float16)
        sel.AUC = np.array([], dtype=np.float16)
        sel.coef = []
        kf = KFold(n_splits=sel.cv, shuffle=True, random_state=0)
        for i, (tr_ind, te_ind) in enumerate(kf.split(data_all)):
            print(f'------{i+1}/{sel.cv}...------\n')
            train_index = np.int16(np.append(train_index, tr_ind))
            test_index = np.int16(np.append(test_index, te_ind))
            feature_train = data_all[tr_ind, :]
            label_train = label_all[tr_ind]
            feature_test = data_all[te_ind, :]
            label_test = label_all[te_ind]
            sel.label_test_all = np.int16(np.append(sel.label_test_all, label_test))
            # per-fold standardization (fit on train, applied to test)
            prep = elprep.Preprocessing(data_preprocess_method='StandardScaler', data_preprocess_level='subject')
            feature_train, feature_test = prep.data_preprocess(feature_train, feature_test)
            # optional PCA fitted on the training fold only
            if sel.is_dim_reduction:
                feature_train, feature_test, model_dim_reduction = sel.dimReduction(
                    feature_train, feature_test, sel.components)
                print(f'After dimension reduction, the feature number is {feature_train.shape[1]}')
            else:
                print('No dimension reduction perfromed\n')
            # fit the linear SVC on this fold
            print('training and testing...\n')
            model = sel.training(feature_train, label_train)
            coef = model.coef_
            # back-project SVC weights to original feature space when PCA used
            if sel.is_dim_reduction:
                sel.coef.append(model_dim_reduction.inverse_transform(coef))
            else:
                sel.coef.append(coef)
            # predict on the held-out fold
            pred, dec = sel.testing(model, feature_test)
            sel.prediction = np.append(sel.prediction, np.array(pred))
            sel.decision = np.append(sel.decision, np.array(dec))
            # per-fold performance metrics
            acc, sens, spec, auc = eval_performance(label_test, pred, dec,
                                                    accuracy_kfold=None, sensitivity_kfold=None, specificity_kfold=None, AUC_kfold=None,
                                                    verbose=1, is_showfig=0)
            sel.accuracy = np.append(sel.accuracy, acc)
            sel.sensitivity = np.append(sel.sensitivity, sens)
            sel.specificity = np.append(sel.specificity, spec)
            sel.AUC = np.append(sel.AUC, auc)
        # per-subject results aligned with the concatenated fold order
        uid_all_sorted = np.int32(uid_all[test_index])
        sel.special_result = np.concatenate(
            [uid_all_sorted, sel.label_test_all, sel.decision, sel.prediction], axis=0).reshape(4, -1).T
        print('Done!')
        return sel

    def re_sampling(sel, feature, label):
        """Random over-sampling for unbalanced data (currently unused)."""
        from imblearn.over_sampling import RandomOverSampler
        ros = RandomOverSampler(random_state=0)
        feature_resampled, label_resampled = ros.fit_resample(feature, label)
        from collections import Counter
        print(sorted(Counter(label_resampled).items()))
        return feature_resampled, label_resampled

    def dimReduction(sel, train_X, test_X, pca_n_component):
        """Fit PCA on the training fold and apply it to train and test."""
        train_X, trained_pca = dimreduction.pca(train_X, pca_n_component)
        test_X = trained_pca.transform(test_X)
        return train_X, test_X, trained_pca

    def training(sel, train_X, train_y):
        """Fit a class-balanced linear SVC (fixed seed for reproducibility)."""
        svc = svm.SVC(kernel='linear', C=1, class_weight='balanced',
                      max_iter=5000, random_state=0)
        svc.fit(train_X, train_y)
        return svc

    def testing(sel, model, test_X):
        """Return (predicted labels, decision-function values) for test_X."""
        predict = model.predict(test_X)
        decision = model.decision_function(test_X)
        return predict, decision

    def save_results(sel, data, name):
        """Pickle ``data`` to file ``name``."""
        import pickle
        with open(name, 'wb') as f:
            pickle.dump(data, f)

    def save_fig(sel, out_name):
        """Save ROC and classification figures summarising all folds."""
        acc, sens, spec, auc = eval_performance(sel.label_test_all, sel.prediction, sel.decision,
                                                sel.accuracy, sel.sensitivity, sel.specificity, sel.AUC,
                                                verbose=0, is_showfig=1, legend1='HC', legend2='SZ', is_savefig=1,
                                                out_name=out_name)
#
if __name__ == '__main__':
    # Run the full pooled classification pipeline and persist the outputs.
    sel = PCASVCPooling()
    results = sel.main_function()
    sel.save_fig(out_name=r'D:\WorkStation_2018\SZ_classification\Figure\Classification_performances_pooling.pdf')
    sel.save_results(results.__dict__,
                     r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\results_pooling.npy')
    # Report mean then standard deviation of each cross-validation metric,
    # in the same order as before: accuracy, sensitivity, specificity, AUC.
    for metric in (sel.accuracy, sel.sensitivity, sel.specificity, sel.AUC):
        print(np.mean(metric))
        print(np.std(metric))
|
dongmengshi/easylearn | eslearn/visualization/lc_test_circle.py |
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Simplified BSD
import numpy as np
import pytest
import matplotlib.pyplot as plt
from mne.viz import plot_connectivity_circle, circular_layout
def test_plot_connectivity_circle():
    """Smoke-test mne's circular connectivity plot and its input validation."""
    labels = ['aaa', 'bbb', 'ccc', 'ddd', 'eee', 'fff']
    node_order = list(labels)
    # Three groups of two nodes each.
    angles = circular_layout(labels, node_order, start_pos=90,
                             group_boundaries=[0, 2, 4])
    # Deterministic "connectivity" matrix; weak entries are zeroed out.
    con = np.random.RandomState(0).randn(6, 6)
    con[con < 0.7] = 0
    plot_connectivity_circle(con, labels, n_lines=60, node_angles=angles,
                             title='test', colormap='RdBu_r', vmin=0, vmax=2,
                             linewidth=.5, facecolor='k')
    plt.show()
    # Invalid group boundaries must raise.
    for bad_boundaries in ([-1], [20, 0]):
        pytest.raises(ValueError, circular_layout, labels, node_order,
                      group_boundaries=bad_boundaries)
if __name__ == "__main__":
test_plot_connectivity_circle() |
dongmengshi/easylearn | eslearn/utils/lc_financial.py | <filename>eslearn/utils/lc_financial.py
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 13 16:14:22 2018
财务
@author: lenovo
"""
# import
import pandas as pd
import numpy as np
# input
# Source export: pipe-delimited bank-statement text file.
textFile = r'I:\其他文件\老舅财务\李锰等\201702\201702-1.txt'
# Account holder of interest.  NOTE(review): not referenced by
# extractDataFromTxt below (the name filter there is commented out).
targetName = '张华'
# Columns to keep: holder name, account no., transaction date, summary,
# debit/credit flag, amount, investor, card no.
targetItem = ['户名', '帐号', '交易日期', '摘要',
              '借贷标志', '交易金额', '投资人', '卡号']
def extractDataFromTxt():
    """Read the pipe-delimited statement file and keep only ``targetItem`` columns.

    Returns
    -------
    pandas.DataFrame
        Rows of the module-level ``textFile`` restricted to the columns listed
        in the module-level ``targetItem`` (columns absent from the file are
        silently ignored).
    """
    # The export is '|'-delimited; the python engine copes with the wide rows.
    df = pd.read_table(textFile, engine='python', delimiter="|")
    # Boolean mask over the file's columns: True where the column is wanted.
    # (Replaces the old vstack-then-sum construction; also drops an unused
    # `allName` local left over from a commented-out name filter.)
    all_columns = df.columns
    keep = np.zeros(len(all_columns), dtype=bool)
    for item in targetItem:
        keep |= (all_columns == item)
    content = df.iloc[:, keep]
    return content
|
dongmengshi/easylearn | eslearn/utils/el_create_random_balanced_sequence.py | <filename>eslearn/utils/el_create_random_balanced_sequence.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 07:50:18 2020
@author: <NAME>
Email: <EMAIL>
"""
import numpy as np
import time
class CreateSequence(object):
    """
    This class is used to create random and balanced sequences for k stimulations
    This task must meet the following three conditions:
    1. There are no s continuous stimulus types.
    2. Each types of stimulation has the same number of stimulation.
    3. All stimulation occurs randomly.
    Parameters:
    -----------
    data: dict,
        Keys are stimulation label (int); items are stimulation id for each stimulation label (array).
    s: int
        Continuous upper limit
    rt: int
        Repeat times for each category of stimulation.
    initial_rand_seed: int
        initial random seed
    tolerance: int
        How many iteration if you can not get accept results, given that available keys is not in key_all_for_picking
    return
    -----------
    random_squence: numpy array
        created random and balanced sequence
    """

    def __init__(sel, data={1:np.arange(0, 60, 1), 2:np.arange(60, 90, 1), 3:np.arange(90, 120)},
                 s=3, rt=50, initial_rand_seed=5, tolerance=1000):
        # Demo default: three categories (1, 2, 3); category 1 has 60 distinct
        # concrete stimuli, the other two have 30 each.
        # NOTE(review): mutable default argument — shared between instances if
        # a caller mutates it; confirm callers always pass their own dict.
        sel.data = data
        sel.s = s
        sel.rt = rt
        sel.initial_rand_seed = initial_rand_seed
        sel.tolerance = tolerance
        # ---------
        sel.category_of_stimutations = list(data.keys())
        sel.n = len(sel.category_of_stimutations) * sel.rt  # total sequence length
        np.random.seed(sel.initial_rand_seed)

    def main(sel):
        # Outer loop: retry with a new random seed up to `tolerance` times.
        for tol in range(sel.tolerance):
            # Pre-pad with s sentinel keys so the inner loop never has to test
            # i >= sel.s (saves time).  Remember: the first s elements of both
            # arrays are dropped before returning.
            sel.key_to_store = np.arange(-sel.s, 0, 1)
            sel.random_squence = np.zeros([sel.n + sel.s, ])  # first s entries dropped at the end
            sel.key_all_for_picking = np.repeat(sel.category_of_stimutations, sel.rt, 0)
            for i in np.arange(sel.s, sel.n + sel.s, 1):
                # Check whether the previous s-1 stored keys are all identical;
                # if they are, the next key must be a different category.
                # Case: predecessors differ — any remaining key may be drawn.
                if len(np.unique(sel.key_to_store[np.arange(-1, -sel.s, -1)])) > 1:
                    id_selected = np.random.randint(len(sel.key_all_for_picking))
                    key_choose = sel.key_all_for_picking[id_selected]
                    sel.key_all_for_picking = np.delete(sel.key_all_for_picking, id_selected)
                    sel.key_to_store = np.append(sel.key_to_store, key_choose)
                    sel.random_squence[i] = sel.pick_one_stimulation(key_choose)
                # Case: previous s-1 keys identical — exclude that category.
                else:
                    # NOTE(review): [[-1, -1]] indexes the last element twice, so
                    # only the last key is excluded; for s == 3 that matches the
                    # "previous s-1 identical" condition, but confirm for other s.
                    key_available_pick_point = np.array(list(set(sel.category_of_stimutations) - set(sel.key_to_store[[-1, -1]])))
                    # Chose those keys that exist in sel.key_all_for_picking
                    is_available_keys = np.isin(key_available_pick_point , sel.key_all_for_picking)
                    # If have at least one availabel key in sel.key_all_for_picking
                    if any(is_available_keys):
                        key_available_pick_point = key_available_pick_point[is_available_keys]
                        key_choose = key_available_pick_point[np.random.randint(len(key_available_pick_point))]
                        loc_bool = np.where(sel.key_all_for_picking == key_choose)[0]
                        id_selected = loc_bool[np.random.randint(len(loc_bool))]
                        sel.key_all_for_picking = np.delete(sel.key_all_for_picking, id_selected)
                        sel.key_to_store = np.append(sel.key_to_store, key_choose)
                        sel.random_squence[i] = sel.pick_one_stimulation(key_choose)
                    # If have no availabel key in sel.key_all_for_picking, then go to next tol and rand_seed.
                    else:
                        break
            else:  # for-else pair: All iteration reached: succeed
                # Delete the first sel.s (sentinel) items
                return (np.delete(sel.random_squence, np.arange(0, sel.s, 1), axis=0),
                        np.delete(sel.key_to_store, np.arange(0, sel.s, 1), axis=0),
                        sel.initial_rand_seed)
            # Not all iteration reached: failed — reseed with tol and retry.
            print(f'Failed!\nThe {tol}th Try...')
            sel.rand_seed = tol
            np.random.seed(sel.rand_seed)
            continue

    def pick_one_stimulation(sel, key_choose):
        """
        Pick one stimulation from the one stimulation category.
        """
        return sel.data[key_choose][np.random.randint(len(sel.data[key_choose]))]
if __name__ == "__main__":
st = time.time()
sel = CreateSequence()
rand_sequ, category_of_stimutations, rand_seed = sel.main()
et = time.time()
print(f"Running time = {et - st}")
print(f"Repeat times for each category of stimutation is {sel.rt}")
print(f"Stimutation category are {sel.category_of_stimutations}")
print("--"*20)
for i in np.unique(category_of_stimutations):
print(f"Number of created {i}th category = {np.sum(category_of_stimutations == i)} ")
|
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_svc_rfe_cv_byYourSelf.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on Wed self.decision 5 21:12:49 2018
自定义训练集和测试集,进行训练和测试
rfe-svm-CV
input:
k=3:k-fold
step=0.1:rfe step
num_jobs=1: parallel
scale_method='StandardScaler':standardization method
pca_n_component=0.9
permutation=0
@author: <NAME>
new: 函数统一返回给self
"""
# =============================================================================
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\machine_learning_python\Utils')
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\LC_Machine_learning-(Python)\Machine_learning\classfication')
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\machine_learning_python\Machine_learning\neural_network')
from lc_featureSelection_rfe import rfeCV
import lc_pca as pca
import lc_scaler as scl
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
#==============================================================================
class svc_rfe_cv():
    """Linear SVC with RFE feature selection on a user-defined train/test split.

    Pipeline: train/test split -> scaling -> optional PCA -> cross-validated
    RFE+SVC training -> test-set prediction and performance report.  Each main
    step stores its outputs on ``self`` and returns ``self``.
    """
    # initial parameters
    def __init__(self,
                 k=3,
                 seed=10,
                 step=0.1,
                 num_jobs=1,
                 scale_method='StandardScaler',
                 pca_n_component=0.8,
                 permutation=0,
                 show_results=1,
                 show_roc=0):
        # k: number of CV folds used inside rfeCV.
        self.k=k
        self.seed=seed  # random seed (NOTE(review): not forwarded to train_test_split below, which hard-codes random_state=0)
        self.step=step  # fraction of features removed per RFE step
        self.num_jobs=num_jobs  # parallel jobs for rfeCV
        self.scale_method=scale_method
        self.pca_n_component=pca_n_component  # in (0, 1): explained-variance ratio to keep; otherwise PCA is skipped
        self.permutation=permutation
        self.show_results=show_results  # print metrics after testing
        self.show_roc=show_roc  # additionally plot the ROC curve

    def main_svc_rfe_cv(self,x,y):
        """Run the whole split/scale/PCA/train/test pipeline; returns self."""
        # user-defined training and test sets
        print('training model and testing using '+ str(self.k)+'-fold CV...\n')
        # split data
        x_train, x_test,y_train,self.y_test=train_test_split(x, y, random_state=0)
        # scale
        x_train,x_test=self.scaler(x_train,x_test,self.scale_method)
        # pca (only when pca_n_component is a ratio in (0, 1))
        if 0<self.pca_n_component<1:
            x_train,x_test,trained_pca=self.dimReduction(x_train,x_test,self.pca_n_component)
        else:
            pass
        # train
        model,weight=self.training(x_train,y_train,\
                         step=self.step, cv=self.k,n_jobs=self.num_jobs,\
                         permutation=self.permutation)
        # fetch original-space weights (back-projected through PCA if used)
        self.weight_all=pd.DataFrame([])
        if 0<self.pca_n_component<1:
            weight=trained_pca.inverse_transform(weight)
        self.weight_all=pd.concat([self.weight_all,pd.DataFrame(weight)],axis=1)
        # test
        self.predict=pd.DataFrame([])
        self.decision=pd.DataFrame([])
        prd,de=self.testing(model,x_test)
        prd=pd.DataFrame(prd)
        de=pd.DataFrame(de)
        self.predict=pd.concat([self.predict,prd])
        self.decision=pd.concat([self.decision,de])
        # print and (optionally) plot the model performance
        if self.show_results:
            self.eval_prformance()
        return self

    def scaler(self,train_X,test_X,scale_method):
        # Fit the scaler on train only, then apply it to test (no leakage).
        train_X,model=scl.scaler(train_X,scale_method)
        test_X=model.transform(test_X)
        return train_X,test_X

    def dimReduction(self,train_X,test_X,pca_n_component):
        # Fit PCA on train only, then project test with the same model.
        train_X,trained_pca=pca.pca(train_X,pca_n_component)
        test_X=trained_pca.transform(test_X)
        return train_X,test_X,trained_pca

    def training(self,x,y,\
                 step, cv,n_jobs,permutation):
        # refCV: cross-validated recursive feature elimination + SVC
        model,weight=rfeCV(x,y,step, cv,n_jobs,permutation)
        return model,weight

    def testing(self,model,test_X):
        # Return hard predictions and continuous decision values.
        predict=model.predict(test_X)
        decision=model.decision_function(test_X)
        return predict,decision

    def eval_prformance(self):
        """Compute accuracy/sensitivity/specificity/AUC on the test set; returns self."""
        # accuracy, specificity (recall of negative) and sensitivity (recall of positive)
        self.accuracy= accuracy_score (self.y_test,self.predict.values)
        report=classification_report(self.y_test,self.predict.values)
        report=report.split('\n')
        # NOTE(review): parsing the plain-text report by line/column index is
        # fragile across sklearn versions — confirm lines 2/3 are class 0/1
        # and that column 2 is recall for the installed version.
        self.specificity=report[2].strip().split(' ')
        self.sensitivity=report[3].strip().split(' ')
        self.specificity=float([spe for spe in self.specificity if spe!=''][2])
        self.sensitivity=float([sen for sen in self.sensitivity if sen!=''][2])
        # confusion matrix
        self.confusion_matrix=confusion_matrix(self.y_test,self.predict.values)
        # roc and auc
        fpr, tpr, thresh = roc_curve(self.y_test,self.decision.values)
        self.auc=roc_auc_score(self.y_test,self.decision.values)
        # print performances
        # print('混淆矩阵为:\n{}'.format(self.confusion_matrix))
        print('\naccuracy={:.2f}\n'.format(self.accuracy))
        print('sensitivity={:.2f}\n'.format(self.sensitivity))
        print('specificity={:.2f}\n'.format(self.specificity))
        print('auc={:.2f}\n'.format(self.auc))
        if self.show_roc:
            fig,ax=plt.subplots()
            ax.plot(figsize=(5, 5))
            ax.set_title('ROC Curve')
            ax.set_xlabel('False Positive Rate')
            ax.set_ylabel('True Positive Rate')
            ax.grid(True)
            ax.plot(fpr, tpr,'-')
            # centering the spines in the axes is disabled below
            ax.spines['top'].set_visible(False)  # hide top frame
            ax.spines['right'].set_visible(False)  # hide right frame
            # ax.spines['bottom'].set_position(('axes',0.5 ))
            # ax.spines['left'].set_position(('axes', 0.5))
        return self
#
if __name__=='__main__':
    # Import the 1-channel CNN data loader so the data match that experiment,
    # making the two models directly comparable.
    import lc_CNN_DynamicFC_1channels as CNN
    sel=CNN.CNN_FC_1channels()
    sel=sel.load_data_and_label()
    sel=sel.prepare_data()
    x=sel.data
    y=sel.label
    y=np.argmax(y,axis=1)  # one-hot labels -> class indices
    import lc_svc_rfe_cv_byYourSelf as lsvc
    model=lsvc.svc_rfe_cv(k=3)
    results=model.main_svc_rfe_cv(x,y)
    results=results.__dict__
|
dongmengshi/easylearn | eslearn/preprocessing.py | def data_preprocess(sel, feature_train, feature_test, data_preprocess_method, data_preprocess_level):
'''
This function is used to preprocess features
Method 1: preprocess data in group level, one feature by one feature.
Method 2: preprocess data in subject level.
'''
# Method 1: Group level preprocessing.
if data_preprocess_level == 'group':
feature_train, model = elscaler.scaler(feature_train, data_preprocess_method)
feature_test = model.transform(feature_test)
elif data_preprocess_level == 'subject':
# Method 2: Subject level preprocessing.
scaler = preprocessing.StandardScaler().fit(feature_train.T)
feature_train = scaler.transform(feature_train.T) .T
scaler = preprocessing.StandardScaler().fit(feature_test.T)
feature_test = scaler.transform(feature_test.T) .T
else:
print('Please provide which level to preprocess features\n')
return
return feature_train, feature_test |
dongmengshi/easylearn | eslearn/utils/lc_group_all_dcm_accordingto_series_radiomics.py | <reponame>dongmengshi/easylearn<filename>eslearn/utils/lc_group_all_dcm_accordingto_series_radiomics.py
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 6 18:10:18 2019
@author: lenovo
"""
import sys
sys.path.append(r'F:\黎超\dynamicFC\Code\lc_rsfmri_tools_python-master\Utils')
import os
import numpy as np
import nibabel as nib
import shutil
from lc_read_nii import read_sigleNii_LC
from lc_read_nii import save_nii
class GroupSeries():
    """Group dcm series to each series folder.

    Attr:
        all_subj_path: folder that holds every subject's dicom files
        out_path: root where the grouped series are written (per-series
            sub-folders are created automatically)
    Return:
        NO return, only perform grouped dcm
    Original directory like this: root/subj$i/all_dcm.dcm
    Output diretory like this: root/subj&i/uniqueID_S$i/all_dcm.dcm
    """

    def __init__(sel, all_subj_path=r'D:\dms-lymph-nodes\1_finish',
                 out_path=r'D:\dms-lymph-nodes\1_finish'):
        sel.all_subj_path = all_subj_path
        sel.out_path = out_path

    def read_roi_path(sel):
        # Expand all_subj_path into one absolute path per subject folder.
        sel.subj_name = os.listdir(sel.all_subj_path)
        sel.all_subj_path = [os.path.join(
            sel.all_subj_path, filename) for filename in sel.subj_name]
        return sel

    def group_series_for_all_subj(sel):
        # Group every subject in turn, reporting progress.
        n_subj = len(sel.all_subj_path)
        num_proc = np.arange(0, n_subj, 1)
        for i, file_path, subjname in zip(num_proc, sel.all_subj_path, sel.subj_name):
            print("Grouping {}/{} subject\n".format(i+1, n_subj))
            sel.group_series_for_one_subj(file_path, subjname)

    def group_series_for_one_subj(sel, file_path, subjname):
        """
        load dcm--split dcm according series name--save to
        """
        # load dcm
        dcm_name = os.listdir(file_path)
        # exclude folder, only include file (anything with a '.' in the name)
        dcm_name = np.array([dn for dn in dcm_name if len(dn.split('.')) > 1])
        dcm_path = np.array([os.path.join(file_path, dn) for dn in dcm_name])
        # NOTE(review): assumes the series tag is the second-to-last '_' field
        # of each dicom file name — confirm for this naming scheme.
        s_name = [dcm.split('_')[-2] for dcm in dcm_name]
        # group dcm by unique series name
        uni_sname = np.unique(s_name)
        s_logic = [np.where(np.array(s_name) == sn) for sn in uni_sname]
        # split and save
        for loc, sname in zip(s_logic, uni_sname):
            # split
            sdcmpath = dcm_path[loc]
            sdcmname = dcm_name[loc]
            # create the per-series folder '<subj>_<series>'
            save_folder_name = os.path.join(
                sel.out_path, subjname, subjname + '_' + sname)
            if not os.path.exists(save_folder_name):
                os.mkdir(save_folder_name)
            # save (best-effort move: a file may already have been moved)
            for dp, dn in zip(sdcmpath, sdcmname):
                try:
                    shutil.move(dp, os.path.join(save_folder_name, dn))
                except FileNotFoundError:
                    print('{} may be moved'.format(dp))

    def main(sel):
        # Full pipeline: enumerate subjects, then group every subject's series.
        sel.read_roi_path()
        sel.group_series_for_all_subj()
if __name__ == "__main__":
sel = GroupSeries()
print(sel.all_subj_path)
# sel.main()
|
dongmengshi/easylearn | eslearn/GUI/el_logger.py | import logging
from logging import handlers
def logger():
    """Configure the root logger (console + file handlers) and emit sample records."""
    # initialize the (root) logger
    logger = logging.getLogger()
    # set the overall logging level
    logger.setLevel(logging.INFO)
    # fmt sets the record format; datefmt sets the format of asctime
    formatter = logging.Formatter(fmt='[%(asctime)s]%(levelname)s:%(message)s',
                                  datefmt='%m/%d/%Y %I:%M:%S %p')
    # send log output to the console (WARNING and above)
    console = logging.StreamHandler()
    console.setLevel(logging.WARNING)
    console.setFormatter(formatter)
    logger.addHandler(console)
    # send log output to a file (ERROR and above)
    # NOTE(review): hard-coded Windows path; repeated calls keep appending
    # handlers to the root logger, duplicating output.
    file_logging = logging.FileHandler(r'D:\My_Codes\easylearn-fmri\eslearn\gui_test\log\data_loading.log')
    file_logging.setLevel(logging.ERROR)
    file_logging.setFormatter(formatter)
    logger.addHandler(file_logging)
    # file output with a cap on the size of a single log file
    # file_rotating_file = handlers.RotatingFileHandler('app_rotating.log', maxBytes=1024, backupCount=3)
    # file_rotating_file.setLevel(logging.WARNING)
    # file_rotating_file.setFormatter(formatter)
    # logger.addHandler(file_rotating_file)
    # file output rotated on a fixed time interval
    # file_time_rotating = handlers.TimedRotatingFileHandler("app_time.log", when="s", interval=10, backupCount=5)
    # file_time_rotating.setLevel(logging.INFO)
    # file_time_rotating.setFormatter(formatter)
    # logger.addHandler(file_time_rotating)
    # usage demo: emit one record per level
    logger.debug('Debug Message')
    logger.info('Info Message')
    logger.warning('Warning Message111')
    logger.error('Error Message')
    logger.critical('Critical Message')
if __name__ == '__main__':
    # BUGFIX: this module defines logger(), not easylearn_logger(); the old
    # call raised NameError when the script was run directly.
    logger()
dongmengshi/easylearn | eslearn/utils/download_asd.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 4 20:47:55 2019
@author: lenovo
"""
# download_abide_preproc.py
#
# Author: <NAME>, 2015
'''
This script downloads data from the Preprocessed Connetomes Project's
ABIDE Preprocessed data release and stores the files in a local
directory; users specify derivative, pipeline, strategy, and optionally
age ranges, sex, site of interest
Usage:
python download_abide_preproc.py -d <derivative> -p <pipeline>
-s <strategy> -o <out_dir>
[-lt <less_than>] [-gt <greater_than>]
[-x <sex>] [-t <site>]
'''
# Main collect and download function
# Main collect and download function
def collect_and_download(derivative, pipeline, strategy, out_dir, less_than, greater_than):
    '''
    Collect and download images from the ABIDE preprocessed directory on
    FCP-INDI's S3 bucket.

    Parameters
    ----------
    derivative : string
        derivative or measure of interest
    pipeline : string
        pipeline used to process data of interest
    strategy : string
        noise removal strategy used to process data of interest
    out_dir : string
        filepath to a local directory to save files to
    less_than : float
        upper age (years) threshold for participants of interest (exclusive)
    greater_than : float
        lower age (years) threshold for participants of interest (exclusive)

    Returns
    -------
    None
        this function does not return a value; it downloads data from
        S3 to a local directory
    '''
    # Import packages.  BUGFIX: `import urllib` alone does not make
    # urllib.request available in Python 3 — import the submodule explicitly.
    import os
    import urllib.request

    # Init variables
    mean_fd_thresh = 0.2  # exclude scans with mean framewise displacement >= this
    s3_prefix = 'https://s3.amazonaws.com/fcp-indi/data/Projects/ABIDE_Initiative'
    s3_pheno_path = '/'.join([s3_prefix, 'Phenotypic_V1_0b_preprocessed1.csv'])

    # Format input arguments to be lower case, if not already
    derivative = derivative.lower()
    pipeline = pipeline.lower()
    strategy = strategy.lower()

    # ROI derivatives are time series (.1D); everything else is a NIfTI image
    if 'roi' in derivative:
        extension = '.1D'
    else:
        extension = '.nii.gz'

    # If output path doesn't exist, create it
    if not os.path.exists(out_dir):
        print('Could not find %s, creating now...' % out_dir)
        os.makedirs(out_dir)

    # Load the phenotype file from S3.  BUGFIX: urlopen yields bytes lines;
    # the old str(bytes) calls produced "b'...'" artifacts in the parsed
    # fields, so decode (and strip the newline) instead.
    s3_pheno_file = urllib.request.urlopen(s3_pheno_path)
    pheno_list = [line.decode('utf-8').rstrip('\r\n')
                  for line in s3_pheno_file.readlines()]

    # Get header indices
    header = pheno_list[0].split(',')
    try:
        site_idx = header.index('SITE_ID')
        file_idx = header.index('FILE_ID')
        age_idx = header.index('AGE_AT_SCAN')
        sex_idx = header.index('SEX')
        mean_fd_idx = header.index('func_mean_fd')
    except Exception as exc:
        err_msg = 'Unable to extract header information from the pheno file: %s'\
                  '\nHeader should have pheno info: %s\nError: %s'\
                  % (s3_pheno_path, str(header), exc)
        raise Exception(err_msg)

    # Go through pheno file and build download paths
    print('Collecting images of interest...')
    s3_paths = []
    for pheno_row in pheno_list[1:]:
        # Comma separate the row
        cs_row = pheno_row.split(',')
        try:
            # See if it was preprocessed, and read the fields we filter on.
            row_file_id = cs_row[file_idx]
            row_age = float(cs_row[age_idx])
            row_mean_fd = float(cs_row[mean_fd_idx])
        except Exception:
            print('Error extracting info from phenotypic file, skipping...')
            continue
        # If the filename isn't specified, skip
        if row_file_id == 'no_filename':
            continue
        # If mean fd is too large, skip
        if row_mean_fd >= mean_fd_thresh:
            continue
        # Keep only participants strictly inside (greater_than, less_than)
        if greater_than < row_age < less_than:
            filename = row_file_id + '_' + derivative + extension
            s3_path = '/'.join([s3_prefix, 'Outputs', pipeline, strategy, derivative, filename])
            print('Adding %s to download queue...' % s3_path)
            s3_paths.append(s3_path)

    # And download the items
    total_num_files = len(s3_paths)
    print(f'***\n***Totle target files number is: {total_num_files}***\n***')
    for path_idx, s3_path in enumerate(s3_paths):
        # BUGFIX: str.lstrip strips a *character set*, not a prefix — slice the
        # known prefix off instead, then drop the leading slash.
        rel_path = s3_path[len(s3_prefix):].lstrip('/')
        download_file = os.path.join(out_dir, rel_path)
        download_dir = os.path.dirname(download_file)
        if not os.path.exists(download_dir):
            os.makedirs(download_dir)
        try:
            if not os.path.exists(download_file):
                print('Retrieving: %s' % download_file)
                urllib.request.urlretrieve(s3_path, download_file)
                print(f'{path_idx}/{total_num_files}')
            else:
                print('File %s already exists, skipping...' % download_file)
        except Exception:
            print('There was a problem downloading %s.\n'
                  'Check input arguments and try again.' % s3_path)

    # Print all done
    print('Done!')
# Make module executable
# Make module executable
if __name__ == '__main__':
    # Hard-coded download configuration.  (The large commented-out argparse
    # CLI that used to live here was dead code and has been removed; recover
    # it from version control if a CLI is ever needed again.)
    derivative = 'degree_weighted'
    pipeline = 'dparsf'
    strategy = 'filt_global'
    out_dir = r'F:\Data\ASD'
    less_than = 100     # upper age bound (years), exclusive
    greater_than = 0    # lower age bound (years), exclusive
    # Call the collect and download routine
    collect_and_download(derivative, pipeline, strategy, out_dir, less_than, greater_than)
dongmengshi/easylearn | eslearn/feature_engineering/feature_selection/el_fscore.py | """ Selection features using F-score
Copy form https://www.zealseeker.com/archives/f-score-for-feature-selection-python/
TODO: Extend to multiple-class classification (Refrence: http://www.joca.cn/EN/Y2010/V30/I4/993#)
"""
def fscore_core(np,nn,xb,xbp,xbn,xkp,xkn):
    '''
    Compute the F-score of every feature for a binary classification problem.

    np: number of positive instances (a parameter here, not numpy)
    nn: number of negative instances
    xb: list of the average of each feature over all instances
    xbp: list of the average of each feature over the positive instances
    xbn: list of the average of each feature over the negative instances
    xkp: per-feature list of the positive instances' values
    xkn: per-feature list of the negative instances' values
    Returns a list with one F-score per feature.
    reference: http://link.springer.com/chapter/10.1007/978-3-540-35488-8_13
    '''
    def sigmap(i, np, xbp, xkp):
        # Sum of squared deviations of the positive instances for feature i.
        return sum((xkp[i][k] - xbp[i]) ** 2 for k in range(np))

    def sigman(i, nn, xbn, xkn):
        # Sum of squared deviations of the negative instances for feature i.
        # BUGFIX: removed a leftover Python-2 `print sum(...)` debug statement
        # that was a SyntaxError under Python 3.
        return sum((xkn[i][k] - xbn[i]) ** 2 for k in range(nn))

    n_feature = len(xb)
    fscores = []
    for i in range(n_feature):
        # Between-class separation over within-class scatter.
        fscore_numerator = (xbp[i] - xb[i]) ** 2 + (xbn[i] - xb[i]) ** 2
        fscore_denominator = (1 / float(np - 1)) * sigmap(i, np, xbp, xkp) + \
                             (1 / float(nn - 1)) * sigman(i, nn, xbn, xkn)
        fscores.append(fscore_numerator / fscore_denominator)
    return fscores
def fscore(feature,classindex):
    '''
    Compute per-feature F-scores for a binary problem.

    feature: a matrix whose rows are instances and columns are features
    classindex: 1 indicates positive and 0 indicates negative
    '''
    n_instance = len(feature)
    n_feature = len(feature[0])
    np = sum(classindex)          # number of positives
    nn = n_instance - np          # number of negatives
    xkp, xkn, xbp, xbn, xb = [], [], [], [], []
    for i in range(n_feature):
        # Split feature i's column into positive and negative instance values.
        pos_vals = [feature[k][i] for k in range(n_instance) if classindex[k] == 1]
        neg_vals = [feature[k][i] for k in range(n_instance) if classindex[k] != 1]
        xkp.append(pos_vals)
        xkn.append(neg_vals)
        # Class means and the overall mean for feature i.
        xbp.append(sum(pos_vals) / float(np))
        xbn.append(sum(neg_vals) / float(nn))
        xb.append((sum(pos_vals) + sum(neg_vals)) / float(n_instance))
    return fscore_core(np, nn, xb, xbp, xbn, xkp, xkn)
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_permutation_svc_njobs_noBlock.py | <filename>eslearn/machine_learning/classfication/lc_permutation_svc_njobs_noBlock.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 30 14:57:38 2018
permutation test: Parallel elastic net regression
Note.:The program is divided into many blocks
so as to avoid interruption.
input:
fileName=fileName to save results
@author: <NAME>
"""
#from scipy import io
import sys
sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\utils')
# import module
#from joblib import Parallel, delayed
from sklearn.externals.joblib import Parallel, delayed
#from lc_write_read_h5py import write_h5py
from lc_read_write_Mat import write_mat
#from read_write_Mat_LC import write_mat
import time,os
import numpy as np
import lc_svc_rfe_cv as lsvc
# def
def permutation(X,y,k,N_perm,fileName):
    """Run N_perm label-permutation SVC fits in parallel.

    X, y: data matrix and labels; k: number of CV folds; N_perm: number of
    permutations; fileName: directory where each permutation's .mat result
    is written by run_svc.
    """
    # instantiating object (all workers share one configured model)
    model=lsvc.svc_rfe_cv(permutation=1,num_jobs=1)
    # BUGFIX: time.clock() was removed in Python 3.8 — use perf_counter().
    start_time=time.perf_counter()
    Parallel(n_jobs=10,backend='threading')\
        (delayed(run_svc)(X,y,k,n_perm,model,fileName)\
        for n_perm in np.arange(N_perm))
    end_time=time.perf_counter()
    print('running time is: {:.1f} second'.format(end_time-start_time))
def run_svc(X,y,k,n_perm,model,fileName):
    """One permutation: shuffle the labels, fit/test the model, save to .mat."""
    print('we have already completed {} permutation'.format(n_perm))
    shuffled_labels = np.random.permutation(y)
    predict, dec, y_sorted, weight = model.main_svc_rfe_cv(X, shuffled_labels, k)
    # Persist this permutation's outputs as <fileName>/<n_perm>.mat
    write_mat(os.path.join(fileName, str(n_perm)),
              dataset_name=['predict', 'dec', 'y_sorted', 'weight'],
              dataset=[predict, dec, y_sorted, weight])
#
if __name__=='__main__':
    print('=====running======')
    # NOTE(review): X and y are never defined in this module — running the
    # script as-is raises NameError; presumably they were loaded interactively.
    permutation(X,y,k=5,N_perm=20,fileName=r'D:\myCodes\LC_MVPA\Python\MVPA_Python\perm')
dongmengshi/easylearn | eslearn/machine_learning/test/gcn_test.py | <gh_stars>0
import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.data import Data
class CGConv(MessagePassing):
    r"""The crystal graph convolutional operator from the
    `"Crystal Graph Convolutional Neural Networks for an
    Accurate and Interpretable Prediction of Material Properties"
    <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.145301>`_
    paper
    .. math::
        \mathbf{x}^{\prime}_i = \mathbf{x}_i + \sum_{j \in \mathcal{N}(i)}
        \sigma \left( \mathbf{z}_{i,j} \mathbf{W}_f + \mathbf{b}_f \right)
        \odot g \left( \mathbf{z}_{i,j} \mathbf{W}_s + \mathbf{b}_s \right)
    where :math:`\mathbf{z}_{i,j} = [ \mathbf{x}_i, \mathbf{x}_j,
    \mathbf{e}_{i,j} ]` denotes the concatenation of central node features,
    neighboring node features and edge features.
    In addition, :math:`\sigma` and :math:`g` denote the sigmoid and softplus
    functions, respectively.
    Args:
        channels (int): Size of each input sample.
        dim (int): Edge feature dimensionality.
        aggr (string, optional): The aggregation operator to use
            (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
            (default: :obj:`"add"`)
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    """
    def __init__(self, channels, dim, aggr='add', bias=True, **kwargs):
        super(CGConv, self).__init__(aggr=aggr, **kwargs)
        # Input and output sizes are identical (residual update in `update`).
        self.in_channels = channels
        self.out_channels = channels
        self.dim = dim
        # lin_f gates the message (sigmoid); lin_s transforms it (softplus).
        self.lin_f = Linear(2 * channels + dim, channels, bias=bias)
        self.lin_s = Linear(2 * channels + dim, channels, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        # Re-initialize both linear layers' weights.
        self.lin_f.reset_parameters()
        self.lin_s.reset_parameters()

    def forward(self, data):
        """"""
        x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
        # Aggregation is delegated to MessagePassing.propagate -> message/update.
        return self.propagate(edge_index, x=x, edge_attr=edge_attr)

    def message(self, x_i, x_j, edge_attr):
        # z_{i,j} = [x_i, x_j, e_{i,j}]: target node, source node, edge feats.
        z = torch.cat([x_i, x_j, edge_attr], dim=-1)
        return self.lin_f(z).sigmoid() * F.softplus(self.lin_s(z))

    def update(self, aggr_out, x):
        # Residual connection: x' = x + aggregated messages.
        return aggr_out + x

    def __repr__(self):
        return '{}({}, {}, dim={})'.format(self.__class__.__name__,
                                           self.in_channels, self.out_channels,
                                           self.dim)
if __name__ == "__main__":
    # Toy graph: 3 nodes with 1-dim features, 4 directed edges (2 undirected).
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], dtype=torch.long)
    x = torch.tensor([[-1], [0], [1]], dtype=torch.float)
    y = torch.tensor([[-1], [1], [1]], dtype=torch.float)
    # BUG FIX: edge_attr must have one row per edge (4 edges, not 3).
    edge_attr = torch.tensor([[1], [0], [0], [1]], dtype=torch.float)
    data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)
    # BUG FIX: train_mask/test_mask were referenced below but never defined
    # on `data`, so the original script crashed; define them explicitly.
    data.train_mask = torch.tensor([True, True, False])
    data.test_mask = torch.tensor([False, False, True])

    # Training
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = CGConv(1, 1).to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
    model.train()
    for epoch in range(20):
        optimizer.zero_grad()
        out = model(data)
        # BUG FIX: the targets are continuous (+/-1 floats), so use MSE;
        # nll_loss expects log-probabilities and integer class indices.
        loss = F.mse_loss(out[data.train_mask], data.y[data.train_mask])
        loss.backward()
        optimizer.step()

    # Evaluation
    model.eval()
    # BUG FIX: the output has a single channel, so argmax over dim=1 was
    # always 0; threshold the regression output at 0 to recover +/-1 labels.
    pred = model(data).sign()
    correct = float(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
    acc = correct / data.test_mask.sum().item()
    print('Accuracy: {:.4f}'.format(acc))
dongmengshi/easylearn | eslearn/utils/lc_split_roi_radiomics.py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 6 18:10:18 2019
@author: lenovo
"""
from lc_read_nii import save_nii
from lc_read_nii import read_sigleNii_LC
import nibabel as nib
import numpy as np
import os
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Utils')
from concurrent.futures import ThreadPoolExecutor
class SplitROI():
    """Split a fused (multi-label) ROI file into separate single-ROI files.

    Each single-ROI volume is saved into its own automatically created
    sub-folder: ``<out_path>/<subject>/ROI_<label>/<original file name>``.

    Attributes:
        all_roi_path: folder containing the fused ROI NIfTI files
        out_path: root folder for the per-ROI output files (sub-folders are
            created automatically)
        overcopy: if True, overwrite output files that already exist
    """
    def __init__(self, all_roi_path='', out_path=''):
        """Store all inputs; exit the program if either path is missing."""
        self.all_roi_path = all_roi_path
        self.out_path = out_path
        self.overcopy = True  # if over copy
        if self.all_roi_path == '':
            print(f'input path not given')
            self.my_exit()
        if self.out_path == '':
            print(f'output path not given')
            self.my_exit()

    def my_exit(self):
        # Abort the whole program with a non-zero exit status.
        sys.exit(1)

    def _read_roi_path(self):
        # Build the full path of every file in the input folder.
        file_name = os.listdir(self.all_roi_path)
        all_file_path = [
            os.path.join(
                self.all_roi_path,
                filename) for filename in file_name]
        return all_file_path

    def split_roi_for_all_subj(self, all_file_path):
        """Sequentially split every subject's fused ROI file."""
        n_subj = len(all_file_path)
        # with ThreadPoolExecutor(1) as executor:
        #     print("Multiprocessing begin...\n")
        for i, file_path in enumerate(all_file_path):
            print("spliting {}/{} subject\n".format(i + 1, n_subj))
            # self.split_roi_for_one_subj(i, n_subj, file_path)
            self.split_roi_for_one_subj(file_path)

    def read(self, nii_name):
        # NOTE(review): unused generator helper; kept for backward
        # compatibility, discards the NIfTI object returned by the reader.
        nii_data, nii_object = read_sigleNii_LC(nii_name)
        yield nii_data

    def split_roi_for_one_subj(self, file_path):
        """Load one NIfTI file, split it by ROI label, and save each ROI."""
        # load nii
        nii_name = file_path
        nii_data, nii_object = read_sigleNii_LC(nii_name)
        header = nii_object.header
        affine = nii_object.affine
        # split roi: every unique non-zero voxel value is one ROI label
        uni_label = np.unique(nii_data)
        uni_label = list(set(uni_label) - set([0]))  # drop the 0 background
        subjname = os.path.basename(file_path).split('.')[0]
        # split and save to nii
        for label in uni_label:
            # create the output folder <out_path>/<subject>/ROI_<label>
            save_folder_name = os.path.join(self.out_path, subjname, 'ROI_' + str(label))
            if not os.path.exists(save_folder_name):
                os.makedirs(save_folder_name)
            save_file_name = os.path.join(
                save_folder_name, os.path.basename(nii_name))
            if os.path.exists(save_file_name):
                print('{} exist!\n'.format(save_file_name))
                if self.overcopy:
                    print(f'overwrite!')
                else:
                    continue
            # split: the mask multiplication keeps the original label value
            # in the ROI voxels (the volume is NOT binarized)
            roi_logic = np.array(nii_data == label, dtype=float)
            roi = nii_data*roi_logic
            # ndarry to nifti object
            roi = nib.Nifti1Image(roi, affine=affine, header=header)
            # save
            save_nii(roi, save_file_name)

    def run(self):
        """Entry point: enumerate input files and split them all."""
        all_file_path = self._read_roi_path()
        self.split_roi_for_all_subj(all_file_path)
if __name__ == "__main__":
    # NOTE(review): all_roi_path is empty here, so __init__ prints a message
    # and terminates via sys.exit(1) before anything is split — supply a real
    # input folder before running.
    splitroi = SplitROI(all_roi_path='', out_path=r'I:\Project_Lyph\Raw\Grouped_ROI_Nocontrast_v1')
    splitroi.run()
dongmengshi/easylearn | eslearn/machine_learning/regression/lc_permutation_enet.py | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 7 11:18:41 2018
permutation test: Parallel elastic net regression
Note.:The program is divided into many blocks
so as to avoid interruption.
input:
fileName=fileName to save results
@author: <NAME>
"""
# import module
from joblib import Parallel, delayed
#from scipy import io
import sys
sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\utils')
from lc_write_read_h5py import write_h5py
#from read_write_Mat_LC import write_mat
import time
import numpy as np
import lc_elasticNet as enet
# def
def permutation(X, y, N_perm, batchsize, fileName):
    """Run a permutation test for elastic-net regression, in batches.

    The N_perm permutations are processed in batches of ``batchsize`` and the
    per-permutation results are appended to an HDF5 file by ``run_enet``, so
    an interruption loses at most one batch.

    Parameters
    ----------
    X, y : array-like
        Features and targets.
    N_perm : int
        Total number of permutations.
    batchsize : int
        Number of permutations per batch.
    fileName : str
        Path of the HDF5 results file.
    """
    # instantiate the elastic-net model (permutation mode, 1 inner job)
    model = enet.elasticNet_LC(permutation=1, num_jobs=1)
    blocks = int(np.ceil(N_perm / batchsize))
    # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter().
    start_time = time.perf_counter()
    start = 0
    # BUG FIX: clamp the first batch too, so batchsize > N_perm does not run
    # extra permutations.
    end = min(batchsize, N_perm)
    for i in range(blocks):
        print('completed {:.1f}%'.format((i * 100) / blocks))
        Parallel(n_jobs=8, backend='threading')(
            delayed(run_enet)(X, y, n_perm, model, fileName)
            for n_perm in np.arange(start, end))
        start += batchsize
        end = min(end + batchsize, N_perm)
    end_time = time.perf_counter()
    print('running time is: {:.1f} second'.format(end_time - start_time))
def run_enet(X,y,n_perm,model,fileName):
    """Run one permutation: shuffle y, fit the elastic net with nested CV,
    and append (Predict, y_sorted, Coef, r) to the HDF5 file under the
    group 'perm<n_perm>'."""
    y_rand=np.random.permutation(y)
    Predict,y_sorted,Coef,r=model.elasticNetCV_Outer(X,y_rand)#
    # write h5py
    write_h5py(fileName,'perm'+str(n_perm),['Predict','y_sorted','Coef','r'],\
               [Predict,y_sorted,Coef,r])
    # write mat (alternative output format, kept for reference)
    #    write_mat(fileName='enet_test.mat',\
    #              dataset_name=['Predict'+str(n_perm),\
    #                            'y_sorted'+str(n_perm),\
    #                            'Coef'+str(n_perm),\
    #                            'r'+str(n_perm)],\
    #              dataset=[Predict,y_sorted,Coef,r])
if __name__=='__main__':
    # BUG FIX: X and y were undefined at module scope, so running this script
    # raised NameError; generate a small synthetic dataset so the entry point
    # is self-contained.
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = rng.randn(50)
    permutation(X, y, N_perm=5, batchsize=1, fileName='t4test')
dongmengshi/easylearn | eslearn/utils/el_rename_by_replace.py | <reponame>dongmengshi/easylearn
import os
import re
def rename(directory, old_string, new_string):
    """Rename every file in *directory* by replacing a regex pattern.

    For example, to rename "MVPA_2019_schizophrenia" to
    "Machine_Learning_2019_schizophrenia", pass old_string="MVPA" and
    new_string="Machine_Learning".

    Parameters
    ----------
    directory : str
        Folder whose files are renamed in place.
    old_string : str
        Regular expression matched against each file name.
    new_string : str
        Replacement text (may use regex backreferences).
    """
    old_files_name = os.listdir(directory)
    new_files_name = [re.sub(old_string, new_string, file) for file in old_files_name]
    # BUG FIX: join with `directory`, not os.getcwd() — the old code failed
    # (or touched the wrong files) whenever the working directory differed
    # from `directory`. Also use a plain loop instead of a side-effect
    # list comprehension.
    for old_file_name, new_file_name in zip(old_files_name, new_files_name):
        if old_file_name != new_file_name:  # skip no-op renames
            os.rename(os.path.join(directory, old_file_name),
                      os.path.join(directory, new_file_name))
if __name__ == "__main__":
    # Example: normalize the casing of "Machine_Learning" in this folder's
    # file names. NOTE(review): hard-coded absolute path — edit before running.
    rename("D:/Papers/DorctorDegree", r"Machine_Learning", "Machine_learning")
dongmengshi/easylearn | eslearn/utils/tricks.py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 14:12:52 2018
@author: lenovo
"""
#import numpy as np
seq = [1, 2, 3, 5]

# A small named function, analogous to MATLAB's @ anonymous-function handle.
def f(x):
    """Return x cubed."""
    return x ** 3
f(2)  # evaluated but unused (demonstration only)

# Apply f to every element with map (a lazy iterator in Python 3),
# then materialize and print it.
myMap = map(f, seq)
print(list(myMap))

# Keep only the elements >= 2 using filter with a lambda.
result = list(filter(lambda x: x >= 2, seq))
print(result)
dongmengshi/easylearn | eslearn/SSD_classification/Visulization/lc_visualizing_performances.py | # -*- coding: utf-8 -*-
"""
This script is used to perform post-hoc analysis and visualization:
the classification performance of subsets (only for Schizophrenia Spectrum: SZ and Schizophreniform).
Unless otherwise specified, all results are for Schizophrenia Spectrum.
"""
#%%
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
sys.path.append(r'D:\My_Codes\easylearn\eslearn\statistics')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.pyplot import MultipleLocator
import pickle
from lc_binomialtest import binomial_test
#%% Inputs
scale_550_file = r'D:\WorkStation_2018\SZ_classification\Scale\10-24大表.xlsx'
scale_206_file = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100-WF.csv'
scale_206_drug_file = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_109_drug.xlsx'
classification_results_pooling_file = r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\results_pooling.npy'
classification_results_results_leave_one_site_cv_file = r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\results_leave_one_site_cv.npy'
classification_results_feu_file = r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\results_unmedicated_and_firstepisode_550.npy'
is_plot = 1
is_savefig = 1
#%% Load and preprocess
scale_550 = pd.read_excel(scale_550_file)
scale_206 = pd.read_csv(scale_206_file)
scale_206_drug = pd.read_excel(scale_206_drug_file)
results_pooling = np.load(classification_results_pooling_file, allow_pickle=True)
results_leave_one_site_cv = np.load(classification_results_results_leave_one_site_cv_file, allow_pickle=True)
results_feu = np.load(classification_results_feu_file, allow_pickle=True)
# Per-subject classification results; column 0 holds the subject ID / folder
# number (cast to int so it can be merged with the scale tables below).
results_special = results_pooling['special_result']
results_special = pd.DataFrame(results_special)
results_special.iloc[:, 0] = np.int32(results_special.iloc[:, 0])
# Recode dataset-2 subject IDs ('NC...' -> '10...', 'SZ...' -> '20...') to
# numeric form so they match the numeric IDs in the results table.
scale_206['ID'] = scale_206['ID'].str.replace('NC','10')
scale_206['ID'] = scale_206['ID'].str.replace('SZ','20')
scale_206['ID'] = np.int32(scale_206['ID'])
scale_550['folder'] = np.int32(scale_550['folder'])
scale_206_drug['P0001'] = scale_206_drug['P0001'].str.replace('NC','10')
scale_206_drug['P0001'] = scale_206_drug['P0001'].str.replace('SZ','20')
scale_206_drug['P0001'] = np.int32(scale_206_drug['P0001'])
# Filter subjects that have .mat files (inner joins keep only subjects that
# appear in both the classification results and the clinical scales).
scale_550_selected = pd.merge(results_special, scale_550, left_on=0, right_on='folder', how='inner')
scale_206_selected = pd.merge(results_special, scale_206, left_on=0, right_on='ID', how='inner')
scale_206_selected = pd.merge(scale_206_selected, scale_206_drug, left_on=0, right_on='P0001', how='inner')
#%% ---------------------------------Calculate performance for Schizophrenia Spectrum subgroups-------------------------------
## Step 1: Dataset1
duration = 18 # Upper limit of first episode:
""" reference:
1. <NAME>, <NAME>, Schooler NR, et al. Comprehensive versus usual
community care for first-episode psychosis: 2-year outcomes from the NIMH
RAISE early treatment program. Am J Psychiatry. 2016;173(4):362-372. doi:10.1176/appi.ajp.2015.15050632.
2. Cognitive Impairment in Never-Medicated Individuals on the Schizophrenia Spectrum. doi:10.1001/jamapsychiatry.2020.0001"
"""
data_firstepisode_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['首发'] == 1) & (scale_550_selected['病程月'] <= duration) & (scale_550_selected['病程月'] >= 6)]
data_not_firstepisode_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) & ((scale_550_selected['首发'] == 0) | (scale_550_selected['病程月'] > duration))] # Including the persistent patients
data_schizophreniform_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['病程月'] < 6)]
data_shortdurationSZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['病程月'] <= duration) & (scale_550_selected['病程月'] >= 6)]
data_longdurationSZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['病程月'] > duration)]
onsetage_all_550 = scale_550_selected['Age_of_first_episode'][scale_550_selected['诊断']==3].dropna()
ind_young_onsetage_550 = onsetage_all_550.index[onsetage_all_550.values <= np.median(onsetage_all_550)]
ind_old_onsetage_550 = onsetage_all_550.index[onsetage_all_550.values > np.median(onsetage_all_550)]
data_young_onset_age_550 = scale_550_selected[scale_550_selected['诊断']==3].loc[ind_young_onsetage_550]
data_old_onset_age_550 = scale_550_selected[scale_550_selected['诊断']==3].loc[ind_old_onsetage_550]
data_medicated_SSD_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['用药'] == 1)]
data_unmedicated_SSD_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['用药'] == 0) ]
# Frist episode and nerver medicated
data_unmedicated_schizophreniform_550 = scale_550_selected[(scale_550_selected['诊断']==3) &
(scale_550_selected['病程月'] < 6) &
(scale_550_selected['用药'] == 0)]
data_unmedicated_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) &
(scale_550_selected['病程月'] >= 6) &
(scale_550_selected['用药'] == 0)]
data_firstepisode_unmedicated_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) &
(scale_550_selected['首发'] == 1) &
(scale_550_selected['病程月'] <= duration) &
(scale_550_selected['病程月'] >= 6) &
(scale_550_selected['用药'] == 0)]
data_chronic_unmedicated_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) &
(scale_550_selected['病程月'] > duration) &
(scale_550_selected['用药'] == 0)]
# data_unmedicated_SSD_550['folder'].to_csv(r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\feu_63.txt', index=False)
## Calculating Accuracy
acc_firstepisode_SZ_550 = np.sum(data_firstepisode_SZ_550[1]-data_firstepisode_SZ_550[3]==0)/len(data_firstepisode_SZ_550)
acc_not_firstepisode_SZ_550 = np.sum(data_not_firstepisode_SZ_550[1]-data_not_firstepisode_SZ_550[3]==0)/len(data_not_firstepisode_SZ_550)
acc_schizophreniform_550 = np.sum(data_schizophreniform_550[1]-data_schizophreniform_550[3]==0)/len(data_schizophreniform_550)
acc_shortduration_550 = np.sum(data_shortdurationSZ_550[1]-data_shortdurationSZ_550[3]==0)/len(data_shortdurationSZ_550)
acc_longduration_550 = np.sum(data_longdurationSZ_550[1]-data_longdurationSZ_550[3]==0)/len(data_longdurationSZ_550)
acc_young_onsetage_550 = np.sum(data_young_onset_age_550[1]-data_young_onset_age_550[3]==0)/len(data_young_onset_age_550)
acc_old_onsetage_550 = np.sum(data_old_onset_age_550[1]-data_old_onset_age_550[3]==0)/len(data_old_onset_age_550)
acc_medicated_SSD_550 = np.sum(data_medicated_SSD_550[1]-data_medicated_SSD_550[3]==0)/len(data_medicated_SSD_550)
acc_ummedicated_SSD_550 = np.sum(data_unmedicated_SSD_550[1]-data_unmedicated_SSD_550[3]==0)/len(data_unmedicated_SSD_550)
acc_unmedicated_schizophreniform_550 = np.sum(data_unmedicated_schizophreniform_550[1]-data_unmedicated_schizophreniform_550[3]==0) / len(data_unmedicated_schizophreniform_550)
acc_unmedicated_SZ_550 = np.sum(data_unmedicated_SZ_550[1]-data_unmedicated_SZ_550[3]==0) / len(data_unmedicated_SZ_550)
acc_firstepisode_unmedicated_SZ_550 = np.sum(data_firstepisode_unmedicated_SZ_550[1]-data_firstepisode_unmedicated_SZ_550[3]==0) / len(data_firstepisode_unmedicated_SZ_550)
acc_chronic_unmedicated_SZ_550 = np.sum(data_chronic_unmedicated_SZ_550[1]-data_chronic_unmedicated_SZ_550[3]==0) / len(data_chronic_unmedicated_SZ_550)
print(f'Accuracy of firstepisode in dataset550 = {acc_firstepisode_SZ_550}')
print(f'Accuracy of none-firstepisode in dataset550 = {acc_not_firstepisode_SZ_550}')
print(f'Accuracy of schizophreniform in dataset550 = {acc_schizophreniform_550}')
print(f'Accuracy of shortduration in dataset550 = {acc_shortduration_550}')
print(f'Accuracy of longduration in dataset550 = {acc_longduration_550}')
print(f'Accuracy of young onsetage of 550 = {acc_young_onsetage_550}')
print(f'Accuracy of old onsetage of 550 = {acc_old_onsetage_550}')
print(f'Accuracy of medicated SSD in dataset550 = {acc_medicated_SSD_550}')
print(f'Accuracy of ummedicated_SSD in dataset550 = {acc_ummedicated_SSD_550}')
print(f'Accuracy of firstepisode unmedicated SZ in dataset550 = {acc_firstepisode_unmedicated_SZ_550}')
print('-'*50)
# Step 2: Dataset 2
## Preprocessing
scale_206_selected['duration'] = [np.int32(duration) if duration != ' ' else 10000 for duration in scale_206_selected['duration']]
scale_206_selected['firstepisode'] = [np.int32(firstepisode) if firstepisode != ' ' else 10000 for firstepisode in scale_206_selected['firstepisode']]
scale_206_selected['CPZ_eq'] = [np.int32(duration) if duration != ' ' else 0 for duration in scale_206_selected['CPZ_eq']]
scale_206_selected['onsetage'] = [np.int32(duration) if duration != ' ' else 0 for duration in scale_206_selected['onsetage']]
## Filter subgroups
data_firstepisode_206 = scale_206_selected[(scale_206_selected['group']==1) & (scale_206_selected['firstepisode'] == 1) & (scale_206_selected['duration'] <= duration)]
data_notfirstepisode_206 = scale_206_selected[(scale_206_selected['group']==1) & ((scale_206_selected['firstepisode'] == 0) | (scale_206_selected['duration'] > duration))]
data_shortduration_206 = scale_206_selected[(scale_206_selected['group']==1) & (scale_206_selected['duration'] <= duration)]
data_longduration_206 = scale_206_selected[(scale_206_selected['group']==1) & (scale_206_selected['duration'] > duration)]
onsetage = scale_206_selected['onsetage'][scale_206_selected['group']==1]
data_young_onsetage_206 = scale_206_selected[(scale_206_selected['group']==1) & (onsetage <= np.median(onsetage))]
data_old_onsetage_206 = scale_206_selected[(scale_206_selected['group']==1) & (onsetage > np.median(onsetage))]
CPZ_eq = scale_206_selected['CPZ_eq'][scale_206_selected['group']==1]
data_drugless_206 = scale_206_selected[(scale_206_selected['group']==1) & (CPZ_eq <= np.median(CPZ_eq))]
data_drugmore_206 = scale_206_selected[(scale_206_selected['group']==1) & (CPZ_eq > np.median(CPZ_eq))]
## Calculating acc
acc_firstepisode_206 = np.sum(data_firstepisode_206[1]-data_firstepisode_206[3]==0)/len(data_firstepisode_206)
acc_notfirstepisode_206 = np.sum(data_notfirstepisode_206[1]-data_notfirstepisode_206[3]==0)/len(data_notfirstepisode_206)
acc_shortduration_206 = np.sum(data_shortduration_206[1]-data_shortduration_206[3]==0)/len(data_shortduration_206)
acc_longduration_206 = np.sum(data_longduration_206[1]-data_longduration_206[3]==0)/len(data_longduration_206)
acc_young_onsetage_206 = np.sum(data_young_onsetage_206[1]-data_young_onsetage_206[3]==0)/len(data_young_onsetage_206)
acc_old_onsetage_206 = np.sum(data_old_onsetage_206[1]-data_old_onsetage_206[3]==0)/len(data_old_onsetage_206)
acc_drugless_206 = np.sum(data_drugless_206[1]-data_drugless_206[3]==0)/len(data_drugless_206)
acc_drugmore_206 = np.sum(data_drugmore_206[1]-data_drugmore_206[3]==0)/len(data_drugmore_206)
##
print(f'Accuracy of first episode of 206 = {acc_firstepisode_206}')
print(f'Accuracy of recurrent of 206 = {acc_notfirstepisode_206}')
print(f'Accuracy of shortduration of 206 = {acc_shortduration_206}')
print(f'Accuracy of longduration of 206 = {acc_longduration_206}')
print(f'Accuracy of young onsetage of 206 = {acc_young_onsetage_206}')
print(f'Accuracy of old onsetage of 206 = {acc_old_onsetage_206}')
print(f'Accuracy of drugless of 206 = {acc_drugless_206}')
print(f'Accuracy of drugmore of 206 = {acc_drugmore_206}')
#%% -------------------------------------Visualization-----------------------------------------------
if is_plot:
accuracy_pooling = results_pooling['accuracy']
sensitivity_pooling = results_pooling['sensitivity']
specificity_pooling = results_pooling['specificity']
AUC_pooling = results_pooling['AUC']
performances_pooling = [accuracy_pooling, sensitivity_pooling, specificity_pooling, AUC_pooling]
performances_pooling = pd.DataFrame(performances_pooling)
accuracy_leave_one_site_cv = results_leave_one_site_cv['accuracy']
sensitivity_leave_one_site_cv = results_leave_one_site_cv['sensitivity']
specificity_leave_one_site_cv = results_leave_one_site_cv['specificity']
AUC_leave_one_site_cv = results_leave_one_site_cv['AUC']
performances_leave_one_site_cv = [accuracy_leave_one_site_cv, sensitivity_leave_one_site_cv, specificity_leave_one_site_cv, AUC_leave_one_site_cv]
performances_leave_one_site_cv = pd.DataFrame(performances_leave_one_site_cv)
accuracy_feu = results_feu['accuracy']
sensitivity_feu = results_feu['sensitivity']
specificity_feu = results_feu['specificity']
AUC_feu = results_feu['AUC']
performances_feu = [accuracy_feu, sensitivity_feu, specificity_feu, AUC_feu]
performances_feu = pd.DataFrame(performances_feu)
# Save weights to .mat file for visulazation using MATLAB.
import scipy.io as io
weight_pooling_1d = np.mean(np.squeeze(np.array(results_pooling['coef'])), axis=0)
weight_leave_one_out_cv_1d = np.mean(np.squeeze(np.array(results_leave_one_site_cv['coef'])), axis=0)
weight_feu_1d = np.mean(np.squeeze(np.array(results_feu['coef'])), axis=0)
mask = np.triu(np.ones([246, 246]), 1) == 1
weight_pooling = np.zeros([246, 246])
weight_leave_one_out_cv = np.zeros([246, 246])
weight_feu = np.zeros([246, 246])
weight_pooling[mask] = weight_pooling_1d
weight_leave_one_out_cv[mask] = weight_leave_one_out_cv_1d
weight_pooling = weight_pooling + weight_pooling.T;
weight_leave_one_out_cv = weight_leave_one_out_cv + weight_leave_one_out_cv.T
weight_feu = weight_feu + weight_feu.T
io.savemat(r'D:\WorkStation_2018\SZ_classification\Figure\weights.mat',
{'weight_pooling': weight_pooling,
'weight_leave_one_out_cv': weight_leave_one_out_cv,
'weight_feu': weight_feu})
# Bar: performances in the whole Dataset.
import seaborn as sns
plt.figure(figsize=(20,20))
all_mean = np.concatenate([np.mean(performances_pooling.values,1), np.mean(performances_leave_one_site_cv.values,1), np.mean(performances_feu.values,1)])
error = np.concatenate([np.std(performances_pooling.values, 1), np.std(performances_leave_one_site_cv.values, 1), np.std(performances_feu.values, 1)])
plt.subplot(2, 1, 1)
color = ['darkturquoise'] * 4 + ['paleturquoise'] * 4 + ['lightblue'] * 4
plt.bar(np.arange(0,len(all_mean)), all_mean, yerr = error,
capsize=5, linewidth=2, color=color)
plt.tick_params(labelsize=20)
plt.xticks(np.arange(0,len(all_mean)), ['Accuracy', 'Sensitivity', 'Sensitivity', 'AUC'] * 3, fontsize=20, rotation=45)
plt.title('Classification performances', fontsize=25, fontweight='bold')
y_major_locator=MultipleLocator(0.1)
ax = plt.gca()
ax.yaxis.set_major_locator(y_major_locator)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.grid(axis='y')
plt.fill_between(np.linspace(-0.4,3.4), 0.95, 1.08, color='darkturquoise')
plt.fill_between(np.linspace(3.6, 7.4), 0.95, 1.08, color='paleturquoise')
plt.fill_between(np.linspace(7.6, 11.4), 0.95, 1.08, color='lightblue')
ax=plt.gca()
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
# Bar: Dataset 1
plt.subplot(2,1,2)
barcont_550 = [acc_firstepisode_SZ_550, acc_not_firstepisode_SZ_550,
acc_schizophreniform_550, acc_shortduration_550, acc_longduration_550,
acc_young_onsetage_550, acc_old_onsetage_550,
acc_medicated_SSD_550, acc_ummedicated_SSD_550, acc_unmedicated_schizophreniform_550,
acc_unmedicated_SZ_550, acc_firstepisode_unmedicated_SZ_550, acc_chronic_unmedicated_SZ_550]
label_550 = ['First episode SZ', 'Recurrent SZ', 'Schizophreniform', 'Short duration SZ', 'Long duration SZ',
'Young onset age SSD','Elder onset age SSD',
'Medicated SSD', 'Unmedicated SSD',
'Unmedicated schizophreniform', 'Unmedicated SZ', 'First episode unmedicated SZ', 'Recurrent unmedicated SZ']
samplesize_550 = [data_firstepisode_SZ_550.shape[0], data_not_firstepisode_SZ_550.shape[0],
data_schizophreniform_550.shape[0], data_shortdurationSZ_550.shape[0], data_longdurationSZ_550.shape[0],
data_young_onset_age_550.shape[0], data_old_onset_age_550.shape[0],
data_medicated_SSD_550.shape[0], data_unmedicated_SSD_550.shape[0], data_unmedicated_schizophreniform_550.shape[0],
data_unmedicated_SZ_550.shape[0], data_firstepisode_unmedicated_SZ_550.shape[0], data_chronic_unmedicated_SZ_550.shape[0]]
mean_550 = [0, 0, 0,
data_shortdurationSZ_550['病程月'].mean(), data_longdurationSZ_550['病程月'].mean(),
data_young_onset_age_550['Age_of_first_episode'].mean(), data_old_onset_age_550['Age_of_first_episode'].mean(),
0, 0, 0,
0, 0, 0]
std_550 = [0, 0, 0,
data_shortdurationSZ_550['病程月'].std(), data_longdurationSZ_550['病程月'].std(),
data_young_onset_age_550['Age_of_first_episode'].std(), data_old_onset_age_550['Age_of_first_episode'].std(),
0, 0, 0,
0, 0, 0]
# Bar: Dataset 2
barcont_206 = [acc_firstepisode_206, acc_notfirstepisode_206,
acc_shortduration_206, acc_longduration_206,
acc_young_onsetage_206, acc_old_onsetage_206,
acc_drugless_206, acc_drugmore_206]
label_206 = ['First episode SZ', 'Recurrent SZ', 'Short duration SZ', 'Long duration SZ',
'Young onset age SZ','Elder onset age SZ',
'Larger dosage SZ', 'Small dosage SZ']
samplesize_206 = [data_firstepisode_206.shape[0], data_notfirstepisode_206.shape[0],
data_shortduration_206.shape[0], data_longduration_206.shape[0],
data_young_onsetage_206.shape[0], data_old_onsetage_206.shape[0],
data_drugless_206.shape[0], data_drugmore_206.shape[0]]
mean_206 = [0, 0,
data_shortduration_206['duration'].mean(), data_longduration_206['duration'].mean(),
data_young_onsetage_206['onsetage'].mean(), data_old_onsetage_206['onsetage'].mean(),
data_drugless_206['CPZ_eq'].mean(), data_drugmore_206['CPZ_eq'].mean()]
std_206 = [0, 0,
data_shortduration_206['duration'].std(), data_longduration_206['duration'].std(),
data_young_onsetage_206['onsetage'].std(), data_old_onsetage_206['onsetage'].std(),
data_drugless_206['CPZ_eq'].std(), data_drugmore_206['CPZ_eq'].std()]
## Plot
barcont_all = barcont_206 + barcont_550
label_all = label_206 + label_550
mean_all = mean_206 + mean_550
std_all = std_206 + std_550
samplesize_all = samplesize_206 + samplesize_550
color_206 = ['lightblue' for i in range(len(label_206))]
color_550 = ['darkturquoise' for i in range(len(label_550))]
# h = plt.barh(np.arange(0, len(barcont_all)), barcont_all, color=color)
h206 = plt.barh(np.arange(0, len(barcont_206)), barcont_206, color=color_206)
h550 = plt.barh(np.arange(len(barcont_206), len(barcont_206) + len(barcont_550)), barcont_550, color=color_550)
plt.legend([h550, h206], ['Dataset 1', 'Dataset 2'], fontsize=20)
plt.tick_params(labelsize=20)
plt.yticks(np.arange(0,len(barcont_all)), label_all, fontsize=20, rotation=0)
plt.title('Sensitivity of each subgroup of SSD in dataset 1 and dateset 2', fontsize=25, fontweight='bold')
x_major_locator=MultipleLocator(0.1)
ax = plt.gca()
ax.xaxis.set_major_locator(x_major_locator)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xlabel('Sensitivity', fontsize=25)
plt.grid(axis='x')
xticks = ax.get_xticks()
yticks = ax.get_yticks()
for i, (y, x, n, m, s) in enumerate(zip(yticks, barcont_all, samplesize_all, mean_all, std_all)):
    # Binomial test of each subgroup's sensitivity against chance level (0.5).
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin int() for the success count.
    p, _, _, _ = binomial_test(n, int(n * x), 0.5, 0.5)
    if m: plt.text(0.101, y-0.3, f'mean={m:.1f}({s:.1f})', fontsize=15)
    plt.text(0.31, y-0.3, 'N = %.0f' % n, fontsize=15)
    # Rows 16-20 (the unmedicated subgroups of dataset 1) are highlighted
    # with a larger bold font.
    if np.isin(i, (20, 19, 18, 17, 16)):
        plt.text(0.41, y-0.3, 'P = %.3f' % p, fontsize=16, color='k', fontweight='bold')
        plt.text(x+0.01, y-0.3, '%.2f' % x, fontsize=16, color='k', fontweight='bold')
    else:
        plt.text(0.41, y-0.3, 'P = %.3f' % p, fontsize=15)
        plt.text(x+0.01, y-0.3, '%.2f' % x, fontsize=15)
#%% Save to PDF format
# Use logical 'and' instead of bitwise '&' — same result for the 0/1 flags
# used here, but it expresses the intent and short-circuits correctly.
if is_savefig and is_plot:
    plt.tight_layout()
    plt.subplots_adjust(left=0.25, wspace=0.5, hspace=0.5)  # wspace/hspace: panel spacing
    pdf = PdfPages(r'D:\WorkStation_2018\SZ_classification\Figure\Classification_performances_all_cutoff' + str(duration) + '.pdf')
    pdf.savefig()
    pdf.close()
plt.show()
dongmengshi/easylearn | eslearn/developer/function_template.py | # -*- coding: utf-8 -*-
""" This module is used to perform dimension reduction
Created on Wed Jul 4 13:57:15 2018
@author: <NAME>
Email:<EMAIL>
GitHub account name: lichao312214129
Institution (company): Brain Function Research Section, The First Affiliated Hospital of China Medical University, Shenyang, Liaoning, PR China.
License: MIT
"""
from sklearn.decomposition import PCA
def pca_apply(train_x, test_x, pca_n_component):
    """Fit PCA on the training data, then apply the same model to the test data.

    Parameters
    ----------
    train_x : numpy.ndarray
        features in the training dataset
    test_x : numpy.ndarray
        features in the test dataset
    pca_n_component : float, range = (0, 1]
        how many percentages of the cumulatively explained variance to be retained. This is used to select the top principal components.

    Returns
    -------
    train_x_reduced : numpy.ndarray
        features in the training dataset after dimension reduction
    test_x_reduced : numpy.ndarray
        features in the test dataset after dimension reduction
    trained_pca : sklearn.decomposition.PCA
        the PCA model fitted on train_x (the original docstring omitted this
        third return value)
    """
    train_x_reduced, trained_pca = pca(train_x, pca_n_component)
    test_x_reduced = trained_pca.transform(test_x)
    return train_x_reduced, test_x_reduced, trained_pca
def pca(x, n_components):
    """Fit a PCA model on *x* and return the transformed data plus the model.

    Parameters
    ----------
    x : numpy.ndarray
        Feature matrix of the training dataset.
    n_components : float, range = (0, 1]
        Fraction of cumulatively explained variance to retain; selects the
        top principal components.

    Returns
    -------
    reduced_x : numpy.ndarray
        Training features after dimension reduction.
    trained_pca : sklearn.decomposition.PCA
        The fitted PCA model.
    """
    model = PCA(n_components=n_components)
    transformed = model.fit_transform(x)
    return transformed, model
# The following code is used to debug.
if __name__ == "__main__":
    from sklearn import datasets
    # Synthetic 3-class problem: 500 samples x 600 features.
    x, y = datasets.make_classification(n_samples=500, n_classes=3,
                                        n_informative=50, n_redundant=3,
                                        n_features=600, random_state=1)
    # Reduce train and "test" data (the same matrix here) to the components
    # explaining 90% of the variance.
    x1, x2, _ = pca_apply(x, x, 0.9)
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_svc_rfe_cv_excelcsv.py | # -*- coding: utf-8 -*-
"""
For 胜男姐
数据输入格式为excel
svc
@author: <NAME>
"""
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import os
import numpy as np
import pandas as pd
# import pickle
from sklearn.preprocessing import LabelEncoder
class SVCForDataFromExcel(object):
    """SVC classification (with RFE and cross-validation) for tabular data.

    The table is read from an Excel (.xls/.xlsx) or CSV file; one column
    holds the label and a set of columns holds the features. Training and
    cross-validated testing are delegated to lc_svc_rfe_cv_V3.SVCRfeCv.
    """
    def __init__(sel,
                 # ==============================================================
                 # Required inputs
                 patients_path,      # path of the Excel/CSV data file
                 col_name_of_label,  # name of the column holding the label
                 col_num_of_data,    # indices of the feature columns
                 is_save_results,    # save results? (True: save; False: don't)
                 save_path,          # folder in which to save the results
                 # Default parameters
                 kfold=10,           # number of cross-validation folds
                 scale_method='MinMaxScaler',
                 pca_n_component=0.8,  # PCA variance to keep; set to 1 for no reduction
                 show_results=1,     # print results to screen
                 show_roc=0,         # display the ROC curve
                 _seed=100,
                 step=0.05
                 # ==============================================================
                 ):
        sel.patients_path = patients_path
        sel.col_name_of_label = col_name_of_label
        sel.col_num_of_data = col_num_of_data
        sel.is_save_results = is_save_results
        sel.save_path = save_path
        sel.kfold = kfold
        sel.scale_method = scale_method
        sel.pca_n_component = pca_n_component
        sel.show_results = show_results
        sel.show_roc = show_roc
        sel._seed = _seed
        sel.step = step
        sel.results = None  # filled in by main() before save() is called
        print("SVCForDataFromExcel initated!")

    def _load_data(sel):
        """Load the table according to the file suffix (.xls/.xlsx/.csv)."""
        # TODO: extend to other data types
        suffix = os.path.basename(sel.patients_path).split('.')[-1]
        if suffix in ('xlsx', 'xls'):
            data = pd.read_excel(sel.patients_path)
        elif suffix == 'csv':
            data = pd.read_csv(sel.patients_path, encoding='gbk', engine='c')
        else:
            # Keep the original best-effort behavior: report and return None
            # (callers fail fast in _prep_data).
            print(f'Unspported data type!')
            return
        return data

    def _prep_data(sel, data):
        """Drop rows with missing values, split label/features, and
        integer-encode the labels to 0..n_classes-1."""
        data = data.dropna()
        label = data[sel.col_name_of_label].values
        data = data.iloc[:, sel.col_num_of_data].values
        le = LabelEncoder()
        le.fit(label)
        label = le.transform(label)
        return data, label

    def tr_te(sel, data, label):
        """Train and test an SVC with RFE inside cross-validation."""
        import lc_svc_rfe_cv_V3 as lsvc
        svc = lsvc.SVCRfeCv(pca_n_component=sel.pca_n_component,
                            show_results=sel.show_results,
                            show_roc=sel.show_roc,
                            outer_k=sel.kfold,
                            scale_method=sel.scale_method,
                            step=sel.step,
                            _seed=sel._seed)
        results = svc.svc_rfe_cv(data, label)
        return results

    def save(sel):
        """Pickle sel.results into save_path with a timestamped file name."""
        # BUG FIX: `pickle` was referenced here but its import was commented
        # out at the top of the file, so this method raised NameError; import
        # it locally (matching the local `import time` style).
        import pickle
        import time
        now = time.strftime("%Y%m%d%H%M%S", time.localtime())
        with open(os.path.join(sel.save_path, "".join(["results_", now, "_.pkl"])), "wb") as file:
            pickle.dump(sel.results, file, True)
        # # load pkl file
        # with open("".join(["results_",now,"_.pkl"]),"rb") as file:
        #     results = pickle.load(file)

    def main(sel):
        """Full pipeline: load -> preprocess -> train/test -> (optionally) save."""
        data = sel._load_data()
        data, label = sel._prep_data(data)
        results = sel.tr_te(data, label)
        # BUG FIX: save() reads sel.results, which was never assigned before
        # (AttributeError); store the results on the instance prior to saving.
        sel.results = results
        if sel.is_save_results:
            sel.save()
        return results
if __name__ == "__main__":
    # Example: classify benign vs. malignant cases from a radiomics table.
    sel = SVCForDataFromExcel(
        patients_path=r'D:\workstation_b\宝宝\allROIvenous_information.csv',  # location of the Excel/CSV data file
        col_name_of_label=r"Lable(malignant-1,benign-0)",  # name of the column holding the label
        col_num_of_data=np.arange(4, 400),  # column indices of the feature columns
        is_save_results=0,  # whether to save the results (True: save; False: do not save)
        save_path=r"D:\workstation_b\宝宝"  # directory in which to save the results
    )
    results = sel.main()
    # Dump every attribute of the results object for quick inspection
    a=results.__dict__
    for keys in a:
        print(f'{keys}:{a[keys]}')
#
# d0 = a[a['Lable(malignant-1,benign-0)']==0]
# d1 = a[a['Lable(malignant-1,benign-0)']==1]
# import matplotlib.pyplot as plt
# plt.hist(d0.iloc[:,20])
# plt.hist(d1.iloc[:,20])
# plt.show()
|
dongmengshi/easylearn | eslearn/visualization/lc_circleBarPlot2.py | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 3 22:29:21 2018
@author: lenovo
"""
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_file
#df=np.abs(pd.DataFrame(np.random.randn(5,3),index=['age','sex','height','weigh','BMI'], columns=['SZ','BD','MDD'])*50)
df = pd.read_excel(r'D:\others\彦鸽姐\0630.xlsx')
df = df.set_index(df.columns[0])
df = df.iloc[:,[1,3,5]]
sca = 100
df = df * sca
# 统计数据维度
nCol=np.shape(df)[1]
nRow=np.shape(df)[0]
nameCol=df.columns
maxNum=np.max(df.values)
minNum=np.min(df.values)
nCircle=5 # 画几条等高线
# 图像基本配置
width = 2000
height = 2000
inner_radius = 100
outer_radius = 200
#配置各个扇形区域的颜色
#fanShapedColors = pd.DataFrame([['bisque']*3+['lightgreen']*4+['goldenrod']*2+
# ['skyblue']*6+['thistle']*5+['pink']*1]) # 配置颜色
#fanShapedColors =[fanShapedColors.iloc[0,x] for x in range(len(df))]
fanShapedColors = ['w']*len(df)
# 配置每个bar的颜色
barColors = ['green','red','peru'] # 配置颜色
# =============================================================================
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / nCircle
# 整体配置
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=(-420, 420), y_range=(-420, 420),
min_border=0, outline_line_color="black",
background_fill_color="#f0e1d2")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
# annular wedges
angles = np.pi / 2 - big_angle / 2 - np.arange(0,nRow,1) * big_angle #计算角度
p.annular_wedge(0, 0, inner_radius, outer_radius,
-big_angle + angles, angles, color=fanShapedColors)
#
# small wedges
p.annular_wedge(0, 0, inner_radius, inner_radius+df.iloc[:,0],
-big_angle + angles + 1 * small_angle, -big_angle + angles + 1.5 * small_angle,
color=barColors[0])
p.annular_wedge(0, 0, inner_radius, inner_radius+df.iloc[:,1],
-big_angle + angles + 2* small_angle, -big_angle + angles + 2.5 * small_angle,
color=barColors[1])
p.annular_wedge(0, 0, inner_radius, inner_radius+df.iloc[:,2],
-big_angle + angles + 3 * small_angle, -big_angle + angles + 3.5 * small_angle,
color=barColors[2])
#
#p.annular_wedge(0, 0, inner_radius, inner_radius+df.iloc[:,3],
# -big_angle + angles + 4 * small_angle, -big_angle + angles + 4.5 * small_angle,
# color=barColors[3])
# =============================================================================
## 绘制等高线和刻度
radii = np.around(np.linspace(inner_radius,outer_radius,nCircle), decimals=1)
labels = radii / sca
p.circle(0, 0, radius=radii, fill_color=None, line_color="white")
p.text(0, radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="10pt", text_align="center", text_baseline="middle")
## 分割线
p.annular_wedge(0, 0, inner_radius, outer_radius + 10,
-big_angle + angles, -big_angle + angles, color="black")
### 细菌标签
xr = (50+radii[len(radii)-1]) * np.cos(np.array(-big_angle / 2 + angles))
yr = (50+radii[len(radii)-1]) * np.sin(np.array(-big_angle / 2 + angles))
label_angle = np.array(-big_angle / 2 + angles)
label_angle[label_angle < -np.pi / 2] += np.pi # easier to read labels on the left side
## 绘制各个细菌的名字
p.text(xr, yr, df.index, angle=label_angle,
text_font_size="12pt", text_align="center", text_baseline="middle")
#扇形区域颜色的legend
p.circle([-40, -40], [-370, -390], color=np.unique(list(fanShapedColors)), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in barColors],
text_font_size="7pt", text_align="left", text_baseline="middle")
# 分组的legend
p.rect([-30, -30, -30,-30], [20, 10, 0,-10], width=20, height=8,
color=list(barColors))
# 配置中间标签文字、文字大小、文字对齐方式
p.text([-15, -15, -15,-15], [20, 10, 0,-10], text=list(nameCol),
text_font_size="10pt", text_align="left", text_baseline="middle")
# ============================================================================
# show
output_file("circleBar.html", title="circleBar")
show(p) |
dongmengshi/easylearn | eslearn/machine_learning/regression/lc_elasticNet.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 16:31:20 2018
This is a class of enastic net regression
Refer to {"Individualized Prediction of Reading Comprehension
Ability Using Gray Matter Volume"}
{Elastic net with nesting cross-validationt alpha=lambda l1_ratio=lasso 惩罚系数}
Input:
X: features
y: responses (continuous variable)
alphas: corresponds to the lambda (pow(np.e,np.linspace(-6,5,20)))
l1_ratio: when= 1 elastic net is the lasso penalty (np.linspace(0.2,1,10))
n_jobs: number of CPUs to perform CV
Output:
predict=predict responses/dependent variable
y_sorted=sorted original responses
Coef=predict coef
r=Pearson's correlation coefficients between y_sorted and Predict
@author: <NAME>
Email:<EMAIL>
"""
# search path append
import sys
sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\utils')
### import module
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import ElasticNetCV
import sys,time
import numpy as np
import scipy.stats.stats as stats
# my module
from lc_splitX_accord_sorted_y import splitX_accord_sorted_y
import lc_pca as pca
import lc_scaler as scl
# sample data: synthetic sparse regression problem used for manual testing
n_sample,n_features =1000,5000
np.random.seed(0)
coef = np.random.random(n_features)
#coef = np.array([[.8],[.1],[0.2]])
coef[100:] = 0.0  # only the first 100 features carry signal
X = np.random.random([n_sample, n_features])
np.random.randint(0,10)  # NOTE(review): return value discarded — looks like leftover code
y = np.dot(X, coef)+np.random.random([n_sample,])
#a=np.corrcoef(y[:,0],X[:,2])
# Manual usage example:
#import lc_elasticNet as enet
#e=enet.elasticNet()
#Predict,y_sorted,Coef,r,p=e.elasticNetCV_Outer(X,y)
#scatter_LC(Predict,y_sorted)
#### class and def
# class
class elasticNet():
    """Elastic-net regression with nested cross-validation.

    The outer K-fold loop estimates generalization performance; the inner
    ``ElasticNetCV`` loop tunes ``alpha`` (lambda) and ``l1_ratio`` on the
    training folds only.

    Fix vs. the original: timing used ``time.clock()``, which was removed in
    Python 3.8; ``time.perf_counter()`` is the documented replacement.
    """

    def __init__(self, k=3,
                 alphas=pow(np.e, np.linspace(-6, 5, 20)),
                 l1_ratio=np.linspace(0.2, 1, 10),
                 num_jobs=10,
                 scale_method='StandardScaler',
                 pca_n_component=0.9,
                 permutation=0):
        # k: number of outer CV folds
        # alphas: candidate lambda values for the inner CV grid
        # l1_ratio: candidate L1/L2 mixing ratios (1.0 == pure lasso)
        # num_jobs: CPUs used by the inner ElasticNetCV
        # scale_method: scaling method name passed to lc_scaler
        # pca_n_component: PCA variance ratio to keep; 0 disables PCA
        # permutation: when truthy, suppress progress/timing printouts
        self.k = k
        self.alphas = alphas
        self.l1_ratio = l1_ratio
        self.num_jobs = num_jobs
        self.scale_method = scale_method
        self.pca_n_component = pca_n_component
        self.permutation = permutation

    def elasticNetCV_Outer(self, X, y):
        """Outer CV loop.

        Returns:
            Predict: concatenated out-of-fold predictions.
            y_sorted: the true responses in the same (fold) order.
            Coef: feature-space coefficients, one column per outer fold.
            r, p: Pearson correlation between Predict and y_sorted.
        """
        # sklearn expects a 1-D label vector
        y = np.reshape(np.array(y), len(y))
        n_samples, n_features = X.shape
        Predict = np.array([])
        y_sorted = np.array([])
        coef_columns = []  # per-fold coefficient columns; hstack'ed at the end
        if not self.permutation:
            start_time = time.perf_counter()  # time.clock() removed in py3.8
        # fold indices derived from the sorted responses
        ind_orig = splitX_accord_sorted_y(y, self.k)
        ind_orig = np.array(ind_orig)
        for i in range(self.k):
            if not self.permutation:
                print('{}/{}'.format(i, self.k))
            # 1. split into training and test folds
            test_X = X[ind_orig[i], :]
            train_X = np.delete(X, [ind_orig[i]], axis=0)
            test_y = y[ind_orig[i]]
            train_y = np.delete(y, [ind_orig[i]], axis=0)
            # 2. scale (fitted on the training fold only, applied to both)
            train_X, model = scl.scaler(train_X, self.scale_method)
            test_X = model.transform(test_X)
            # 3. dimension reduction (optional)
            if self.pca_n_component != 0:
                train_X, trained_pca = pca.pca(train_X, self.pca_n_component)
                test_X = trained_pca.transform(test_X)
            # 4. hyper-parameter optimization on the training fold
            (optimized_alpha, optimized_l1_ratio, _, _) = \
                self.elasticNetCV_Inner(train_X, train_y,
                                        self.alphas,
                                        self.l1_ratio,
                                        self.num_jobs)
            # 5. refit with the optimized parameters, then test
            enet = self.elasticNet_OneFold(train_X, train_y,
                                           optimized_alpha, optimized_l1_ratio)
            Predict = np.append(Predict, enet.predict(test_X))
            coef = enet.coef_.reshape(-1, 1)
            if self.pca_n_component != 0:
                # project the PCA-space weights back into feature space
                coef = trained_pca.inverse_transform(coef.reshape(1, -1))
                coef = coef.reshape(-1, 1)
            coef_columns.append(coef)
            y_sorted = np.append(y_sorted, test_y)
        if not self.permutation:
            end_time = time.perf_counter()
            print('running time is {:.1f} second'.format(end_time - start_time))
        r, p = stats.pearsonr(Predict, y_sorted)
        if not self.permutation:
            print('pearson\'s correlation coefficient r={:.3},p={}'.format(r, p))
        # The original pre-allocated an uninitialized column with np.empty and
        # deleted it afterwards; collecting the columns avoids that entirely.
        Coef = np.hstack(coef_columns)
        return Predict, y_sorted, Coef, r, p

    def elasticNetCV_Inner(self, X, y, alphas, l1_ratio, num_jobs):
        """Inner CV: tune alpha/l1_ratio with sklearn's ElasticNetCV."""
        enet = ElasticNetCV(cv=5,
                            random_state=0,
                            alphas=alphas,
                            l1_ratio=l1_ratio,
                            n_jobs=num_jobs)
        enet.fit(X, y)
        optimized_alpha = enet.alpha_
        optimized_l1_ratio = enet.l1_ratio_
        optimized_coef = enet.coef_
        predict = enet.predict(X)
        return optimized_alpha, optimized_l1_ratio, optimized_coef, predict

    def elasticNet_OneFold(self, X, y, alpha, l1_ratio):
        """Fit one elastic net with fixed hyper-parameters; return the model."""
        enet = ElasticNet(random_state=0, alpha=alpha, l1_ratio=l1_ratio)
        enet.fit(X, y)
        return enet
dongmengshi/easylearn | eslearn/utils/coveredPercentage.py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 19:10:02 2018
@author: lenovo
"""
from lc_read_nii import read_sigleNii_LC
import sys
sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\utils')
img_path_3d = r'D:\其他\陈逸凡\aLL_mask3D.nii'
img_path_alff = r'D:\其他\陈逸凡\all_maskALFF.nii'
data_3d = read_sigleNii_LC(img_path_3d)
data_alff = read_sigleNii_LC(img_path_alff)
|
dongmengshi/easylearn | eslearn/statistical analysis/lc_chisqure.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 26 15:31:57 2018
构成比的卡方检验
Chi-square test of independence of variables in a contingency table.
@author: lenovo
"""
from scipy.stats import chi2_contingency
from scipy.stats import chi2
import numpy as np
def lc_chi2(data):
    """Chi-square test of independence for a contingency table.

    Args:
        data: 2-D array-like contingency table,
              e.g. np.array([[10, 20], [10, 30], [20, 400]]).

    Returns:
        (chi2value, pvalue) as computed by scipy's chi2_contingency.
    """
    res = chi2_contingency(data)
    return (res[0], res[1])
def lc_chisqure(obs, tt):
    """Chi-square test for a difference in proportions across groups.

    Args:
        obs: observed event counts, one entry per group.
        tt:  total number of cases per group.

    NOTE: pass array-like numbers; both arguments are converted with
    np.array. The results agree with SPSS.

    Returns:
        (chisqurevalue, p)
    """
    tt = np.array(tt)
    hits = np.array(obs)
    misses = tt - hits
    table = np.vstack([hits, misses]).T  # groups x (hit, miss)
    n_row, n_col = table.shape
    df = (n_row - 1) * (n_col) / 2  # == n_row - 1, since n_col is always 2
    # pooled proportions under the null hypothesis
    frq_hit = np.sum(hits) / np.sum(tt)
    frq_miss = np.sum(misses) / np.sum(tt)
    expected = np.vstack([tt * frq_hit, tt * frq_miss]).T
    chisqurevalue = np.sum(((table - expected) ** 2) / expected)
    p = 1 - chi2.cdf(chisqurevalue, df=df)
    return chisqurevalue, p
if __name__ == "__main__":
tt = [120, 81 - 20]
obs = [31, 67 - 20]
print(lc_chisqure(obs, tt))
|
dongmengshi/easylearn | eslearn/utils/lc_autopep8.py | # -*- coding: utf-8 -*-
"""
Created on Tue May 7 21:14:51 2019
@author: lenovo
"""
import subprocess
import os
import numpy as np
def my_autopep8(py_name):
    """Run autopep8 in-place (aggressive, twice) on a single .py file."""
    cmd = "autopep8 --in-place --aggressive --aggressive" + " " + py_name
    print('converting {}...'.format(py_name))
    returncode = subprocess.call(cmd, shell=True)
    # subprocess.call returns 0 on success
    if returncode:
        print("Failed!")
    else:
        print("Succeed!")
def my_autopep8_folder(folder):
    """
    Run autopep8 (in-place, aggressive) on every .py file in ``folder``.

    Fixes vs. the original:
    - the ``folder`` argument was immediately overwritten by a hard-coded
      path, so the parameter was silently ignored;
    - the commands used bare file names, which only worked when the current
      working directory happened to be ``folder``; full paths are used now;
    - ``'.py' in filename`` also matched .pyc/.pyo files; ``endswith``
      matches only real Python sources.
    """
    file_name = os.listdir(folder)
    py_name = [filename for filename in file_name if filename.endswith('.py')]
    all_cmd = ["autopep8 --in-place --aggressive --aggressive" +
               " " + os.path.join(folder, pyname) for pyname in py_name]
    num_py = np.arange(1, len(py_name) + 1)
    len_py = len(py_name)
    for i, cmd, pyname in zip(num_py, all_cmd, py_name):
        print('converting {} ({}/{})...'.format(pyname, i, len_py))
        state = subprocess.call(cmd, shell=True)
        if not state:
            print("Succeed!\n")
        else:
            print("Failed!\n")
    else:
        # for-else: runs after the loop finishes without a break
        print("Done!")
if __name__ == '__main__':
    # my_autopep8('lc_delect_sensitive_info.py')
    # Reformat every .py file under the given folder in place.
    my_autopep8_folder(r'F:\黎超\dynamicFC\Code\lc_rsfmri_tools_python\Utils')
|
dongmengshi/easylearn | eslearn/visualization/lc_ancova.py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 31 20:03:53 2018
ancova
@author: lenovo
"""
from __future__ import print_function
import statsmodels.api as sm
import statsmodels.formula.api as smf
star98 = sm.datasets.star98.load_pandas().data
formula = 'SUCCESS ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT + \
PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
dta = star98[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP',
'PCTCHRT', 'PCTYRRND', 'PERMINTE', 'AVYRSEXP', 'AVSALK',
'PERSPENK', 'PTRATIO', 'PCTAF']].copy()
endog = dta['NABOVE'] / (dta['NABOVE'] + dta.pop('NBELOW'))
del dta['NABOVE']
dta['SUCCESS'] = endog
mod1 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod1.summary() |
dongmengshi/easylearn | eslearn/SSD_classification/Data_Inspection/lc_preprocess_for_COBRE.py | <filename>eslearn/SSD_classification/Data_Inspection/lc_preprocess_for_COBRE.py
"""
This script is used to transform the UCLA dataset into .npy format.
1.Transform the .mat files to one .npy file
2. Give labels to each subject, concatenate at the first column
"""
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import numpy as np
import pandas as pd
import os
from eslearn.utils.lc_read_write_Mat import read_mat
# Inputs
matroot = r'D:\WorkStation_2018\SZ_classification\Data\SelectedFC_COBRE' # all mat files directory
scale = r'H:\Data\精神分裂症\COBRE\COBRE_phenotypic_data.csv' # whole scale path
n_node = 246 # number of nodes in the mat network
# Transform the .mat files to one .npy file
allmatname = os.listdir(matroot)
allmatpath = [os.path.join(matroot, matpath) for matpath in allmatname]
mask = np.triu(np.ones(n_node),1)==1
allmat = [read_mat(matpath)[mask].T for matpath in allmatpath]
allmat = np.array(allmat,dtype=np.float32)
# Give labels to each subject, concatenate at the first column
allmatname = pd.DataFrame(allmatname)
allsubjname = allmatname.iloc[:,0].str.findall(r'[1-9]\d*')
allsubjname = pd.DataFrame([name[0] for name in allsubjname])
scale_data = pd.read_csv(scale,sep=',',dtype='str')
print(scale_data)
diagnosis = pd.merge(allsubjname,scale_data,left_on=0,right_on='ID')[['ID','Subject Type']]
scale_data = pd.merge(allsubjname,scale_data,left_on=0,right_on='ID')
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Control'] = 0
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Patient'] = 1
include_loc = diagnosis['Subject Type'] != 'Disenrolled'
diagnosis = diagnosis[include_loc.values]
allmat = allmat[include_loc.values]
allsubjname = allsubjname[include_loc.values]
diagnosis = np.array(np.int32(diagnosis))
allmat_plus_label = np.concatenate([diagnosis, allmat], axis=1)
print(allmat_plus_label.shape)
# np.save(r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Data\COBRE.npy',allmat_plus_label)
#%% Extract covariances: age and sex
cov = pd.merge(allsubjname,scale_data,left_on=0,right_on='ID')[['ID','Subject Type', 'Current Age', 'Gender']]
cov[['ID','Subject Type']] = diagnosis
cov['Gender'] = cov['Gender'] == 'Male'
cov = pd.DataFrame(np.int64(cov))
cov.columns = ['folder', 'diagnosis', 'age', 'sex']
cov.to_csv(r'D:\WorkStation_2018\SZ_classification\Scale\cov_COBRE.txt', index=False)
|
dongmengshi/easylearn | eslearn/feature_engineering/feature_preprocessing/el_filter_anova.py | from sklearn.feature_selection import f_classif
def dimReduction_filter(feature_train, label_train, feature_test, p_thrd = 0.05):
    """Univariate filter feature selection using one-way ANOVA (F-test).

    Keeps only the features whose ANOVA p-value on the training set is below
    ``p_thrd``; the identical column mask is applied to the test set.

    Returns:
        (feature_train, feature_test, mask_selected)
    """
    _, pvalues = f_classif(feature_train, label_train)
    mask_selected = pvalues < p_thrd
    selected_train = feature_train[:, mask_selected]
    selected_test = feature_test[:, mask_selected]
    return selected_train, selected_test, mask_selected
dongmengshi/easylearn | eslearn/utils/lc_read_h5py_value.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 10 10:03:45 2018
@author: lenovo
"""
# import
import h5py
import numpy as np
# def
def read_h5py_value(file_name='enet', dataset_name='Predict'):
    """Collect ``dataset_name`` from every group of ``<file_name>.hdf5``.

    The values of all groups are concatenated and reshaped into a 2-D array
    with ``col_num`` columns.

    Fixes vs. the original:
    - the file was opened in append mode ('a'), which silently *creates* an
      empty file when the name is mistyped; read-only ('r') is used now;
    - ``f.close()`` was placed after ``return`` and therefore never executed;
      a ``with`` block guarantees the handle is closed.
    """
    value = np.array([])
    col_num = 0
    with h5py.File(file_name + ".hdf5", "r") as f:
        for g in f.keys():
            d = f[g]
            value = np.append(value, d[dataset_name])
            try:
                col_num = d[dataset_name].shape[0]
            except BaseException:
                # scalar dataset: fall back to the flattened length
                col_num = value.size
    value = value.reshape([value.size // col_num, col_num])
    return value
|
dongmengshi/easylearn | eslearn/machine_learning/regression/lc_nx.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 17 22:21:28 2019
@author: lenovo
"""
import matplotlib.pyplot as plt
import networkx as nx
G=nx.path_graph(400)
G.add_path([10,11,12])
nx.draw(G,with_labels=True,label_size=1000,node_size=1000,font_size=20)
plt.show()
for c in sorted(nx.connected_components(G),key=len,reverse=True):
print(c) #看看返回来的是什么?结果是{0,1,2,3}
print(type(c)) #类型是set
print(len(c)) #长度分别是4和3(因为reverse=True,降序排列)
largest_components=max(nx.connected_components(G),key=len) # 高效找出最大的联通成分,其实就是sorted里面的No.1
print(largest_components) #找出最大联通成分,返回是一个set{0,1,2,3}
print(len(largest_components)) #4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.