AshmithaIRRI committed on
Commit 47b46cd · verified · 1 Parent(s): 34d4d0e

Upload 2 files

Files changed (2)
  1. deepmap_models_final.py +503 -0
  2. requirement1.txt +195 -0
deepmap_models_final.py ADDED
@@ -0,0 +1,503 @@
+ # -*- coding: utf-8 -*-
+ """
+ Created on Wed Jan 15 10:25:34 2025
+ 
+ @author: Ashmitha
+ """
+ 
+ #----------------------------------------------------------- Libraries ----------------------------------------------------------------------------
+ import io
+ 
+ import gradio as gr
+ import numpy as np
+ import pandas as pd
+ from scipy.stats import pearsonr
+ from sklearn.ensemble import RandomForestRegressor
+ from sklearn.feature_selection import SelectFromModel
+ from sklearn.metrics import mean_squared_error, r2_score
+ from sklearn.model_selection import KFold
+ from sklearn.preprocessing import MinMaxScaler, StandardScaler
+ from tensorflow.keras import regularizers
+ from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
+ from tensorflow.keras.layers import (GRU, Conv1D, MaxPooling1D, Dense, Flatten,
+                                      Dropout, BatchNormalization, LeakyReLU)
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.optimizers import Adam
+ from xgboost import XGBRegressor
+ #-------------------------------- Random Forest for feature selection -------------------------------------------
+ def RandomForestFeatureSelection(trainX, trainy, num_features=60):
+     """Return the column indices of the num_features most important markers."""
+     rf = RandomForestRegressor(n_estimators=1000, random_state=50)
+     rf.fit(trainX, trainy)
+     importances = rf.feature_importances_
+     indices = np.argsort(importances)[-num_features:]
+     return indices
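+ # The helper above returns column indices rather than a fitted selector. A minimal
+ # usage sketch (hypothetical helper; the functions below rely on SelectFromModel instead):
+ def _feature_selection_example(trainX, trainy, testX):
+     """Illustrative only: apply the top-60 marker indices to both matrices."""
+     indices = RandomForestFeatureSelection(trainX, trainy, num_features=60)
+     return trainX[:, indices], testX[:, indices]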
+ #------------------------------------------------------------------ GRU model --------------------------------------------------
+ def GRUModel(trainX, trainy, testX, testy, epochs=1000, batch_size=64, learning_rate=0.0001,
+              l1_reg=0.001, l2_reg=0.001, dropout_rate=0.2, feature_selection=True):
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(trainX, trainy)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         trainX = selector.transform(trainX)
+         if testX is not None:
+             testX = selector.transform(testX)
+         print(f"Selected {trainX.shape[1]} features based on feature importance")
+ 
+     scaler = MinMaxScaler()
+     trainX_scaled = scaler.fit_transform(trainX)
+     if testX is not None:
+         testX_scaled = scaler.transform(testX)
+ 
+     # Scale the target as well; predictions are inverse-transformed before returning.
+     target_scaler = MinMaxScaler()
+     trainy_scaled = target_scaler.fit_transform(trainy.reshape(-1, 1))
+ 
+     # Reshape to (samples, timesteps=1, features) as expected by the GRU layer.
+     trainX = trainX_scaled.reshape((trainX.shape[0], 1, trainX.shape[1]))
+     if testX is not None:
+         testX = testX_scaled.reshape((testX.shape[0], 1, testX.shape[1]))
+ 
+     model = Sequential()
+     model.add(GRU(512, input_shape=(trainX.shape[1], trainX.shape[2]), return_sequences=False,
+                   kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(Dense(256, kernel_initializer='he_normal',
+                     kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(BatchNormalization())
+     model.add(Dropout(dropout_rate))
+     model.add(LeakyReLU(alpha=0.1))
+ 
+     model.add(Dense(128, kernel_initializer='he_normal',
+                     kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(BatchNormalization())
+     model.add(Dropout(dropout_rate))
+     model.add(LeakyReLU(alpha=0.1))
+ 
+     model.add(Dense(64, kernel_initializer='he_normal',
+                     kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(BatchNormalization())
+     model.add(Dropout(dropout_rate))
+     model.add(LeakyReLU(alpha=0.1))
+ 
+     model.add(Dense(32, kernel_initializer='he_normal',
+                     kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(BatchNormalization())
+     model.add(Dropout(dropout_rate))
+     model.add(LeakyReLU(alpha=0.1))
+ 
+     # Non-negative output matches the MinMax-scaled target range.
+     model.add(Dense(1, activation="relu"))
+     model.compile(loss="mse", optimizer=Adam(learning_rate=learning_rate), metrics=["mse"])
+     learning_rate_reduction = ReduceLROnPlateau(monitor="val_loss", patience=10, verbose=1, factor=0.5, min_lr=1e-6)
+     early_stopping = EarlyStopping(monitor="val_loss", verbose=1, restore_best_weights=True, patience=10)
+     history = model.fit(trainX, trainy_scaled, epochs=epochs, batch_size=batch_size,
+                         validation_split=0.1, verbose=1,
+                         callbacks=[learning_rate_reduction, early_stopping])
+ 
+     # Predict once, then map the scaled outputs back to the original phenotype scale.
+     predicted_train = model.predict(trainX).flatten()
+     predicted_test = model.predict(testX).flatten() if testX is not None else None
+     predicted_train = target_scaler.inverse_transform(predicted_train.reshape(-1, 1)).flatten()
+     if predicted_test is not None:
+         predicted_test = target_scaler.inverse_transform(predicted_test.reshape(-1, 1)).flatten()
+     return predicted_train, predicted_test, history
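+ # Because the GRU is trained on MinMax-scaled phenotypes, its raw predictions live
+ # in [0, 1] and must be mapped back before computing metrics. A minimal sketch of
+ # the round trip (illustrative values, not pipeline data):
+ def _target_scaling_example():
+     y = np.array([3.2, 5.1, 4.4])
+     sc = MinMaxScaler()
+     y_scaled = sc.fit_transform(y.reshape(-1, 1))       # -> values in [0, 1]
+     y_back = sc.inverse_transform(y_scaled).flatten()   # -> original scale
+     return np.allclose(y, y_back)                       # True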
+ #---------------------------------------------------- CNN -----------------------------------------------
+ def CNNModel(trainX, trainy, testX, testy, epochs=1000, batch_size=64, learning_rate=0.0001,
+              l1_reg=0.0001, l2_reg=0.0001, dropout_rate=0.3, feature_selection=True):
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(trainX, trainy)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         trainX = selector.transform(trainX)
+         if testX is not None:
+             testX = selector.transform(testX)
+         print(f"Selected {trainX.shape[1]} features based on feature importance")
+ 
+     scaler = MinMaxScaler()
+     trainX_scaled = scaler.fit_transform(trainX)
+     if testX is not None:
+         testX_scaled = scaler.transform(testX)
+ 
+     # Reshape to (samples, features, 1) so each marker is one step with one channel.
+     trainX = trainX_scaled.reshape((trainX.shape[0], trainX.shape[1], 1))
+     if testX is not None:
+         testX = testX_scaled.reshape((testX.shape[0], testX.shape[1], 1))
+ 
+     model = Sequential()
+     model.add(Conv1D(512, kernel_size=3, activation='relu', input_shape=(trainX.shape[1], 1),
+                      kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(MaxPooling1D(pool_size=2))
+     model.add(Dropout(dropout_rate))
+ 
+     model.add(Conv1D(256, kernel_size=3, activation='relu',
+                      kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(MaxPooling1D(pool_size=2))
+     model.add(Dropout(dropout_rate))
+ 
+     model.add(Conv1D(128, kernel_size=3, activation='relu',
+                      kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(MaxPooling1D(pool_size=2))
+     model.add(Dropout(dropout_rate))
+ 
+     model.add(Flatten())
+     model.add(Dense(64, kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(LeakyReLU(alpha=0.1))
+     model.add(Dropout(dropout_rate))
+ 
+     model.add(Dense(1, activation='linear'))
+ 
+     model.compile(loss='mse', optimizer=Adam(learning_rate=learning_rate), metrics=['mse'])
+ 
+     learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=1, factor=0.5, min_lr=1e-6)
+     early_stopping = EarlyStopping(monitor='val_loss', verbose=1, restore_best_weights=True, patience=10)
+ 
+     history = model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, validation_split=0.1,
+                         verbose=1, callbacks=[learning_rate_reduction, early_stopping])
+ 
+     predicted_train = model.predict(trainX).flatten()
+     predicted_test = model.predict(testX).flatten() if testX is not None else None
+ 
+     return predicted_train, predicted_test, history
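+ # The CNN uses the opposite layout from the GRU: markers run along the sequence
+ # axis with a single channel, so Conv1D slides a length-3 kernel across adjacent
+ # markers and each MaxPooling1D halves the sequence length. A shape sketch
+ # (hypothetical sizes, not pipeline data):
+ def _cnn_input_shape_example():
+     X = np.zeros((100, 250))                       # 100 samples x 250 markers
+     X3d = X.reshape((X.shape[0], X.shape[1], 1))   # -> (100, 250, 1): (samples, steps, channels)
+     return X3d.shape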
+ #------------------------------------------------------------------- RFModel ---------------------------------------------------------
+ def RFModel(trainX, trainy, testX, testy, n_estimators=100, max_depth=None, feature_selection=True):
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(trainX, trainy)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         trainX = selector.transform(trainX)
+         if testX is not None:
+             testX = selector.transform(testX)
+         print(f"Selected {trainX.shape[1]} features based on feature importance")
+ 
+     scaler = MinMaxScaler()
+     trainX_scaled = scaler.fit_transform(trainX)
+     if testX is not None:
+         testX_scaled = scaler.transform(testX)
+ 
+     rf_model = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth, random_state=42)
+     # fit() returns the fitted estimator itself; it is kept so every model returns a third value.
+     history = rf_model.fit(trainX_scaled, trainy)
+ 
+     predicted_train = rf_model.predict(trainX_scaled)
+     predicted_test = rf_model.predict(testX_scaled) if testX is not None else None
+ 
+     return predicted_train, predicted_test, history
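+ # Unlike the Keras models, the "history" slot here is just the fitted forest. One
+ # thing it can be used for downstream (a sketch, not part of the pipeline) is
+ # ranking markers by importance:
+ def _rf_importance_example(fitted_rf, top_k=10):
+     """Illustrative only: indices of the top_k most important features, descending."""
+     return np.argsort(fitted_rf.feature_importances_)[-top_k:][::-1]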
+ #------------------------------------------------------------------------------ XGBoost ---------------------------------------------------------------
+ def XGBoostModel(trainX, trainy, testX, testy, learning_rate, min_child_weight,
+                  feature_selection=True, n_estimators=100, max_depth=None):
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(trainX, trainy)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         trainX = selector.transform(trainX)
+         if testX is not None:
+             testX = selector.transform(testX)
+         print(f"Selected {trainX.shape[1]} features based on feature importance")
+ 
+     scaler = MinMaxScaler()
+     trainX_scaled = scaler.fit_transform(trainX)
+     if testX is not None:
+         testX_scaled = scaler.transform(testX)
+ 
+     # Pass the hyperparameters through and fit on the same scaled matrix used for prediction.
+     xgb_model = XGBRegressor(objective="reg:squarederror", random_state=42,
+                              learning_rate=learning_rate, min_child_weight=min_child_weight,
+                              n_estimators=n_estimators, max_depth=max_depth)
+     history = xgb_model.fit(trainX_scaled, trainy)
+ 
+     # Predictions
+     predicted_train = xgb_model.predict(trainX_scaled)
+     predicted_test = xgb_model.predict(testX_scaled) if testX is not None else None
+ 
+     return predicted_train, predicted_test, history
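+ # If a hyperparameter search were wanted instead of the fixed values (learning_rate
+ # 0.01, max_depth 10, n_estimators 100, min_child_weight 5), a minimal sketch using
+ # scikit-learn's GridSearchCV over that grid (hypothetical helper) would be:
+ def _xgb_grid_search_example(trainX, trainy):
+     from sklearn.model_selection import GridSearchCV
+     grid = {"learning_rate": [0.01], "max_depth": [10],
+             "n_estimators": [100], "min_child_weight": [5]}
+     search = GridSearchCV(XGBRegressor(objective="reg:squarederror", random_state=42),
+                           grid, cv=3, scoring="neg_mean_squared_error")
+     search.fit(trainX, trainy)
+     return search.best_params_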
+ #---------------------------------------- Reading uploaded files ----------------------------------------------------------------------------------------
+ def read_csv_file(uploaded_file):
+     """Read a CSV from a Gradio upload, whether it arrives as raw bytes or a temp-file path."""
+     if uploaded_file is not None:
+         if hasattr(uploaded_file, 'data'):
+             return pd.read_csv(io.BytesIO(uploaded_file.data))
+         elif hasattr(uploaded_file, 'name'):
+             return pd.read_csv(uploaded_file.name)
+     return None
+ #----------------------------------------------------------------- Calculate TOPSIS score --------------------------------------------------------
+ def calculate_topsis_score(df):
+     metrics = df[['Train_MSE', 'Train_RMSE', 'Train_R2', 'Train_Corr']].dropna()  # ensure no NaN values
+     norm_metrics = metrics / np.sqrt((metrics ** 2).sum(axis=0))
+ 
+     ideal_best = pd.Series(index=norm_metrics.columns, dtype=float)
+     ideal_worst = pd.Series(index=norm_metrics.columns, dtype=float)
+ 
+     # Error metrics are costs: lower is better.
+     for col in ['Train_MSE', 'Train_RMSE']:
+         ideal_best[col] = norm_metrics[col].min()
+         ideal_worst[col] = norm_metrics[col].max()
+ 
+     # Fit metrics are benefits: higher is better.
+     for col in ['Train_R2', 'Train_Corr']:
+         ideal_best[col] = norm_metrics[col].max()
+         ideal_worst[col] = norm_metrics[col].min()
+ 
+     dist_to_best = np.sqrt(((norm_metrics - ideal_best) ** 2).sum(axis=1))
+     dist_to_worst = np.sqrt(((norm_metrics - ideal_worst) ** 2).sum(axis=1))
+ 
+     topsis_score = dist_to_worst / (dist_to_best + dist_to_worst)
+     df['TOPSIS_Score'] = np.nan
+     df.loc[metrics.index, 'TOPSIS_Score'] = topsis_score  # assign TOPSIS scores
+     return df
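+ # TOPSIS in one toy example (assumed numbers): two models scored on two cost
+ # columns (MSE/RMSE, lower is better) and two benefit columns (R2/correlation,
+ # higher is better).
+ def _topsis_toy_example():
+     df = pd.DataFrame({
+         'Train_MSE':  [4.0, 9.0],
+         'Train_RMSE': [2.0, 3.0],
+         'Train_R2':   [0.8, 0.6],
+         'Train_Corr': [0.9, 0.77],
+     })
+     scored = calculate_topsis_score(df.copy())
+     # Model 0 dominates on every column, so its score is 1.0 and model 1's is 0.0.
+     return scored['TOPSIS_Score'].tolist()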
+ #--------------------------------------------------- Nested cross-validation ---------------------------------------------------------------------------
+ def NestedKFoldCrossValidation(training_data, training_additive, testing_data, testing_additive,
+                                training_dominance, testing_dominance, epochs, learning_rate,
+                                min_child_weight, batch_size=64, outer_n_splits=2, inner_n_splits=2,
+                                output_file='cross_validation_results.csv',
+                                predicted_phenotype_file='predicted_phenotype.csv', feature_selection=True):
+     if 'phenotypes' not in training_data.columns:
+         raise ValueError("Training data does not contain the 'phenotypes' column.")
+ 
+     # Drop the ID column from the additive and dominance components.
+     training_additive = training_additive.iloc[:, 1:]
+     testing_additive = testing_additive.iloc[:, 1:]
+     training_dominance = training_dominance.iloc[:, 1:]
+     testing_dominance = testing_dominance.iloc[:, 1:]
+ 
+     # Merge training and testing data with the additive and dominance components.
+     training_data_merged = pd.concat([training_data, training_additive, training_dominance], axis=1)
+     testing_data_merged = pd.concat([testing_data, testing_additive, testing_dominance], axis=1)
+ 
+     phenotypic_info = training_data['phenotypes'].values
+     phenotypic_test_info = testing_data['phenotypes'].values if 'phenotypes' in testing_data.columns else None
+     sample_ids = testing_data.iloc[:, 0].values
+ 
+     training_genotypic_data_merged = training_data_merged.iloc[:, 2:].values
+     testing_genotypic_data_merged = testing_data_merged.iloc[:, 2:].values
+ 
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(training_genotypic_data_merged, phenotypic_info)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         training_genotypic_data_merged = selector.transform(training_genotypic_data_merged)
+         testing_genotypic_data_merged = selector.transform(testing_genotypic_data_merged)
+         print(f"Selected {training_genotypic_data_merged.shape[1]} features based on importance.")
+ 
+     scaler = StandardScaler()
+     training_genotypic_data_merged = scaler.fit_transform(training_genotypic_data_merged)
+     testing_genotypic_data_merged = scaler.transform(testing_genotypic_data_merged)
+ 
+     outer_kf = KFold(n_splits=outer_n_splits)
+ 
+     results = []
+     all_predicted_phenotypes = []
+ 
+     def calculate_metrics(true_values, predicted_values):
+         mse = mean_squared_error(true_values, predicted_values)
+         rmse = np.sqrt(mse)
+         r2 = r2_score(true_values, predicted_values)
+         corr = pearsonr(true_values, predicted_values)[0]
+         return mse, rmse, r2, corr
+ 
+     models = [
+         ('GRUModel', GRUModel),
+         ('CNNModel', CNNModel),
+         ('RFModel', RFModel),
+         ('XGBoostModel', XGBoostModel)
+     ]
+ 
+     for outer_fold, (outer_train_index, outer_test_index) in enumerate(outer_kf.split(phenotypic_info), 1):
+         outer_trainX = training_genotypic_data_merged[outer_train_index]
+         outer_trainy = phenotypic_info[outer_train_index]
+ 
+         # The uploaded testing set is scored in full on every outer fold.
+         outer_testX = testing_genotypic_data_merged
+         outer_testy = phenotypic_test_info
+ 
+         for model_name, model_func in models:
+             # Note: each model function also applies its own feature selection by default.
+             print(f"Running model: {model_name} for fold {outer_fold}")
+             if model_name in ['GRUModel', 'CNNModel']:
+                 predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy, epochs=epochs, batch_size=batch_size)
+             elif model_name == 'RFModel':
+                 predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy)
+             else:
+                 predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy, learning_rate, min_child_weight)
+ 
+             mse_train, rmse_train, r2_train, corr_train = calculate_metrics(outer_trainy, predicted_train)
+             mse_test, rmse_test, r2_test, corr_test = calculate_metrics(outer_testy, predicted_test) if outer_testy is not None else (None, None, None, None)
+ 
+             results.append({
+                 'Model': model_name,
+                 'Fold': outer_fold,
+                 'Train_MSE': mse_train,
+                 'Train_RMSE': rmse_train,
+                 'Train_R2': r2_train,
+                 'Train_Corr': corr_train,
+                 'Test_MSE': mse_test,
+                 'Test_RMSE': rmse_test,
+                 'Test_R2': r2_test,
+                 'Test_Corr': corr_test
+             })
+ 
+             if predicted_test is not None:
+                 predicted_test_df = pd.DataFrame({
+                     'Sample_ID': sample_ids,
+                     'Predicted_Phenotype': predicted_test,
+                     'Model': model_name
+                 })
+                 all_predicted_phenotypes.append(predicted_test_df)
+ 
+     results_df = pd.DataFrame(results)
+ 
+     avg_results_df = results_df.groupby('Model').agg({
+         'Train_MSE': 'mean',
+         'Train_RMSE': 'mean',
+         'Train_R2': 'mean',
+         'Train_Corr': 'mean',
+         'Test_MSE': 'mean',
+         'Test_RMSE': 'mean',
+         'Test_R2': 'mean',
+         'Test_Corr': 'mean'
+     }).reset_index()
+ 
+     # Rank the models with the module-level TOPSIS, which treats MSE/RMSE as
+     # costs and R2/correlation as benefits.
+     avg_results_df = calculate_topsis_score(avg_results_df)
+ 
+     avg_results_df.to_csv(output_file, index=False)
+ 
+     predicted_all_df = None
+     if all_predicted_phenotypes:
+         predicted_all_df = pd.concat(all_predicted_phenotypes, axis=0, ignore_index=True)
+         predicted_all_df.to_csv(predicted_phenotype_file, index=False)
+ 
+     return avg_results_df, predicted_all_df
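+ # Note on the CV layout: the outer KFold only varies which subset of the training
+ # genotypes each model is fit on, while inner_n_splits is accepted for API symmetry
+ # but not used. A sketch of how the outer indices are produced (illustrative sizes):
+ def _outer_fold_example(n_samples=10, n_splits=2):
+     folds = KFold(n_splits=n_splits)
+     return [len(train_idx) for train_idx, _ in folds.split(np.zeros(n_samples))]  # [5, 5]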
+ #-------------------------------------------------------------------- Gradio interface ---------------------------------------------------------------
+ def run_cross_validation(training_file, training_additive_file, testing_file, testing_additive_file,
+                          training_dominance_file, testing_dominance_file, feature_selection):
+     # Fixed hyperparameters for GUI runs.
+     epochs = 1000
+     batch_size = 64
+     outer_n_splits = 2
+     inner_n_splits = 2
+     min_child_weight = 5
+     learning_rate = 0.001
+ 
+     training_data = pd.read_csv(training_file.name)
+     training_additive = pd.read_csv(training_additive_file.name)
+     testing_data = pd.read_csv(testing_file.name)
+     testing_additive = pd.read_csv(testing_additive_file.name)
+     training_dominance = pd.read_csv(training_dominance_file.name)
+     testing_dominance = pd.read_csv(testing_dominance_file.name)
+ 
+     results, predicted_phenotypes = NestedKFoldCrossValidation(
+         training_data=training_data,
+         training_additive=training_additive,
+         testing_data=testing_data,
+         testing_additive=testing_additive,
+         training_dominance=training_dominance,
+         testing_dominance=testing_dominance,
+         epochs=epochs,
+         batch_size=batch_size,
+         outer_n_splits=outer_n_splits,
+         inner_n_splits=inner_n_splits,
+         learning_rate=learning_rate,
+         min_child_weight=min_child_weight,
+         feature_selection=feature_selection
+     )
+ 
+     results_file = "cross_validation_results.csv"
+     predicted_file = "predicted_phenotype.csv"
+     results.to_csv(results_file, index=False)
+     if predicted_phenotypes is not None:
+         predicted_phenotypes.to_csv(predicted_file, index=False)
+ 
+     return results_file, predicted_file
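+ # The Gradio callback above expects file objects exposing a .name path. To run the
+ # same pipeline from a script (a sketch; the CSV file names are placeholders):
+ def _headless_run_example():
+     results, preds = NestedKFoldCrossValidation(
+         training_data=pd.read_csv("training.csv"),
+         training_additive=pd.read_csv("training_additive.csv"),
+         testing_data=pd.read_csv("testing.csv"),
+         testing_additive=pd.read_csv("testing_additive.csv"),
+         training_dominance=pd.read_csv("training_dominance.csv"),
+         testing_dominance=pd.read_csv("testing_dominance.csv"),
+         epochs=10, learning_rate=0.001, min_child_weight=5, batch_size=64,
+     )
+     return results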
+ with gr.Blocks() as interface:
+     gr.Markdown("# DeepMap - An Integrated GUI for Genotype to Phenotype Prediction")
+ 
+     with gr.Row():
+         training_file = gr.File(label="Upload Training Data (CSV)")
+         training_additive_file = gr.File(label="Upload Training Additive Data (CSV)")
+         training_dominance_file = gr.File(label="Upload Training Dominance Data (CSV)")
+ 
+     with gr.Row():
+         testing_file = gr.File(label="Upload Testing Data (CSV)")
+         testing_additive_file = gr.File(label="Upload Testing Additive Data (CSV)")
+         testing_dominance_file = gr.File(label="Upload Testing Dominance Data (CSV)")
+ 
+     with gr.Row():
+         feature_selection = gr.Checkbox(label="Enable Feature Selection", value=True)
+ 
+     output1 = gr.File(label="Cross-Validation Results (CSV)")
+     output2 = gr.File(label="Predicted Phenotypes (CSV)")
+ 
+     submit_btn = gr.Button("Run DeepMap")
+     submit_btn.click(
+         run_cross_validation,
+         inputs=[
+             training_file, training_additive_file, testing_file,
+             testing_additive_file, training_dominance_file, testing_dominance_file,
+             feature_selection
+         ],
+         outputs=[output1, output2]
+     )
+ 
+ interface.launch()
requirement1.txt ADDED
@@ -0,0 +1,195 @@
+ _tflow_select == 2.3.0
+ abseil-cpp == 20230802.0
+ absl-py == 2.1.0
+ accelerate == 1.0.0
+ aiofiles == 23.2.1
+ aiohappyeyeballs == 2.4.0
+ aiohttp == 3.10.5
+ aiosignal == 1.2.0
+ altgraph == 0.17.4
+ annotated-types == 0.7.0
+ anyio == 4.6.2
+ astunparse == 1.6.3
+ async-timeout == 4.0.3
+ attrs == 23.1.0
+ be-great == 0.0.7
+ blas == 1.0
+ blinker == 1.6.2
+ bottleneck == 1.3.7
+ brotli-python == 1.0.9
+ bzip2 == 1.0.8
+ c-ares == 1.19.1
+ ca-certificates == 2024.9.24
+ cachetools == 5.3.3
+ catboost == 1.2.7
+ category-encoders == 2.6.4
+ certifi == 2024.8.30
+ cffi == 1.17.1
+ charset-normalizer == 3.4.1
+ click == 8.1.7
+ colorama == 0.4.6
+ contourpy == 1.3.0
+ cryptography == 43.0.0
+ cx-freeze == 7.2.8
+ cx-logging == 3.2.1
+ cycler == 0.12.1
+ datasets == 3.0.1
+ dill == 0.3.8
+ exceptiongroup == 1.2.2
+ fastapi == 0.115.4
+ ffmpy == 0.4.0
+ filelock == 3.16.1
+ flatbuffers == 2.0.0
+ fonttools == 4.54.1
+ frozenlist == 1.4.0
+ fsspec == 2024.6.1
+ gast == 0.4.0
+ giflib == 5.2.1
+ google-auth == 2.29.0
+ google-auth-oauthlib == 0.4.1
+ google-pasta == 0.2.0
+ gradio == 5.4.0
+ gradio-client == 1.4.2
+ grpc-cpp == 1.48.2
+ grpcio == 1.48.2
+ gtest == 1.14.0
+ h11 == 0.14.0
+ h5py == 3.11.0
+ hdf5 == 1.12.1
+ httpcore == 1.0.6
+ httpx == 0.27.2
+ huggingface-hub == 0.25.1
+ icc_rt == 2022.1.0
+ icu == 73.1
+ idna == 3.7
+ intel-openmp == 2023.1.0
+ jinja2 == 3.1.4
+ joblib == 1.4.2
+ jpeg == 9e
+ keras == 2.11.0
+ keras-preprocessing == 1.1.2
+ kiwisolver == 1.4.7
+ libclang == 18.1.1
+ libcurl == 8.9.1
+ libffi == 3.4.4
+ libpng == 1.6.39
+ libprotobuf == 3.20.3
+ libssh2 == 1.11.0
+ lief == 0.16.2
+ lightgbm == 4.5.0
+ markdown == 3.4.1
+ markdown-it-py == 3.0.0
+ markupsafe == 2.1.3
+ matplotlib == 3.9.2
+ mdurl == 0.1.2
+ mkl == 2023.1.0
+ mkl-service == 2.4.0
+ mkl_fft == 1.3.10
+ mkl_random == 1.2.7
+ ml-dtypes == 0.4.1
+ mpmath == 1.3.0
+ multidict == 6.0.4
+ multiprocess == 0.70.16
+ namex == 0.0.8
+ networkx == 3.3
+ numexpr == 2.8.7
+ numpy == 1.26.4
+ numpy-base == 1.26.4
+ oauthlib == 3.2.2
+ openssl == 3.0.15
+ opt_einsum == 3.3.0
+ optree == 0.13.0
+ orjson == 3.10.11
+ packaging == 24.1
+ pandas == 2.2.2
+ patsy == 0.5.6
+ pefile == 2023.2.7
+ pillow == 10.4.0
+ pip == 24.2
+ plotly == 5.24.1
+ protobuf == 3.19.6
+ psutil == 6.0.0
+ py2exe == 0.13.0.2
+ pyarrow == 17.0.0
+ pyasn1 == 0.4.8
+ pyasn1-modules == 0.2.8
+ pybind11-abi == 5
+ pycparser == 2.21
+ pydantic == 2.9.2
+ pydantic-core == 2.23.4
+ pydub == 0.25.1
+ pygments == 2.18.0
+ pyinstaller == 6.11.1
+ pyjwt == 2.8.0
+ pyopenssl == 24.2.1
+ pyparsing == 3.1.4
+ pysocks == 1.7.1
+ python == 3.10.15
+ python-dateutil == 2.9.0
+ python-flatbuffers == 24.3.25
+ python-graphviz == 0.20.3
+ python-multipart == 0.0.12
+ python-tzdata == 2023.3
+ pytz == 2024.1
+ pywin32-ctypes == 0.2.3
+ pyyaml == 6.0.2
+ re2 == 2022.04.01
+ regex == 2024.9.11
+ requests == 2.32.3
+ requests-oauthlib == 2.0.0
+ rich == 13.9.2
+ rsa == 4.7.2
+ ruff == 0.7.2
+ safehttpx == 0.1.1
+ safetensors == 0.4.5
+ scikit-learn == 1.5.2
+ scikit-learn-extra == 0.3.0
+ scipy == 1.13.1
+ seaborn == 0.13.2
+ semantic-version == 2.10.0
+ setuptools == 75.1.0
+ shellingham == 1.5.4
+ six == 1.16.0
+ snappy == 1.2.1
+ sniffio == 1.3.1
+ sqlite == 3.45.3
+ starlette == 0.41.2
+ statsmodels == 0.14.4
+ sympy == 1.13.3
+ tabgan == 2.2.1
+ tbb == 2021.8.0
+ tenacity == 9.0.0
+ tensorboard == 2.11.2
+ tensorboard-data-server == 0.6.0
+ tensorboard-plugin-wit == 1.8.1
+ tensorflow == 2.11.0
+ tensorflow-estimator == 2.11.0
+ tensorflow-intel == 2.11.0
+ tensorflow-io-gcs-filesystem == 0.31.0
+ termcolor == 2.1.0
+ tf-keras == 2.17.0
+ threadpoolctl == 3.5.0
+ tokenizers == 0.20.0
+ tomli == 2.2.1
+ tomlkit == 0.12.0
+ torch == 2.0.1
+ torchvision == 0.15.2
+ tqdm == 4.66.5
+ transformers == 4.45.2
+ typer == 0.12.5
+ typing_extensions == 4.11.0
+ tzdata == 2024b
+ urllib3 == 2.2.3
+ uvicorn == 0.32.0
+ vc == 14.40
+ vs2015_runtime == 14.40.33807
+ websockets == 12.0
+ werkzeug == 3.0.3
+ wheel == 0.44.0
+ win_inet_pton == 1.1.0
+ wrapt == 1.14.1
+ xgboost == 2.1.1
+ xxhash == 3.5.0
+ xz == 5.4.6
+ yarl == 1.11.0
+ zlib == 1.2.13
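+ # Note: this list mixes pip and conda package names. Entries such as python, blas,
+ # vc, vs2015_runtime, mkl, icc_rt, jpeg == 9e, and sqlite are conda packages (and
+ # "9e" is not a valid pip version string), so the file resolves cleanly with
+ # conda/mamba rather than with plain `pip install -r requirement1.txt`.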