AshmithaIRRI committed
Commit 5750b4f · verified · 1 Parent(s): 0b3f5ea

Update app.py

Files changed (1):
  1. app.py +90 -19
app.py CHANGED
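At a glance, the commit swaps the model's front end from a single-timestep GRU to a plain Dense layer, drops the unused feature_selection/top_k parameters, and points fit/predict at the scaled arrays; the superseded GRU implementation is retained as a commented-out block at the end of the diff. A minimal, self-contained sketch of the before/after front ends (n_features is a hypothetical stand-in for trainX.shape[1]; standard tensorflow.keras import paths are assumed, matching the layer names used in app.py):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GRU, Dense
from tensorflow.keras import regularizers

n_features = 15          # hypothetical feature count, stand-in for trainX.shape[1]
l1_reg = l2_reg = 0.001  # defaults from the GRUModel signature

# Before this commit: inputs were reshaped to (samples, 1, n_features) and fed to a GRU.
old_front = Sequential([GRU(512, input_shape=(1, n_features), return_sequences=False,
                            kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg))])

# After this commit: the scaled 2-D feature matrix feeds a Dense input layer directly,
# so the (samples, 1, features) reshape is no longer needed.
new_front = Sequential([Dense(512, input_shape=(n_features,), kernel_initializer='he_normal',
                              kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg))])

old_front.summary()
new_front.summary()

With a single timestep, the GRU's recurrence never sees temporal context, so the Dense replacement is a natural simplification.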
@@ -27,11 +27,9 @@ import matplotlib.pyplot as plt
 import seaborn as sns
 #------------------------------------------GRUModel-------------------------------------
 
+
 def GRUModel(trainX, trainy, testX=None, testy=None, epochs=1000, batch_size=64, learning_rate=0.0001,
-             l1_reg=0.001, l2_reg=0.001, dropout_rate=0.2, feature_selection=True, top_k=10):
-
-
-
+             l1_reg=0.001, l2_reg=0.001, dropout_rate=0.2):
 
     # Scale the input data
     scaler = MinMaxScaler()
@@ -42,22 +40,17 @@ def GRUModel(trainX, trainy, testX=None, testy=None, epochs=1000, batch_size=64,
     target_scaler = MinMaxScaler()
     trainy_scaled = target_scaler.fit_transform(trainy.reshape(-1, 1))
 
-    # Reshape inputs to (samples, timesteps, features)
-    trainX = trainX_scaled.reshape((trainX.shape[0], 1, trainX.shape[1]))
-    if testX is not None:
-        testX = testX_scaled.reshape((testX.shape[0], 1, testX.shape[1]))
-
     # Model definition
     model = Sequential()
-    model.add(GRU(512, input_shape=(trainX.shape[1], trainX.shape[2]), return_sequences=False,
-                  kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
-
-
-    model.add(Dense(512, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+
+    # Input Layer
+    model.add(Dense(512, input_shape=(trainX.shape[1],), kernel_initializer='he_normal',
+                    kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
     model.add(BatchNormalization())
     model.add(Dropout(dropout_rate))
     model.add(LeakyReLU(alpha=0.1))
 
+    # Hidden Layers
     model.add(Dense(256, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
     model.add(BatchNormalization())
     model.add(Dropout(dropout_rate))
@@ -77,9 +70,11 @@ def GRUModel(trainX, trainy, testX=None, testy=None, epochs=1000, batch_size=64,
     model.add(BatchNormalization())
     model.add(Dropout(dropout_rate))
     model.add(LeakyReLU(alpha=0.1))
-
-    model.add(Dense(1, activation="relu"))  # Output layer
 
+    # Output Layer
+    model.add(Dense(1, activation="relu"))
+
+    # Compile Model
     model.compile(loss='mse', optimizer=Adam(learning_rate=learning_rate), metrics=['mse'])
 
     # Callbacks
@@ -89,11 +84,12 @@ def GRUModel(trainX, trainy, testX=None, testy=None, epochs=1000, batch_size=64,
     ]
 
     # Train model
-    history = model.fit(trainX, trainy_scaled, epochs=epochs, batch_size=batch_size, validation_split=0.1, verbose=1, callbacks=callbacks)
+    history = model.fit(trainX_scaled, trainy_scaled, epochs=epochs, batch_size=batch_size, validation_split=0.1,
+                        verbose=1, callbacks=callbacks)
 
     # Predictions
-    predicted_train = model.predict(trainX).flatten()
-    predicted_test = model.predict(testX).flatten() if testX is not None else None
+    predicted_train = model.predict(trainX_scaled).flatten()
+    predicted_test = model.predict(testX_scaled).flatten() if testX is not None else None
 
     # Inverse transform predictions
     predicted_train = target_scaler.inverse_transform(predicted_train.reshape(-1, 1)).flatten()
@@ -102,6 +98,81 @@ def GRUModel(trainX, trainy, testX=None, testy=None, epochs=1000, batch_size=64,
 
     return predicted_train, predicted_test, history
 
+#def GRUModel(trainX, trainy, testX=None, testy=None, epochs=1000, batch_size=64, learning_rate=0.0001,
+#             l1_reg=0.001, l2_reg=0.001, dropout_rate=0.2, feature_selection=True, top_k=10):
+
+
+
+
+    # Scale the input data
+#    scaler = MinMaxScaler()
+#    trainX_scaled = scaler.fit_transform(trainX)
+#    testX_scaled = scaler.transform(testX) if testX is not None else None
+
+    # Scale the target variable
+#    target_scaler = MinMaxScaler()
+#    trainy_scaled = target_scaler.fit_transform(trainy.reshape(-1, 1))
+
+    # Reshape inputs to (samples, timesteps, features)
+#    trainX = trainX_scaled.reshape((trainX.shape[0], 1, trainX.shape[1]))
+#    if testX is not None:
+#        testX = testX_scaled.reshape((testX.shape[0], 1, testX.shape[1]))
+
+    # Model definition
+#    model = Sequential()
+#    model.add(GRU(512, input_shape=(trainX.shape[1], trainX.shape[2]), return_sequences=False,
+#                  kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+
+
+#    model.add(Dense(512, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+#    model.add(BatchNormalization())
+#    model.add(Dropout(dropout_rate))
+#    model.add(LeakyReLU(alpha=0.1))
+
+#    model.add(Dense(256, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+#    model.add(BatchNormalization())
+#    model.add(Dropout(dropout_rate))
+#    model.add(LeakyReLU(alpha=0.1))
+
+#    model.add(Dense(128, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+#    model.add(BatchNormalization())
+#    model.add(Dropout(dropout_rate))
+#    model.add(LeakyReLU(alpha=0.1))
+
+#    model.add(Dense(64, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+#    model.add(BatchNormalization())
+#    model.add(Dropout(dropout_rate))
+#    model.add(LeakyReLU(alpha=0.1))
+
+#    model.add(Dense(32, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+#    model.add(BatchNormalization())
+#    model.add(Dropout(dropout_rate))
+#    model.add(LeakyReLU(alpha=0.1))
+
+#    model.add(Dense(1, activation="relu"))  # Output layer
+
+#    model.compile(loss='mse', optimizer=Adam(learning_rate=learning_rate), metrics=['mse'])
+
+    # Callbacks
+#    callbacks = [
+#        ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=1, factor=0.5, min_lr=1e-6),
+#        EarlyStopping(monitor='val_loss', verbose=1, restore_best_weights=True, patience=10)
+#    ]
+
+    # Train model
+#    history = model.fit(trainX, trainy_scaled, epochs=epochs, batch_size=batch_size, validation_split=0.1, verbose=1, callbacks=callbacks)
+
+    # Predictions
+#    predicted_train = model.predict(trainX).flatten()
+#    predicted_test = model.predict(testX).flatten() if testX is not None else None
+
+    # Inverse transform predictions
+#    predicted_train = target_scaler.inverse_transform(predicted_train.reshape(-1, 1)).flatten()
+#    if predicted_test is not None:
+#        predicted_test = target_scaler.inverse_transform(predicted_test.reshape(-1, 1)).flatten()
+
+#    return predicted_train, predicted_test, history
+
 
 
 
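For reference, a minimal usage sketch of the revised GRUModel as it stands after this commit. The function name, signature, and (predicted_train, predicted_test, history) return values come from the diff above; the import path, the synthetic data shapes, and the reduced epoch count are illustrative assumptions:

import numpy as np
from app import GRUModel  # assumed import path; importing app.py may execute the rest of the app

# Hypothetical data: 200 training and 50 test samples with 15 features each.
rng = np.random.default_rng(seed=42)
trainX = rng.random((200, 15))
trainy = rng.random(200)   # 1-D target; GRUModel reshapes it to (-1, 1) for scaling
testX = rng.random((50, 15))

# epochs is cut from the default 1000 purely to keep the sketch quick;
# the ReduceLROnPlateau/EarlyStopping callbacks still apply inside GRUModel.
predicted_train, predicted_test, history = GRUModel(trainX, trainy, testX=testX, epochs=10)

# Predictions are returned inverse-transformed to the original target scale.
print(predicted_train.shape)  # (200,)
print(predicted_test.shape)   # (50,)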