Amol Kaushik committed on
Commit
7ca3fe8
·
1 Parent(s): 0502784

updated the correct app.py

Browse files
Files changed (1) hide show
  1. app.py +219 -128
app.py CHANGED
@@ -1,60 +1,80 @@
1
  import gradio as gr
2
  import pandas as pd
3
- import numpy as np
4
  import pickle
5
  import os
6
 
7
- # Paths for HuggingFace deployment
8
- MODEL_PATH = "A2/models/champion_model_final_2.pkl"
9
- DATA_PATH = "A2/A2_dataset.csv"
 
10
 
11
  model = None
12
  FEATURE_NAMES = None
13
  MODEL_METRICS = None
14
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
  def load_champion_model():
17
  global model, FEATURE_NAMES, MODEL_METRICS
18
 
19
- possible_paths = [
20
- MODEL_PATH,
21
- "A2/models/champion_model_final_2.pkl",
22
- "../A2/models/champion_model_final_2.pkl",
23
- ]
24
-
25
- for path in possible_paths:
26
- if os.path.exists(path):
27
- print(f"Loading champion model from {path}")
28
- with open(path, "rb") as f:
29
- artifact = pickle.load(f)
30
-
31
- model = artifact["model"]
32
- FEATURE_NAMES = artifact["feature_columns"]
33
- MODEL_METRICS = artifact.get("test_metrics", {})
34
-
35
- print(f"model loaded successfully")
36
- print(f"Features: {len(FEATURE_NAMES)} columns")
37
- print(f"Test R2: {MODEL_METRICS.get('r2', 'N/A')}")
38
- return True
39
-
40
- print("champion model not found")
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  return False
42
 
43
 
44
  load_champion_model()
 
45
 
46
 
47
- # prediction function
48
  def predict_score(*feature_values):
49
  if model is None:
50
- return "Error", "Model not loaded"
51
 
52
- # Convert inputs to dataframe with correct feature names
53
  features_df = pd.DataFrame([feature_values], columns=FEATURE_NAMES)
54
-
55
  raw_score = model.predict(features_df)[0]
56
-
57
- # score to valid range and change to %
58
  score = max(0, min(1, raw_score)) * 100
59
 
60
  if score >= 80:
@@ -66,11 +86,8 @@ def predict_score(*feature_values):
66
  else:
67
  interpretation = "Needs work, focus on proper form"
68
 
69
- # Create output
70
  r2 = MODEL_METRICS.get('r2', 'N/A')
71
  correlation = MODEL_METRICS.get('correlation', 'N/A')
72
-
73
- # Format metrics
74
  r2_str = f"{r2:.4f}" if isinstance(r2, (int, float)) else str(r2)
75
  corr_str = f"{correlation:.4f}" if isinstance(correlation, (int, float)) else str(correlation)
76
 
@@ -90,45 +107,75 @@ def predict_score(*feature_values):
90
  return f"{score:.1f}%", interpretation, details
91
 
92
 
93
- # load example for testing
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  def load_example():
95
  if FEATURE_NAMES is None:
96
  return [0.5] * 35
97
 
98
  try:
99
- possible_paths = [
100
- "Datasets_all/A2_dataset_80.csv",
101
- "A2/A2_dataset.csv",
102
- "../Datasets_all/A2_dataset_80.csv",
103
- ]
104
-
105
- df = None
106
- for path in possible_paths:
107
- if os.path.exists(path):
108
- df = pd.read_csv(path)
109
- print(f"Loaded dataset from {path}")
110
- break
111
-
112
- # Fallback to GitHub raw URL if no local file found
113
- if df is None:
114
- url = "https://raw.githubusercontent.com/othmanreem/Data-intensive-systems/main/Datasets_all/A2_dataset_80.csv"
115
- print(f"Loading dataset from {url}")
116
- df = pd.read_csv(url)
117
-
118
- # Get a random row with only the features we need
119
  available_features = [f for f in FEATURE_NAMES if f in df.columns]
120
- print(f"Found {len(available_features)} matching features")
121
  sample = df[available_features].sample(1).values[0]
122
- # Convert to float list to ensure proper types for Gradio sliders
123
  return [float(x) for x in sample]
124
  except Exception as e:
125
  print(f"Error loading example: {e}")
126
- import traceback
127
- traceback.print_exc()
128
  return [0.5] * len(FEATURE_NAMES)
129
 
130
 
131
- # create gradio interface
 
 
 
 
 
 
 
 
 
 
 
 
 
132
  def create_interface():
133
  if FEATURE_NAMES is None:
134
  return gr.Interface(
@@ -138,19 +185,17 @@ def create_interface():
138
  title="Error: Model not loaded"
139
  )
140
 
141
- # Create input sliders for features
142
  inputs = []
143
  for name in FEATURE_NAMES:
144
- slider = gr.Slider(
145
- minimum=0,
146
- maximum=1,
147
- value=0.5,
148
- step=0.01,
149
- label=name.replace("_", " "),
150
- )
151
  inputs.append(slider)
152
 
153
- # Build the interface
 
 
 
 
 
154
  description = """
155
  ## Deep Squat Movement Assessment
156
 
@@ -166,78 +211,124 @@ def create_interface():
166
  - 0-39%: Needs improvement
167
  """
168
 
169
- # features into categories
 
 
 
 
 
 
 
 
 
 
170
  angle_features = [n for n in FEATURE_NAMES if "Angle" in n]
171
  nasm_features = [n for n in FEATURE_NAMES if "NASM" in n]
172
  time_features = [n for n in FEATURE_NAMES if "Time" in n]
173
 
174
- # Get indices for each category
175
  angle_indices = [FEATURE_NAMES.index(f) for f in angle_features]
176
  nasm_indices = [FEATURE_NAMES.index(f) for f in nasm_features]
177
  time_indices = [FEATURE_NAMES.index(f) for f in time_features]
178
 
179
- # Create the main interface
 
 
 
 
 
 
 
180
  with gr.Blocks(title="Deep Squat Assessment") as demo:
181
  gr.Markdown("# Deep Squat Movement Assessment")
182
- gr.Markdown(description)
183
-
184
- with gr.Row():
185
- with gr.Column(scale=2):
186
- gr.Markdown("### Input Features")
187
- gr.Markdown(f"*{len(FEATURE_NAMES)} features loaded from champion model*")
188
- gr.Markdown("*Deviation values: 0 = perfect, 1 = maximum deviation*")
189
-
190
- with gr.Tabs():
191
- with gr.TabItem(f"Angle Deviations ({len(angle_indices)})"):
192
- for idx in angle_indices:
193
- inputs[idx].render()
194
-
195
- with gr.TabItem(f"NASM Deviations ({len(nasm_indices)})"):
196
- for idx in nasm_indices:
197
- inputs[idx].render()
198
-
199
- with gr.TabItem(f"Time Deviations ({len(time_indices)})"):
200
- for idx in time_indices:
201
- inputs[idx].render()
202
-
203
- with gr.Column(scale=1):
204
- gr.Markdown("### Results")
205
- score_output = gr.Textbox(label="Predicted Score")
206
- interp_output = gr.Textbox(label="Assessment")
207
- details_output = gr.Markdown(label="Details")
208
-
209
- with gr.Row():
210
- submit_btn = gr.Button("Submit", variant="primary")
211
- example_btn = gr.Button("Load Random Example")
212
- clear_btn = gr.Button("Clear")
213
-
214
- submit_btn.click(
215
- fn=predict_score,
216
- inputs=inputs,
217
- outputs=[score_output, interp_output, details_output],
218
- )
219
-
220
- example_btn.click(
221
- fn=load_example,
222
- inputs=[],
223
- outputs=inputs
224
- )
225
-
226
- clear_btn.click(
227
- fn=lambda: [0.5] * len(FEATURE_NAMES) + ["", "", ""],
228
- inputs=[],
229
- outputs=inputs + [score_output, interp_output, details_output],
230
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
 
232
  return demo
233
 
234
 
235
- # Create the interface
236
  demo = create_interface()
237
 
238
  if __name__ == "__main__":
239
- demo.launch(
240
- share=False,
241
- server_name="0.0.0.0",
242
- server_port=7860,
243
- )
 
1
  import gradio as gr
2
  import pandas as pd
 
3
  import pickle
4
  import os
5
 
6
# Paths for HuggingFace deployment (runs from repository root)
MODEL_PATH = "A3/models/champion_model_final_2.pkl"
CLASSIFICATION_MODEL_PATH = "A3/models/classification_champion.pkl"
DATA_PATH = "A3/A3_Data/train_dataset.csv"

# Regression (scoring) model state — populated by load_champion_model().
model = None
FEATURE_NAMES = None
MODEL_METRICS = None

# Classification model state — populated by load_classification_model().
classification_model = None
CLASSIFICATION_FEATURE_NAMES = None
CLASSIFICATION_CLASSES = None
CLASSIFICATION_METRICS = None

# Advice shown alongside the predicted weakest body region; keys must match
# the class labels stored in the classification artifact.
BODY_REGION_RECOMMENDATIONS = {
    'Upper Body': "Focus on shoulder mobility, thoracic spine extension, and keeping your head neutral.",
    'Lower Body': "Work on hip mobility, ankle dorsiflexion, and knee tracking over toes."
}
25
+
26
 
27
def load_champion_model():
    """Load the regression champion artifact and publish it via module globals.

    Reads the pickle at MODEL_PATH and fills in ``model``, ``FEATURE_NAMES``
    and ``MODEL_METRICS``. Returns True on success, False when the file is
    missing.
    """
    global model, FEATURE_NAMES, MODEL_METRICS

    # Guard clause: nothing to do if the artifact is not on disk.
    if not os.path.exists(MODEL_PATH):
        print(f"Champion model not found at {MODEL_PATH}")
        return False

    print(f"Loading champion model from {MODEL_PATH}")
    with open(MODEL_PATH, "rb") as fh:
        artifact = pickle.load(fh)

    model = artifact["model"]
    FEATURE_NAMES = artifact["feature_columns"]
    # test_metrics is optional in the artifact; default to an empty dict.
    MODEL_METRICS = artifact.get("test_metrics", {})

    print(f"Model loaded: {len(FEATURE_NAMES)} features")
    print(f"Test R2: {MODEL_METRICS.get('r2', 'N/A')}")
    return True
45
+
46
+
47
def load_classification_model():
    """Load the body-region classifier artifact into module globals.

    Reads the pickle at CLASSIFICATION_MODEL_PATH and fills in
    ``classification_model``, ``CLASSIFICATION_FEATURE_NAMES``,
    ``CLASSIFICATION_CLASSES`` and ``CLASSIFICATION_METRICS``.
    Returns True on success, False when the file is missing.
    """
    global classification_model, CLASSIFICATION_FEATURE_NAMES, CLASSIFICATION_CLASSES, CLASSIFICATION_METRICS

    # Guard clause: bail out early when the artifact is absent.
    if not os.path.exists(CLASSIFICATION_MODEL_PATH):
        print(f"Classification model not found at {CLASSIFICATION_MODEL_PATH}")
        return False

    print(f"Loading classification model from {CLASSIFICATION_MODEL_PATH}")
    with open(CLASSIFICATION_MODEL_PATH, "rb") as fh:
        artifact = pickle.load(fh)

    classification_model = artifact["model"]
    CLASSIFICATION_FEATURE_NAMES = artifact["feature_columns"]
    CLASSIFICATION_CLASSES = artifact["classes"]
    # test_metrics is optional in the artifact; default to an empty dict.
    CLASSIFICATION_METRICS = artifact.get("test_metrics", {})

    print(f"Classification model loaded: {len(CLASSIFICATION_FEATURE_NAMES)} features")
    print(f"Classes: {CLASSIFICATION_CLASSES}")
    return True
66
 
67
 
68
# Populate the module globals at import time so the Gradio interface built
# below can see the loaded models and feature lists.
load_champion_model()
load_classification_model()
70
 
71
 
 
72
  def predict_score(*feature_values):
73
  if model is None:
74
+ return "Error", "Model not loaded", ""
75
 
 
76
  features_df = pd.DataFrame([feature_values], columns=FEATURE_NAMES)
 
77
  raw_score = model.predict(features_df)[0]
 
 
78
  score = max(0, min(1, raw_score)) * 100
79
 
80
  if score >= 80:
 
86
  else:
87
  interpretation = "Needs work, focus on proper form"
88
 
 
89
  r2 = MODEL_METRICS.get('r2', 'N/A')
90
  correlation = MODEL_METRICS.get('correlation', 'N/A')
 
 
91
  r2_str = f"{r2:.4f}" if isinstance(r2, (int, float)) else str(r2)
92
  corr_str = f"{correlation:.4f}" if isinstance(correlation, (int, float)) else str(correlation)
93
 
 
107
  return f"{score:.1f}%", interpretation, details
108
 
109
 
110
def predict_weakest_link(*feature_values):
    """Classify which body region the input deviations point to.

    Returns a 3-tuple: (predicted region label, confidence string,
    markdown details with ranked probabilities and a recommendation).
    """
    if classification_model is None:
        return "Error", "Model not loaded", ""

    # Single-row frame so the pipeline sees the expected column names.
    row = pd.DataFrame([feature_values], columns=CLASSIFICATION_FEATURE_NAMES)

    prediction = classification_model.predict(row)[0]
    probabilities = classification_model.predict_proba(row)[0]

    # Classes ranked from most to least likely.
    class_probs = sorted(
        zip(CLASSIFICATION_CLASSES, probabilities),
        key=lambda pair: pair[1],
        reverse=True,
    )

    confidence = max(probabilities) * 100
    recommendation = BODY_REGION_RECOMMENDATIONS.get(prediction, "Focus on exercises that strengthen this region.")

    # Held-out metrics may be absent from the artifact; fall back to 'N/A'.
    accuracy = CLASSIFICATION_METRICS.get('accuracy', 'N/A')
    f1_weighted = CLASSIFICATION_METRICS.get('f1_weighted', 'N/A')
    acc_str = f"{accuracy:.2%}" if isinstance(accuracy, (int, float)) else str(accuracy)
    f1_str = f"{f1_weighted:.2%}" if isinstance(f1_weighted, (int, float)) else str(f1_weighted)

    ranked_lines = []
    for rank, (label, prob) in enumerate(class_probs, start=1):
        ranked_lines.append(f"{rank}. **{label}** - {prob*100:.1f}%")
    predictions_list = "\n".join(ranked_lines)

    details = f"""
    ### Prediction Details
    - **Predicted Body Region:** {prediction}
    - **Confidence:** {confidence:.1f}%

    ### Probability Distribution
    {predictions_list}

    ### Recommendation
    {recommendation}

    ### Model Performance
    - **Test Accuracy:** {acc_str}
    - **Test F1 (weighted):** {f1_str}
    """

    return prediction, f"Confidence: {confidence:.1f}%", details
149
+
150
+
151
def load_example():
    """Return one random row of regression features for the demo sliders.

    The returned list is aligned positionally with FEATURE_NAMES (one value
    per slider). Previously the values were ordered by whichever feature
    columns happened to exist in the CSV, so a missing column produced a
    short AND misaligned list relative to the sliders built from
    FEATURE_NAMES; now any missing or unparsable value falls back to the
    neutral slider default of 0.5.
    """
    if FEATURE_NAMES is None:
        # Model failed to load; match the placeholder interface's 35 sliders.
        return [0.5] * 35

    try:
        # Dataset uses European formatting: ';' separators, ',' decimals.
        df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
        sample = df.sample(1).iloc[0]
        values = []
        for name in FEATURE_NAMES:
            try:
                values.append(float(sample[name]))
            except (KeyError, TypeError, ValueError):
                # Column absent or value not numeric — use the slider default.
                values.append(0.5)
        return values
    except Exception as e:
        # Best-effort loader for a demo button: log and fall back to defaults.
        print(f"Error loading example: {e}")
        return [0.5] * len(FEATURE_NAMES)
163
 
164
 
165
def load_classification_example():
    """Return one random row of classification features for the demo sliders.

    The returned list is aligned positionally with
    CLASSIFICATION_FEATURE_NAMES (one value per slider). Previously the
    values were ordered by the CSV's available columns, so a missing column
    produced a short AND misaligned list; now any missing or unparsable
    value falls back to the neutral slider default of 0.5.
    """
    if CLASSIFICATION_FEATURE_NAMES is None:
        # Classifier failed to load; match the placeholder slider count.
        return [0.5] * 40

    try:
        # Dataset uses European formatting: ';' separators, ',' decimals.
        df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
        sample = df.sample(1).iloc[0]
        values = []
        for name in CLASSIFICATION_FEATURE_NAMES:
            try:
                values.append(float(sample[name]))
            except (KeyError, TypeError, ValueError):
                # Column absent or value not numeric — use the slider default.
                values.append(0.5)
        return values
    except Exception as e:
        # Best-effort loader for a demo button: log and fall back to defaults.
        print(f"Error loading classification example: {e}")
        return [0.5] * len(CLASSIFICATION_FEATURE_NAMES)
177
+
178
+
179
  def create_interface():
180
  if FEATURE_NAMES is None:
181
  return gr.Interface(
 
185
  title="Error: Model not loaded"
186
  )
187
 
 
188
  inputs = []
189
  for name in FEATURE_NAMES:
190
+ slider = gr.Slider(minimum=0, maximum=1, value=0.5, step=0.01, label=name.replace("_", " "))
 
 
 
 
 
 
191
  inputs.append(slider)
192
 
193
+ classification_inputs = []
194
+ if CLASSIFICATION_FEATURE_NAMES is not None:
195
+ for name in CLASSIFICATION_FEATURE_NAMES:
196
+ slider = gr.Slider(minimum=0, maximum=1, value=0.5, step=0.01, label=name.replace("_", " "))
197
+ classification_inputs.append(slider)
198
+
199
  description = """
200
  ## Deep Squat Movement Assessment
201
 
 
211
  - 0-39%: Needs improvement
212
  """
213
 
214
+ classification_description = """
215
+ ## Body Region Classification
216
+
217
+ **How to use:**
218
+ 1. Adjust the sliders to input deviation values (0 = no deviation, 1 = maximum deviation)
219
+ 2. Click "Predict Body Region" to identify where to focus improvements
220
+ 3. Or click "Load Random Example" to test with real data
221
+
222
+ **Body Regions:** Upper Body, Lower Body
223
+ """
224
+
225
  angle_features = [n for n in FEATURE_NAMES if "Angle" in n]
226
  nasm_features = [n for n in FEATURE_NAMES if "NASM" in n]
227
  time_features = [n for n in FEATURE_NAMES if "Time" in n]
228
 
 
229
  angle_indices = [FEATURE_NAMES.index(f) for f in angle_features]
230
  nasm_indices = [FEATURE_NAMES.index(f) for f in nasm_features]
231
  time_indices = [FEATURE_NAMES.index(f) for f in time_features]
232
 
233
+ if CLASSIFICATION_FEATURE_NAMES is not None:
234
+ class_angle_features = [n for n in CLASSIFICATION_FEATURE_NAMES if "Angle" in n]
235
+ class_nasm_features = [n for n in CLASSIFICATION_FEATURE_NAMES if "NASM" in n]
236
+ class_time_features = [n for n in CLASSIFICATION_FEATURE_NAMES if "Time" in n]
237
+ class_angle_indices = [CLASSIFICATION_FEATURE_NAMES.index(f) for f in class_angle_features]
238
+ class_nasm_indices = [CLASSIFICATION_FEATURE_NAMES.index(f) for f in class_nasm_features]
239
+ class_time_indices = [CLASSIFICATION_FEATURE_NAMES.index(f) for f in class_time_features]
240
+
241
  with gr.Blocks(title="Deep Squat Assessment") as demo:
242
  gr.Markdown("# Deep Squat Movement Assessment")
243
+
244
+ with gr.Tabs():
245
+ with gr.TabItem("Movement Scoring"):
246
+ gr.Markdown(description)
247
+
248
+ with gr.Row():
249
+ with gr.Column(scale=2):
250
+ gr.Markdown("### Input Features")
251
+ gr.Markdown(f"*{len(FEATURE_NAMES)} features loaded from champion model*")
252
+ gr.Markdown("*Deviation values: 0 = perfect, 1 = maximum deviation*")
253
+
254
+ with gr.Tabs():
255
+ with gr.TabItem(f"Angle Deviations ({len(angle_indices)})"):
256
+ for idx in angle_indices:
257
+ inputs[idx].render()
258
+
259
+ with gr.TabItem(f"NASM Deviations ({len(nasm_indices)})"):
260
+ for idx in nasm_indices:
261
+ inputs[idx].render()
262
+
263
+ with gr.TabItem(f"Time Deviations ({len(time_indices)})"):
264
+ for idx in time_indices:
265
+ inputs[idx].render()
266
+
267
+ with gr.Column(scale=1):
268
+ gr.Markdown("### Results")
269
+ score_output = gr.Textbox(label="Predicted Score")
270
+ interp_output = gr.Textbox(label="Assessment")
271
+ details_output = gr.Markdown(label="Details")
272
+
273
+ with gr.Row():
274
+ submit_btn = gr.Button("Submit", variant="primary")
275
+ example_btn = gr.Button("Load Random Example")
276
+ clear_btn = gr.Button("Clear")
277
+
278
+ submit_btn.click(fn=predict_score, inputs=inputs, outputs=[score_output, interp_output, details_output])
279
+ example_btn.click(fn=load_example, inputs=[], outputs=inputs)
280
+ clear_btn.click(
281
+ fn=lambda: [0.5] * len(FEATURE_NAMES) + ["", "", ""],
282
+ inputs=[],
283
+ outputs=inputs + [score_output, interp_output, details_output],
284
+ )
285
+
286
+ if CLASSIFICATION_FEATURE_NAMES is not None:
287
+ with gr.TabItem("Body Region Classification"):
288
+ gr.Markdown(classification_description)
289
+
290
+ with gr.Row():
291
+ with gr.Column(scale=2):
292
+ gr.Markdown("### Input Features")
293
+ gr.Markdown(f"*{len(CLASSIFICATION_FEATURE_NAMES)} features for classification*")
294
+ gr.Markdown("*Deviation values: 0 = perfect, 1 = maximum deviation*")
295
+
296
+ with gr.Tabs():
297
+ with gr.TabItem(f"Angle Deviations ({len(class_angle_indices)})"):
298
+ for idx in class_angle_indices:
299
+ classification_inputs[idx].render()
300
+
301
+ with gr.TabItem(f"NASM Deviations ({len(class_nasm_indices)})"):
302
+ for idx in class_nasm_indices:
303
+ classification_inputs[idx].render()
304
+
305
+ with gr.TabItem(f"Time Deviations ({len(class_time_indices)})"):
306
+ for idx in class_time_indices:
307
+ classification_inputs[idx].render()
308
+
309
+ with gr.Column(scale=1):
310
+ gr.Markdown("### Results")
311
+ class_output = gr.Textbox(label="Predicted Body Region")
312
+ class_interp_output = gr.Textbox(label="Confidence")
313
+ class_details_output = gr.Markdown(label="Details")
314
+
315
+ with gr.Row():
316
+ class_submit_btn = gr.Button("Predict Body Region", variant="primary")
317
+ class_example_btn = gr.Button("Load Random Example")
318
+ class_clear_btn = gr.Button("Clear")
319
+
320
+ class_submit_btn.click(fn=predict_weakest_link, inputs=classification_inputs, outputs=[class_output, class_interp_output, class_details_output])
321
+ class_example_btn.click(fn=load_classification_example, inputs=[], outputs=classification_inputs)
322
+ class_clear_btn.click(
323
+ fn=lambda: [0.5] * len(CLASSIFICATION_FEATURE_NAMES) + ["", "", ""],
324
+ inputs=[],
325
+ outputs=classification_inputs + [class_output, class_interp_output, class_details_output],
326
+ )
327
 
328
  return demo
329
 
330
 
 
331
# Build the UI once at import time — HuggingFace Spaces imports this module
# and serves the resulting `demo` object.
demo = create_interface()

if __name__ == "__main__":
    # Bind to all interfaces on the standard Spaces port; no public share link.
    demo.launch(share=False, server_name="0.0.0.0", server_port=7860)