Update app.py
app.py CHANGED
@@ -7,6 +7,7 @@ import re
 from gtts import gTTS
 import tempfile
 import numpy as np
+import cv2

 # Configure API
 GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
@@ -174,6 +175,64 @@ class CICE_Assessment:
 # Initialize the assessment tool
 assessor = CICE_Assessment()

+def compress_video(input_path, output_path, target_width=640, target_height=480, target_fps=15, target_bitrate='500k'):
+    """Compress and resize video to reduce file size and processing time"""
+
+    try:
+        # Open the video
+        cap = cv2.VideoCapture(input_path)
+
+        # Get original properties
+        original_fps = cap.get(cv2.CAP_PROP_FPS)
+        original_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        original_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        # Calculate aspect ratio and new dimensions
+        aspect_ratio = original_width / original_height
+        if aspect_ratio > target_width / target_height:
+            new_width = target_width
+            new_height = int(target_width / aspect_ratio)
+        else:
+            new_height = target_height
+            new_width = int(target_height * aspect_ratio)
+
+        # Set up video writer with compression
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        out = cv2.VideoWriter(output_path, fourcc, min(target_fps, original_fps), (new_width, new_height))
+
+        frame_skip = max(1, int(original_fps / target_fps))
+        frame_count = 0
+
+        while True:
+            ret, frame = cap.read()
+            if not ret:
+                break
+
+            # Skip frames to reduce FPS
+            if frame_count % frame_skip == 0:
+                # Resize frame
+                resized_frame = cv2.resize(frame, (new_width, new_height), interpolation=cv2.INTER_AREA)
+                out.write(resized_frame)
+
+            frame_count += 1
+
+        cap.release()
+        out.release()
+
+        # Get file sizes for comparison
+        original_size = os.path.getsize(input_path) / (1024 * 1024)  # MB
+        compressed_size = os.path.getsize(output_path) / (1024 * 1024)  # MB
+
+        print(f"✅ Video compressed: {original_size:.2f}MB → {compressed_size:.2f}MB")
+        print(f"   Resolution: {original_width}x{original_height} → {new_width}x{new_height}")
+        print(f"   FPS: {original_fps:.1f} → {min(target_fps, original_fps):.1f}")
+
+        return output_path
+
+    except Exception as e:
+        print(f"⚠️ Compression failed, using original: {str(e)}")
+        return input_path
+
 def process_video(video):
     """Process uploaded or recorded video"""

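For a quick sanity check outside the app, the new helper can be called directly. A minimal sketch, assuming it runs in the same module as the code above; the input path sample.mp4 is hypothetical and not part of the commit:

import os
import tempfile

if __name__ == "__main__":
    # Reserve a writable output path, the same pattern process_video uses below.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
        out_path = tmp.name

    # "sample.mp4" is a placeholder for any local clip.
    result = compress_video("sample.mp4", out_path)
    print(f"Wrote {result} ({os.path.getsize(result) / (1024 * 1024):.2f} MB)")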
@@ -186,6 +245,15 @@ def process_video(video):
     progress_messages = []

     try:
+        # Compress video if needed
+        file_size_mb = os.path.getsize(video) / (1024 * 1024)
+
+        if file_size_mb > 10:  # Compress if larger than 10MB
+            progress_messages.append(f"📦 Compressing video ({file_size_mb:.1f}MB)...")
+            with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp_file:
+                compressed_path = tmp_file.name
+                video = compress_video(video, compressed_path)
+
         # Start assessment
         progress_messages.append("🎥 Starting CICE 2.0 Healthcare Team Assessment...")

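Read in isolation, the NamedTemporaryFile call appears to be there only to reserve a writable .mp4 path; compress_video then writes to that path and hands back either the compressed file or, on failure, the original one. A stripped-down sketch of the same gate, with a hypothetical helper name (maybe_compress) introduced purely for illustration:

import os
import tempfile

def maybe_compress(video_path, threshold_mb=10):
    """Return a path to a compressed copy when the file exceeds the threshold."""
    size_mb = os.path.getsize(video_path) / (1024 * 1024)
    if size_mb <= threshold_mb:
        return video_path
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
        target_path = tmp.name
    return compress_video(video_path, target_path)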
@@ -209,7 +277,7 @@ def process_video(video):

         # Create visual summary HTML
         summary_html = f"""
-        <div style="max-width:800px; margin:20px auto; padding:30px; border-radius:15px; box-shadow:0 4px 6px rgba(0,0,0,0.1);">
+        <div style="max-width:800px; margin:20px auto; padding:30px; background:white; border-radius:15px; box-shadow:0 4px 6px rgba(0,0,0,0.1);">
             <h2 style="text-align:center; color:#1f2937;">CICE 2.0 Assessment Summary</h2>

             <div style="display:flex; justify-content:space-around; margin:30px 0;">
@@ -295,7 +363,15 @@ def create_interface():
                     sources=["upload", "webcam"],
                     format="mp4",
                     include_audio=True,
-                    interactive=True
+                    interactive=True,
+                    webcam_constraints={
+                        "video": {
+                            "width": {"ideal": 640, "max": 640},
+                            "height": {"ideal": 480, "max": 480},
+                            "frameRate": {"ideal": 15, "max": 24}
+                        },
+                        "audio": True
+                    }
                 )

                 analyze_btn = gr.Button("🔍 Analyze Video", variant="primary", size="lg")
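The webcam_constraints dict mirrors the browser's MediaTrackConstraints shape, where "ideal" expresses a preference and "max" a hard cap, so webcam clips should already be captured near 640x480 @ 15fps instead of being shrunk afterwards. A minimal standalone sketch, assuming a Gradio release whose gr.Video accepts webcam_constraints as used in this commit:

import gradio as gr

with gr.Blocks() as demo:
    # Constrain capture on the browser side so recordings start out small.
    clip = gr.Video(
        sources=["webcam"],
        format="mp4",
        include_audio=True,
        webcam_constraints={
            "video": {
                "width": {"ideal": 640, "max": 640},
                "height": {"ideal": 480, "max": 480},
                "frameRate": {"ideal": 15, "max": 24},
            },
            "audio": True,
        },
    )

if __name__ == "__main__":
    demo.launch()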
@@ -306,6 +382,10 @@ def create_interface():
                 2. Click **Analyze Video** to start the assessment
                 3. Wait for the AI to process (1-2 minutes)
                 4. Review the detailed assessment and listen to audio feedback
+
+                **Note:** Videos are automatically optimized for faster processing:
+                - Webcam recording: 640x480 @ 15fps
+                - Uploaded videos > 10MB will be compressed
                 """)

             with gr.Column(scale=2):