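"""CICE 2.0 Healthcare Team Assessment Tool.

A Gradio app that uploads a recorded healthcare team simulation to Gemini, scores the
interaction against the 18 CICE 2.0 interprofessional competencies, and returns a written
report, a visual summary, a downloadable report file, and a one-minute gTTS audio summary.

Requires GOOGLE_API_KEY in the environment. Likely pip dependencies (inferred from the
imports below): gradio, google-generativeai, gTTS, opencv-python, numpy.
"""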
import gradio as gr
import google.generativeai as genai
import os
import time
from datetime import datetime
import re
from gtts import gTTS
import tempfile
import numpy as np
import cv2

# Configure API
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
if GOOGLE_API_KEY:
    genai.configure(api_key=GOOGLE_API_KEY)
else:
    print("⚠️ Warning: GOOGLE_API_KEY not found in environment variables")
class CICE_Assessment:
    def __init__(self):
        self.model = genai.GenerativeModel("gemini-2.0-flash-exp")

    def analyze_video(self, video_path):
        """Analyze video using the 18-point CICE 2.0 assessment with specific behavioral cues"""
        try:
            # Upload video to Gemini
            video_file = genai.upload_file(path=video_path, display_name="healthcare_interaction")

            # Wait for processing
            max_wait = 300
            wait_time = 0
            while video_file.state.name == "PROCESSING" and wait_time < max_wait:
                time.sleep(3)
                wait_time += 3
                video_file = genai.get_file(video_file.name)

            if video_file.state.name == "FAILED":
                raise Exception("Video processing failed")

            # ENHANCED PROMPT WITH SPECIFIC BEHAVIORAL CUES
| prompt = """Analyze this healthcare team interaction video and provide a comprehensive assessment based on the CICE 2.0 instrument's 18 interprofessional competencies, looking for these SPECIFIC BEHAVIORAL CUES: | |
| For EACH competency, clearly state whether it was "OBSERVED" or "NOT OBSERVED" based on these specific behaviors: | |
| 1. IDENTIFIES FACTORS INFLUENCING HEALTH STATUS | |
| LOOK FOR: Team mentions allergy bracelet, fall-related trauma, multiple injuries, or states airway/breathing/circulation concerns out loud | |
| 2. IDENTIFIES TEAM GOALS FOR THE PATIENT | |
| LOOK FOR: Team verbalizes goals like: stabilize airway, CPR/AED, give epinephrine, control bleeding, preserve tooth, prepare EMS handoff | |
| 3. PRIORITIZES GOALS FOCUSED ON IMPROVING HEALTH OUTCOMES | |
| LOOK FOR: CPR/AED prioritized before bleeding/dental injury, EpiPen administered before addressing secondary injuries | |
| 4. VERBALIZES DISCIPLINE-SPECIFIC ROLE (PRE-BRIEF) | |
| LOOK FOR: Students acknowledge interprofessional communication expectations and scene safety review before scenario begins | |
| 5. OFFERS TO SEEK GUIDANCE FROM COLLEAGUES | |
| LOOK FOR: Peer-to-peer checks (e.g., dental to dental: confirm tooth storage; nursing to nursing: confirm CPR quality) | |
| 6. COMMUNICATES ABOUT COST-EFFECTIVE AND TIMELY CARE | |
| LOOK FOR: Team chooses readily available supplies (AED, saline, tourniquet) without delay, states need for rapid EMS transfer | |
| 7. DIRECTS QUESTIONS TO OTHER HEALTH PROFESSIONALS BASED ON EXPERTISE | |
| LOOK FOR: Asks discipline-specific expertise (e.g., "Dental—what do we do with the tooth?"), invites pharmacy/medical input on epinephrine use | |
| 8. AVOIDS DISCIPLINE-SPECIFIC TERMINOLOGY | |
| LOOK FOR: Uses plain language like "no pulse" instead of "asystole" | |
| 9. EXPLAINS DISCIPLINE-SPECIFIC TERMINOLOGY WHEN NECESSARY | |
| LOOK FOR: Clarifies medical/dental terms for others when necessary | |
| 10. COMMUNICATES ROLES AND RESPONSIBILITIES CLEARLY | |
| LOOK FOR: Announces assignments out loud: "I'll do compressions," "I'll call 911," "I'll document" | |
| 11. ENGAGES IN ACTIVE LISTENING | |
| LOOK FOR: Repeats back instructions ("Everyone clear for shock"), pauses to hear teammates' updates | |
| 12. SOLICITS AND ACKNOWLEDGES PERSPECTIVES | |
| LOOK FOR: Leader asks "Anything else we need to address?", responds to peer input respectfully | |
| 13. RECOGNIZES APPROPRIATE CONTRIBUTIONS | |
| LOOK FOR: Affirms correct actions verbally ("Good catch on allergy bracelet"), non-verbal acknowledgment (nodding, thumbs up) | |
| 14. RESPECTFUL OF OTHER TEAM MEMBERS | |
| LOOK FOR: Listens without interrupting, values input across professions | |
| 15. COLLABORATIVELY WORKS THROUGH INTERPROFESSIONAL CONFLICTS | |
| LOOK FOR: Negotiates intervention priorities (airway vs. bleeding) respectfully | |
| 16. REFLECTS ON STRENGTHS OF TEAM INTERACTIONS (POST-BRIEF) | |
| LOOK FOR: Notes strong teamwork, communication, or role clarity after the scenario | |
| 17. REFLECTS ON CHALLENGES OF TEAM INTERACTIONS (POST-BRIEF) | |
| LOOK FOR: Identifies confusion, delays, or role overlap in debriefing | |
| 18. IDENTIFIES HOW TO IMPROVE TEAM EFFECTIVENESS (POST-BRIEF) | |
| LOOK FOR: Suggests faster role assignment, consistent closed-loop communication, earlier epi use | |
| STRUCTURE YOUR RESPONSE AS FOLLOWS: | |
| ## OVERALL ASSESSMENT | |
| Brief overview of the team interaction quality. | |
| ## DETAILED COMPETENCY EVALUATION | |
| For each of the 18 competencies, format as: | |
| Competency [number]: [name] | |
| Status: [OBSERVED/NOT OBSERVED] | |
| Evidence: [Specific behavioral cue observed or explanation of absence] | |
| ## STRENGTHS | |
| Top 3-5 key strengths with specific examples | |
| ## AREAS FOR IMPROVEMENT | |
| Top 3-5 areas needing work with specific suggestions | |
| ## AUDIO SUMMARY | |
| [Create a 60-second summary focusing on: overall performance level, top 3 strengths, top 3 areas for improvement, and 2 key recommendations] | |
| ## FINAL SCORE | |
| Competencies Observed: X/18 | |
| Overall Performance Level: [Exemplary (85-100%)/Proficient (70-84%)/Developing (50-69%)/Needs Improvement (0-49%)]""" | |
| response = self.model.generate_content([video_file, prompt]) | |
| return response.text | |
| except Exception as e: | |
| return f"Error during analysis: {str(e)}" | |
    def generate_audio_feedback(self, text):
        """Generate a concise 1-minute audio feedback summary"""
        # Extract the audio summary section from the assessment
        audio_summary_match = re.search(r'## AUDIO SUMMARY\s*(.*?)(?=##|\Z)', text, re.DOTALL)
        if audio_summary_match:
            summary_text = audio_summary_match.group(1).strip()
        else:
            # Fallback: create a brief summary from the assessment
            summary_text = self.create_brief_summary(text)

        # Clean text for speech
        clean_text = re.sub(r'[#*_\[\]()]', ' ', summary_text)
        clean_text = re.sub(r'\s+', ' ', clean_text)
        clean_text = re.sub(r'[-•·]\s+', '', clean_text)

        # Add introduction and conclusion for a better audio experience
        audio_script = f"""CICE Healthcare Team Assessment Summary.
{clean_text}
Please refer to the detailed written report for complete competency evaluation and specific recommendations.
End of audio summary."""

        # Generate audio with gTTS
        try:
            tts = gTTS(text=audio_script, lang='en', slow=False, tld='com')
            # Save to a temporary file
            with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as tmp_file:
                audio_path = tmp_file.name
            tts.save(audio_path)
            return audio_path
        except Exception as e:
            print(f"⚠️ Audio generation failed: {str(e)}")
            return None
    def create_brief_summary(self, text):
        """Create a brief summary if the AUDIO SUMMARY section is not found"""
        # Heuristic score: count "observed" occurrences that are not part of "not observed"
        observed_count = text.lower().count("observed") - text.lower().count("not observed")
        total = 18
        percentage = (observed_count / total) * 100

        # Determine performance level
        if percentage >= 85:
            level = "Exemplary"
        elif percentage >= 70:
            level = "Proficient"
        elif percentage >= 50:
            level = "Developing"
        else:
            level = "Needs Improvement"

        # Generic fallback phrasing (detailed extraction of strengths/improvements is not attempted here)
        strengths = "strong team communication and role clarity"
        improvements = "enhancing active listening and conflict resolution skills"

        summary = f"""The team demonstrated {level} performance with {observed_count} out of {total} competencies observed,
achieving {percentage:.0f} percent overall.
Key strengths included {strengths}.
Areas for improvement include {improvements}.
The team should focus on pre-briefing protocols and post-scenario debriefing to enhance future performance.
Emphasis should be placed on clear role assignment and closed-loop communication during critical interventions."""
        return summary
    def parse_assessment_scores(self, assessment_text):
        """Parse assessment text to extract scores"""
        # Heuristic: count "observed" occurrences that are not part of "not observed"
        observed_count = assessment_text.lower().count("observed") - assessment_text.lower().count("not observed")
        total_competencies = 18
        percentage = (observed_count / total_competencies) * 100

        if percentage >= 85:
            level = "Exemplary"
            color = "#059669"
        elif percentage >= 70:
            level = "Proficient"
            color = "#0891b2"
        elif percentage >= 50:
            level = "Developing"
            color = "#f59e0b"
        else:
            level = "Needs Improvement"
            color = "#dc2626"

        return observed_count, total_competencies, percentage, level, color
# Initialize the assessment tool
assessor = CICE_Assessment()
def compress_video(input_path, output_path, target_width=640, target_height=360, target_fps=15, target_bitrate='500k'):
    """Compress and resize video to reduce file size and processing time.

    Note: cv2.VideoWriter re-encodes video frames only, so the audio track is not
    preserved in the compressed file. The target_bitrate argument is currently unused.
    """
    try:
        # Open the video
        cap = cv2.VideoCapture(input_path)

        # Get original properties
        original_fps = cap.get(cv2.CAP_PROP_FPS)
        original_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        original_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Calculate aspect ratio and new dimensions
        aspect_ratio = original_width / original_height
        if aspect_ratio > target_width / target_height:
            new_width = target_width
            new_height = int(target_width / aspect_ratio)
        else:
            new_height = target_height
            new_width = int(target_height * aspect_ratio)

        # Set up video writer with compression
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, min(target_fps, original_fps), (new_width, new_height))

        frame_skip = max(1, int(original_fps / target_fps))
        frame_count = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # Skip frames to reduce FPS
            if frame_count % frame_skip == 0:
                # Resize frame
                resized_frame = cv2.resize(frame, (new_width, new_height), interpolation=cv2.INTER_AREA)
                out.write(resized_frame)
            frame_count += 1

        cap.release()
        out.release()

        # Get file sizes for comparison
        original_size = os.path.getsize(input_path) / (1024 * 1024)  # MB
        compressed_size = os.path.getsize(output_path) / (1024 * 1024)  # MB
        print(f"✅ Video compressed: {original_size:.2f}MB → {compressed_size:.2f}MB")
        print(f"   Resolution: {original_width}x{original_height} → {new_width}x{new_height}")
        print(f"   FPS: {original_fps:.1f} → {min(target_fps, original_fps):.1f}")

        return output_path

    except Exception as e:
        print(f"⚠️ Compression failed, using original: {str(e)}")
        return input_path
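# Possible alternative (sketch; assumes the ffmpeg CLI is installed on the host and
# `import subprocess` is added): an ffmpeg re-encode keeps the audio track that the
# assessment relies on while still downscaling and capping the frame rate:
#
#     subprocess.run(["ffmpeg", "-y", "-i", input_path, "-vf", "scale=640:-2", "-r", "15",
#                     "-b:v", "500k", "-c:a", "aac", output_path], check=True)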
def process_video(video):
    """Process uploaded or recorded video"""
    if video is None:
        return "Please upload or record a video first.", None, None, None

    if not GOOGLE_API_KEY:
        return "❌ Error: GOOGLE_API_KEY not configured. Please set it in your environment variables.", None, None, None

    # Progress messages are collected here but not currently surfaced in the UI
    progress_messages = []
    try:
        # Compress video if needed
        file_size_mb = os.path.getsize(video) / (1024 * 1024)
        if file_size_mb > 10:  # Compress if larger than 10MB
            progress_messages.append(f"📦 Compressing video ({file_size_mb:.1f}MB)...")
            with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp_file:
                compressed_path = tmp_file.name
            video = compress_video(video, compressed_path)

        # Start assessment
        progress_messages.append("🏥 Starting CICE 2.0 Healthcare Team Assessment...")

        # Analyze video
        progress_messages.append("📤 Uploading video to Gemini AI...")
        progress_messages.append("⏳ Processing video (this may take 1-2 minutes)...")
        assessment_result = assessor.analyze_video(video)

        if "Error" in assessment_result:
            return assessment_result, None, None, None

        progress_messages.append("✅ Analysis complete!")

        # Generate 1-minute audio feedback
        progress_messages.append("🔊 Generating 1-minute audio summary...")
        audio_path = assessor.generate_audio_feedback(assessment_result)

        # Parse scores for visual summary
        observed, total, percentage, level, color = assessor.parse_assessment_scores(assessment_result)

        # Create enhanced visual summary HTML with behavioral cues
        summary_html = f"""
        <div style="max-width:800px; margin:20px auto; padding:30px; border-radius:15px; box-shadow:0 4px 6px rgba(0,0,0,0.1);">
            <h2 style="text-align:center; color:#1f2937;">CICE 2.0 Assessment Summary</h2>
            <div style="display:flex; justify-content:space-around; margin:30px 0;">
                <div style="text-align:center;">
                    <div style="font-size:48px; font-weight:bold; color:{color};">{observed}/{total}</div>
                    <div style="color:#6b7280;">Competencies Observed</div>
                </div>
                <div style="text-align:center;">
                    <div style="font-size:48px; font-weight:bold; color:{color};">{percentage:.0f}%</div>
                    <div style="color:#6b7280;">Overall Score</div>
                </div>
            </div>
            <div style="text-align:center; padding:20px; background:#f9fafb; border-radius:10px;">
                <div style="font-size:24px; font-weight:bold; color:{color};">Performance Level: {level}</div>
            </div>
            <div style="margin-top:30px;">
                <h3>🎯 Key Behavioral Indicators Assessed:</h3>
                <div style="background:#f3f4f6; padding:15px; border-radius:10px; margin:15px 0;">
                    <h4 style="color:#059669; margin-top:0;">✅ Critical Actions</h4>
                    <ul style="line-height:1.6; color:#374151;">
                        <li>CPR/AED prioritization</li>
                        <li>Epinephrine administration timing</li>
                        <li>Clear role assignments ("I'll do compressions")</li>
                        <li>Closed-loop communication</li>
                    </ul>
                </div>
                <div style="background:#f3f4f6; padding:15px; border-radius:10px; margin:15px 0;">
                    <h4 style="color:#0891b2; margin-top:0;">🗣️ Communication Markers</h4>
                    <ul style="line-height:1.6; color:#374151;">
                        <li>Plain language use (avoiding medical jargon)</li>
                        <li>Active listening (repeating back instructions)</li>
                        <li>Soliciting input ("Anything else we need?")</li>
                        <li>Recognizing contributions ("Good catch!")</li>
                    </ul>
                </div>
                <div style="background:#f3f4f6; padding:15px; border-radius:10px; margin:15px 0;">
                    <h4 style="color:#7c3aed; margin-top:0;">🔄 Team Dynamics</h4>
                    <ul style="line-height:1.6; color:#374151;">
                        <li>Pre-brief safety review</li>
                        <li>Peer-to-peer verification</li>
                        <li>Respectful conflict resolution</li>
                        <li>Post-brief reflection on strengths/challenges</li>
                    </ul>
                </div>
            </div>
            <div style="margin-top:20px; padding:15px; background:#fef3c7; border-radius:10px;">
                <p style="text-align:center; color:#92400e; margin:0;">
                    <strong>🔊 Listen to the 1-minute audio summary for key findings and recommendations</strong>
                </p>
            </div>
        </div>
        """
        # Save assessment to file
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        report_filename = f"cice_assessment_{timestamp}.txt"
        with open(report_filename, "w", encoding="utf-8") as f:
            f.write("CICE 2.0 Healthcare Team Interaction Assessment\n")
            f.write("=" * 60 + "\n")
            f.write(f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write("=" * 60 + "\n\n")
            f.write(assessment_result)

        return assessment_result, summary_html, audio_path, report_filename

    except Exception as e:
        return f"❌ Error: {str(e)}", None, None, None
def create_interface():
    """Create the Gradio interface"""
    with gr.Blocks(title="CICE 2.0 Healthcare Assessment Tool", theme=gr.themes.Soft()) as demo:
        gr.Markdown("""
        # 🏥 CICE 2.0 Healthcare Team Assessment Tool

        **Analyze healthcare team interactions using specific behavioral cues from the 18-point CICE 2.0 framework**

        This tool evaluates critical team behaviors including:
        - Emergency response prioritization (CPR/AED, epinephrine)
        - Clear role communication and closed-loop verification
        - Active listening and respectful team dynamics
        - Pre-brief and post-brief reflection practices

        ---
        """)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 📹 Video Input")
                video_input = gr.Video(
                    label="Upload or Record Video",
                    sources=["upload", "webcam"],
                    format="mp4",
                    include_audio=True,
                    interactive=True,
                    webcam_constraints={
                        "video": {
                            "width": {"ideal": 640, "max": 640},
                            "height": {"ideal": 360, "max": 360},
                            "frameRate": {"ideal": 15, "max": 24}
                        },
                        "audio": True
                    }
                )

                analyze_btn = gr.Button("🔍 Analyze Video", variant="primary", size="lg")

                gr.Markdown("""
                ### 📝 Instructions:
                1. **Upload** a pre-recorded video or **Record** using your webcam
                2. Click **Analyze Video** to start the assessment
                3. Wait for the AI to process (1-2 minutes)
                4. Listen to the **1-minute audio summary** for quick insights
                5. Review the detailed written assessment for the complete evaluation

                **Key Behaviors Assessed:**
                - Allergy/medical history identification
                - CPR/AED prioritization
                - Clear role assignments
                - Plain language communication
                - Active listening behaviors
                - Team respect and conflict resolution
                """)

            with gr.Column(scale=2):
                gr.Markdown("### 📊 Assessment Results")

                # Visual summary
                summary_output = gr.HTML(label="Visual Summary")

                # Audio feedback, featured prominently
                audio_output = gr.Audio(
                    label="🔊 1-Minute Audio Summary (Listen First!)",
                    type="filepath",
                    interactive=False
                )

                # Detailed assessment
                assessment_output = gr.Textbox(
                    label="Detailed CICE 2.0 Assessment (Full Report)",
                    lines=20,
                    max_lines=30,
                    interactive=False
                )

                # Download report
                report_file = gr.File(
                    label="📥 Download Full Report",
                    interactive=False
                )

        # Footer
        gr.Markdown("""
        ---
        ### About This Assessment
        This tool uses AI to identify specific behavioral markers that indicate effective interprofessional collaboration
        in healthcare settings. The assessment focuses on observable actions such as:
        - Verbal role assignments ("I'll do compressions")
        - Recognition phrases ("Good catch on the allergy bracelet")
        - Plain language use instead of medical jargon
        - Pre-brief and post-brief team discussions

        **Note:** Ensure clear audio capture of team communications for accurate assessment.
        """)

        # Connect the analyze button
        analyze_btn.click(
            fn=process_video,
            inputs=[video_input],
            outputs=[assessment_output, summary_output, audio_output, report_file]
        )

    return demo
# Create and launch the app
if __name__ == "__main__":
    demo = create_interface()
    demo.launch(
        share=False,
        debug=True,
        server_name="0.0.0.0",
        server_port=7860
    )
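# Local usage sketch (assumes this file is saved as app.py; on Hugging Face Spaces the app
# launches automatically):
#   export GOOGLE_API_KEY="..."   # Gemini API key read via os.getenv above
#   python app.py                 # serves the Gradio UI on http://0.0.0.0:7860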