/**
 * Enhanced Decision Path 3D Visualization with Full Generation
 *
 * Combines the decision path visualization with full code generation,
 * timeline controls, and step-by-step playback like the Code Generation Tracker.
 *
 * @component
 */
| "use client"; | |
| import { useRef, useState, useEffect, useMemo } from "react"; | |
| import { Canvas, useFrame } from "@react-three/fiber"; | |
| import { OrbitControls } from "@react-three/drei"; | |
| import * as THREE from "three"; | |
| import { getApiUrl, getWsUrl } from "@/lib/config"; | |
import {
  GitBranch,
  Activity,
  Sparkles,
  Zap,
  Brain,
  Play,
  Pause,
  SkipBack,
  SkipForward,
  ChevronLeft,
  ChevronRight,
  RefreshCw,
  Download,
  Code2,
  Info,
  HelpCircle,
  X
} from "lucide-react";
interface LayerActivation {
  layer_index: number;
  attention_weights: number[][];
  hidden_state_norm: number;
  ffn_activation: number;
  top_attention_heads: number[];
  confidence: number;
}
interface DecisionPath {
  token: string;
  token_id: number;
  probability: number;
  layer_activations: LayerActivation[];
  attention_flow: Array<{
    from_layer: number;
    to_layer: number | string;
    strength: number;
    top_heads: number[];
  }>;
  alternatives: Array<{
    token: string;
    token_id: number;
    probability: number;
  }>;
  decision_factors: {
    attention_focus: number;
    semantic_alignment: number;
    syntactic_correctness: number;
    context_relevance: number;
    confidence: number;
  };
  critical_layers: number[];
  confidence_score: number;
  timestamp: number;
}
interface GenerationStep {
  step: number;
  token: string;
  token_id: number;
  probability: number;
  cumulative_text: string;
  top_alternatives: Array<{
    token: string;
    probability: number;
  }>;
  attention_weights?: number[][];
  decision_path?: DecisionPath;
}
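
/**
 * Sketch of the backend WebSocket message shapes assumed by the onmessage
 * handler further down. This is inferred from how fields are read in this
 * file, not an authoritative contract for the unified backend:
 *
 *   { type: "generated_token", token, confidence_score?, alternatives?: [{ token, probability }] }
 *   { type: "attention",  layer?: "layer.<n>", max_weight?, weights?: number[][] }
 *   { type: "activation", layer?: "layer.<n>", mean?, max_weight? }
 *   { type: "confidence", confidence_score? }
 *   { type: "loading_progress", progress, message }
 *   { type: "model_ready" }
 */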
// Enhanced Layer component with proper FFN visualization
interface LayerProps {
  position: [number, number, number];
  layerIndex: number;
  isCritical: boolean;
  isActive: boolean;
  activation?: LayerActivation;
}
function Layer({ position, layerIndex, isCritical, isActive, activation }: LayerProps) {
  const meshRef = useRef<THREE.Mesh>(null);
  const ffnRef = useRef<THREE.Mesh>(null);

  useFrame((state) => {
    if (meshRef.current && isCritical) {
      const scale = 1 + Math.sin(state.clock.elapsedTime * 3) * 0.1;
      meshRef.current.scale.set(scale, scale, scale);
    }
    if (ffnRef.current && activation && activation.ffn_activation) {
      // Scale FFN based on activation strength (0.5 to 1.5 scale range)
      const ffnScale = 0.5 + (activation.ffn_activation * 1.0);
      ffnRef.current.scale.set(ffnScale, ffnScale, ffnScale);
      // Also update FFN color intensity based on activation
      const material = ffnRef.current.material as THREE.MeshStandardMaterial;
      if (material) {
        material.emissiveIntensity = activation.ffn_activation * 0.5;
      }
    }
  });

  const baseColor = isCritical ? "#ff6b6b" : isActive ? "#4ecdc4" : "#2d3748";
  const ffnColor = isCritical ? "#e91e63" : isActive ? "#9c27b0" : "#6b46c1";

  return (
    <group position={position}>
      {/* Main attention layer */}
      <mesh ref={meshRef}>
        <boxGeometry args={[4, 0.3, 2]} />
        <meshStandardMaterial
          color={baseColor}
          emissive={isCritical ? baseColor : "#000000"}
          emissiveIntensity={isCritical ? 0.3 : 0}
          metalness={0.6}
          roughness={0.3}
        />
      </mesh>
      {/* FFN component */}
      <group position={[0, 0, -1.5]}>
        <mesh ref={ffnRef}>
          <boxGeometry args={[3, 0.2, 0.8]} />
          <meshStandardMaterial
            color={ffnColor}
            emissive={ffnColor}
            emissiveIntensity={isActive ? 0.2 : 0.1}
            metalness={0.7}
            roughness={0.3}
          />
        </mesh>
      </group>
      {/* Attention heads visualization */}
      {isActive && (
        <group position={[0, 0, 1.2]}>
          {Array.from({ length: 16 }).map((_, i) => (
            <mesh key={i} position={[(i % 4 - 1.5) * 0.3, 0, Math.floor(i / 4) * 0.2 - 0.3]}>
              <boxGeometry args={[0.15, 0.1, 0.15]} />
              <meshStandardMaterial
                color={activation?.top_attention_heads?.includes(i) ? "#ffd93d" : "#4a5568"}
                emissive={activation?.top_attention_heads?.includes(i) ? "#ffd93d" : "#000000"}
                emissiveIntensity={activation?.top_attention_heads?.includes(i) ? 0.5 : 0}
              />
            </mesh>
          ))}
        </group>
      )}
    </group>
  );
}
// 3D scene
function DecisionPathScene({ decisionPath }: { decisionPath: DecisionPath | null }) {
  const numLayers = 20;
  const layerSpacing = 3.5;

  // Debug log to see what data we're receiving
  useEffect(() => {
    if (decisionPath) {
      console.log('[3D Scene] Decision path update:', {
        hasLayerActivations: !!decisionPath.layer_activations,
        numActivations: decisionPath.layer_activations?.length || 0,
        criticalLayers: decisionPath.critical_layers || []
      });
    }
  }, [decisionPath]);

  return (
    <>
      <ambientLight intensity={0.5} />
      <pointLight position={[10, 10, 10]} intensity={1} />
      <directionalLight position={[0, 10, 5]} intensity={0.5} />

      {/* Input layer */}
      <mesh position={[0, -5, 0]}>
        <boxGeometry args={[5, 0.2, 2]} />
        <meshStandardMaterial color="#10b981" />
      </mesh>

      {/* Transformer layers */}
      {Array.from({ length: numLayers }).map((_, i) => {
        const isCritical = decisionPath?.critical_layers?.includes(i) || false;
        const activation = decisionPath?.layer_activations?.find(a => a.layer_index === i);
        return (
          <Layer
            key={i}
            position={[0, i * layerSpacing, 0]}
            layerIndex={i}
            isCritical={isCritical}
            isActive={!!activation}
            activation={activation}
          />
        );
      })}

      {/* Output layer */}
      <mesh position={[0, numLayers * layerSpacing + 5, 0]}>
        <boxGeometry args={[5, 0.2, 2]} />
        <meshStandardMaterial color="#f59e0b" />
      </mesh>

      {/* Connection lines along the critical path */}
      {decisionPath && decisionPath.critical_layers && decisionPath.critical_layers.map((layerIdx, idx) => {
        const startY = layerIdx * layerSpacing;
        const endY = idx < decisionPath.critical_layers.length - 1
          ? decisionPath.critical_layers[idx + 1] * layerSpacing
          : numLayers * layerSpacing + 5;
        const points = [
          new THREE.Vector3(0, startY, 0),
          new THREE.Vector3(0, endY, 0)
        ];
        const geometry = new THREE.BufferGeometry().setFromPoints(points);
        return (
          <primitive
            key={`line-${idx}`}
            object={new THREE.Line(
              geometry,
              new THREE.LineBasicMaterial({ color: 0xff6b6b, linewidth: 3 })
            )}
          />
        );
      })}

      <gridHelper args={[100, 100, 0x444444, 0x222222]} />
    </>
  );
}
export default function DecisionPath3DEnhanced() {
  const [generationSteps, setGenerationSteps] = useState<GenerationStep[]>([]);
  const [currentStep, setCurrentStep] = useState(0);
  const [isPlaying, setIsPlaying] = useState(false);
  const [isGenerating, setIsGenerating] = useState(false);
  const [isConnected, setIsConnected] = useState(false);
  const [modelLoading, setModelLoading] = useState(true);
  const [loadingProgress, setLoadingProgress] = useState(0);
  const [loadingMessage, setLoadingMessage] = useState("Initializing...");
  const [mounted, setMounted] = useState(false);
  const wsRef = useRef<WebSocket | null>(null);
  const [prompt, setPrompt] = useState("def quicksort(arr):");
  const intervalRef = useRef<NodeJS.Timeout | null>(null);
  const promptRef = useRef<string>("");
  const [showExplanation, setShowExplanation] = useState(false);

  // Store current trace data for layer activations
  interface TraceData {
    layer?: string;
    max_weight?: number;
    weights?: number[][];
    mean?: number;
  }
  const currentTracesRef = useRef<{
    attention: TraceData[];
    activation: TraceData[];
  }>({ attention: [], activation: [] });

  // Fetch real model data
  const [modelInfo, setModelInfo] = useState({
    layers: 20,
    heads: 16,
    vocabSize: 51200,
    totalParams: 356712448
  });
  useEffect(() => {
    fetch(`${getApiUrl()}/model/info`)
      .then(res => res.json())
      .then(data => {
        setModelInfo({
          layers: data.layers,
          heads: data.heads,
          vocabSize: data.vocabSize,
          totalParams: data.totalParams
        });
      })
      .catch(err => console.log('Using default model info'));
  }, []);

  useEffect(() => {
    setMounted(true);
  }, []);
  // Connect to the service
  useEffect(() => {
    if (!mounted) return;

    const connectToService = () => {
      try {
        const ws = new WebSocket(getWsUrl());

        ws.onopen = () => {
          console.log('[DecisionPath3D] Connected to unified backend');
          setIsConnected(true);
          wsRef.current = ws;
          // Model is already loaded in the unified backend
          setModelLoading(false);
          setLoadingProgress(100);
          setLoadingMessage("Model ready!");
        };

        ws.onmessage = (event) => {
          const data = JSON.parse(event.data);
          console.log('[DecisionPath3D] Received:', data.type);

          // Handle messages from the unified backend
          if (data.type === 'generated_token') {
            // Filter out empty tokens
            if (!data.token || data.token.length === 0) {
              return;
            }
            // Add this as a new generation step
            setGenerationSteps(prev => {
              // Build cumulative text correctly
              let cumulativeText = "";
              if (prev.length > 0) {
                // Append the new token to the last cumulative text
                cumulativeText = prev[prev.length - 1].cumulative_text + data.token;
              } else {
                // First token after the prompt - use the stored prompt ref
                cumulativeText = promptRef.current + data.token;
              }
              const newStep: GenerationStep = {
                step: prev.length,
                token: data.token,
                token_id: prev.length, // Use the step as token_id since the backend doesn't send it
                probability: data.confidence_score || 0.5,
                cumulative_text: cumulativeText,
                top_alternatives: data.alternatives?.slice(0, 3) || [],
                decision_path: {
                  token: data.token,
                  token_id: prev.length,
                  probability: data.confidence_score || 0.5,
                  layer_activations: [],
                  attention_flow: [],
                  alternatives: data.alternatives || [],
                  decision_factors: {
                    attention_focus: 0.5,
                    semantic_alignment: 0.5,
                    syntactic_correctness: 0.5,
                    context_relevance: 0.5,
                    confidence: data.confidence_score || 0.5
                  },
                  critical_layers: [],
                  confidence_score: data.confidence_score || 0.5,
                  timestamp: Date.now()
                }
              };
              console.log('[DecisionPath3D] Step', prev.length, 'Token:', data.token, 'Cumulative:', cumulativeText);
              const newSteps = [...prev, newStep];
              // Update the current step to the latest
              setCurrentStep(newSteps.length - 1);
              return newSteps;
            });
          } else if (data.type === 'attention') {
            // Store attention trace data
            currentTracesRef.current.attention.push(data);
            // Update the current step with layer activation info.
            // Use a small delay to ensure we're updating the right step.
            setTimeout(() => {
              setGenerationSteps(prev => {
                if (prev.length === 0) return prev;
                const updated = [...prev];
                const lastStep = updated[updated.length - 1];
                // Extract the layer index from data.layer (e.g. "layer.5" -> 5)
                const layerMatch = data.layer?.match(/layer\.(\d+)/);
                const layerIdx = layerMatch ? parseInt(layerMatch[1]) : null;
                if (layerIdx !== null && lastStep.decision_path) {
                  // Mark this layer as active/critical based on attention weights
                  if (!lastStep.decision_path.layer_activations) {
                    lastStep.decision_path.layer_activations = [];
                  }
                  // Add or update the layer activation:
                  // identify top attention heads (simulate which heads are most active)
                  const topHeads: number[] = [];
                  if (data.max_weight > 0.5) {
                    // Mark 2-4 random heads as highly active
                    const numActiveHeads = Math.floor(Math.random() * 3) + 2;
                    for (let h = 0; h < numActiveHeads; h++) {
                      topHeads.push(Math.floor(Math.random() * 16));
                    }
                  }
                  const activation = {
                    layer_index: layerIdx,
                    attention_weights: data.weights || [],
                    hidden_state_norm: 0,
                    ffn_activation: 0,
                    top_attention_heads: topHeads,
                    confidence: data.max_weight || 0.5
                  };
                  // Update or add the activation
                  const existingIdx = lastStep.decision_path.layer_activations.findIndex(
                    a => a.layer_index === layerIdx
                  );
                  if (existingIdx >= 0) {
                    lastStep.decision_path.layer_activations[existingIdx] = activation;
                  } else {
                    lastStep.decision_path.layer_activations.push(activation);
                  }
                  // Determine critical layers (those with high attention weights).
                  // Only mark layers 5+ as potentially critical (earlier layers are usually less decisive)
                  // and use a higher threshold since our normalization puts many values near 0.95.
                  const threshold = 0.9; // Consider only layers with >90% attention as critical
                  if (data.max_weight > threshold && layerIdx >= 5) {
                    if (!lastStep.decision_path.critical_layers) {
                      lastStep.decision_path.critical_layers = [];
                    }
                    if (!lastStep.decision_path.critical_layers.includes(layerIdx)) {
                      lastStep.decision_path.critical_layers.push(layerIdx);
                    }
                  }
                }
                return updated;
              });
            }, 100); // 100ms delay to ensure the token is processed first
          } else if (data.type === 'activation') {
            // Store activation trace data
            currentTracesRef.current.activation.push(data);
            // Update FFN activation info
            setTimeout(() => {
              setGenerationSteps(prev => {
                if (prev.length === 0) return prev;
                const updated = [...prev];
                const lastStep = updated[updated.length - 1];
                // Extract the layer index
                const layerMatch = data.layer?.match(/layer\.(\d+)/);
                const layerIdx = layerMatch ? parseInt(layerMatch[1]) : null;
                if (layerIdx !== null && lastStep.decision_path) {
                  if (!lastStep.decision_path.layer_activations) {
                    lastStep.decision_path.layer_activations = [];
                  }
                  // Find or create the layer activation
                  let activation = lastStep.decision_path.layer_activations.find(
                    a => a.layer_index === layerIdx
                  );
                  if (!activation) {
                    activation = {
                      layer_index: layerIdx,
                      attention_weights: [],
                      hidden_state_norm: 0,
                      ffn_activation: 0,
                      top_attention_heads: [],
                      confidence: 0.5
                    };
                    lastStep.decision_path.layer_activations.push(activation);
                  }
                  // Update FFN activation
                  activation.ffn_activation = data.mean || 0;
                  activation.hidden_state_norm = data.max_weight || 0;
                  // Also check if this should be a critical layer based on activation strength.
                  // Use mean as it's more representative than max_weight.
                  const threshold = 0.85; // High activation threshold
                  if (data.mean > threshold && layerIdx >= 10) { // Focus on later layers
                    if (!lastStep.decision_path.critical_layers) {
                      lastStep.decision_path.critical_layers = [];
                    }
                    if (!lastStep.decision_path.critical_layers.includes(layerIdx)) {
                      lastStep.decision_path.critical_layers.push(layerIdx);
                    }
                  }
                }
                return updated;
              });
            }, 100); // 100ms delay to ensure the token is processed first
          } else if (data.type === 'confidence') {
            // Handle confidence updates
            console.log('[DecisionPath3D] Confidence update:', data.confidence_score);
          } else if (data.type === 'loading_progress') {
            setLoadingProgress(data.progress);
            setLoadingMessage(data.message);
            if (data.progress === 100) {
              setModelLoading(false);
            }
          } else if (data.type === 'model_ready') {
            setModelLoading(false);
            setLoadingProgress(100);
            setLoadingMessage("Model ready!");
          }
        };

        ws.onerror = () => {
          console.log('[DecisionPath3D] Service not available');
          setIsConnected(false);
        };

        ws.onclose = () => {
          console.log('[DecisionPath3D] Disconnected');
          setIsConnected(false);
          wsRef.current = null;
        };
      } catch (error) {
        console.log('[DecisionPath3D] Connection failed');
        setIsConnected(false);
      }
    };

    connectToService();

    return () => {
      if (wsRef.current) {
        wsRef.current.close();
      }
    };
  }, [mounted]);
  // Auto-play functionality
  useEffect(() => {
    if (isPlaying && generationSteps.length > 0) {
      intervalRef.current = setInterval(() => {
        setCurrentStep(prev => {
          if (prev >= generationSteps.length - 1) {
            setIsPlaying(false);
            return prev;
          }
          return prev + 1;
        });
      }, 1000);
    } else {
      if (intervalRef.current) {
        clearInterval(intervalRef.current);
      }
    }
    return () => {
      if (intervalRef.current) {
        clearInterval(intervalRef.current);
      }
    };
  }, [isPlaying, generationSteps.length]);
  const startGeneration = async () => {
    if (!isConnected) return;

    setIsGenerating(true);
    setGenerationSteps([]);
    setCurrentStep(0);
    promptRef.current = prompt; // Store in a ref to avoid closure issues
    // Clear previous traces
    currentTracesRef.current = { attention: [], activation: [] };

    try {
      // Use HTTP POST to trigger generation
      const response = await fetch(`${getApiUrl()}/generate`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          prompt: prompt,
          max_tokens: 100,
          temperature: 0.7,
          extract_traces: true,
          sampling_rate: 0.3
        })
      });

      if (!response.ok) {
        throw new Error('Failed to start generation');
      }

      // The WebSocket will receive the real-time updates
      const result = await response.json();
      console.log('[DecisionPath3D] Generation complete, received', result.num_tokens, 'tokens');

      // Generation is complete
      setTimeout(() => {
        setIsGenerating(false);
      }, 500); // Small delay to ensure all WebSocket messages are processed
    } catch (error) {
      console.error('[DecisionPath3D] Failed to start generation:', error);
      setIsGenerating(false);
    }
  };
  const currentDecisionPath = generationSteps[currentStep]?.decision_path || null;

  // Debug logging
  useEffect(() => {
    if (currentDecisionPath) {
      console.log('[DecisionPath3D] Current decision path:', {
        token: currentDecisionPath.token,
        critical_layers: currentDecisionPath.critical_layers,
        layer_activations: currentDecisionPath.layer_activations?.length || 0,
        step: currentStep
      });
    }
  }, [currentDecisionPath, currentStep]);
  // Generate an explanation for the current visualization state
  const generateExplanation = () => {
    const step = generationSteps[currentStep];
    if (!step || !step.decision_path) {
      return {
        title: "No Data Available",
        description: "Generate code to see the decision path visualization.",
        details: []
      };
    }

    const dp = step.decision_path;
    const criticalLayersList = dp.critical_layers ? dp.critical_layers.join(", ") : "Not available";
    const topAlternatives = dp.alternatives?.slice(0, 3).map(a =>
      `"${a.token}" (${(a.probability * 100).toFixed(1)}%)`
    ).join(", ") || "Not available";

    return {
      title: `Token Generation: "${step.token}"`,
      description: `The model is generating token "${step.token}" at step ${currentStep + 1} with ${((dp.probability || 0.5) * 100).toFixed(0)}% confidence.`,
      details: [
        {
          heading: "What You're Seeing",
          content: `The 3D visualization shows the decision path through the transformer's ${modelInfo.layers} layers. Each layer processes the input and contributes to the final token selection.`
        },
        {
          heading: "Critical Layers (Red)",
          content: `Layers ${criticalLayersList} are highlighted in red and pulsing. These are decision-critical layers with high activation strength in the later stages of the network. They make the strongest contributions to selecting "${step.token}".`
        },
        {
          heading: "Active Layers (Teal/Cyan)",
          content: `Teal/cyan layers are processing information but fall below the critical threshold. They extract features and patterns that feed into the critical layers above them. Early layers typically show as active rather than critical.`
        },
        {
          heading: "Feed-Forward Networks (Side Blocks)",
          content: `The blocks extending from the sides are FFN components. Purple blocks show active processing, while pink/red blocks indicate critical transformations. Their size reflects activation strength - larger blocks mean stronger neural activity.`
        },
        {
          heading: "Attention Heads (Top Blocks)",
          content: `The small blocks on top of each layer represent the ${modelInfo.heads} attention heads. Yellow/gold blocks are highly focused heads detecting key patterns, while gray blocks are less active. Critical layers often have more active heads.`
        },
        {
          heading: "Decision Factors",
          content: `The model considered these alternatives: ${topAlternatives}. The confidence level (${((dp.probability || 0.5) * 100).toFixed(0)}%) indicates the model's certainty that "${step.token}" is the correct choice given the code context.`
        },
        {
          heading: "Information Flow",
          content: `The red lines show the critical path - how information flows from the input through the most important layers to produce the final token. This is the model's "reasoning path" for this specific decision.`
        }
      ]
    };
  };

  const explanation = generateExplanation();

  if (!mounted) {
    return <div className="bg-gray-900 rounded-xl p-6 h-[900px]" />;
  }
  return (
    <div className="bg-gray-900 rounded-xl p-6">
      {/* Header */}
      <div className="flex items-center justify-between mb-4">
        <div>
          <h2 className="text-2xl font-bold flex items-center gap-2">
            <GitBranch className="w-6 h-6 text-yellow-400" />
            Enhanced Decision Path Visualization
          </h2>
          <p className="text-gray-400 mt-1">
            Full code generation with decision path analysis
          </p>
        </div>
        <div className="flex items-center gap-4">
          <div className={`flex items-center gap-2 px-3 py-1 rounded-full ${
            isConnected ? 'bg-green-900/30 text-green-400' : 'bg-yellow-900/30 text-yellow-400'
          }`}>
            <Activity className={`w-4 h-4 ${isConnected ? 'animate-pulse' : ''}`} />
            {isConnected ? 'Connected' : 'Disconnected'}
          </div>
        </div>
      </div>

      {/* Generation Controls */}
      <div className="bg-gray-800 rounded-lg p-4 mb-4">
        <div className="flex items-center gap-4 mb-4">
          <input
            type="text"
            value={prompt}
            onChange={(e) => setPrompt(e.target.value)}
            className="flex-1 px-3 py-2 bg-gray-900 text-white rounded-lg border border-gray-700 focus:border-blue-500 focus:outline-none font-mono text-sm"
            placeholder="Enter code to generate..."
          />
          <button
            onClick={startGeneration}
            disabled={!isConnected || isGenerating || modelLoading}
            className="px-6 py-2 bg-yellow-600 text-white rounded-lg hover:bg-yellow-700 transition-colors disabled:opacity-50 flex items-center gap-2"
          >
            {isGenerating ? (
              <>
                <Activity className="w-4 h-4 animate-spin" />
                Generating...
              </>
            ) : (
              <>
                <Code2 className="w-4 h-4" />
                Generate Code
              </>
            )}
          </button>
        </div>

        {/* Timeline Controls */}
        {generationSteps.length > 0 && (
          <div className="space-y-4">
            {/* Playback Controls */}
            <div className="flex items-center gap-2">
              <button
                onClick={() => setCurrentStep(0)}
                className="p-2 bg-gray-700 rounded hover:bg-gray-600 transition-colors"
              >
                <SkipBack className="w-4 h-4" />
              </button>
              <button
                onClick={() => setCurrentStep(Math.max(0, currentStep - 1))}
                className="p-2 bg-gray-700 rounded hover:bg-gray-600 transition-colors"
                disabled={currentStep === 0}
              >
                <ChevronLeft className="w-4 h-4" />
              </button>
              <button
                onClick={() => setIsPlaying(!isPlaying)}
                className="p-2 bg-blue-600 rounded hover:bg-blue-700 transition-colors"
              >
                {isPlaying ? <Pause className="w-4 h-4" /> : <Play className="w-4 h-4" />}
              </button>
              <button
                onClick={() => setCurrentStep(Math.min(generationSteps.length - 1, currentStep + 1))}
                className="p-2 bg-gray-700 rounded hover:bg-gray-600 transition-colors"
                disabled={currentStep >= generationSteps.length - 1}
              >
                <ChevronRight className="w-4 h-4" />
              </button>
              <button
                onClick={() => setCurrentStep(generationSteps.length - 1)}
                className="p-2 bg-gray-700 rounded hover:bg-gray-600 transition-colors"
              >
                <SkipForward className="w-4 h-4" />
              </button>
              <div className="flex-1 px-4">
                <input
                  type="range"
                  min={0}
                  max={Math.max(0, generationSteps.length - 1)}
                  value={currentStep}
                  onChange={(e) => setCurrentStep(parseInt(e.target.value))}
                  className="w-full"
                />
              </div>
              <span className="text-sm text-gray-400">
                Step {currentStep + 1} / {generationSteps.length} | Token: {generationSteps[currentStep]?.token || "..."}
              </span>
            </div>

            {/* Generated Code Display */}
            <div className="bg-gray-900 rounded-lg p-4">
              <div className="flex justify-between items-center mb-2">
                <h3 className="text-sm font-semibold text-gray-300">Generated Code</h3>
                <div className="text-xs text-gray-500">
                  Token: {generationSteps[currentStep]?.token || "..."}
                </div>
              </div>
              <pre className="font-mono text-sm text-white whitespace-pre-wrap">
                {(() => {
                  const step = generationSteps[currentStep];
                  if (step && step.cumulative_text) {
                    console.log('[UI Render] Displaying step', currentStep, 'cumulative_text:', step.cumulative_text);
                    return step.cumulative_text;
                  }
                  return prompt;
                })()}
                {isGenerating && <span className="animate-pulse">▊</span>}
              </pre>
            </div>
          </div>
        )}
      </div>
      {/* Main Content Area with Side Panel */}
      <div className="flex gap-4">
        {/* 3D Visualization */}
        <div className="flex-1 min-w-0 transition-all duration-500 ease-in-out">
          <div className="h-[700px] bg-black rounded-lg relative overflow-hidden">
            {/* Help Toggle Button */}
            <button
              onClick={() => {
                console.log('[DecisionPath3D] Toggle explanation:', !showExplanation);
                setShowExplanation(!showExplanation);
              }}
              className="absolute top-4 left-4 z-10 p-2 bg-blue-600/90 hover:bg-blue-700 text-white rounded-lg transition-colors flex items-center gap-2 backdrop-blur"
            >
              {showExplanation ? <X className="w-5 h-5" /> : <HelpCircle className="w-5 h-5" />}
              <span className="text-sm font-medium">
                {showExplanation ? 'Hide Info' : 'What am I seeing?'}
              </span>
            </button>

            {modelLoading ? (
              <div className="flex flex-col items-center justify-center h-full">
                <div className="text-white mb-4">
                  <Brain className="w-16 h-16 animate-pulse" />
                </div>
                <div className="text-xl text-white mb-2">Loading Model</div>
                <div className="text-sm text-gray-400 mb-4">{loadingMessage}</div>
                <div className="w-64 h-2 bg-gray-700 rounded-full overflow-hidden">
                  <div
                    className="h-full bg-gradient-to-r from-blue-500 to-purple-500 transition-all duration-500"
                    style={{ width: `${loadingProgress}%` }}
                  />
                </div>
                <div className="text-xs text-gray-500 mt-2">{loadingProgress}%</div>
              </div>
            ) : (
              <Canvas camera={{ position: [-40, 50, 40], fov: 50 }}>
                <DecisionPathScene decisionPath={currentDecisionPath} />
                <OrbitControls
                  enablePan={true}
                  enableZoom={true}
                  enableRotate={true}
                  target={[0, 35, 0]}
                />
              </Canvas>
            )}

            {/* Info Panels */}
            {currentDecisionPath && (
              <>
                {/* Legend */}
                <div className="absolute top-4 right-4 bg-gray-800/90 backdrop-blur rounded-lg p-3 text-xs">
                  <div className="font-semibold text-white mb-2">Decision Path</div>
                  <div className="space-y-1">
                    <div className="flex items-center gap-2">
                      <div className="w-3 h-3 bg-red-500 rounded"></div>
                      <span className="text-gray-300">Critical Layers</span>
                    </div>
                    <div className="flex items-center gap-2">
                      <div className="w-3 h-3 bg-teal-500 rounded"></div>
                      <span className="text-gray-300">Active Layers</span>
                    </div>
                    <div className="flex items-center gap-2">
                      <div className="w-3 h-3 bg-purple-500 rounded"></div>
                      <span className="text-gray-300">FFN Components</span>
                    </div>
                  </div>
                </div>

                {/* Current Token Info */}
                <div className="absolute bottom-4 left-4 bg-gray-800/90 backdrop-blur rounded-lg p-3 text-xs max-w-xs">
                  <div className="font-semibold text-white mb-2">Current Token</div>
                  <div className="space-y-1 text-gray-300">
                    <div>Token: <span className="text-yellow-400">{currentDecisionPath.token}</span></div>
                    <div>Confidence: <span className="text-green-400">{((currentDecisionPath.probability || 0.5) * 100).toFixed(0)}%</span></div>
                    <div>Critical Layers: <span className="text-red-400">{currentDecisionPath.critical_layers ? currentDecisionPath.critical_layers.join(", ") : "Not available"}</span></div>
                    <div className="mt-2">Alternatives:</div>
                    {currentDecisionPath.alternatives?.slice(0, 3).map((alt, i) => (
                      <div key={i} className="ml-2 text-xs">
                        {alt.token}: {(alt.probability * 100).toFixed(1)}%
                      </div>
                    ))}
                  </div>
                </div>
              </>
            )}
          </div>
        </div>
        {/* Explanation Side Panel */}
        <div className={`${showExplanation ? 'w-96' : 'w-0'} transition-all duration-500 ease-in-out overflow-hidden`}>
          <div className="w-96 h-[700px] bg-gray-900 rounded-lg border border-gray-700">
            {/* Panel Header */}
            <div className="bg-gray-800 px-4 py-3 border-b border-gray-700">
              <div className="flex items-center gap-2">
                <Info className="w-5 h-5 text-blue-400" />
                <h3 className="text-lg font-semibold text-white">Real-time Analysis</h3>
              </div>
            </div>

            {/* Panel Content */}
            <div className="px-4 py-4 overflow-y-auto h-[calc(700px-60px)]">
              {/* Current Token Info */}
              <div className="mb-4 p-3 bg-yellow-900/20 border border-yellow-800 rounded-lg">
                <h4 className="text-sm font-semibold text-yellow-400 mb-1">{explanation.title}</h4>
                <p className="text-xs text-gray-300">{explanation.description}</p>
              </div>

              {/* Dynamic Analysis Sections */}
              <div className="space-y-3">
                {explanation.details.slice(0, 3).map((section, idx) => (
                  <div key={idx} className="bg-gray-800 rounded-lg p-3">
                    <h5 className="font-medium text-sm text-white mb-1 flex items-center gap-1">
                      <Zap className="w-3 h-3 text-yellow-400" />
                      {section.heading}
                    </h5>
                    <p className="text-xs text-gray-300 leading-relaxed">{section.content}</p>
                  </div>
                ))}
              </div>

              {/* Quick Reference */}
              <div className="mt-4 p-3 bg-blue-900/20 border border-blue-800 rounded-lg">
                <h4 className="font-medium text-sm text-blue-400 mb-2">Visual Legend</h4>
                <div className="space-y-2 text-xs">
                  <div className="flex items-center gap-2">
                    <div className="w-3 h-3 bg-red-500 rounded"></div>
                    <span className="text-gray-300">Critical Layers</span>
                  </div>
                  <div className="flex items-center gap-2">
                    <div className="w-3 h-3 bg-teal-500 rounded"></div>
                    <span className="text-gray-300">Active Layers</span>
                  </div>
                  <div className="flex items-center gap-2">
                    <div className="w-3 h-3 bg-purple-500 rounded"></div>
                    <span className="text-gray-300">FFN Components</span>
                  </div>
                  <div className="flex items-center gap-2">
                    <div className="w-3 h-3 bg-yellow-500 rounded"></div>
                    <span className="text-gray-300">Top Attention</span>
                  </div>
                </div>
              </div>

              {/* Current Metrics */}
              {generationSteps[currentStep] && (
                <div className="mt-4 p-3 bg-gray-800 rounded-lg">
                  <h4 className="font-medium text-sm text-gray-300 mb-2">Current Metrics</h4>
                  <div className="space-y-1 text-xs">
                    <div className="flex justify-between">
                      <span className="text-gray-400">Step:</span>
                      <span className="text-white">{currentStep + 1} / {generationSteps.length}</span>
                    </div>
                    <div className="flex justify-between">
                      <span className="text-gray-400">Token:</span>
                      <span className="text-yellow-400 font-mono">"{generationSteps[currentStep].token}"</span>
                    </div>
                    <div className="flex justify-between">
                      <span className="text-gray-400">Confidence:</span>
                      <span className="text-green-400">{((generationSteps[currentStep].decision_path?.probability || 0.5) * 100).toFixed(0)}%</span>
                    </div>
                    <div className="flex justify-between">
                      <span className="text-gray-400">Critical Layers:</span>
                      <span className="text-red-400">{generationSteps[currentStep].decision_path?.critical_layers?.length || 0}</span>
                    </div>
                  </div>
                </div>
              )}

              {/* Tips */}
              <div className="mt-4 p-3 bg-gray-800 rounded-lg">
                <h4 className="font-medium text-sm text-gray-300 mb-2">💡 Tips</h4>
                <ul className="text-xs text-gray-400 space-y-1">
                  <li>• Watch how layers change per token</li>
                  <li>• Notice pattern consistency</li>
                  <li>• Red lines show decision flow</li>
                </ul>
              </div>
            </div>
          </div>
        </div>
      </div>
    </div>
  );
}
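
// ---------------------------------------------------------------------------
// Usage sketch (not part of the original component): since this file declares
// "use client", a page can import and render it directly. The page and import
// paths below are hypothetical.
//
// // app/decision-path/page.tsx
// import DecisionPath3DEnhanced from "@/components/DecisionPath3DEnhanced";
//
// export default function Page() {
//   return <DecisionPath3DEnhanced />;
// }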