#!/usr/bin/env python3
"""
AnySecret Chat Assistant - HuggingFace Spaces Gradio Interface
A specialized AI assistant for AnySecret configuration management
"""

import os
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import logging
import sys
from huggingface_hub import login
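
# Assumed setup (a sketch, not pinned by this file): the Space needs gradio, torch,
# transformers, peft, and huggingface_hub installed (e.g. via a requirements.txt), plus
# an HF_TOKEN secret for gated-model access. Run locally with `python app.py`; the
# server listens on 0.0.0.0:7860 as configured in the launch() call at the bottom.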

# Configure logging to show in HF Spaces
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    # A single stdout handler; logging to both stdout and stderr would duplicate every line
    handlers=[logging.StreamHandler(sys.stdout)]
)
logger = logging.getLogger(__name__)

# Model configuration
BASE_MODEL = "meta-llama/Llama-3.2-3B-Instruct"
PEFT_MODEL = "anysecret-io/anysecret-assistant"
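
# The assistant ships as a LoRA adapter (PEFT_MODEL) that load_model() applies on top
# of the Llama base model at startup, rather than as a fully merged checkpoint.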

# Global variables for model and tokenizer
model = None
tokenizer = None
device = None
model_error = None

def load_model():
    """Load the model and tokenizer with improved error handling"""
    global model, tokenizer, device, model_error
    
    try:
        logger.info("🚀 Starting model loading process...")
        
        # Check HuggingFace authentication
        hf_token = os.environ.get('HF_TOKEN')
        if hf_token:
            logger.info("🔑 HuggingFace token found, logging in...")
            login(token=hf_token)
        else:
            logger.warning("⚠️ No HF_TOKEN found in environment")
        
        # Determine device and log system info
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"🖥️ Using device: {device}")
        
        if torch.cuda.is_available():
            gpu_name = torch.cuda.get_device_name(0)
            total_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)
            logger.info(f"🎮 GPU: {gpu_name} ({total_memory:.1f}GB)")
        else:
            logger.info("💻 Running on CPU")
        
        # Load tokenizer first
        logger.info(f"📚 Loading tokenizer from {BASE_MODEL}...")
        try:
            tokenizer = AutoTokenizer.from_pretrained(
                BASE_MODEL, 
                use_fast=True,
                trust_remote_code=True
            )
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
            tokenizer.padding_side = "left"
            logger.info("✅ Tokenizer loaded successfully")
        except Exception as e:
            logger.error(f"❌ Failed to load tokenizer: {e}")
            model_error = f"Tokenizer loading failed: {str(e)}"
            return False
        
        # Load base model with CPU optimizations
        logger.info(f"🤖 Loading base model from {BASE_MODEL}...")
        try:
            base_model = AutoModelForCausalLM.from_pretrained(
                BASE_MODEL,
                torch_dtype=torch.float32,  # Use float32 for better CPU compatibility
                device_map=None,  # Don't use auto device mapping on CPU
                trust_remote_code=True,
                low_cpu_mem_usage=True,
                use_cache=False  # Disable KV cache to save memory
            )
            logger.info("✅ Base model loaded successfully")
        except Exception as e:
            logger.error(f"❌ Failed to load base model: {e}")
            model_error = f"Base model loading failed: {str(e)}"
            return False
        
        # Load LoRA adapter
        logger.info(f"🔗 Loading LoRA adapter from {PEFT_MODEL}...")
        try:
            model = PeftModel.from_pretrained(
                base_model, 
                PEFT_MODEL,
                torch_dtype=torch.float32
            )
            logger.info("✅ LoRA adapter loaded successfully")
        except Exception as e:
            logger.error(f"❌ Failed to load LoRA adapter: {e}")
            model_error = f"LoRA adapter loading failed: {str(e)}"
            return False
        
        # Move to device and set eval mode
        try:
            model = model.to(device)
            model.eval()
            logger.info("✅ Model moved to device and set to eval mode")
        except Exception as e:
            logger.error(f"❌ Failed to move model to device: {e}")
            model_error = f"Device placement failed: {str(e)}"
            return False
        
        logger.info("🎉 Model loaded successfully!")
        return True
        
    except Exception as e:
        logger.error(f"💥 Critical error during model loading: {e}")
        model_error = f"Critical loading error: {str(e)}"
        import traceback
        traceback.print_exc()
        return False

def generate_response(message, history, max_new_tokens=256, temperature=0.1, top_p=0.9):
    """Generate response from the model"""
    if model is None or tokenizer is None:
        if model_error:
            return f"❌ Model loading failed: {model_error}"
        return "⏳ Model is still loading. Please try again in a moment."
    
    try:
        logger.info(f"💬 Generating response for: {message[:50]}...")
        
        # Format the conversation with proper prompt structure
        conversation = ""
        
        # Add conversation history (limit to prevent memory issues)
        recent_history = history[-3:] if len(history) > 3 else history
        for user_msg, assistant_msg in recent_history:
            conversation += f"### Instruction:\n{user_msg}\n\n### Response:\n{assistant_msg}\n\n"
        
        # Add current message
        conversation += f"### Instruction:\n{message}\n\n### Response:\n"
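        # The assembled prompt then looks like, e.g.:
        #   ### Instruction:
        #   How do I configure AnySecret for AWS?
        #
        #   ### Response:
        # and the model is expected to continue the text after "### Response:".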
        
        # Tokenize with length limits
        inputs = tokenizer(
            conversation, 
            return_tensors="pt", 
            truncation=True, 
            max_length=512,  # Reduced for memory efficiency
            padding=True
        ).to(device)
        
        # Generate with conservative settings
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                do_sample=True if temperature > 0 else False,
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,
                repetition_penalty=1.1,
                no_repeat_ngram_size=3
            )
        
        # Decode response
        full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        
        # Extract just the new response
        if "### Response:\n" in full_response:
            response_parts = full_response.split("### Response:\n")
            response = response_parts[-1].strip()
        else:
            # Fallback: get text after the input
            input_text = tokenizer.decode(inputs['input_ids'][0], skip_special_tokens=True)
            response = full_response[len(input_text):].strip()
        
        # Clean up: drop anything generated after a spurious new "### Instruction:" marker
        response = response.split("### Instruction:")[0].strip()
        
        if not response:
            response = "I apologize, but I couldn't generate a proper response. Could you rephrase your question?"
        
        logger.info(f"✅ Generated response: {response[:50]}...")
        return response
        
    except Exception as e:
        logger.error(f"💥 Error generating response: {e}")
        return f"Sorry, I encountered an error while generating a response: {str(e)}"

def chat_interface(message, history):
    """Main chat interface function for Gradio"""
    response = generate_response(message, history, max_new_tokens=256)
    return response

# Custom CSS for AnySecret branding
css = """
.gradio-container {
    max-width: 1000px !important;
}

/* Increase chat window height */
.chatbot {
    min-height: 500px !important;
    max-height: 600px !important;
}

/* Style for GPU link */
.gpu-link {
    background: linear-gradient(135deg, #10b981 0%, #059669 100%);
    color: white;
    padding: 12px 20px;
    border-radius: 8px;
    text-decoration: none;
    font-weight: bold;
    display: inline-block;
    margin: 10px 5px;
    transition: transform 0.2s;
}

.gpu-link:hover {
    transform: translateY(-2px);
    color: white;
}

.header {
    text-align: center;
    padding: 20px 0;
    background: linear-gradient(135deg, #6366f1 0%, #818cf8 100%);
    color: white;
    margin-bottom: 20px;
    border-radius: 10px;
}

.header h1 {
    margin: 0;
    font-size: 2.5em;
    font-weight: bold;
}

.header p {
    margin: 10px 0 0 0;
    font-size: 1.1em;
    opacity: 0.9;
}

.error-container {
    background-color: #fee2e2;
    border: 1px solid #fecaca;
    border-radius: 8px;
    padding: 16px;
    margin: 16px 0;
    color: #dc2626;
}

.loading-container {
    background-color: #fef3c7;
    border: 1px solid #fde68a;
    border-radius: 8px;
    padding: 16px;
    margin: 16px 0;
    color: #d97706;
}

.footer {
    text-align: center;
    padding: 20px 0;
    color: #666;
    font-size: 0.9em;
}

.footer-section {
    margin: 15px 0;
}

.footer-section h4 {
    margin: 10px 0 5px 0;
    color: #333;
    font-size: 0.95em;
    font-weight: bold;
}

.model-links {
    display: flex;
    justify-content: center;
    flex-wrap: wrap;
    gap: 8px;
    margin: 8px 0;
}

.model-link {
    background-color: #f3f4f6;
    color: #374151;
    padding: 6px 12px;
    border-radius: 6px;
    text-decoration: none;
    font-size: 0.8em;
    transition: background-color 0.2s;
    display: inline-flex;
    align-items: center;
    gap: 4px;
}

.model-link:hover {
    background-color: #e5e7eb;
    color: #374151;
}

.model-link.disabled {
    opacity: 0.5;
    cursor: not-allowed;
}

.hf-icon::before {
    content: '🤗';
}

.replicate-icon::before {
    content: '🔄';
}
"""

# Start model loading
logger.info("🚀 Initializing AnySecret Chat Assistant...")
model_loaded = load_model()

# Create Gradio interface
with gr.Blocks(css=css, title="AnySecret Chat Assistant") as demo:
    # Header
    gr.HTML("""
    <div class="header">
        <h1>🔐 AnySecret Chat Assistant</h1>
        <p>Your AI assistant for configuration management across any cloud provider</p>
    </div>
    """)
    
    if model_loaded:
        # Main chat interface
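        # Note: the retry_btn / undo_btn / clear_btn / submit_btn / stop_btn keyword
        # arguments below assume an older Gradio (3.x/4.x) runtime; Gradio 5 removed
        # them from gr.ChatInterface, so the Space presumably pins an earlier version.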
        chatbot = gr.ChatInterface(
            fn=chat_interface,
            title="",
            description="Ask me anything about AnySecret configuration management, CLI commands, cloud integrations, or best practices!",
            examples=[
                "How do I configure AnySecret for AWS?",
                "What's the difference between secrets and parameters?", 
                "Show me a GitHub Actions workflow example",
                "How do I set up AnySecret with Kubernetes?",
                "What are best practices for production secrets?",
                "How do I migrate from AWS Parameter Store?"
            ],
            retry_btn="🔄 Retry",
            undo_btn="↩️ Undo",
            clear_btn="🗑️ Clear Chat",
            submit_btn="Send",
            stop_btn="⏹️ Stop"
        )
        
        # Status info and GPU link
        gr.HTML("""
        <div style="text-align: center; padding: 10px; background-color: #dcfce7; border-radius: 8px; margin: 10px 0;">
            <p style="color: #166534; margin: 0 0 10px 0;">
                ✅ Model loaded successfully! Running on CPU for optimal compatibility.
            </p>
            <a href="https://huggingface.co/anysecret-io/anysecret-assistant" target="_blank" class="gpu-link">
                🚀 Run the open AnySecret Assistant on your GPU
            </a>
        </div>
        """)
        
    else:
        # Error state with details
        error_html = f"""
        <div class="error-container">
            <h2>⚠️ Model Loading Failed</h2>
            <p><strong>Error:</strong> {model_error if model_error else 'Unknown error occurred'}</p>
            <p>This is likely due to:</p>
            <ul>
                <li>Memory constraints on the free tier</li>
                <li>Model access permissions</li>
                <li>Temporary HuggingFace issues</li>
            </ul>
            <p><strong>Solutions:</strong></p>
            <ul>
                <li>Try refreshing the page in a few minutes</li>
                <li>Check that both models exist and are accessible</li>
                <li>Contact support if the issue persists</li>
            </ul>
            <p style="font-size: 0.9em; margin-top: 15px;">
                <strong>Models:</strong><br>
                Base: {BASE_MODEL}<br>
                LoRA: {PEFT_MODEL}
            </p>
        </div>
        """
        gr.HTML(error_html)
    
    # Footer
    gr.HTML("""
    <div class="footer">
        <div class="footer-section">
            <p>
                Powered by <strong>AnySecret.io</strong> •
                <a href="https://anysecret.io" target="_blank">Website</a> •
                <a href="https://docs.anysecret.io" target="_blank">Documentation</a> •
                <a href="https://github.com/anysecret-io/anysecret-lib" target="_blank">GitHub</a>
            </p>
        </div>
        
        <div class="footer-section">
            <h4>🤖 LLM Assistant</h4>
            <div class="model-links">
                <a href="https://huggingface.co/anysecret-io/anysecret-assistant" target="_blank" class="model-link">
                    <span class="hf-icon"></span> 3B Model
                </a>
                <a href="#" class="model-link disabled" title="Coming Soon">
                    <span class="hf-icon"></span> 7B Model
                </a>
                <a href="#" class="model-link disabled" title="Coming Soon">
                    <span class="hf-icon"></span> 13B Model
                </a>
                <a href="https://huggingface.co/spaces/anysecret-io/anysecret-chat" target="_blank" class="model-link">
                    <span class="hf-icon"></span> Chat
                </a>
                <a href="#" class="model-link disabled" title="Coming Soon">
                    <span class="replicate-icon"></span> Replicate
                </a>
            </div>
        </div>
        
        <div class="footer-section">
            <p style="font-size: 0.8em; opacity: 0.7;">
                This assistant is trained on AnySecret documentation and best practices. 
                For production support, please visit our official channels.
            </p>
        </div>
    </div>
    """)

# Launch configuration
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        debug=True,  # Enable debug mode
        show_error=True,
        quiet=False
    )