# EvoRAG demo app (Hugging Face Space entry point)
# NOTE: An obsolete copy of this app (an earlier Blocks UI) used to sit here
# as one huge dead triple-quoted string literal. It duplicated the interface
# built below and was never executed, so it has been removed.
# Standard library
import csv
import os
import subprocess

# Third-party
import gradio as gr

# Local modules
from inference import get_evo_response, get_gpt_response
from logger import log_feedback
# Load Hall of Fame entries
def load_hall_of_fame():
    """Return up to the 10 most recent "Evo was correct" feedback rows, newest first.

    Reads feedback_log.csv from the working directory (if it exists) and keeps
    only rows whose 'evo_was_correct' column equals "yes" (case-insensitive).
    """
    if not os.path.exists("feedback_log.csv"):
        return []
    with open("feedback_log.csv", newline='', encoding='utf-8') as fh:
        winners = [
            row for row in csv.DictReader(fh)
            if row.get("evo_was_correct", "").lower() == "yes"
        ]
    # Keep only the last 10 entries, most recent first.
    return winners[-10:][::-1]
# Process question & get answers
def handle_query(question, option1, option2, context):
    """Run Evo and GPT on the same question and build their display strings.

    Returns (evo_markdown, gpt_markdown, summary). The summary is a compact
    "question | context | answer" line that the UI stashes in the feedback
    status box so a later feedback submission can reference Evo's answer.
    """
    evo_answer, evo_reasoning, evo_score, evo_context = get_evo_response(
        question, [option1, option2], context
    )
    gpt_answer = get_gpt_response(question, context)

    evo_display = (
        f"✅ Evo's Suggestion: **{evo_answer}**\n\n"
        f"Why? {evo_reasoning}\n\n"
        f"Context Used (truncated): {evo_context[:400]}..."
    )
    summary = f"{question} | {context} | {evo_answer}"
    return evo_display, f"{gpt_answer}", summary
# Feedback handler
def handle_feedback(feedback_text, question, option1, option2, context, evo_output):
    """Log the user's verdict on Evo's answer and return a status message.

    feedback_text is the Radio component's selection; it is None when the
    user clicks submit without choosing an option, so guard before using the
    `in` operator (the unguarded version raised TypeError on None).
    """
    if not feedback_text:
        return "⚠️ Please select feedback before submitting."
    is_helpful = "👍" in feedback_text
    log_feedback(question, option1, option2, context, evo_output, is_helpful)
    return "✅ Feedback logged. Evo will improve."
# Trigger retrain (placeholder command)
def retrain_evo():
    """Kick off a retraining run by invoking watchdog.py as a subprocess.

    Always returns a short human-readable status string and never raises —
    the result is displayed directly in a status textbox.
    """
    try:
        result = subprocess.run(
            ["python3", "watchdog.py"],
            capture_output=True,
            text=True,
            timeout=60,
        )
        if result.returncode != 0:
            # Surface stderr so a failed launch isn't reported as a success
            # with empty stdout.
            return (
                f"⚠️ Retraining exited with code {result.returncode}:\n"
                f"{result.stderr[:300]}"
            )
        return f"🔄 Retraining started:\n{result.stdout[:300]}"
    except Exception as e:  # missing interpreter, timeout, permission errors, ...
        return f"⚠️ Error starting retraining: {str(e)}"
# Render Hall of Fame
def render_hof():
    """Format Hall of Fame entries as a Markdown list, newest first.

    Uses dict.get with defaults so a malformed or legacy CSV row (missing the
    question/evo_output/context columns) degrades gracefully instead of
    raising KeyError and blanking the whole accordion.
    """
    entries = load_hall_of_fame()
    if not entries:
        return "No Hall of Fame entries yet. Submit feedback!"
    return "\n\n".join(
        f"🏅 **Q:** {e.get('question', '?')}\n"
        f"**Evo A:** {e.get('evo_output', '?')}\n"
        f"**Feedback:** ✅\n"
        f"**Context:** {e.get('context', '')[:200]}..."
        for e in entries
    )
# Header / Description — Markdown rendered at the top of the Gradio page.
description = """
# 🧠 EvoRAG — Real-Time Adaptive Reasoning AI
**What is Evo?**
EvoTransformer is a lightweight, evolving transformer (~28M params).
It adapts on the fly, learns from feedback, uses live web + user context to reason.
**Why Evo over GPT?**
✅ Evolves from human input
✅ Architecturally updatable
✅ Transparent and fine-tunable
✅ Efficient on modest hardware
**Hardware:** Google Colab CPU/GPU
**Max Tokens per input:** 128
**Benchmarks:** PIQA, HellaSwag, ARC
**Version:** Evo v2.2 — Memory + Retrieval + Feedback Learning
"""
# Build Interface
with gr.Blocks(title="EvoRAG — Evo vs GPT Reasoning") as demo:
    gr.Markdown(description)

    with gr.Row():
        question = gr.Textbox(label="🔍 Ask any question", placeholder="e.g., What's the best way to escape a house fire?")
    with gr.Row():
        option1 = gr.Textbox(label="Option A", placeholder="e.g., Run outside")
        option2 = gr.Textbox(label="Option B", placeholder="e.g., Hide under bed")
    context = gr.Textbox(label="📄 Optional Context", placeholder="Paste relevant info, article, user context", lines=3)
    submit_btn = gr.Button("🚀 Run Comparison")

    with gr.Row():
        evo_output = gr.Markdown(label="🧠 EvoRAG's Reasoned Answer")
        gpt_output = gr.Markdown(label="🤖 GPT-3.5's Suggestion")

    feedback = gr.Radio(
        ["👍 Evo was correct. Retrain from this.", "👎 Evo was wrong. Don't retrain."],
        label="Was Evo's answer better?",
        value=None
    )
    submit_feedback = gr.Button("📬 Submit Feedback")
    feedback_status = gr.Textbox(label="Feedback Status", interactive=False)

    with gr.Accordion("🏆 Evo Hall of Fame (Top Reasoning Entries)", open=False):
        hof_display = gr.Markdown(render_hof())
    with gr.Accordion("🔁 Live Evo Retraining (Manual Trigger)", open=False):
        retrain_btn = gr.Button("Retrain Evo from Feedback Now")
        retrain_status = gr.Textbox(label="Retrain Status", interactive=False)

    # Events
    # NOTE(review): handle_query's 3rd return value (a "question | context |
    # answer" summary) is deliberately routed into feedback_status so the
    # feedback click below can read it back as the evo_output argument.
    submit_btn.click(fn=handle_query, inputs=[question, option1, option2, context], outputs=[evo_output, gpt_output, feedback_status])
    # Pass handle_feedback directly; the previous lambda merely forwarded the
    # same six arguments unchanged.
    submit_feedback.click(
        fn=handle_feedback,
        inputs=[feedback, question, option1, option2, context, feedback_status],
        outputs=[feedback_status]
    )
    retrain_btn.click(fn=retrain_evo, inputs=[], outputs=[retrain_status])

demo.launch(server_name="0.0.0.0", server_port=7860, share=True)