File size: 3,400 Bytes
4c289a7
 
 
 
 
 
 
 
adcbb74
4c289a7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
adcbb74
 
 
4c289a7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import os
import json
from dotenv import load_dotenv
from pydantic import BaseModel, ValidationError
from typing import List
from prompts import PROMPTS
import google.generativeai as genai
from fastapi import FastAPI, HTTPException
import uvicorn

# Setup: load .env and configure the Gemini client once at import time.
load_dotenv()
google_api_key = os.getenv("GOOGLE_API_KEY")
# Reuse the value read above instead of hitting the environment a second time.
genai.configure(api_key=google_api_key)
model = genai.GenerativeModel(os.getenv("LLM_MODEL", "gemini-pro"))

# Models
class TopicRequest(BaseModel):
    """Request body for POST /generate-questions."""
    # Subject to generate questions about; the endpoint rejects blank values.
    topic: str
    # Number of questions to produce; the endpoint enforces the 1-50 range.
    num_questions: int = 10

class GeneratedQuestionModel(BaseModel):
    """Expected JSON shape of the model's reply, used to validate raw output."""
    # Language the questions are written in (the pipeline requests "Odia").
    question_language: str
    # The generated questions themselves.
    question_list: List[str]

# Functions
def chat_with_model(prompt: str) -> str:
    """Send *prompt* to the configured Gemini model and return its text reply.

    Never raises: API failures and empty replies are reported in-band as a
    string beginning with "Error:", which downstream parsing checks for.
    """
    try:
        reply = model.generate_content(prompt)
        # `.text` itself can raise on blocked/empty candidates, so it stays
        # inside the try; `or` substitutes the sentinel for a falsy reply.
        return reply.text or "Error: Empty response"
    except Exception as exc:
        return f"Error: {exc}"

def clean_json_text(text: str) -> str:
    """Strip markdown code fences from a model reply and return the outermost
    ``{...}`` span when one is present.

    In-band "Error:" strings from chat_with_model pass through untouched.
    """
    if text.startswith("Error:"):
        return text
    if text.startswith("```"):
        lines = text.split('\n')
        if len(lines) > 2:
            # Multi-line fence: drop the opening and closing ``` lines.
            text = '\n'.join(lines[1:-1])
        else:
            # Single-line fence: peel backticks and the first "json" tag.
            text = text.strip("`").replace("json", "", 1).strip()
    start = text.find("{")
    end = text.rfind("}")
    if start == -1 or end == -1:
        return text
    return text[start:end + 1]

def validate_answer(raw_output: str):
    """Parse and validate the model's raw reply into a question dict.

    Returns the GeneratedQuestionModel fields on success; on any failure
    returns a dict with an "error" key plus empty defaults, so callers always
    receive the same shape.
    """
    cleaned = clean_json_text(raw_output)
    if cleaned.startswith("Error:"):
        # chat_with_model signalled a failure in-band; propagate it.
        return {"error": cleaned, "question_language": "Odia", "question_list": []}
    try:
        return GeneratedQuestionModel.model_validate_json(cleaned).model_dump()
    except ValidationError:
        # Fall back to plain json.loads + keyword construction in case
        # pydantic's strict JSON parsing rejected something loads tolerates.
        try:
            return GeneratedQuestionModel(**json.loads(cleaned)).model_dump()
        except (ValidationError, ValueError, TypeError):
            # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed. ValueError covers json.JSONDecodeError;
            # TypeError covers ** on a non-dict payload.
            return {"error": "Invalid JSON", "question_language": "Odia", "question_list": []}

def final_pipeline(user_input: str, num_questions: int = 10):
    """Build the Odia question prompt, run the model, and validate its output."""
    prompt = PROMPTS["questions_only"].format(
        language="Odia", topic=user_input, num_questions=num_questions
    )
    raw_reply = chat_with_model(prompt)
    return validate_answer(raw_reply)

# API: FastAPI application exposing health, root, and generation endpoints.
app = FastAPI()

@app.get("/health")
async def health_check():
    """Liveness check that also probes Gemini connectivity with a tiny prompt.

    Reports "healthy" when a test generation call succeeds, otherwise
    "unhealthy" with the error text; never raises.
    """
    try:
        # Call kept purely for its side effect: it raises when the model or
        # API key is misconfigured (previously assigned to an unused local).
        model.generate_content("Test")
        return {
            "status": "healthy",
            "model": os.getenv("LLM_MODEL", "gemini-pro"),
            "api_configured": bool(google_api_key)
        }
    except Exception as e:
        return {
            "status": "unhealthy",
            "error": str(e),
            "api_configured": bool(google_api_key)
        }
@app.get("/")
async def root():
    """Landing endpoint confirming the service is up."""
    payload = {
        "message": "Odia Question Generating API is running",
        "status": "healthy",
    }
    return payload


@app.post("/generate-questions")
async def generate_questions(request: TopicRequest):
    """Generate Odia questions for a topic.

    Raises 400 for a blank topic or an out-of-range count, 500 when the
    model call itself failed; otherwise wraps the pipeline result.
    """
    topic = request.topic.strip()
    if not topic:
        raise HTTPException(status_code=400, detail="Topic cannot be empty")
    if request.num_questions < 1 or request.num_questions > 50:
        raise HTTPException(status_code=400, detail="Questions must be between 1-50")

    result = final_pipeline(topic, request.num_questions)

    # Only in-band model failures ("Error:" strings) become 500s; note that
    # "Invalid JSON" results still return as success with an error field.
    if "Error:" in result.get("error", ""):
        raise HTTPException(status_code=500, detail=result["error"])

    return {"success": True, "data": result}

if __name__ == "__main__":
    # Honour the env-configured host/port. The previous code passed the bare
    # literal `host=0.0.0.0` — a SyntaxError — and ignored both variables.
    host = os.getenv("QUESTION_SERVICE_HOST", "0.0.0.0")
    port = int(os.getenv("QUESTION_SERVICE_PORT", "8000"))
    uvicorn.run(app, host=host, port=port)