from transformers import pipeline
import gradio as gr

# 1) Load your fine-tuned QA model from the Hub
MODEL_ID = "omarbayoumi2/bert-base-qa-squad-colab"

qa = pipeline(
    "question-answering",
    model=MODEL_ID,
    tokenizer=MODEL_ID,
)

# 2) Inference function
def answer(question, context):
    if not question or not context:
        return "Please provide both a question and a context."
    result = qa(question=question, context=context)
    # result is a dict: {'score': ..., 'start': ..., 'end': ..., 'answer': ...}
    return result["answer"]

# 3) Build Gradio interface
iface = gr.Interface(
    fn=answer,
    inputs=[
        gr.Textbox(label="Question", placeholder="Ask a question about the context..."),
        gr.Textbox(label="Context", lines=8, placeholder="Paste the context paragraph here..."),
    ],
    outputs=gr.Textbox(label="Answer"),
    title="BERT-base SQuAD QA Demo",
    description=(
        "Fine-tuned `bert-base-uncased` on SQuAD v1.1.\n"
        "Model: omarbayoumi2/bert-base-qa-squad-colab"
    ),
)

if __name__ == "__main__":
    iface.launch()
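
For a quick smoke test outside the Gradio UI, the same pipeline can be called directly. This is a minimal sketch: the question/context strings are illustrative and not taken from the Space, and the model is downloaded from the Hub on first run.

    from transformers import pipeline

    MODEL_ID = "omarbayoumi2/bert-base-qa-squad-colab"
    qa = pipeline("question-answering", model=MODEL_ID, tokenizer=MODEL_ID)

    # Illustrative inputs (not from the Space itself)
    context = "The Eiffel Tower was completed in 1889 and stands in Paris, France."
    result = qa(question="When was the Eiffel Tower completed?", context=context)

    # Expected shape: {'score': float, 'start': int, 'end': int, 'answer': str}
    # A SQuAD-tuned BERT should extract a span such as '1889' here.
    print(result["answer"], result["score"])

The 'start'/'end' fields are character offsets of the extracted span within the context, which is useful if you want to highlight the answer rather than just return it.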
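
If this is deployed as a Gradio Space, the model dependencies typically go in a requirements.txt alongside app.py. The entries below are an assumption about what this Space needs, not copied from it:

    transformers
    torch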