Spaces: Runtime error

```python
# Import the required libraries
import streamlit as st
from phi.assistant import Assistant
from phi.tools.arxiv_toolkit import ArxivToolkit
from huggingface_hub import InferenceClient
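# Packages this Space needs in requirements.txt (inferred from the imports;
# phidata's ArxivToolkit additionally requires the `arxiv` package at runtime):
#   streamlit, phidata, huggingface_hub, arxiv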
# Define a wrapper for the Hugging Face LLM
class HuggingFaceLLM:
    def __init__(self, client):
        self.client = client

    def chat_completion(self, messages, max_tokens=512, stream=False, temperature=0.7, top_p=0.95):
        # Delegate to InferenceClient.chat_completion, which accepts
        # chat-formatted messages. The earlier InferenceClient.post(payload=...)
        # call fails: post() takes a `json=` argument and does not apply the
        # model's chat template, which is one source of the runtime error.
        return self.client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=stream,
            temperature=temperature,
            top_p=top_p,
        )
# Initialize the Hugging Face Inference Client
raw_client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
client = HuggingFaceLLM(client=raw_client)  # Wrap the client in the chat interface
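# Optional sanity check (illustrative; uncomment to verify the endpoint
# responds before wiring up the UI -- assumes the model is reachable via
# the hosted Inference API):
# print(client.chat_completion([{"role": "user", "content": "ping"}], max_tokens=8))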
# Set up the Streamlit app
st.set_page_config(page_title="Chat with Research Papers", layout="wide")
st.title("Chat with Research Papers")
st.caption("This app allows you to chat with arXiv research papers using the Zephyr model hosted on Hugging Face.")
# Sidebar configuration
st.sidebar.header("Settings")
temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.7, 0.1)
top_p = st.sidebar.slider("Top-p", 0.0, 1.0, 0.95, 0.05)
max_tokens = st.sidebar.slider("Max Tokens", 100, 1024, 512, 50)
# Initialize the Assistant with the Arxiv toolkit. phidata's Assistant expects
# an LLM object implementing its own interface, so the hand-rolled wrapper
# above is not sufficient by itself. The import path below is unverified and
# may differ across phidata versions -- check your installation (older
# releases may not ship a Hugging Face LLM at all).
from phi.llm.huggingface import HuggingFaceLLM as BaseHuggingFaceLLM  # verify this path

llm_instance = BaseHuggingFaceLLM(model="HuggingFaceH4/zephyr-7b-beta")

# Initialize the Assistant
assistant = Assistant(llm=llm_instance, tools=[ArxivToolkit()])
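# Note: Streamlit reruns this script on every interaction. Wrapping the
# client/assistant construction in a function decorated with
# @st.cache_resource avoids re-creating them on each rerun.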
# Get the search query from the user
query = st.text_input("Enter your research query or topic:")

if st.button("Search") and query:
    with st.spinner("Searching arXiv and generating a response..."):
        # Prepare messages for the chat
        messages = [
            {"role": "system", "content": "You are a helpful assistant for arXiv research."},
            {"role": "user", "content": query}
        ]
        # Generate the response from Zephyr, streaming token deltas
        response = ""
        for chunk in client.chat_completion(messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p):
            # Each streamed chunk carries an incremental delta; content can be
            # None on the final chunk, so fall back to an empty string.
            token = chunk.choices[0].delta.content or ""
            response += token
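        # (Streamlit >= 1.31 also offers st.write_stream to render tokens
        # incrementally instead of accumulating them first.)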
        # Ask the Assistant to search arXiv. With stream=False, phidata's
        # Assistant.run returns its answer as a plain string, not a list of
        # structured paper dicts.
        arxiv_results = assistant.run(f"Search arxiv for '{query}'", stream=False)

    # Display the response
    st.subheader("Model Response")
    st.write(response)
    # Display the arXiv results (a text answer, not structured records)
    st.subheader("ArXiv Search Results")
    if arxiv_results:
        st.markdown(arxiv_results)
    else:
        st.write("No results found.")
```