Update app.py

app.py CHANGED
@@ -138,6 +138,13 @@ body {
     .stSpinner > div > div {
         border-top-color: var(--primary) !important;
     }
+
+    .token-input {
+        background: var(--light);
+        padding: 1rem;
+        border-radius: 15px;
+        margin-bottom: 1rem;
+    }
     </style>
     """, unsafe_allow_html=True)
 
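Note on the hunk above: the new .token-input rules ride inside the <style> block app.py already injects. A minimal standalone sketch of that injection pattern, with a placeholder color since var(--light) is defined elsewhere in the file:

    import streamlit as st

    # Streamlit escapes raw HTML by default; unsafe_allow_html=True is what
    # lets the <style> tag reach the page instead of rendering as text.
    st.markdown("""
    <style>
        .token-input {
            background: #f8f9fa;  /* placeholder for var(--light) */
            padding: 1rem;
            border-radius: 15px;
        }
    </style>
    """, unsafe_allow_html=True)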
@@ -179,13 +186,26 @@ def process_pdf(pdf_file):
     return vector_store
 
 # Setup QA Chain
-def setup_qa_chain(vector_store):
-    # Use
-    repo_id = "
-
-
-
-
+def setup_qa_chain(vector_store, hf_token=None):
+    # Use free open-source model that doesn't require authentication
+    repo_id = "google/flan-t5-xxl"  # Free model that doesn't require token
+
+    try:
+        if hf_token:
+            llm = HuggingFaceHub(
+                repo_id=repo_id,
+                huggingfacehub_api_token=hf_token,
+                model_kwargs={"temperature": 0.5, "max_new_tokens": 500}
+            )
+        else:
+            # Try without token (works for some open models)
+            llm = HuggingFaceHub(
+                repo_id=repo_id,
+                model_kwargs={"temperature": 0.5, "max_new_tokens": 500}
+            )
+    except Exception as e:
+        st.error(f"Error loading model: {str(e)}")
+        return None
 
     memory = ConversationBufferMemory(
         memory_key="chat_history",
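The hunk above stops at the memory setup; the lines the diff skips (new lines 212-220) presumably finish the memory kwargs and assemble qa_chain. A sketch of the conventional LangChain wiring, consistent with how the chain is called later ({"question": prompt} in, "answer" out); every kwarg except memory_key is an assumption, not taken from the diff:

    from langchain.chains import ConversationalRetrievalChain
    from langchain.memory import ConversationBufferMemory

    def build_qa_chain(llm, vector_store):
        memory = ConversationBufferMemory(
            memory_key="chat_history",  # shown in the diff context above
            return_messages=True,       # assumed: store chat-style message objects
            output_key="answer",        # assumed: which response field memory records
        )
        # from_llm wires the LLM, the vector-store retriever, and the memory
        # into a chain that accepts {"question": ...} and returns {"answer": ...}.
        return ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=vector_store.as_retriever(),
            memory=memory,
        )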
@@ -201,6 +221,15 @@ def setup_qa_chain(vector_store):
 
     return qa_chain
 
+# Hugging Face Token Input
+st.markdown("""
+<div class="token-input">
+    <h3>Hugging Face Token (Optional)</h3>
+    <p>For better models like Mistral, enter your <a href="https://huggingface.co/settings/tokens" target="_blank">Hugging Face token</a></p>
+""", unsafe_allow_html=True)
+hf_token = st.text_input("", type="password", label_visibility="collapsed", placeholder="hf_xxxxxxxxxxxxxxxxxx")
+st.markdown("</div>", unsafe_allow_html=True)
+
 # File upload section
 st.markdown("""
 <div class="upload-area">
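A possible hardening, not part of this commit: check the pasted token before setup_qa_chain ever uses it, so a typo fails fast with a clear message. token_is_valid is a hypothetical helper; HfApi.whoami is the standard huggingface_hub call that rejects bad tokens:

    from huggingface_hub import HfApi

    def token_is_valid(hf_token: str) -> bool:
        # Hypothetical helper: empty input is allowed, since the app
        # falls back to tokenless access for open models.
        if not hf_token:
            return True
        try:
            HfApi().whoami(token=hf_token)  # raises if the Hub rejects the token
            return True
        except Exception:
            return False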
@@ -214,8 +243,9 @@ st.markdown("</div>", unsafe_allow_html=True)
 if uploaded_file:
     with st.spinner("Processing PDF..."):
         st.session_state.vector_store = process_pdf(uploaded_file)
-        st.session_state.qa_chain = setup_qa_chain(st.session_state.vector_store)
-
+        st.session_state.qa_chain = setup_qa_chain(st.session_state.vector_store, hf_token)
+        if st.session_state.qa_chain:
+            st.success("PDF processed successfully! You can now ask questions.")
 
 # Chat interface
 st.markdown("""
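The session keys touched above (vector_store, qa_chain) and the chat_history list used below must exist before the first rerun reaches this code; that initialization sits outside the diffed hunks. The usual Streamlit pattern, defaults assumed:

    import streamlit as st

    # Runs on every rerun, but only seeds keys the session has not set yet.
    for key, default in (("vector_store", None),
                         ("qa_chain", None),
                         ("chat_history", [])):
        if key not in st.session_state:
            st.session_state[key] = default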
@@ -233,6 +263,10 @@ if prompt := st.chat_input("Your question..."):
     if not st.session_state.vector_store:
         st.warning("Please upload a PDF first")
         st.stop()
+
+    if not st.session_state.qa_chain:
+        st.error("Model not initialized. Please check your Hugging Face token or try again.")
+        st.stop()
 
     # Add user message to chat history
     st.session_state.chat_history.append({"role": "user", "content": prompt})
@@ -242,8 +276,11 @@ if prompt := st.chat_input("Your question..."):
     # Get assistant response
     with st.chat_message("assistant"):
         with st.spinner("Thinking..."):
-            response = st.session_state.qa_chain({"question": prompt})
-            answer = response["answer"]
+            try:
+                response = st.session_state.qa_chain({"question": prompt})
+                answer = response["answer"]
+            except Exception as e:
+                answer = f"Error: {str(e)}"
             st.markdown(answer)
 
     # Add assistant response to chat history
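A natural follow-up the commit does not make: if the chain were built with return_source_documents=True, the same response dict would also carry the retrieved chunks, which the assistant message could cite:

    # Only valid if the chain is constructed with return_source_documents=True.
    response = st.session_state.qa_chain({"question": prompt})
    answer = response["answer"]
    for doc in response.get("source_documents", []):
        st.caption(doc.page_content[:200])  # preview of a supporting chunk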
@@ -254,6 +291,6 @@ st.markdown("</div>", unsafe_allow_html=True)
 # Footer
 st.markdown("""
 <div class="footer">
-    <p>EduQuery - Helping students learn smarter • Powered by
+    <p>EduQuery - Helping students learn smarter • Powered by Flan-T5 and LangChain</p>
 </div>
 """, unsafe_allow_html=True)