Maheen001 committed
Commit 50077b3 · verified · 1 Parent(s): 865c655

Update agent/agent_core.py

Files changed (1)
agent/agent_core.py +102 -169
agent/agent_core.py CHANGED
@@ -1,6 +1,6 @@
 """
 LifeAdmin AI - Core Agent Logic
-Autonomous planning, tool orchestration, and execution
 """

 import asyncio
@@ -16,9 +16,9 @@ from agent.memory import MemoryStore
 from utils.llm_utils import get_llm_response


-# -------------------------
-# ENUMS & MODELS
-# -------------------------

 class TaskStatus(Enum):
     PENDING = "pending"
@@ -29,9 +29,8 @@ class TaskStatus(Enum):

 @dataclass
 class AgentThought:
-    """Represents a thought/step in agent reasoning"""
     step: int
-    type: str  # planning | tool_call | reflection | answer
     content: str
     tool_name: Optional[str] = None
     tool_args: Optional[Dict] = None
@@ -45,7 +44,6 @@ class AgentThought:

 @dataclass
 class AgentTask:
-    """Represents an atomic MCP operation"""
     id: str
     description: str
     tool: str
@@ -55,9 +53,9 @@ class AgentTask:
     error: Optional[str] = None


-# -------------------------
 # MAIN AGENT CLASS
-# -------------------------

 class LifeAdminAgent:

@@ -66,58 +64,47 @@ class LifeAdminAgent:
         self.rag_engine = RAGEngine()
         self.memory = MemoryStore()
         self.thoughts: List[AgentThought] = []
-        self.current_context = {}

-    # -------------------------------------------
-    # RESET
-    # -------------------------------------------

-    def reset_context(self):
         self.thoughts = []
-        self.current_context = {}

-    # -------------------------------------------
-    # PLANNING PHASE
-    # -------------------------------------------

-    async def plan(self, user_request: str, available_files: List[str] = None) -> List[AgentTask]:

         self.thoughts.append(AgentThought(
             step=len(self.thoughts) + 1,
             type="planning",
-            content=f"Analyzing request: {user_request}"
         ))

-        # List tools available through MCP
         tools = await self.mcp_client.list_tools()
-        tool_descriptions = "\n".join([
-            f"- {tool['name']}: {tool.get('description', '')}" for tool in tools
-        ])

-        # RAG context
-        relevant_docs = []
         if user_request.strip():
-            relevant_docs = await self.rag_engine.search(user_request, k=3)

-        rag_context = "\n".join(
-            [doc["text"][:200] for doc in relevant_docs]
-        ) if relevant_docs else "No relevant documents"

-        # Memory
         memory_context = self.memory.get_relevant_memories(user_request)

-        # Build plan prompt
-        planning_prompt = f"""
-You are an autonomous assistant. Create a JSON task plan.

-USER REQUEST:
 {user_request}

-AVAILABLE FILES:
-{', '.join(available_files) if available_files else 'None'}

-AVAILABLE TOOLS:
-{tool_descriptions}

 RAG CONTEXT:
 {rag_context}
@@ -125,66 +112,54 @@ RAG CONTEXT:
 MEMORY:
 {memory_context}

-Return ONLY valid JSON list of tasks like:
 [
   {{
-    "id": "t1",
     "description": "Extract text",
     "tool": "ocr_extract_text",
-    "args": {{"file_path": "invoice.pdf"}}
   }}
 ]
 """

-        self.thoughts.append(AgentThought(
-            step=len(self.thoughts) + 1,
-            type="planning",
-            content="Generating plan with LLM..."
-        ))
-
-        try:
-            raw = await get_llm_response(planning_prompt, temperature=0.2)
-            txt = raw.strip()
-
-            # Remove markdown wrappers
-            if "```json" in txt:
-                txt = txt.split("```json")[1].split("```")[0].strip()
-            elif "```" in txt:
-                txt = txt.split("```")[1].split("```")[0].strip()

-            plan_json = json.loads(txt)
-
-            tasks = [
-                AgentTask(**task, status=TaskStatus.PENDING)
-                for task in plan_json
-            ]

             self.thoughts.append(AgentThought(
-                step=len(self.thoughts) + 1,
                 type="planning",
-                content=f"Plan created: {len(tasks)} tasks"
             ))

-            return tasks

-        except Exception as e:
-            self.thoughts.append(AgentThought(
-                step=len(self.thoughts) + 1,
-                type="planning",
-                content=f"Planning failed: {e}"
-            ))
-            return []

-    # -------------------------------------------
-    # TOOL EXECUTION PHASE
-    # -------------------------------------------

-    async def execute_task(self, task: AgentTask) -> AgentTask:

         self.thoughts.append(AgentThought(
-            step=len(self.thoughts) + 1,
             type="tool_call",
-            content=f"Executing task: {task.description}",
             tool_name=task.tool,
             tool_args=task.args
         ))
@@ -193,14 +168,13 @@ Return ONLY valid JSON list of tasks like:

         try:
             result = await self.mcp_client.call_tool(task.tool, task.args)
-
-            task.status = TaskStatus.COMPLETED
             task.result = result

             self.thoughts.append(AgentThought(
-                step=len(self.thoughts) + 1,
                 type="tool_call",
-                content=f"✓ Completed: {task.description}",
                 tool_name=task.tool,
                 tool_result=result
             ))
@@ -210,128 +184,87 @@ Return ONLY valid JSON list of tasks like:
             task.error = str(e)

             self.thoughts.append(AgentThought(
-                step=len(self.thoughts) + 1,
                 type="tool_call",
-                content=f"✗ Failed: {task.description} — {e}",
                 tool_name=task.tool
             ))

         return task

-    # -------------------------------------------
-    # REFLECTION PHASE
-    # -------------------------------------------

-    async def reflect(self, tasks: List[AgentTask], request: str) -> str:

         self.thoughts.append(AgentThought(
-            step=len(self.thoughts) + 1,
             type="reflection",
-            content="Analyzing final results..."
         ))

-        results_string = []
         for t in tasks:
             if t.status == TaskStatus.COMPLETED:
-                short = str(t.result)[:200]
-                results_string.append(f"✓ {t.description}: {short}")
             else:
-                results_string.append(f"✗ {t.description}: {t.error}")

-        reflection_prompt = f"""
-Summarize the final results of the following tasks:

 REQUEST:
-{request}

 RESULTS:
-{chr(10).join(results_string)}

-Give a clear, helpful answer:
-- What succeeded
-- What failed
-- What files/events/emails were produced
-- Next steps
 """

-        try:
-            answer = await get_llm_response(reflection_prompt, temperature=0.5)
-
-            self.thoughts.append(AgentThought(
-                step=len(self.thoughts) + 1,
-                type="answer",
-                content=answer
-            ))
-
-            # Write to memory
-            self.memory.add_memory(
-                f"Request: {request}\nAnswer: {answer}",
-                metadata={"type": "task_completion", "timestamp": time.time()}
-            )

-            return answer

-        except Exception as e:
-            errmsg = f"Reflection failed: {e}"

-            self.thoughts.append(AgentThought(
-                step=len(self.thoughts) + 1,
-                type="answer",
-                content=errmsg
-            ))

-            return errmsg

-    # -------------------------------------------
-    # STREAMING EXECUTION LOOP (FIXED)
-    # -------------------------------------------
-
-    async def execute(self, request: str, files: List[str] = None, stream_thoughts=False):
         """
-        If stream_thoughts=True → yields AgentThought objects
-        If stream_thoughts=False → returns (answer, thoughts)
         """

-        self.reset_context()
-
-        # --- PLANNING ---
-        tasks = await self.plan(request, files)
-        if stream_thoughts:
-            for th in self.thoughts:
-                yield th

         if not tasks:
-            # DO NOT return a value — async generator cannot return a value
-            thought = AgentThought(
-                step=len(self.thoughts) + 1,
-                type="answer",
-                content="Could not create plan. Try rephrasing."
-            )
-            self.thoughts.append(thought)
-            if stream_thoughts:
-                yield thought
-            return
-
-        # --- EXECUTION ---
         executed = []
         for t in tasks:
-            done = await self.execute_task(t)
-            executed.append(done)
-            if stream_thoughts:
-                yield self.thoughts[-1]
-
-        # --- REFLECTION ---
-        final_answer = await self.reflect(executed, request)
-        if stream_thoughts:
-            yield self.thoughts[-1]
-            return
-
-        # If NOT streaming: return normal output
         return final_answer, self.thoughts

-    # -------------------------------------------
-    # UTILITY
-    # -------------------------------------------

-    def get_thought_trace(self) -> List[Dict]:
         return [asdict(t) for t in self.thoughts]
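The heart of this change is visible in the removed execute() above: it mixed `yield` statements with value-bearing `return`s behind a `stream_thoughts` flag. Any `async def` that contains a `yield` compiles as an async generator, and CPython rejects `return <value>` inside one at compile time, which is exactly what the removed comment ("async generator cannot return a value") was guarding against. A minimal standalone sketch (not part of the commit) that reproduces the error:

import textwrap

# Any `async def` containing `yield` is an async generator; a value-bearing
# `return` inside it is a compile-time SyntaxError. Compiling the source from
# a string lets the demo itself run without crashing.
src = textwrap.dedent("""
    async def execute(self, request, stream_thoughts=False):
        yield "thought"
        return "answer", []   # illegal in an async generator
""")

try:
    compile(src, "<demo>", "exec")
except SyntaxError as err:
    print("SyntaxError:", err.msg)   # 'return' with value in async generator

The new version below avoids the problem entirely: it drops streaming and returns a plain tuple.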
 
 """
 LifeAdmin AI - Core Agent Logic
+Final stable version (no async generators – fully HF compatible)
 """

 import asyncio

 from utils.llm_utils import get_llm_response


+# =============================
+# DATA MODELS
+# =============================

 class TaskStatus(Enum):
     PENDING = "pending"

 @dataclass
 class AgentThought:
     step: int
+    type: str  # 'planning', 'tool_call', 'reflection', 'answer'
     content: str
     tool_name: Optional[str] = None
     tool_args: Optional[Dict] = None

 @dataclass
 class AgentTask:
     id: str
     description: str
     tool: str
     error: Optional[str] = None


+# =============================
 # MAIN AGENT CLASS
+# =============================

 class LifeAdminAgent:

         self.rag_engine = RAGEngine()
         self.memory = MemoryStore()
         self.thoughts: List[AgentThought] = []

+    # -----------------------
+    # UTIL
+    # -----------------------

+    def reset(self):
         self.thoughts = []

+    # -----------------------
+    # PLANNING
+    # -----------------------

+    async def plan(self, user_request: str, files: List[str] = None) -> List[AgentTask]:

         self.thoughts.append(AgentThought(
             step=len(self.thoughts) + 1,
             type="planning",
+            content=f"Analyzing: {user_request}"
         ))

         tools = await self.mcp_client.list_tools()
+        tool_desc = "\n".join([f"- {t['name']}: {t.get('description', '')}" for t in tools])

+        rag_docs = []
         if user_request.strip():
+            rag_docs = await self.rag_engine.search(user_request, k=3)

+        rag_context = "\n".join([d["text"][:200] for d in rag_docs]) if rag_docs else "None"

         memory_context = self.memory.get_relevant_memories(user_request)

+        prompt = f"""
+You are a task planner.

+REQUEST:
 {user_request}

+FILES: {files or []}

+TOOLS:
+{tool_desc}

 RAG CONTEXT:
 {rag_context}

 MEMORY:
 {memory_context}

+Return ONLY JSON list:
 [
   {{
+    "id": "task1",
     "description": "Extract text",
     "tool": "ocr_extract_text",
+    "args": {{"file_path": "x.pdf"}}
   }}
 ]
 """

+        response = await get_llm_response(prompt, temperature=0.2)
+        text = response.strip()

+        if "```json" in text:
+            text = text.split("```json")[1].split("```")[0].strip()
+        elif "```" in text:
+            text = text.split("```")[1].split("```")[0].strip()

+        try:
+            plan_data = json.loads(text)
+            tasks = [AgentTask(**t) for t in plan_data]
+        except Exception:
             self.thoughts.append(AgentThought(
+                step=len(self.thoughts) + 1,
                 type="planning",
+                content="Planning failed (invalid JSON)"
             ))
+            return []

+        self.thoughts.append(AgentThought(
+            step=len(self.thoughts) + 1,
+            type="planning",
+            content=f"Created {len(tasks)} tasks"
+        ))

+        return tasks
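The fence-stripping and `AgentTask(**t)` unpacking above are the entire contract between the planner prompt and the executor: the model must reply with a JSON list whose keys match the dataclass fields. A self-contained sketch of that round-trip (the dataclass is re-declared locally for the demo, and the `args`/`status`/`result` defaults are assumptions; the diff only shows some of the fields):

import json
from dataclasses import dataclass, field
from typing import Dict, Optional

# Local stand-in for agent.agent_core.AgentTask; defaults beyond the fields
# visible in this diff are assumptions.
@dataclass
class AgentTask:
    id: str
    description: str
    tool: str
    args: Dict = field(default_factory=dict)
    status: str = "pending"
    result: Optional[str] = None
    error: Optional[str] = None

# A typical fenced reply from the planner LLM.
llm_reply = """```json
[
  {"id": "task1", "description": "Extract text",
   "tool": "ocr_extract_text", "args": {"file_path": "x.pdf"}}
]
```"""

text = llm_reply.strip()
if "```json" in text:   # same fence-stripping as plan()
    text = text.split("```json")[1].split("```")[0].strip()

tasks = [AgentTask(**t) for t in json.loads(text)]
print(tasks[0].tool)    # -> ocr_extract_text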
 
 
 
 
 
 
+    # -----------------------
+    # EXECUTION
+    # -----------------------

+    async def execute_task(self, task: AgentTask):

         self.thoughts.append(AgentThought(
+            step=len(self.thoughts) + 1,
             type="tool_call",
+            content=f"Executing: {task.description}",
             tool_name=task.tool,
             tool_args=task.args
         ))

         try:
             result = await self.mcp_client.call_tool(task.tool, task.args)
             task.result = result
+            task.status = TaskStatus.COMPLETED

             self.thoughts.append(AgentThought(
+                step=len(self.thoughts) + 1,
                 type="tool_call",
+                content="✓ Completed",
                 tool_name=task.tool,
                 tool_result=result
             ))

             task.error = str(e)

             self.thoughts.append(AgentThought(
+                step=len(self.thoughts) + 1,
                 type="tool_call",
+                content=f"✗ Failed: {e}",
                 tool_name=task.tool
             ))

         return task

+    # -----------------------
+    # REFLECTION
+    # -----------------------

+    async def reflect(self, tasks: List[AgentTask], original: str) -> str:

         self.thoughts.append(AgentThought(
+            step=len(self.thoughts) + 1,
             type="reflection",
+            content="Summarizing results..."
         ))

+        results = []
         for t in tasks:
             if t.status == TaskStatus.COMPLETED:
+                results.append(f"✓ {t.description}: {str(t.result)[:200]}")
             else:
+                results.append(f"✗ {t.description}: {t.error}")

+        prompt = f"""
+Provide a helpful summary for the user.

 REQUEST:
+{original}

 RESULTS:
+{chr(10).join(results)}

+Write a clear, friendly answer.
 """

+        answer = await get_llm_response(prompt, temperature=0.4)

+        self.thoughts.append(AgentThought(
+            step=len(self.thoughts) + 1,
+            type="answer",
+            content=answer
+        ))

+        self.memory.add_memory(
+            content=f"Request: {original}\nAnswer: {answer}",
+            metadata={"timestamp": time.time()}
+        )

+        return answer

+    # -----------------------
+    # MAIN EXECUTION (FULLY FIXED)
+    # -----------------------

+    async def execute(self, user_request: str, files: List[str] = None):
         """
+        A plain coroutine that returns (final_answer, thoughts).
+        No yields → no async generator → no syntax errors.
         """

+        self.reset()

+        tasks = await self.plan(user_request, files)
         if not tasks:
+            # returning a value is allowed in a plain coroutine
+            return "Could not generate plan. Try rephrasing.", self.thoughts
+
         executed = []
         for t in tasks:
+            executed.append(await self.execute_task(t))
+
+        final_answer = await self.reflect(executed, user_request)
         return final_answer, self.thoughts

+    # -----------------------
+    # EXPORT THOUGHTS
+    # -----------------------

+    def get_thought_trace(self):
         return [asdict(t) for t in self.thoughts]
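Because `execute` is now an ordinary coroutine, callers simply await it and unpack the tuple. A minimal usage sketch (it assumes `LifeAdminAgent()` takes no constructor arguments, which the truncated `__init__` in this diff does not show; the request and file name are hypothetical):

import asyncio

from agent.agent_core import LifeAdminAgent

async def main():
    agent = LifeAdminAgent()   # assumed no-arg constructor

    # Plain coroutine: no streaming, no async generator.
    answer, thoughts = await agent.execute(
        "File my electricity bill",    # hypothetical request
        files=["bill_march.pdf"],      # hypothetical upload
    )
    print(answer)

    # The same trace is available as plain dicts for logging or a UI.
    for t in agent.get_thought_trace():
        print(t["step"], t["type"], t["content"][:80])

asyncio.run(main())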