Skip to content

Commit 2e7217c

Browse files
committed
Polishing logging conventions
1 parent d6d243c commit 2e7217c

File tree

3 files changed

+31
-25
lines changed

3 files changed

+31
-25
lines changed

src/parlant/core/engines/alpha/guideline_proposer.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -222,14 +222,19 @@ async def _process_guideline_batch(
222222
with self._logger.operation(
223223
f"[GuidelineProposer] Guideline evaluation batch ({len(guidelines_dict)} guidelines)"
224224
):
225-
propositions_generation_response = await self._schematic_generator.generate(
225+
self._logger.debug(f"[GuidelineProposer][Prompt] {prompt}")
226+
227+
inference = await self._schematic_generator.generate(
226228
prompt=prompt,
227229
hints={"temperature": 0.3},
228230
)
229231

232+
log_message = json.dumps(inference.content.model_dump(mode="json"), indent=2)
233+
self._logger.debug(f"[GuidelineProposer][Completion] {log_message}")
234+
230235
propositions = []
231236

232-
for proposition in propositions_generation_response.content.checks:
237+
for proposition in inference.content.checks:
233238
guideline = guidelines_dict[int(proposition.guideline_number)]
234239

235240
self._logger.debug(
@@ -258,7 +263,7 @@ async def _process_guideline_batch(
258263
)
259264
)
260265

261-
return propositions_generation_response.info, propositions
266+
return inference.info, propositions
262267

263268
async def shots(self) -> Sequence[GuidelinePropositionShot]:
264269
return await shot_collection.list()

src/parlant/core/engines/alpha/message_event_generator.py

Lines changed: 11 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -138,8 +138,7 @@ async def generate_events(
138138
return []
139139

140140
self._logger.debug(
141-
<<<<<<< HEAD
142-
f"""Guidelines applied: {
141+
f"""[MessageEventGenerator] Guidelines applied: {
143142
json.dumps(
144143
[
145144
{
@@ -156,14 +155,6 @@ async def generate_events(
156155
indent=2,
157156
)
158157
}"""
159-
=======
160-
f"""[MessageEventGenerator] Guidelines applied: {json.dumps([{
161-
"condition": p.guideline.content.condition,
162-
"action": p.guideline.content.action,
163-
"rationale": p.rationale,
164-
"score": p.score}
165-
for p in chain(ordinary_guideline_propositions, tool_enabled_guideline_propositions.keys())], indent=2)}"""
166-
>>>>>>> a3660ffc (Add [MessageEventGenerator] to logs in messge_event_generator)
167158
)
168159

169160
prompt = self._format_prompt(
@@ -178,8 +169,6 @@ async def generate_events(
178169
shots=await self.shots(),
179170
)
180171

181-
self._logger.debug(f"[MessageEventGenerator][Prompt]:\n{prompt}")
182-
183172
last_known_event_offset = interaction_history[-1].offset if interaction_history else -1
184173

185174
await event_emitter.emit_status_event(
@@ -199,6 +188,8 @@ async def generate_events(
199188

200189
last_generation_exception: Exception | None = None
201190

191+
self._logger.debug(f"[MessageEventGenerator][Prompt] \n{prompt}")
192+
202193
for generation_attempt in range(3):
203194
try:
204195
generation_info, response_message = await self._generate_response_message(
@@ -209,7 +200,9 @@ async def generate_events(
209200
)
210201

211202
if response_message is not None:
212-
self._logger.debug(f'[MessageEventGenerator][Result]: "{response_message}"')
203+
self._logger.debug(
204+
f'[MessageEventGenerator][GeneratedMessage] "{response_message}"'
205+
)
213206

214207
event = await event_emitter.emit_message_event(
215208
correlation_id=self._correlator.correlation_id,
@@ -530,6 +523,9 @@ async def _generate_response_message(
530523
hints={"temperature": temperature},
531524
)
532525

526+
log_message = json.dumps(message_event_response.content.model_dump(mode="json"), indent=2)
527+
self._logger.debug(f"[MessageEventGenerator][Completion] \n{log_message}")
528+
533529
if not message_event_response.content.produced_reply:
534530
self._logger.debug(
535531
f"[MessageEventGenerator] produced no reply: {message_event_response}"
@@ -538,12 +534,12 @@ async def _generate_response_message(
538534

539535
if message_event_response.content.evaluation_for_each_instruction:
540536
self._logger.debug(
541-
"[MessageEventGenerator][Evaluations]: "
537+
"[MessageEventGenerator][Evaluations] "
542538
f"{json.dumps([e.model_dump(mode='json') for e in message_event_response.content.evaluation_for_each_instruction], indent=2)}"
543539
)
544540

545541
self._logger.debug(
546-
"[MessageEventGenerator][Revisions]: "
542+
"[MessageEventGenerator][Revisions] "
547543
f"{json.dumps([r.model_dump(mode='json') for r in message_event_response.content.revisions], indent=2)}"
548544
)
549545

src/parlant/core/engines/alpha/tool_caller.py

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -548,16 +548,15 @@ async def _run_inference(
548548
self,
549549
prompt: str,
550550
) -> tuple[GenerationInfo, Sequence[ToolCallEvaluation]]:
551-
self._logger.debug(f"[ToolCaller][Prompt]: {prompt}")
551+
self._logger.debug(f"[ToolCaller][Prompt] \n{prompt}")
552552

553553
inference = await self._schematic_generator.generate(
554554
prompt=prompt,
555555
hints={"temperature": 0.0},
556556
)
557557

558558
log_message = json.dumps(inference.content.model_dump(mode="json"), indent=2)
559-
560-
self._logger.debug(f"[ToolCaller][RequestResults]: {log_message}")
559+
self._logger.debug(f"[ToolCaller][Completion] \n{log_message}")
561560

562561
return inference.info, inference.content.tool_calls_for_candidate_tool
563562

@@ -569,7 +568,12 @@ async def _run_tool(
569568
) -> ToolCallResult:
570569
try:
571570
self._logger.debug(
572-
f"[ToolCaller] Tool call executing: {tool_call.tool_id.to_string()}/{tool_call.id}"
571+
f"[ToolCaller][ToolCall] {tool_call.tool_id.to_string()}/{tool_call.id}, "
572+
+ (
573+
f"arguments=\n{json.dumps(tool_call.arguments, indent=2)}"
574+
if tool_call.arguments
575+
else ""
576+
)
573577
)
574578
service = await self._service_registry.read_tool_service(tool_id.service_name)
575579
result = await service.call_tool(
@@ -578,7 +582,7 @@ async def _run_tool(
578582
tool_call.arguments,
579583
)
580584
self._logger.debug(
581-
f"[ToolCaller][ToolResult]: {tool_call.tool_id.to_string()}/{tool_call.id}: {json.dumps(asdict(result), indent=2)}"
585+
f"[ToolCaller][ToolResult] {tool_call.tool_id.to_string()}/{tool_call.id}\n{json.dumps(asdict(result), indent=2)}"
582586
)
583587

584588
return ToolCallResult(
@@ -592,8 +596,9 @@ async def _run_tool(
592596
)
593597
except Exception as e:
594598
self._logger.error(
595-
f"[ToolCaller] Tool execution error (tool='{tool_call.tool_id.to_string()}', "
596-
f"arguments={tool_call.arguments}): " + "\n".join(traceback.format_exception(e)),
599+
f"[ToolCaller][ExecutionError] (tool='{tool_call.tool_id.to_string()}', "
600+
f"arguments=\n{json.dumps(tool_call.arguments, indent=2)}"
601+
+ "\n".join(traceback.format_exception(e)),
597602
)
598603

599604
return ToolCallResult(

0 commit comments

Comments (0)