UI enhancements, model tuning

This commit is contained in:
Steve Androulakis
2025-01-03 09:25:50 -08:00
parent 193dad0d77
commit dde076f3ed
6 changed files with 28 additions and 25 deletions

View File

@@ -19,6 +19,7 @@ export default function ChatWindow({ conversation, loading, onConfirm }) {
} }
const filtered = conversation.filter((msg) => { const filtered = conversation.filter((msg) => {
console.log(conversation[conversation.length - 1].actor)
const { actor, response } = msg; const { actor, response } = msg;
if (actor === "user") { if (actor === "user") {
@@ -61,6 +62,11 @@ export default function ChatWindow({ conversation, loading, onConfirm }) {
<LoadingIndicator /> <LoadingIndicator />
</div> </div>
)} )}
{conversation.length > 0 && conversation[conversation.length - 1].actor === "tool_result_to_llm" && (
<div className="flex justify-center">
<LoadingIndicator />
</div>
)}
</div> </div>
); );
} }

View File

@@ -4,8 +4,6 @@ import LoadingIndicator from "./LoadingIndicator";
export default function ConfirmInline({ data, confirmed, onConfirm }) { export default function ConfirmInline({ data, confirmed, onConfirm }) {
const { args, tool } = data || {}; const { args, tool } = data || {};
console.log("ConfirmInline rendered with confirmed:", confirmed);
if (confirmed) { if (confirmed) {
// Once confirmed, show "Running..." state in the same container // Once confirmed, show "Running..." state in the same container
return ( return (

View File

@@ -2,7 +2,7 @@ import React from "react";
export default function LoadingIndicator() { export default function LoadingIndicator() {
return ( return (
<div className="flex items-center justify-center space-x-2"> <div className="flex items-center justify-center space-x-2 pb-4">
<div className="w-2 h-2 rounded-full bg-blue-600 animate-ping"></div> <div className="w-2 h-2 rounded-full bg-blue-600 animate-ping"></div>
<div className="w-2 h-2 rounded-full bg-blue-600 animate-ping delay-100"></div> <div className="w-2 h-2 rounded-full bg-blue-600 animate-ping delay-100"></div>
<div className="w-2 h-2 rounded-full bg-blue-600 animate-ping delay-200"></div> <div className="w-2 h-2 rounded-full bg-blue-600 animate-ping delay-200"></div>

View File

@@ -60,7 +60,7 @@ export default function App() {
try { try {
await fetch("http://127.0.0.1:8000/end-chat", { method: "POST" }); await fetch("http://127.0.0.1:8000/end-chat", { method: "POST" });
// sleep for a bit to allow the server to process the end-chat request // sleep for a bit to allow the server to process the end-chat request
await new Promise((resolve) => setTimeout(resolve, 4000)); // todo make less dodgy await new Promise((resolve) => setTimeout(resolve, 1000)); // todo make less dodgy
await fetch( await fetch(
`http://127.0.0.1:8000/send-prompt?prompt=${encodeURIComponent("I'd like to travel to an event.")}`, `http://127.0.0.1:8000/send-prompt?prompt=${encodeURIComponent("I'd like to travel to an event.")}`,
{ method: "POST" } { method: "POST" }
@@ -71,6 +71,12 @@ export default function App() {
} }
}; };
const handleKeyPress = (e) => {
if (e.key === "Enter") {
handleSendMessage();
}
};
return ( return (
<div className="flex flex-col min-h-screen"> <div className="flex flex-col min-h-screen">
<NavBar title="Temporal AI Agent" /> <NavBar title="Temporal AI Agent" />
@@ -91,6 +97,7 @@ export default function App() {
placeholder="Type your message..." placeholder="Type your message..."
value={userInput} value={userInput}
onChange={(e) => setUserInput(e.target.value)} onChange={(e) => setUserInput(e.target.value)}
onKeyPress={handleKeyPress}
/> />
<button <button
onClick={handleSendMessage} onClick={handleSendMessage}

View File

@@ -73,8 +73,10 @@ def generate_genai_prompt(
prompt_lines.append( prompt_lines.append(
"6. Use 'next': 'question' if you lack any required arguments based on the history and prompt. " "6. Use 'next': 'question' if you lack any required arguments based on the history and prompt. "
"Use 'next': 'confirm' only if NO arguments are missing. " "Use 'next': 'confirm' only if NO arguments are missing. "
"Use 'next': 'done' and tool: 'null' if you have successfully completed all tools." "Use 'next': 'done' and tool: 'null' if you have successfully completed all tools "
"Don't offer any additional tools beyond the tools listed. " "Don't offer any additional tools beyond the tools listed. "
"DON'T editorialize or add extra information to a 'done' response. "
"Example done response: {'response': 'All tools completed.', 'next': 'done', 'tool': 'null', 'args': {}} "
) )
prompt_lines.append( prompt_lines.append(
"7. Keep 'response' user-friendly with no extra commentary. Stick to valid JSON syntax. " "7. Keep 'response' user-friendly with no extra commentary. Stick to valid JSON syntax. "

View File

@@ -1,6 +1,6 @@
from collections import deque from collections import deque
from datetime import timedelta from datetime import timedelta
from typing import Dict, Any, Union, List, Optional, Tuple, Deque from typing import Dict, Any, Union, List, Optional, Deque
from temporalio.common import RetryPolicy from temporalio.common import RetryPolicy
from temporalio import workflow from temporalio import workflow
@@ -25,6 +25,7 @@ class ToolWorkflow:
self.tool_data = None self.tool_data = None
self.max_turns_before_continue: int = 250 self.max_turns_before_continue: int = 250
self.confirm = False self.confirm = False
self.tool_results: List[Dict[str, Any]] = []
@workflow.run @workflow.run
async def run(self, combined_input: CombinedInput) -> str: async def run(self, combined_input: CombinedInput) -> str:
@@ -50,22 +51,9 @@ class ToolWorkflow:
# 1) If chat_ended was signaled, handle end and return # 1) If chat_ended was signaled, handle end and return
if self.chat_ended: if self.chat_ended:
# possibly do a summary if multiple turns
if len(self.conversation_history["messages"]) > 1: workflow.logger.info("Chat ended.")
summary_context, summary_prompt = self.prompt_summary_with_history() return f"{self.conversation_history}"
summary_input = ToolPromptInput(
prompt=summary_prompt, context_instructions=summary_context
)
self.conversation_summary = await workflow.start_activity_method(
ToolActivities.prompt_llm,
summary_input,
schedule_to_close_timeout=timedelta(seconds=20),
)
workflow.logger.info(
"Chat ended. Conversation summary:\n"
+ f"{self.conversation_summary}"
)
return f"{self.conversation_summary}"
# 2) If we received a confirm signal: # 2) If we received a confirm signal:
if self.confirm and waiting_for_confirm and current_tool: if self.confirm and waiting_for_confirm and current_tool:
@@ -86,13 +74,15 @@ class ToolWorkflow:
schedule_to_close_timeout=timedelta(seconds=20), schedule_to_close_timeout=timedelta(seconds=20),
) )
dynamic_result["tool"] = current_tool dynamic_result["tool"] = current_tool
self.add_message(f"tool_result", dynamic_result) self.add_message(
"tool_result", {"tool": current_tool, "result": dynamic_result}
)
# Enqueue a follow-up prompt for the LLM # Enqueue a follow-up prompt for the LLM
self.prompt_queue.append( self.prompt_queue.append(
f"### The '{current_tool}' tool completed successfully with {dynamic_result}. " f"### The '{current_tool}' tool completed successfully with {dynamic_result}. "
"INSTRUCTIONS: Use this tool result, and the conversation history to figure out next steps. " "INSTRUCTIONS: Use this tool result, and the conversation history to figure out next steps. "
"IMPORTANT: If all listed tools have run, you are up to the final step. Mark 'next':'done' and respond with your final confirmation." "IMPORTANT: If all listed tools have run, you are up to the final step. Mark 'next':'done' and respond with 'All tools run' or similar."
) )
# Loop around again # Loop around again
continue continue