# Coding-agent CLI: tool registry, system prompt, and the tool-calling loop.
import os, sys, json
import config
from llm_client import LLMClient
from tools import coder
from script import gadget

# Tool registry: each entry pairs a JSON schema with the callable implementing it.
_TOOL_PAIRS = (
    (coder.schema_read_file, coder.read_file),
    (coder.schema_write_file, coder.write_file),
    (coder.schema_edit_file, coder.edit_file),
    (coder.schema_run_bash, coder.run_bash),
    (coder.schema_search_code, coder.search_code),
    (coder.schema_git_operation, coder.git_operation),
)

tools_definition = [gadget.tools_mapping(schema, fn) for schema, fn in _TOOL_PAIRS]

# Schemas advertised to the LLM on every request.
TOOLS = [entry["schema"] for entry in tools_definition]
# Dispatch table: tool name -> implementing callable.
TOOL_HANDLERS = {entry["name"]: entry["handler"] for entry in tools_definition}
SYSTEM_PROMPT = """You are a coding agent that assists with software engineering tasks. You have access to the following tools:
|
|
|
|
1. read_file: Read file contents with line numbers
|
|
2. write_file: Write content to a file (overwrites existing)
|
|
3. edit_file: Replace text in a file
|
|
4. run_bash: Execute bash commands
|
|
5. search_code: Search for files (glob) or file contents (regex)
|
|
6. git_operation: Run git commands
|
|
|
|
Use tools by returning tool calls when needed. After receiving tool results, continue your reasoning. When you have the final answer, return it as plain text without tool calls."""


def agent_loop(user_query, llm_client):
    """Run the tool-calling loop until the model returns a plain-text answer.

    Args:
        user_query: The user's request, sent as the first user message.
        llm_client: Client exposing chat(messages, tools=...) whose response
            has .content and .tool_calls attributes.

    Returns:
        The model's final plain-text answer, or a fallback message if
        config.AGENT_MAX_ITERATIONS rounds elapse without one.
    """
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": user_query},
    ]

    for _ in range(config.AGENT_MAX_ITERATIONS):
        response = llm_client.chat(messages, tools=TOOLS)

        # No tool calls means the model has produced its final answer.
        if not response.tool_calls:
            return response.content

        # Echo the assistant turn (including its tool calls) into history so
        # the model can correlate subsequent tool results with its requests.
        messages.append({
            "role": "assistant",
            "content": response.content,
            "tool_calls": response.tool_calls,
        })

        for tool_call in response.tool_calls:
            tool_name = tool_call['function']['name']
            handler = TOOL_HANDLERS.get(tool_name)
            if not handler:
                result = f"Tool {tool_name} not found"
            else:
                try:
                    # Parse arguments inside the error boundary: malformed
                    # JSON from the model is reported back as a tool error
                    # instead of crashing the whole agent loop.
                    tool_args = json.loads(tool_call['function']['arguments'])
                    if tool_name == "search_code":
                        result = handler(
                            pattern=tool_args["pattern"],
                            search_type=tool_args["search_type"],
                            path=tool_args.get("path", ".")  # default: cwd
                        )
                    elif tool_name == "git_operation":
                        result = handler(args=tool_args["args"])
                    else:
                        result = handler(**tool_args)
                except Exception as e:
                    result = f"Error executing tool: {str(e)}"
            # Tool results always go back as strings, keyed to the call id.
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call['id'],
                "content": str(result)
            })

    return "Max iterations reached without final answer."


def main():
    """CLI entry point: read a query from argv or stdin, run the agent, print."""
    client = LLMClient(
        base_url=config.LLM_BASE_URL,
        model=config.LLM_MODEL,
        api_key=config.LLM_API_KEY,
    )

    # Prefer command-line arguments; otherwise read the query from stdin.
    if len(sys.argv) > 1:
        query = " ".join(sys.argv[1:])
    else:
        print("Enter your query (Ctrl+D to submit):")
        query = sys.stdin.read().strip()

    if not query:
        print("No query provided.")
        return

    print("Thinking...")
    answer = agent_loop(query, client)
    print("\nFinal Answer:")
    print(answer)


# Allow the module to be imported (e.g. for tests) without starting the CLI.
if __name__ == "__main__":
    main()