-
Notifications
You must be signed in to change notification settings - Fork 0
/
exa.py
40 lines (31 loc) · 1.51 KB
/
exa.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_experimental.llms.ollama_functions import OllamaFunctions
from langchain_core.tools import tool
from langchain.prompts import ChatPromptTemplate
# Local model served by Ollama, wrapped for function/tool calling.
# temperature=0 keeps tool-argument generation deterministic.
model = OllamaFunctions(model='mistral', temperature=0)

# Conversation template: system instruction, prior chat history, the
# user's request, and the agent's intermediate tool-call scratchpad.
_message_spec = [
    ("system", "You are a helpful assistant, return in JSON format."),
    ("placeholder", "{chat_history}"),
    ("human", "{input}"),
    ("placeholder", "{agent_scratchpad}"),
]
prompt = ChatPromptTemplate.from_messages(_message_spec)
@tool
def multiply(first_int: int, second_int: int) -> int:
    """Multiply two integers together."""
    # NOTE: under @tool, the docstring doubles as the tool description
    # the LLM sees, and the parameter names define the tool's argument
    # schema — both are part of runtime behavior, not just documentation.
    return first_int * second_int
@tool
def add(first_int: int, second_int: int) -> int:
    """Add two integers."""
    # Docstring serves as the tool description surfaced to the LLM.
    return first_int + second_int
@tool
def exponentiate(base: int, exponent: int) -> int:
    """Exponentiate the base to the exponent power."""
    # Docstring serves as the tool description surfaced to the LLM.
    return base**exponent
# Register the tools and wire a tool-calling agent around the model.
tools = [multiply, add, exponentiate]
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# Run the agent once. BUG FIX: the original `+`-concatenation joined
# "...to 8." and "Remember..." with no separating space, sending the
# model "to 8.Remember"; implicit string concatenation with the space
# restored fixes the prompt text.
agent_executor.invoke({
    "input": (
        "Multiply 3 by 18, add the result to 30, and finally exponentiate "
        "that result to 8. Remember return in JSON format. In your "
        "response, include one tool and one tool only. For process require "
        "a follow up tool use, I will reply you the tool result in the "
        "next chat."
    )
})