Commit a24d621 (parent 0921a42)
Showing 1 changed file with 195 additions and 148 deletions.
@@ -1,202 +1,249 @@
// @ts-nocheck
import { LettaEnvironment } from "../src/environments";
import { LettaClient } from "../src/Client";
import { AgentState, LettaUsageStatistics, UserMessageOutput } from "../src/api";
import { LettaStreamingResponse, MessagesListResponseItem } from "../src/api/resources/agents";

const client = new LettaClient({
    environment: LettaEnvironment.LettaCloud,
    token: process.env.LETTA_API_KEY ?? "",
});

const globalAgentTracker = {
    ids: new Set<string>(),
};

async function createAndVerifyAgent(createOptions: Parameters<typeof client.agents.create>[0]): Promise<AgentState> {
    const agent = await client.agents.create(createOptions);

    expect(agent).toBeDefined();
    globalAgentTracker.ids.add(agent.id);

    const agents = await client.agents.list();
    expect(agents.some((a: AgentState) => a.id === agent.id)).toBe(true);

    return agent;
}
import {
    AgentState,
    SystemMessage,
    UserMessage,
    ToolCallMessage,
    ToolReturnMessage,
    AssistantMessage,
} from "../src/api";

describe("Letta Client", () => {
    it("should create multiple agent with shared memory", async () => {
        // Initialize client (to run locally, override using LettaEnvironment.SelfHosted)
        const client = new LettaClient({
            environment: LettaEnvironment.SelfHosted,
            token: process.env.LETTA_API_KEY ?? "",
        });

async function cleanupAllAgents() {
    for (const agentId of globalAgentTracker.ids) {
        try {
            await client.agents.delete(agentId);
        } catch (error) {
            console.error(`Failed to delete agent ${agentId}:`, error);
        }
    }
        // Create shared memory block
        let block = await client.blocks.create({
            value: "name: caren",
            label: "human",
        });

    globalAgentTracker.ids.clear();
}
        // Create agents and attach block
        const agent1 = await client.agents.create({
            model: "openai/gpt-4",
            embedding: "openai/text-embedding-ada-002",
        });
        client.agents.coreMemory.attachBlock(agent1.id, block.id!);

afterAll(async () => {
    await cleanupAllAgents();
});
        const agent2 = await client.agents.create({
            model: "openai/gpt-4",
            embedding: "openai/text-embedding-ada-002",
        });
        client.agents.coreMemory.attachBlock(agent2.id, block.id!);

describe.skip("Create agent", () => {
    it("should create an agent from default parameters", async () => {
        const agent = await createAndVerifyAgent({
            memoryBlocks: [
        await client.agents.messages.create(agent1.id, {
            messages: [
                {
                    value: "username: caren",
                    label: "human",
                    role: "user",
                    content: "Actually, my name is Sarah.",
                },
            ],
            llmConfig: {
                model: "gpt-4",
                modelEndpointType: "openai",
                modelEndpoint: "https://api.openai.com/v1",
                contextWindow: 8192,
            },
            embeddingConfig: {
                embeddingModel: "text-embedding-ada-002",
                embeddingEndpointType: "openai",
                embeddingEndpoint: "https://api.openai.com/v1",
                embeddingDim: 1536,
                embeddingChunkSize: 300,
            },
        });
    });

    it("should create an agent with handle", async () => {
        const agent = await createAndVerifyAgent({
            memoryBlocks: [
        // Validate memory has been updated for agent2
        block = await client.blocks.retrieve(block.id!);
        expect(block.value.toLowerCase()).toContain("sarah");

        block = await client.agents.coreMemory.retrieveBlock(agent2.id, "human");
        expect(block.value.toLowerCase()).toContain("sarah");

        // Ask agent to confirm memory update
        const response = await client.agents.messages.create(agent2.id, {
            messages: [
                {
                    value: "username: caren",
                    label: "human",
                    role: "user",
                    content: "What's my name?",
                },
            ],
            llm: "openai/gpt-4",
        });

        // Validate send message response contains new name
        expect(((response.messages[0] as AssistantMessage).content as string).toLowerCase()).toContain("sarah");

        // Delete agents
        await client.agents.delete(agent1.id);
        await client.agents.delete(agent2.id);
    }, 100000);

    it("create agent with custom tool", async () => {
        // Initialize client (to run locally, override using LettaEnvironment.SelfHosted)
        const client = new LettaClient({
            environment: LettaEnvironment.SelfHosted,
            token: process.env.LETTA_API_KEY ?? "",
        });

        const agent = await client.agents.create({
            model: "openai/gpt-4",
            embedding: "openai/text-embedding-ada-002",
        });
    });

    it("should create an agent from template", async () => {
        const agent = await createAndVerifyAgent({
            memoryBlocks: [
                {
                    value: "username: caren",
                    label: "human",
                },
            ],
            fromTemplate: "fern-testing:latest",
        const custom_tool_source_code = `
def custom_tool():
    """Return a greeting message."""
    return "Hello world!"
`.trim();

        const tool = await client.tools.create({
            sourceCode: custom_tool_source_code,
        });
    });
});

describe.skip("Delete agent", () => {
    it("should delete an agent successfully", async () => {
        const agent = await createAndVerifyAgent({
            memoryBlocks: [
        await client.agents.tools.attach(agent.id, tool.id!);

        const response = await client.agents.messages.create(agent.id, {
            messages: [
                {
                    value: "username: caren",
                    label: "human",
                    role: "user",
                    content: "Run custom tool and tell me what it returns",
                },
            ],
            llm: "openai/gpt-4",
            embedding: "openai/text-embedding-ada-002",
        });

        await client.agents.delete(agent.id);
        globalAgentTracker.ids.delete(agent.id);
        // Validate send message response contains expected return value
        expect(response.messages).toHaveLength(1);
        expect(((response.messages[0] as AssistantMessage).content as string).toLowerCase()).toContain("hello world");
    }, 100000);

        const agents = await client.agents.list();
        expect(agents.some((a: AgentState) => a.id === agent.id)).toBe(false);
    });
});
    it("should create single agent and send messages", async () => {
        // Initialize client (to run locally, override using LettaEnvironment.SelfHosted)
        const client = new LettaClient({
            environment: LettaEnvironment.LettaCloud,
            token: process.env.LETTA_API_KEY ?? "",
        });

describe.skip("Send message", () => {
    it("Should send a message", async () => {
        const agent = await createAndVerifyAgent({
        // Create agent with basic memory block
        const agent = await client.agents.create({
            memoryBlocks: [
                {
                    value: "username: caren",
                    value: "name: caren",
                    label: "human",
                },
            ],
            llm: "openai/gpt-4",
            model: "openai/gpt-4",
            embedding: "openai/text-embedding-ada-002",
        });
        const messageText = "Hello, how are you today?";

        // Validate agent persistence
        let agents = await client.agents.list();
        expect(agents.some((a: AgentState) => a.id === agent.id)).toBe(true);
        let messages = await client.agents.messages.list(agent.id);
        expect(messages.length).toBeGreaterThan(0);

        // Send greeting message
        let messageText = "Hello, how are you today?";
        const response = await client.agents.messages.create(agent.id, {
            messages: [
                {
                    role: "user",
                    text: messageText,
                    content: messageText,
                },
            ],
        });

        expect(response.messages).toHaveLength(3);
        // Validate send message response contains single assistant message
        expect(response.usage.stepCount).toEqual(1);
        expect(response.messages.map((message) => (message as { messageType?: string }).messageType)).toEqual([
            "reasoning_message",
            "tool_call_message",
            "tool_return_message",
        ]);
        expect(response.messages).toHaveLength(1);
        expect(response.messages[0]).toHaveProperty("messageType", "assistant_message");

        const messages = await client.agents.messages.list(agent.id);
        expect(messages.length).toBeGreaterThan(0);
        const lastUserMessage = [...messages]
            .reverse()
            .find(
                (message) => (message as MessagesListResponseItem).messageType === "user_message"
            ) as UserMessageOutput;
        expect(lastUserMessage).toBeDefined();
        expect(lastUserMessage?.message).toContain(messageText);
    }, 10000);

    it("Should send a streaming message", async () => {
        const agent = await createAndVerifyAgent({
            memoryBlocks: [
                {
                    value: "username: caren",
                    label: "human",
                },
            ],
            llm: "openai/gpt-4",
            embedding: "openai/text-embedding-ada-002",
        });
        const messageText = "Hello, how are you today?";
        const response = await client.agents.messages.stream(agent.id, {
        // Validate message history
        let cursor = messages[messages.length - 1].id;
        messages = await client.agents.messages.list(agent.id, { after: cursor });
        expect(messages).toHaveLength(3);

        // 1. User message that was just sent
        expect(messages[0]).toHaveProperty("messageType", "user_message");
        expect((messages[0] as UserMessage).content).toContain(messageText);

        // 2. Tool call for sending the assistant message back
        expect(messages[1]).toHaveProperty("messageType", "tool_call_message");
        expect((messages[1] as ToolCallMessage).toolCall.name).toEqual("send_message");

        // 3. Tool return message that contains success/failure of tool call
        expect(messages[2]).toHaveProperty("messageType", "tool_return_message");
        expect((messages[2] as ToolReturnMessage).status).toEqual("success");

        // Send message with streaming
        messageText = "Actually, my name is Sarah.";
        const streamResponse = await client.agents.messages.createStream(agent.id, {
            messages: [
                {
                    role: "user",
                    text: messageText,
                    content: messageText,
                },
            ],
        });

        const responses: LettaStreamingResponse[] = [];
        for await (const chunk of response) {
            responses.push(chunk);
        // Validate streaming response
        for await (const chunk of streamResponse) {
            switch (chunk.messageType) {
                // 1. Reasoning message with the agent's internal monologue
                case "reasoning_message":
                    expect(chunk.reasoning.toLowerCase()).toContain("sarah");
                    break;

                // 2. Tool call to update core memory content
                case "tool_call_message":
                    expect(chunk.toolCall.name).toEqual("core_memory_replace");
                    break;

                // 3. Tool return message that contains success/failure of tool call
                case "tool_return_message":
                    expect(chunk.status).toEqual("success");
                    break;

                // 4. Assistant message that gets sent back as a reply to the original user message
                case "assistant_message":
                    expect((chunk.content as string).toLowerCase()).toContain("sarah");
                    break;

                // 5. Usage statistics message for the interaction capturing token and step count
                case "usage_statistics":
                    expect(chunk.stepCount).toEqual(2);
                    break;

                default:
                    throw new Error(`Unexpected message type: ${chunk.messageType}`);
            }
        }

        expect(responses).toHaveLength(4);
        expect((responses.pop() as LettaUsageStatistics).stepCount).toEqual(1);
        expect(responses.map((message) => message.messageType)).toEqual([
            "reasoning_message",
            "tool_call_message",
            "tool_return_message",
        ]);
        // Validate message history
        cursor = messages[messages.length - 1].id;
        messages = await client.agents.messages.list(agent.id, { after: cursor });
        expect(messages).toHaveLength(7);

        const messages = await client.agents.messages.list(agent.id);
        expect(messages.length).toBeGreaterThan(0);
        const lastUserMessage = [...messages]
            .reverse()
            .find(
                (message) => (message as MessagesListResponseItem).messageType === "user_message"
            ) as UserMessageOutput;
        expect(lastUserMessage).toBeDefined();
        expect(lastUserMessage?.message).toContain(messageText);
    }, 10000);
});
        // 1. User message that was just sent
        expect(messages[0]).toHaveProperty("messageType", "user_message");
        expect((messages[0] as UserMessage).content).toContain(messageText);

        // 2. Tool call to update core memory content and send system message with update
        expect(messages[1]).toHaveProperty("messageType", "tool_call_message");
        expect((messages[1] as ToolCallMessage).toolCall.name).toEqual("core_memory_replace");

        // 3. System message with core memory update
        expect(messages[2]).toHaveProperty("messageType", "system_message");
        expect(((messages[2] as SystemMessage).content as string).toLowerCase()).toContain("name: sarah");

        // 4. Tool return message that contains success/failure of tool call
        expect(messages[3]).toHaveProperty("messageType", "tool_return_message");
        expect((messages[3] as ToolReturnMessage).status).toEqual("success");

        // 5. Heartbeat user message that prompts the agent to continue after the memory update
        expect(messages[4]).toHaveProperty("messageType", "user_message");
        expect((messages[4] as UserMessage).content).toContain("heartbeat");

        // 6. Tool call for sending the assistant message back
        expect(messages[5]).toHaveProperty("messageType", "tool_call_message");
        expect((messages[5] as ToolCallMessage).toolCall.name).toEqual("send_message");

        // 7. Tool return message that contains success/failure of tool call
        expect(messages[6]).toHaveProperty("messageType", "tool_return_message");
        expect((messages[6] as ToolReturnMessage).status).toEqual("success");

        // Delete agent
        await client.agents.delete(agent.id);
    }, 100000);
});