Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DX-1510: Agents #51

Merged
merged 16 commits into from
Jan 23, 2025
Merged
Show file tree
Hide file tree
Changes from 13 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file modified bun.lockb
Binary file not shown.
7 changes: 0 additions & 7 deletions examples/nextjs/app/sleep/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,4 @@ export const { POST } = serve<string>(async (context) => {
console.log('step 2 input', result1, 'output', output)
return output
})

await context.sleep('sleep2', 2)

await context.run('step3', async () => {
const output = someWork(result2)
console.log('step 3 input', result2, 'output', output)
})
})
2 changes: 1 addition & 1 deletion examples/nextjs/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -31,4 +31,4 @@
"tailwindcss": "^3.4.1",
"typescript": "^5.6.2"
}
}
}
5 changes: 4 additions & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,10 @@
"typescript-eslint": "^8.18.0"
},
"dependencies": {
"@upstash/qstash": "^2.7.20"
"@ai-sdk/openai": "^1.0.15",
"@upstash/qstash": "^2.7.20",
"ai": "^4.0.30",
"zod": "^3.24.1"
},
"directories": {
"example": "examples"
Expand Down
138 changes: 138 additions & 0 deletions src/agents/adapters.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
/**
* this file contains adapters which convert tools and models
* to workflow tools and models.
*/
import { createOpenAI } from "@ai-sdk/openai";
import { HTTPMethods } from "@upstash/qstash";
import { WorkflowContext } from "../context";
import { tool } from "ai";
import { AISDKTool, LangchainTool } from "./types";

// parameter object type of the AI SDK `tool` helper; reused when
// constructing workflow-wrapped tools
export type ToolParams = Parameters<typeof tool>[0];

/**
 * header we pass to generateText to designate the agent name
 *
 * this allows us to access the agent name when naming the context.call step,
 * inside fetch implementation
 */
export const AGENT_NAME_HEADER = "upstash-agent-name";

/**
* creates an AI SDK openai client with a custom
* fetch implementation which uses context.call.
*
* @param context workflow context
* @returns ai sdk openai
*/
export const createWorkflowOpenAI = (context: WorkflowContext) => {
return createOpenAI({
compatibility: "strict",
fetch: async (input, init) => {
try {
// Prepare headers from init.headers
const headers = init?.headers
? Object.fromEntries(new Headers(init.headers).entries())
: {};

// Prepare body from init.body
const body = init?.body ? JSON.parse(init.body as string) : undefined;

// create step name
const agentName = headers[AGENT_NAME_HEADER] as string | undefined;
const stepName = agentName ? `Call Agent ${agentName}` : "Call Agent";

// Make network call
const responseInfo = await context.call(stepName, {
url: input.toString(),
method: init?.method as HTTPMethods,
headers,
body,
});

// Construct headers for the response
const responseHeaders = new Headers(
Object.entries(responseInfo.header).reduce(
(acc, [key, values]) => {
acc[key] = values.join(", ");
fahreddinozcan marked this conversation as resolved.
Show resolved Hide resolved
return acc;
},
{} as Record<string, string>
)
);

// Return the constructed response
return new Response(JSON.stringify(responseInfo.body), {
status: responseInfo.status,
headers: responseHeaders,
});
} catch (error) {
if (error instanceof Error && error.name === "WorkflowAbort") {
throw error;
} else {
console.error("Error in fetch implementation:", error);
throw error; // Rethrow error for further handling
}
}
},
});
};

/**
* converts LangChain tools to AI SDK tools and updates
* the execute method of these tools by wrapping it with
* context.run.
*
* @param context workflow context
* @param tools map of AI SDK or LangChain tools and their names
* @returns
*/
export const wrapTools = ({
context,
tools,
}: {
context: WorkflowContext;
tools: Record<string, AISDKTool | LangchainTool>;
}): Record<string, AISDKTool> => {
return Object.fromEntries(
Object.entries(tools).map((toolInfo) => {
const [toolName, tool] = toolInfo;
const aiSDKTool: AISDKTool = convertToAISDKTool(tool);

const execute = aiSDKTool.execute;
if (execute) {
fahreddinozcan marked this conversation as resolved.
Show resolved Hide resolved
const wrappedExecute = (...params: Parameters<typeof execute>) => {
return context.run(`Run tool ${toolName}`, () => execute(...params));
};
aiSDKTool.execute = wrappedExecute;
}

return [toolName, aiSDKTool];
})
);
};

/**
 * Converts tools to AI SDK tool if it already isn't
 *
 * @param tool LangChain or AI SDK Tool
 * @returns AI SDK Tool
 */
const convertToAISDKTool = (tool: AISDKTool | LangchainTool): AISDKTool => {
  // LangChain tools expose an `invoke` method; AI SDK tools do not
  if ("invoke" in tool) {
    return convertLangchainTool(tool as LangchainTool);
  }
  return tool as AISDKTool;
};

/**
 * converts a langchain tool to AI SDK tool
 *
 * @param langchainTool tool exposing description, schema and invoke
 * @returns AI SDK Tool
 */
const convertLangchainTool = (langchainTool: LangchainTool): AISDKTool => {
  const { description, schema } = langchainTool;
  return tool({
    description,
    parameters: schema,
    // delegate execution straight to the LangChain tool
    execute: async (...args: unknown[]) => langchainTool.invoke(...args),
  });
};
153 changes: 153 additions & 0 deletions src/agents/agent.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
import { z } from "zod";
import { AGENT_NAME_HEADER } from "./adapters";

import { generateText, tool, ToolExecutionError } from "ai";
import { AgentParameters, AISDKTool, Model } from "./types";

/**
* An Agent which utilizes the model and tools available to it
* to achieve a given task
*
* @param name Name of the agent
* @param background Background of the agent
* @param model LLM model to use
* @param tools tools available to the agent
* @param maxSteps number of times the agent can call the LLM at most. If
* the agent abruptly stops execution after calling tools, you may need
* to increase maxSteps
* @param temparature temparature used when calling the LLM
*/
export class Agent {
public readonly name: AgentParameters["name"];
public readonly tools: AgentParameters["tools"];
public readonly maxSteps: AgentParameters["maxSteps"];
public readonly background: AgentParameters["background"];
public readonly model: AgentParameters["model"];
public readonly temparature: AgentParameters["temparature"];

constructor({ tools, maxSteps, background, name, model, temparature = 0.1 }: AgentParameters) {
this.name = name;
this.tools = tools ?? {};
this.maxSteps = maxSteps;
this.background = background;
this.model = model;
this.temparature = temparature;
}

/**
* Trigger the agent by passing a prompt
*
* @param prompt task to assign to the agent
* @returns Response as `{ text: string }`
*/
public async call({ prompt }: { prompt: string }) {
try {
const result = await generateText({
model: this.model,
tools: this.tools,
maxSteps: this.maxSteps,
system: this.background,
prompt,
headers: {
[AGENT_NAME_HEADER]: this.name,
},
temperature: this.temparature,
});
return { text: result.text };
} catch (error) {
if (error instanceof ToolExecutionError) {
if (error.cause instanceof Error && error.cause.name === "WorkflowAbort") {
throw error.cause;
} else if (
error.cause instanceof ToolExecutionError &&
error.cause.cause instanceof Error &&
error.cause.cause.name === "WorkflowAbort"
) {
throw error.cause.cause;
} else {
throw error;
}
} else {
throw error;
}
}
}

/**
* Convert the agent to a tool which can be used by other agents.
*
* @returns the agent as a tool
*/
public asTool(): AISDKTool {
const toolDescriptions = Object.values(this.tools)
// @ts-expect-error description exists but can't be resolved
fahreddinozcan marked this conversation as resolved.
Show resolved Hide resolved
.map((tool) => tool.description)
.join("\n");
return tool({
parameters: z.object({ prompt: z.string() }),
execute: async ({ prompt }) => {
return await this.call({ prompt });
},
description:
`An AI Agent with the following background: ${this.background}` +
`Has access to the following tools: ${toolDescriptions}`,
});
}
}

type ManagerAgentParameters = {
fahreddinozcan marked this conversation as resolved.
Show resolved Hide resolved
/**
* agents which will coordinate to achieve a given task
*/
agents: Agent[];
/**
* model to use when coordinating the agents
*/
model: Model;
} & Pick<Partial<AgentParameters>, "name" | "background"> &
Pick<AgentParameters, "maxSteps">;

// default system prompt used by ManagerAgent when no background is passed
const MANAGER_AGENT_PROMPT = `You are an agent orchestrating other AI Agents.

These other agents have tools available to them.

Given a prompt, utilize these agents to address requests.

Don't always call all the agents provided to you at the same time. You can call one and use it's response to call another.

Avoid calling the same agent twice in one turn. Instead, prefer to call it once but provide everything
you need from that agent.
`;

export class ManagerAgent extends Agent {
  public agents: ManagerAgentParameters["agents"];

  /**
   * A manager agent which coordinates agents available to it to achieve a
   * given task
   *
   * @param name Name of the agent
   * @param background Background of the agent. If not passed, default will be used.
   * @param model LLM model to use
   * @param agents: List of agents available to the agent
   * @param maxSteps number of times the manager agent can call the LLM at most.
   * If the agent abruptly stops execution after calling other agents, you may
   * need to increase maxSteps
   */
  constructor({
    agents,
    background = MANAGER_AGENT_PROMPT,
    model,
    maxSteps,
    name = "manager llm",
  }: ManagerAgentParameters) {
    // expose every sub-agent to the LLM as a callable tool, keyed by its name
    const agentTools = Object.fromEntries(agents.map((agent) => [agent.name, agent.asTool()]));
    super({
      background,
      maxSteps,
      tools: agentTools,
      name,
      model,
    });
    this.agents = agents;
  }
}
Loading
Loading