Agentic (@obayd/agentic) is a powerful, lightweight framework for building LLM agents in Node.js. It simplifies managing conversations, defining tools (function calling), handling streaming responses, and integrating with various Large Language Models.
- Fluent Tool Definition: Easily define tools (functions) the LLM can call using a clean, chainable API (`Tool.make()...`).
- Toolpacks: Group related tools and manage their availability.
- Streaming First: Built with streaming in mind. Process LLM responses and tool events chunk by chunk using async generators.
- Flexible LLM Integration: Works with any LLM API that provides streaming responses via a simple async generator function. Includes a `fetchResponseToStream` helper for common SSE formats.
- Conversation Management: Automatically handles message history, system prompts, and the flow between user messages, assistant responses, and tool calls/results.
- Dynamic System Prompts: Define system prompt content using strings, tools, toolpacks, or even functions for dynamic generation.
- Type-Safe: Includes TypeScript declaration files (`.d.ts`) for excellent autocompletion and type safety, even when used in JavaScript projects.
- Zero-Dependency: No external dependencies, making it easy to integrate with your existing project.
```bash
npm install @obayd/agentic
# or, to install directly from GitHub (not recommended):
npm install obaydmerz/agentic
```
```javascript
import { Conversation, Tool, fetchResponseToStream } from "@obayd/agentic";
// 1. Define your LLM callback (Example using fetch and a helper)
// Replace with your actual LLM API call logic
async function* llmCallback(messages, options) {
// You can use OpenRouter or OpenAI here
const response = await fetch("YOUR_LLM_API_ENDPOINT", {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer YOUR_API_KEY`,
},
body: JSON.stringify({
model: "YOUR_MODEL_NAME",
messages: messages, // Pass the formatted messages without modification
stream: true, // Ensure streaming is enabled
// Add any other required parameters for your API
}),
});
// Use the provided helper to process Server-Sent Events (SSE)
yield* fetchResponseToStream(response);
}
```
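As a concrete variant, here is the same callback pointed at OpenRouter's OpenAI-compatible chat completions endpoint. This is a sketch: the model identifier and the environment variable name are illustrative assumptions, so substitute your own.

```javascript
// Sketch: llmCallback wired to OpenRouter (OpenAI-compatible SSE streaming).
// The model id and OPENROUTER_API_KEY env var are illustrative assumptions.
async function* openRouterCallback(messages, options) {
  const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.OPENROUTER_API_KEY}`,
    },
    body: JSON.stringify({
      model: "openai/gpt-4o-mini", // any streaming-capable model id
      messages,
      stream: true,
    }),
  });
  yield* fetchResponseToStream(response);
}
```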
```javascript
// 2. Define Tools
const getCurrentWeather = Tool.make("get_current_weather")
.description("Gets the current weather for a specified location.")
.param("location", "The city and state, e.g., San Francisco, CA", {
required: true,
type: "string",
})
.param("unit", "Temperature unit", {
enum: ["celsius", "fahrenheit"],
required: false,
type: "string",
})
.action(async (params) => {
// Simulate API call
await new Promise((resolve) => setTimeout(resolve, 50)); // Simulate delay
const location = params.location.toLowerCase();
const unit = params.unit || "celsius";
let temperature;
if (location.includes("tokyo")) temperature = 15;
else if (location.includes("san francisco")) temperature = 12;
else temperature = 20; // Default
if (unit === "fahrenheit") {
temperature = (temperature * 9) / 5 + 32;
}
return JSON.stringify({
// Return data for the LLM
temperature: temperature,
unit: unit,
condition: "Sunny", // Keep it simple for the example
location: params.location,
});
});
```
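Tools without parameters work the same way; a minimal sketch (assuming `.param()` calls are optional in the chainable API) might look like this:

```javascript
// Minimal sketch of a parameter-less tool; assumes .param() is optional.
const getCurrentTime = Tool.make("get_current_time")
  .description("Gets the current date and time as an ISO 8601 string.")
  .action(async () => JSON.stringify({ now: new Date().toISOString() }));
```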
```javascript
// 3. Create a Conversation instance
const conversation = new Conversation(llmCallback);
// 4. Define conversation content (system prompt, tools)
conversation.content([
"You are a helpful assistant.",
getCurrentWeather, // Add the tool
]);
```
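Per the Dynamic System Prompts feature above, content entries can also be functions. The zero-argument signature below is an assumption for illustration; it sketches how a dynamic entry could be mixed in alongside strings and tools:

```javascript
// Sketch: a dynamic function entry in the content array. Assumes (per the
// feature list) that function entries are evaluated when the system prompt
// is assembled; the zero-argument signature is an illustrative assumption.
conversation.content([
  "You are a helpful assistant.",
  () => `Today's date is ${new Date().toDateString()}.`,
  getCurrentWeather,
]);
```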
```javascript
// 5. Send a message and process the response stream
async function runConversation() {
const userInput = "What's the weather like in Tokyo?";
console.log(`\nUSER: ${userInput}`);
console.log("\nASSISTANT:");
let fullAssistantResponse = "";
try {
const stream = conversation.send(userInput);
for await (const event of stream) {
switch (event.type) {
case "assistant":
process.stdout.write(event.content); // Stream text to console
fullAssistantResponse += event.content;
break;
case "tool.generating":
// Optionally show that the LLM is generating raw tool input
// console.log(`\n[Tool Generating Raw Chunk (${event.name})]: ${event.rawChunk}`);
break;
case "tool.calling":
process.stdout.write(
`\n[Calling Tool: ${event.name} with params: ${JSON.stringify(
event.params
)}]`
);
if (event.raw) {
process.stdout.write(` [Raw: ${event.raw}]`);
}
process.stdout.write("\n");
break;
case "tool": // Tool result is back
console.log(`\n[Tool Result (${event.name})]:`);
console.log(event.result); // Log the raw result object
// The conversation loop automatically sends this result back to the LLM
console.log("\nASSISTANT (after tool):"); // Indicate LLM will respond next
break;
case "error":
console.error(`\n[Conversation Error]: ${event.content}`);
break;
}
}
console.log("\n\n--- Conversation End ---");
// console.log("Full Assistant Response:", fullAssistantResponse);
// console.log("Final Message History:", conversation.messages);
} catch (error) {
console.error("\n[Error Running Conversation]:", error);
}
}
runConversation();
```
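Because the Conversation instance keeps the full message history, a follow-up turn is just another `send()` call on the same instance. A sketch of a second turn, reusing the event loop pattern above:

```javascript
// Sketch: a follow-up turn on the same conversation. The history from the
// first turn (including the tool call and its result) carries over
// automatically, so the model can resolve "And in San Francisco?".
async function askFollowUp() {
  for await (const event of conversation.send("And in San Francisco?")) {
    if (event.type === "assistant") process.stdout.write(event.content);
  }
}
```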
For detailed usage, guides, and API reference, please visit the Full Documentation GitBook.
Contributions are welcome! Please feel free to open an issue or submit a pull request.
- Fork the repository.
- Create your feature branch (`git checkout -b feature/AmazingFeature`).
- Commit your changes (`git commit -m 'Add some AmazingFeature'`).
- Push to the branch (`git push origin feature/AmazingFeature`).
- Open a Pull Request.
Distributed under the MIT License. See the LICENSE file for more information.

Author: Abderrahmene Merzoug (obaydmerz@gmail.com)